repo_name: stringlengths 5-85
path: stringlengths 3-252
copies: stringlengths 1-5
size: stringlengths 4-6
content: stringlengths 922-999k
license: stringclasses (15 values)
semdoc/linux-block
arch/arm64/kernel/efi-stub.c
549
1394
/*
 * Copyright (C) 2013, 2014 Linaro Ltd;  <roy.franz@linaro.org>
 *
 * This file implements the EFI boot stub for the arm64 kernel.
 * Adapted from ARM version by Mark Salter <msalter@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/efi.h>
#include <asm/efi.h>
#include <asm/sections.h>

efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table,
					unsigned long *image_addr,
					unsigned long *image_size,
					unsigned long *reserve_addr,
					unsigned long *reserve_size,
					unsigned long dram_base,
					efi_loaded_image_t *image)
{
	efi_status_t status;
	unsigned long kernel_size, kernel_memsize = 0;

	/* Relocate the image, if required. */
	kernel_size = _edata - _text;
	if (*image_addr != (dram_base + TEXT_OFFSET)) {
		kernel_memsize = kernel_size + (_end - _edata);
		status = efi_low_alloc(sys_table, kernel_memsize + TEXT_OFFSET,
				       SZ_2M, reserve_addr);
		if (status != EFI_SUCCESS) {
			pr_efi_err(sys_table, "Failed to relocate kernel\n");
			return status;
		}
		memcpy((void *)*reserve_addr + TEXT_OFFSET, (void *)*image_addr,
		       kernel_size);
		*image_addr = *reserve_addr + TEXT_OFFSET;
		*reserve_size = kernel_memsize + TEXT_OFFSET;
	}

	return EFI_SUCCESS;
}
gpl-2.0
muntahar/GT-S5660-KERNEL
arch/arm/mach-at91/board-sam9g20ek-2slot-mmc.c
1317
6527
/* * Copyright (C) 2005 SAN People * Copyright (C) 2008 Atmel * Copyright (C) 2009 Rob Emanuele * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/types.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/spi/at73c213.h> #include <linux/clk.h> #include <mach/hardware.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/board.h> #include <mach/gpio.h> #include <mach/at91sam9_smc.h> #include "sam9_smc.h" #include "generic.h" static void __init ek_map_io(void) { /* Initialize processor: 18.432 MHz crystal */ at91sam9260_initialize(18432000); /* DGBU on ttyS0. (Rx & Tx only) */ at91_register_uart(0, 0, 0); /* USART0 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */ at91_register_uart(AT91SAM9260_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS | ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD | ATMEL_UART_RI); /* USART1 on ttyS2. (Rx, Tx, RTS, CTS) */ at91_register_uart(AT91SAM9260_ID_US1, 2, ATMEL_UART_CTS | ATMEL_UART_RTS); /* set serial console to ttyS0 (ie, DBGU) */ at91_set_serial_console(0); } static void __init ek_init_irq(void) { at91sam9260_init_interrupts(NULL); } /* * USB Host port */ static struct at91_usbh_data __initdata ek_usbh_data = { .ports = 2, }; /* * USB Device port */ static struct at91_udc_data __initdata ek_udc_data = { .vbus_pin = AT91_PIN_PC5, .pullup_pin = 0, /* pull-up driven by UDC */ }; /* * SPI devices. 
*/ static struct spi_board_info ek_spi_devices[] = { #if !(defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_AT91)) { /* DataFlash chip */ .modalias = "mtd_dataflash", .chip_select = 1, .max_speed_hz = 15 * 1000 * 1000, .bus_num = 0, }, #if defined(CONFIG_MTD_AT91_DATAFLASH_CARD) { /* DataFlash card */ .modalias = "mtd_dataflash", .chip_select = 0, .max_speed_hz = 15 * 1000 * 1000, .bus_num = 0, }, #endif #endif }; /* * MACB Ethernet device */ static struct at91_eth_data __initdata ek_macb_data = { .phy_irq_pin = AT91_PIN_PB0, .is_rmii = 1, }; /* * NAND flash */ static struct mtd_partition __initdata ek_nand_partition[] = { { .name = "Bootstrap", .offset = 0, .size = 4 * SZ_1M, }, { .name = "Partition 1", .offset = MTDPART_OFS_NXTBLK, .size = 60 * SZ_1M, }, { .name = "Partition 2", .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL, }, }; static struct mtd_partition * __init nand_partitions(int size, int *num_partitions) { *num_partitions = ARRAY_SIZE(ek_nand_partition); return ek_nand_partition; } /* det_pin is not connected */ static struct atmel_nand_data __initdata ek_nand_data = { .ale = 21, .cle = 22, .rdy_pin = AT91_PIN_PC13, .enable_pin = AT91_PIN_PC14, .partition_info = nand_partitions, #if defined(CONFIG_MTD_NAND_ATMEL_BUSWIDTH_16) .bus_width_16 = 1, #else .bus_width_16 = 0, #endif }; static struct sam9_smc_config __initdata ek_nand_smc_config = { .ncs_read_setup = 0, .nrd_setup = 2, .ncs_write_setup = 0, .nwe_setup = 2, .ncs_read_pulse = 4, .nrd_pulse = 4, .ncs_write_pulse = 4, .nwe_pulse = 4, .read_cycle = 7, .write_cycle = 7, .mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE | AT91_SMC_EXNWMODE_DISABLE, .tdf_cycles = 3, }; static void __init ek_add_device_nand(void) { /* setup bus-width (8 or 16) */ if (ek_nand_data.bus_width_16) ek_nand_smc_config.mode |= AT91_SMC_DBW_16; else ek_nand_smc_config.mode |= AT91_SMC_DBW_8; /* configure chip-select 3 (NAND) */ sam9_smc_configure(3, &ek_nand_smc_config); at91_add_device_nand(&ek_nand_data); } /* * MCI (SD/MMC) * wp_pin is not connected */ #if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE) static struct mci_platform_data __initdata ek_mmc_data = { .slot[0] = { .bus_width = 4, .detect_pin = AT91_PIN_PC2, .wp_pin = -ENODEV, }, .slot[1] = { .bus_width = 4, .detect_pin = AT91_PIN_PC9, .wp_pin = -ENODEV, }, }; #else static struct at91_mmc_data __initdata ek_mmc_data = { .slot_b = 1, /* Only one slot so use slot B */ .wire4 = 1, .det_pin = AT91_PIN_PC9, }; #endif /* * LEDs */ static struct gpio_led ek_leds[] = { { /* "bottom" led, green, userled1 to be defined */ .name = "ds5", .gpio = AT91_PIN_PB8, .active_low = 1, .default_trigger = "none", }, { /* "power" led, yellow */ .name = "ds1", .gpio = AT91_PIN_PB9, .default_trigger = "heartbeat", } }; static struct i2c_board_info __initdata ek_i2c_devices[] = { { I2C_BOARD_INFO("24c512", 0x50), }, }; static void __init ek_board_init(void) { /* Serial */ at91_add_device_serial(); /* USB Host */ at91_add_device_usbh(&ek_usbh_data); /* USB Device */ at91_add_device_udc(&ek_udc_data); /* SPI */ at91_add_device_spi(ek_spi_devices, ARRAY_SIZE(ek_spi_devices)); /* NAND */ ek_add_device_nand(); /* Ethernet */ at91_add_device_eth(&ek_macb_data); /* MMC */ #if defined(CONFIG_MMC_ATMELMCI) || defined(CONFIG_MMC_ATMELMCI_MODULE) at91_add_device_mci(0, &ek_mmc_data); #else at91_add_device_mmc(0, &ek_mmc_data); #endif /* I2C */ at91_add_device_i2c(ek_i2c_devices, ARRAY_SIZE(ek_i2c_devices)); /* LEDs */ at91_gpio_leds(ek_leds, ARRAY_SIZE(ek_leds)); /* PCK0 provides MCLK to the WM8731 */ 
at91_set_B_periph(AT91_PIN_PC1, 0); /* SSC (for WM8731) */ at91_add_device_ssc(AT91SAM9260_ID_SSC, ATMEL_SSC_TX); } MACHINE_START(AT91SAM9G20EK_2MMC, "Atmel AT91SAM9G20-EK 2 MMC Slot Mod") /* Maintainer: Rob Emanuele */ .phys_io = AT91_BASE_SYS, .io_pg_offst = (AT91_VA_BASE_SYS >> 18) & 0xfffc, .boot_params = AT91_SDRAM_BASE + 0x100, .timer = &at91sam926x_timer, .map_io = ek_map_io, .init_irq = ek_init_irq, .init_machine = ek_board_init, MACHINE_END
gpl-2.0
drsn0w/android_kernel_zte_msm7627
arch/powerpc/mm/tlb_hash32.c
1317
4951
/* * This file contains the routines for TLB flushing. * On machines where the MMU uses a hash table to store virtual to * physical translations, these routines flush entries from the * hash table also. * -- paulus * * Derived from arch/ppc/mm/init.c: * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) * and Cort Dougan (PReP) (cort@cs.nmt.edu) * Copyright (C) 1996 Paul Mackerras * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <asm/tlbflush.h> #include <asm/tlb.h> #include "mmu_decl.h" /* * Called when unmapping pages to flush entries from the TLB/hash table. */ void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr) { unsigned long ptephys; if (Hash != 0) { ptephys = __pa(ptep) & PAGE_MASK; flush_hash_pages(mm->context.id, addr, ptephys, 1); } } EXPORT_SYMBOL(flush_hash_entry); /* * Called by ptep_set_access_flags, must flush on CPUs for which the * DSI handler can't just "fixup" the TLB on a write fault */ void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr) { if (Hash != 0) return; _tlbie(addr); } /* * Called at the end of a mmu_gather operation to make sure the * TLB flush is completely done. */ void tlb_flush(struct mmu_gather *tlb) { if (Hash == 0) { /* * 603 needs to flush the whole TLB here since * it doesn't use a hash table. */ _tlbia(); } /* Push out batch of freed page tables */ pte_free_finish(); } /* * TLB flushing: * * - flush_tlb_mm(mm) flushes the specified mm context TLB's * - flush_tlb_page(vma, vmaddr) flushes one page * - flush_tlb_range(vma, start, end) flushes a range of pages * - flush_tlb_kernel_range(start, end) flushes kernel pages * * since the hardware hash table functions as an extension of the * tlb as far as the linux tables are concerned, flush it too. * -- Cort */ /* * 750 SMP is a Bad Idea because the 750 doesn't broadcast all * the cache operations on the bus. Hence we need to use an IPI * to get the other CPU(s) to invalidate their TLBs. */ #ifdef CONFIG_SMP_750 #define FINISH_FLUSH smp_send_tlb_invalidate(0) #else #define FINISH_FLUSH do { } while (0) #endif static void flush_range(struct mm_struct *mm, unsigned long start, unsigned long end) { pmd_t *pmd; unsigned long pmd_end; int count; unsigned int ctx = mm->context.id; if (Hash == 0) { _tlbia(); return; } start &= PAGE_MASK; if (start >= end) return; end = (end - 1) | ~PAGE_MASK; pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start); for (;;) { pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1; if (pmd_end > end) pmd_end = end; if (!pmd_none(*pmd)) { count = ((pmd_end - start) >> PAGE_SHIFT) + 1; flush_hash_pages(ctx, start, pmd_val(*pmd), count); } if (pmd_end == end) break; start = pmd_end + 1; ++pmd; } } /* * Flush kernel TLB entries in the given range */ void flush_tlb_kernel_range(unsigned long start, unsigned long end) { flush_range(&init_mm, start, end); FINISH_FLUSH; } EXPORT_SYMBOL(flush_tlb_kernel_range); /* * Flush all the (user) entries for the address space described by mm. 
*/ void flush_tlb_mm(struct mm_struct *mm) { struct vm_area_struct *mp; if (Hash == 0) { _tlbia(); return; } /* * It is safe to go down the mm's list of vmas when called * from dup_mmap, holding mmap_sem. It would also be safe from * unmap_region or exit_mmap, but not from vmtruncate on SMP - * but it seems dup_mmap is the only SMP case which gets here. */ for (mp = mm->mmap; mp != NULL; mp = mp->vm_next) flush_range(mp->vm_mm, mp->vm_start, mp->vm_end); FINISH_FLUSH; } EXPORT_SYMBOL(flush_tlb_mm); void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) { struct mm_struct *mm; pmd_t *pmd; if (Hash == 0) { _tlbie(vmaddr); return; } mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm; pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr); if (!pmd_none(*pmd)) flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1); FINISH_FLUSH; } EXPORT_SYMBOL(flush_tlb_page); /* * For each address in the range, find the pte for the address * and check _PAGE_HASHPTE bit; if it is set, find and destroy * the corresponding HPTE. */ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { flush_range(vma->vm_mm, start, end); FINISH_FLUSH; } EXPORT_SYMBOL(flush_tlb_range);
gpl-2.0
lyapota/s7e_marshmallow
kernel/sched/cpupri.c
1573
6959
/* * kernel/sched/cpupri.c * * CPU priority management * * Copyright (C) 2007-2008 Novell * * Author: Gregory Haskins <ghaskins@novell.com> * * This code tracks the priority of each CPU so that global migration * decisions are easy to calculate. Each CPU can be in a state as follows: * * (INVALID), IDLE, NORMAL, RT1, ... RT99 * * going from the lowest priority to the highest. CPUs in the INVALID state * are not eligible for routing. The system maintains this state with * a 2 dimensional bitmap (the first for priority class, the second for cpus * in that class). Therefore a typical application without affinity * restrictions can find a suitable CPU with O(1) complexity (e.g. two bit * searches). For tasks with affinity restrictions, the algorithm has a * worst case complexity of O(min(102, nr_domcpus)), though the scenario that * yields the worst case search is fairly contrived. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 * of the License. */ #include <linux/gfp.h> #include <linux/sched.h> #include <linux/sched/rt.h> #include <linux/slab.h> #include "cpupri.h" /* Convert between a 140 based task->prio, and our 102 based cpupri */ static int convert_prio(int prio) { int cpupri; if (prio == CPUPRI_INVALID) cpupri = CPUPRI_INVALID; else if (prio == MAX_PRIO) cpupri = CPUPRI_IDLE; else if (prio >= MAX_RT_PRIO) cpupri = CPUPRI_NORMAL; else cpupri = MAX_RT_PRIO - prio + 1; return cpupri; } /** * cpupri_find - find the best (lowest-pri) CPU in the system * @cp: The cpupri context * @p: The task * @lowest_mask: A mask to fill in with selected CPUs (or NULL) * * Note: This function returns the recommended CPUs as calculated during the * current invocation. By the time the call returns, the CPUs may have in * fact changed priorities any number of times. While not ideal, it is not * an issue of correctness since the normal rebalancer logic will correct * any discrepancies created by racing against the uncertainty of the current * priority configuration. * * Return: (int)bool - CPUs were found */ int cpupri_find(struct cpupri *cp, struct task_struct *p, struct cpumask *lowest_mask) { int idx = 0; int task_pri = convert_prio(p->prio); BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES); for (idx = 0; idx < task_pri; idx++) { struct cpupri_vec *vec = &cp->pri_to_cpu[idx]; int skip = 0; if (!atomic_read(&(vec)->count)) skip = 1; /* * When looking at the vector, we need to read the counter, * do a memory barrier, then read the mask. * * Note: This is still all racey, but we can deal with it. * Ideally, we only want to look at masks that are set. * * If a mask is not set, then the only thing wrong is that we * did a little more work than necessary. * * If we read a zero count but the mask is set, because of the * memory barriers, that can only happen when the highest prio * task for a run queue has left the run queue, in which case, * it will be followed by a pull. If the task we are processing * fails to find a proper place to go, that pull request will * pull this task if the run queue is running at a lower * priority. 
*/ smp_rmb(); /* Need to do the rmb for every iteration */ if (skip) continue; if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) continue; if (lowest_mask) { cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); /* * We have to ensure that we have at least one bit * still set in the array, since the map could have * been concurrently emptied between the first and * second reads of vec->mask. If we hit this * condition, simply act as though we never hit this * priority level and continue on. */ if (cpumask_any(lowest_mask) >= nr_cpu_ids) continue; } return 1; } return 0; } /** * cpupri_set - update the cpu priority setting * @cp: The cpupri context * @cpu: The target cpu * @newpri: The priority (INVALID-RT99) to assign to this CPU * * Note: Assumes cpu_rq(cpu)->lock is locked * * Returns: (void) */ void cpupri_set(struct cpupri *cp, int cpu, int newpri) { int *currpri = &cp->cpu_to_pri[cpu]; int oldpri = *currpri; int do_mb = 0; newpri = convert_prio(newpri); BUG_ON(newpri >= CPUPRI_NR_PRIORITIES); if (newpri == oldpri) return; /* * If the cpu was currently mapped to a different value, we * need to map it to the new value then remove the old value. * Note, we must add the new value first, otherwise we risk the * cpu being missed by the priority loop in cpupri_find. */ if (likely(newpri != CPUPRI_INVALID)) { struct cpupri_vec *vec = &cp->pri_to_cpu[newpri]; cpumask_set_cpu(cpu, vec->mask); /* * When adding a new vector, we update the mask first, * do a write memory barrier, and then update the count, to * make sure the vector is visible when count is set. */ smp_mb__before_atomic(); atomic_inc(&(vec)->count); do_mb = 1; } if (likely(oldpri != CPUPRI_INVALID)) { struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri]; /* * Because the order of modification of the vec->count * is important, we must make sure that the update * of the new prio is seen before we decrement the * old prio. This makes sure that the loop sees * one or the other when we raise the priority of * the run queue. We don't care about when we lower the * priority, as that will trigger an rt pull anyway. * * We only need to do a memory barrier if we updated * the new priority vec. */ if (do_mb) smp_mb__after_atomic(); /* * When removing from the vector, we decrement the counter first * do a memory barrier and then clear the mask. */ atomic_dec(&(vec)->count); smp_mb__after_atomic(); cpumask_clear_cpu(cpu, vec->mask); } *currpri = newpri; } /** * cpupri_init - initialize the cpupri structure * @cp: The cpupri context * * Return: -ENOMEM on memory allocation failure. */ int cpupri_init(struct cpupri *cp) { int i; memset(cp, 0, sizeof(*cp)); for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) { struct cpupri_vec *vec = &cp->pri_to_cpu[i]; atomic_set(&vec->count, 0); if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL)) goto cleanup; } cp->cpu_to_pri = kcalloc(nr_cpu_ids, sizeof(int), GFP_KERNEL); if (!cp->cpu_to_pri) goto cleanup; for_each_possible_cpu(i) cp->cpu_to_pri[i] = CPUPRI_INVALID; return 0; cleanup: for (i--; i >= 0; i--) free_cpumask_var(cp->pri_to_cpu[i].mask); return -ENOMEM; } /** * cpupri_cleanup - clean up the cpupri structure * @cp: The cpupri context */ void cpupri_cleanup(struct cpupri *cp) { int i; kfree(cp->cpu_to_pri); for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) free_cpumask_var(cp->pri_to_cpu[i].mask); }
gpl-2.0
Lmaths/linux-stable-rcn-ee
drivers/watchdog/pc87413_wdt.c
1829
14364
/* * NS pc87413-wdt Watchdog Timer driver for Linux 2.6.x.x * * This code is based on wdt.c with original copyright. * * (C) Copyright 2006 Sven Anders, <anders@anduras.de> * and Marcus Junker, <junker@anduras.de> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Neither Sven Anders, Marcus Junker nor ANDURAS AG * admit liability nor provide warranty for any of this software. * This material is provided "AS-IS" and at no charge. * * Release 1.1 */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/types.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/notifier.h> #include <linux/fs.h> #include <linux/reboot.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/moduleparam.h> #include <linux/io.h> #include <linux/uaccess.h> /* #define DEBUG 1 */ #define DEFAULT_TIMEOUT 1 /* 1 minute */ #define MAX_TIMEOUT 255 #define VERSION "1.1" #define MODNAME "pc87413 WDT" #define DPFX MODNAME " - DEBUG: " #define WDT_INDEX_IO_PORT (io+0) /* I/O port base (index register) */ #define WDT_DATA_IO_PORT (WDT_INDEX_IO_PORT+1) #define SWC_LDN 0x04 #define SIOCFG2 0x22 /* Serial IO register */ #define WDCTL 0x10 /* Watchdog-Timer-Control-Register */ #define WDTO 0x11 /* Watchdog timeout register */ #define WDCFG 0x12 /* Watchdog config register */ #define IO_DEFAULT 0x2E /* Address used on Portwell Boards */ static int io = IO_DEFAULT; static int swc_base_addr = -1; static int timeout = DEFAULT_TIMEOUT; /* timeout value */ static unsigned long timer_enabled; /* is the timer enabled? */ static char expect_close; /* is the close expected? 
*/ static DEFINE_SPINLOCK(io_lock); /* to guard us from io races */ static bool nowayout = WATCHDOG_NOWAYOUT; /* -- Low level function ----------------------------------------*/ /* Select pins for Watchdog output */ static inline void pc87413_select_wdt_out(void) { unsigned int cr_data = 0; /* Step 1: Select multiple pin,pin55,as WDT output */ outb_p(SIOCFG2, WDT_INDEX_IO_PORT); cr_data = inb(WDT_DATA_IO_PORT); cr_data |= 0x80; /* Set Bit7 to 1*/ outb_p(SIOCFG2, WDT_INDEX_IO_PORT); outb_p(cr_data, WDT_DATA_IO_PORT); #ifdef DEBUG pr_info(DPFX "Select multiple pin,pin55,as WDT output: Bit7 to 1: %d\n", cr_data); #endif } /* Enable SWC functions */ static inline void pc87413_enable_swc(void) { unsigned int cr_data = 0; /* Step 2: Enable SWC functions */ outb_p(0x07, WDT_INDEX_IO_PORT); /* Point SWC_LDN (LDN=4) */ outb_p(SWC_LDN, WDT_DATA_IO_PORT); outb_p(0x30, WDT_INDEX_IO_PORT); /* Read Index 0x30 First */ cr_data = inb(WDT_DATA_IO_PORT); cr_data |= 0x01; /* Set Bit0 to 1 */ outb_p(0x30, WDT_INDEX_IO_PORT); outb_p(cr_data, WDT_DATA_IO_PORT); /* Index0x30_bit0P1 */ #ifdef DEBUG pr_info(DPFX "pc87413 - Enable SWC functions\n"); #endif } /* Read SWC I/O base address */ static void pc87413_get_swc_base_addr(void) { unsigned char addr_l, addr_h = 0; /* Step 3: Read SWC I/O Base Address */ outb_p(0x60, WDT_INDEX_IO_PORT); /* Read Index 0x60 */ addr_h = inb(WDT_DATA_IO_PORT); outb_p(0x61, WDT_INDEX_IO_PORT); /* Read Index 0x61 */ addr_l = inb(WDT_DATA_IO_PORT); swc_base_addr = (addr_h << 8) + addr_l; #ifdef DEBUG pr_info(DPFX "Read SWC I/O Base Address: low %d, high %d, res %d\n", addr_l, addr_h, swc_base_addr); #endif } /* Select Bank 3 of SWC */ static inline void pc87413_swc_bank3(void) { /* Step 4: Select Bank3 of SWC */ outb_p(inb(swc_base_addr + 0x0f) | 0x03, swc_base_addr + 0x0f); #ifdef DEBUG pr_info(DPFX "Select Bank3 of SWC\n"); #endif } /* Set watchdog timeout to x minutes */ static inline void pc87413_programm_wdto(char pc87413_time) { /* Step 5: Programm WDTO, Twd. 
*/ outb_p(pc87413_time, swc_base_addr + WDTO); #ifdef DEBUG pr_info(DPFX "Set WDTO to %d minutes\n", pc87413_time); #endif } /* Enable WDEN */ static inline void pc87413_enable_wden(void) { /* Step 6: Enable WDEN */ outb_p(inb(swc_base_addr + WDCTL) | 0x01, swc_base_addr + WDCTL); #ifdef DEBUG pr_info(DPFX "Enable WDEN\n"); #endif } /* Enable SW_WD_TREN */ static inline void pc87413_enable_sw_wd_tren(void) { /* Enable SW_WD_TREN */ outb_p(inb(swc_base_addr + WDCFG) | 0x80, swc_base_addr + WDCFG); #ifdef DEBUG pr_info(DPFX "Enable SW_WD_TREN\n"); #endif } /* Disable SW_WD_TREN */ static inline void pc87413_disable_sw_wd_tren(void) { /* Disable SW_WD_TREN */ outb_p(inb(swc_base_addr + WDCFG) & 0x7f, swc_base_addr + WDCFG); #ifdef DEBUG pr_info(DPFX "pc87413 - Disable SW_WD_TREN\n"); #endif } /* Enable SW_WD_TRG */ static inline void pc87413_enable_sw_wd_trg(void) { /* Enable SW_WD_TRG */ outb_p(inb(swc_base_addr + WDCTL) | 0x80, swc_base_addr + WDCTL); #ifdef DEBUG pr_info(DPFX "pc87413 - Enable SW_WD_TRG\n"); #endif } /* Disable SW_WD_TRG */ static inline void pc87413_disable_sw_wd_trg(void) { /* Disable SW_WD_TRG */ outb_p(inb(swc_base_addr + WDCTL) & 0x7f, swc_base_addr + WDCTL); #ifdef DEBUG pr_info(DPFX "Disable SW_WD_TRG\n"); #endif } /* -- Higher level functions ------------------------------------*/ /* Enable the watchdog */ static void pc87413_enable(void) { spin_lock(&io_lock); pc87413_swc_bank3(); pc87413_programm_wdto(timeout); pc87413_enable_wden(); pc87413_enable_sw_wd_tren(); pc87413_enable_sw_wd_trg(); spin_unlock(&io_lock); } /* Disable the watchdog */ static void pc87413_disable(void) { spin_lock(&io_lock); pc87413_swc_bank3(); pc87413_disable_sw_wd_tren(); pc87413_disable_sw_wd_trg(); pc87413_programm_wdto(0); spin_unlock(&io_lock); } /* Refresh the watchdog */ static void pc87413_refresh(void) { spin_lock(&io_lock); pc87413_swc_bank3(); pc87413_disable_sw_wd_tren(); pc87413_disable_sw_wd_trg(); pc87413_programm_wdto(timeout); pc87413_enable_wden(); pc87413_enable_sw_wd_tren(); pc87413_enable_sw_wd_trg(); spin_unlock(&io_lock); } /* -- File operations -------------------------------------------*/ /** * pc87413_open: * @inode: inode of device * @file: file handle to device * */ static int pc87413_open(struct inode *inode, struct file *file) { /* /dev/watchdog can only be opened once */ if (test_and_set_bit(0, &timer_enabled)) return -EBUSY; if (nowayout) __module_get(THIS_MODULE); /* Reload and activate timer */ pc87413_refresh(); pr_info("Watchdog enabled. Timeout set to %d minute(s).\n", timeout); return nonseekable_open(inode, file); } /** * pc87413_release: * @inode: inode to board * @file: file handle to board * * The watchdog has a configurable API. There is a religious dispute * between people who want their watchdog to be able to shut down and * those who want to be sure if the watchdog manager dies the machine * reboots. In the former case we disable the counters, in the latter * case you have to open it again very soon. */ static int pc87413_release(struct inode *inode, struct file *file) { /* Shut off the timer. */ if (expect_close == 42) { pc87413_disable(); pr_info("Watchdog disabled, sleeping again...\n"); } else { pr_crit("Unexpected close, not stopping watchdog!\n"); pc87413_refresh(); } clear_bit(0, &timer_enabled); expect_close = 0; return 0; } /** * pc87413_status: * * return, if the watchdog is enabled (timeout is set...) 
*/ static int pc87413_status(void) { return 0; /* currently not supported */ } /** * pc87413_write: * @file: file handle to the watchdog * @data: data buffer to write * @len: length in bytes * @ppos: pointer to the position to write. No seeks allowed * * A write to a watchdog device is defined as a keepalive signal. Any * write of data will do, as we we don't define content meaning. */ static ssize_t pc87413_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { /* See if we got the magic character 'V' and reload the timer */ if (len) { if (!nowayout) { size_t i; /* reset expect flag */ expect_close = 0; /* scan to see whether or not we got the magic character */ for (i = 0; i != len; i++) { char c; if (get_user(c, data + i)) return -EFAULT; if (c == 'V') expect_close = 42; } } /* someone wrote to us, we should reload the timer */ pc87413_refresh(); } return len; } /** * pc87413_ioctl: * @file: file handle to the device * @cmd: watchdog command * @arg: argument pointer * * The watchdog API defines a common set of functions for all watchdogs * according to their available features. We only actually usefully support * querying capabilities and current status. */ static long pc87413_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int new_timeout; union { struct watchdog_info __user *ident; int __user *i; } uarg; static const struct watchdog_info ident = { .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, .firmware_version = 1, .identity = "PC87413(HF/F) watchdog", }; uarg.i = (int __user *)arg; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(uarg.ident, &ident, sizeof(ident)) ? -EFAULT : 0; case WDIOC_GETSTATUS: return put_user(pc87413_status(), uarg.i); case WDIOC_GETBOOTSTATUS: return put_user(0, uarg.i); case WDIOC_SETOPTIONS: { int options, retval = -EINVAL; if (get_user(options, uarg.i)) return -EFAULT; if (options & WDIOS_DISABLECARD) { pc87413_disable(); retval = 0; } if (options & WDIOS_ENABLECARD) { pc87413_enable(); retval = 0; } return retval; } case WDIOC_KEEPALIVE: pc87413_refresh(); #ifdef DEBUG pr_info(DPFX "keepalive\n"); #endif return 0; case WDIOC_SETTIMEOUT: if (get_user(new_timeout, uarg.i)) return -EFAULT; /* the API states this is given in secs */ new_timeout /= 60; if (new_timeout < 0 || new_timeout > MAX_TIMEOUT) return -EINVAL; timeout = new_timeout; pc87413_refresh(); /* fall through and return the new timeout... */ case WDIOC_GETTIMEOUT: new_timeout = timeout * 60; return put_user(new_timeout, uarg.i); default: return -ENOTTY; } } /* -- Notifier funtions -----------------------------------------*/ /** * notify_sys: * @this: our notifier block * @code: the event being reported * @unused: unused * * Our notifier is called on system shutdowns. We want to turn the card * off at reboot otherwise the machine will reboot again during memory * test or worse yet during the following fsck. This would suck, in fact * trust me - if it happens it does suck. 
*/ static int pc87413_notify_sys(struct notifier_block *this, unsigned long code, void *unused) { if (code == SYS_DOWN || code == SYS_HALT) /* Turn the card off */ pc87413_disable(); return NOTIFY_DONE; } /* -- Module's structures ---------------------------------------*/ static const struct file_operations pc87413_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = pc87413_write, .unlocked_ioctl = pc87413_ioctl, .open = pc87413_open, .release = pc87413_release, }; static struct notifier_block pc87413_notifier = { .notifier_call = pc87413_notify_sys, }; static struct miscdevice pc87413_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &pc87413_fops, }; /* -- Module init functions -------------------------------------*/ /** * pc87413_init: module's "constructor" * * Set up the WDT watchdog board. All we have to do is grab the * resources we require and bitch if anyone beat us to them. * The open() function will actually kick the board off. */ static int __init pc87413_init(void) { int ret; pr_info("Version " VERSION " at io 0x%X\n", WDT_INDEX_IO_PORT); if (!request_muxed_region(io, 2, MODNAME)) return -EBUSY; ret = register_reboot_notifier(&pc87413_notifier); if (ret != 0) pr_err("cannot register reboot notifier (err=%d)\n", ret); ret = misc_register(&pc87413_miscdev); if (ret != 0) { pr_err("cannot register miscdev on minor=%d (err=%d)\n", WATCHDOG_MINOR, ret); goto reboot_unreg; } pr_info("initialized. timeout=%d min\n", timeout); pc87413_select_wdt_out(); pc87413_enable_swc(); pc87413_get_swc_base_addr(); if (!request_region(swc_base_addr, 0x20, MODNAME)) { pr_err("cannot request SWC region at 0x%x\n", swc_base_addr); ret = -EBUSY; goto misc_unreg; } pc87413_enable(); release_region(io, 2); return 0; misc_unreg: misc_deregister(&pc87413_miscdev); reboot_unreg: unregister_reboot_notifier(&pc87413_notifier); release_region(io, 2); return ret; } /** * pc87413_exit: module's "destructor" * * Unload the watchdog. You cannot do this with any file handles open. * If your watchdog is set to continue ticking on close and you unload * it, well it keeps ticking. We won't get the interrupt but the board * will not touch PC memory so all is fine. You just have to load a new * module in 60 seconds or reboot. */ static void __exit pc87413_exit(void) { /* Stop the timer before we leave */ if (!nowayout) { pc87413_disable(); pr_info("Watchdog disabled\n"); } misc_deregister(&pc87413_miscdev); unregister_reboot_notifier(&pc87413_notifier); release_region(swc_base_addr, 0x20); pr_info("watchdog component driver removed\n"); } module_init(pc87413_init); module_exit(pc87413_exit); MODULE_AUTHOR("Sven Anders <anders@anduras.de>"); MODULE_AUTHOR("Marcus Junker <junker@anduras.de>"); MODULE_DESCRIPTION("PC87413 WDT driver"); MODULE_LICENSE("GPL"); module_param(io, int, 0); MODULE_PARM_DESC(io, MODNAME " I/O port (default: " __MODULE_STRING(IO_DEFAULT) ")."); module_param(timeout, int, 0); MODULE_PARM_DESC(timeout, "Watchdog timeout in minutes (default=" __MODULE_STRING(DEFAULT_TIMEOUT) ")."); module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
gpl-2.0
RobinSystems/linux-3.13
arch/x86/mm/setup_nx.c
2085
1286
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/init.h>

#include <asm/pgtable.h>
#include <asm/proto.h>

static int disable_nx;

/*
 * noexec = on|off
 *
 * Control non-executable mappings for processes.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
	if (!str)
		return -EINVAL;
	if (!strncmp(str, "on", 2)) {
		disable_nx = 0;
	} else if (!strncmp(str, "off", 3)) {
		disable_nx = 1;
	}
	x86_configure_nx();
	return 0;
}
early_param("noexec", noexec_setup);

void x86_configure_nx(void)
{
	if (cpu_has_nx && !disable_nx)
		__supported_pte_mask |= _PAGE_NX;
	else
		__supported_pte_mask &= ~_PAGE_NX;
}

void __init x86_report_nx(void)
{
	if (!cpu_has_nx) {
		printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
		       "missing in CPU!\n");
	} else {
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
		if (disable_nx) {
			printk(KERN_INFO "NX (Execute Disable) protection: "
			       "disabled by kernel command line option\n");
		} else {
			printk(KERN_INFO "NX (Execute Disable) protection: "
			       "active\n");
		}
#else
		/* 32bit non-PAE kernel, NX cannot be used */
		printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
		       "cannot be enabled: non-PAE kernel!\n");
#endif
	}
}
gpl-2.0
emxys1/imx6rex-sato-sdk-linux-3.10.17
drivers/staging/slicoss/slicoss.c
2085
102862
/************************************************************************** * * Copyright 2000-2006 Alacritech, Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * The views and conclusions contained in the software and documentation * are those of the authors and should not be interpreted as representing * official policies, either expressed or implied, of Alacritech, Inc. * **************************************************************************/ /* * FILENAME: slicoss.c * * The SLICOSS driver for Alacritech's IS-NIC products. * * This driver is supposed to support: * * Mojave cards (single port PCI Gigabit) both copper and fiber * Oasis cards (single and dual port PCI-x Gigabit) copper and fiber * Kalahari cards (dual and quad port PCI-e Gigabit) copper and fiber * * The driver was actually tested on Oasis and Kalahari cards. * * * NOTE: This is the standard, non-accelerated version of Alacritech's * IS-NIC driver. 
*/ #define KLUDGE_FOR_4GB_BOUNDARY 1 #define DEBUG_MICROCODE 1 #define DBG 1 #define SLIC_INTERRUPT_PROCESS_LIMIT 1 #define SLIC_OFFLOAD_IP_CHECKSUM 1 #define STATS_TIMER_INTERVAL 2 #define PING_TIMER_INTERVAL 1 #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/timer.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/init.h> #include <linux/bitops.h> #include <linux/io.h> #include <linux/netdevice.h> #include <linux/crc32.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/delay.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/kthread.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/firmware.h> #include <linux/types.h> #include <linux/dma-mapping.h> #include <linux/mii.h> #include <linux/if_vlan.h> #include <asm/unaligned.h> #include <linux/ethtool.h> #include <linux/uaccess.h> #include "slichw.h" #include "slic.h" static uint slic_first_init = 1; static char *slic_banner = "Alacritech SLIC Technology(tm) Server "\ "and Storage Accelerator (Non-Accelerated)"; static char *slic_proc_version = "2.0.351 2006/07/14 12:26:00"; static char *slic_product_name = "SLIC Technology(tm) Server "\ "and Storage Accelerator (Non-Accelerated)"; static char *slic_vendor = "Alacritech, Inc."; static int slic_debug = 1; static int debug = -1; static struct net_device *head_netdevice; static struct base_driver slic_global = { {}, 0, 0, 0, 1, NULL, NULL }; static int intagg_delay = 100; static u32 dynamic_intagg; static unsigned int rcv_count; static struct dentry *slic_debugfs; #define DRV_NAME "slicoss" #define DRV_VERSION "2.0.1" #define DRV_AUTHOR "Alacritech, Inc. Engineering" #define DRV_DESCRIPTION "Alacritech SLIC Techonology(tm) "\ "Non-Accelerated Driver" #define DRV_COPYRIGHT "Copyright 2000-2006 Alacritech, Inc. "\ "All rights reserved." 
#define PFX DRV_NAME " " MODULE_AUTHOR(DRV_AUTHOR); MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_LICENSE("Dual BSD/GPL"); module_param(dynamic_intagg, int, 0); MODULE_PARM_DESC(dynamic_intagg, "Dynamic Interrupt Aggregation Setting"); module_param(intagg_delay, int, 0); MODULE_PARM_DESC(intagg_delay, "uSec Interrupt Aggregation Delay"); static DEFINE_PCI_DEVICE_TABLE(slic_pci_tbl) = { { PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_1GB_DEVICE_ID) }, { PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_2GB_DEVICE_ID) }, { 0 } }; MODULE_DEVICE_TABLE(pci, slic_pci_tbl); #define SLIC_GET_SLIC_HANDLE(_adapter, _pslic_handle) \ { \ spin_lock_irqsave(&_adapter->handle_lock.lock, \ _adapter->handle_lock.flags); \ _pslic_handle = _adapter->pfree_slic_handles; \ if (_pslic_handle) { \ _adapter->pfree_slic_handles = _pslic_handle->next; \ } \ spin_unlock_irqrestore(&_adapter->handle_lock.lock, \ _adapter->handle_lock.flags); \ } #define SLIC_FREE_SLIC_HANDLE(_adapter, _pslic_handle) \ { \ _pslic_handle->type = SLIC_HANDLE_FREE; \ spin_lock_irqsave(&_adapter->handle_lock.lock, \ _adapter->handle_lock.flags); \ _pslic_handle->next = _adapter->pfree_slic_handles; \ _adapter->pfree_slic_handles = _pslic_handle; \ spin_unlock_irqrestore(&_adapter->handle_lock.lock, \ _adapter->handle_lock.flags); \ } static inline void slic_reg32_write(void __iomem *reg, u32 value, bool flush) { writel(value, reg); if (flush) mb(); } static inline void slic_reg64_write(struct adapter *adapter, void __iomem *reg, u32 value, void __iomem *regh, u32 paddrh, bool flush) { spin_lock_irqsave(&adapter->bit64reglock.lock, adapter->bit64reglock.flags); if (paddrh != adapter->curaddrupper) { adapter->curaddrupper = paddrh; writel(paddrh, regh); } writel(value, reg); if (flush) mb(); spin_unlock_irqrestore(&adapter->bit64reglock.lock, adapter->bit64reglock.flags); } static void slic_mcast_set_bit(struct adapter *adapter, char *address) { unsigned char crcpoly; /* Get the CRC polynomial for the mac address */ /* we use bits 1-8 (lsb), bitwise reversed, * msb (= lsb bit 0 before bitrev) is automatically discarded */ crcpoly = (ether_crc(ETH_ALEN, address)>>23); /* We only have space on the SLIC for 64 entries. Lop * off the top two bits. (2^6 = 64) */ crcpoly &= 0x3F; /* OR in the new bit into our 64 bit mask. */ adapter->mcastmask |= (u64) 1 << crcpoly; } static void slic_mcast_set_mask(struct adapter *adapter) { __iomem struct slic_regs *slic_regs = adapter->slic_regs; if (adapter->macopts & (MAC_ALLMCAST | MAC_PROMISC)) { /* Turn on all multicast addresses. We have to do this for * promiscuous mode as well as ALLMCAST mode. It saves the * Microcode from having to keep state about the MAC * configuration. 
*/ slic_reg32_write(&slic_regs->slic_mcastlow, 0xFFFFFFFF, FLUSH); slic_reg32_write(&slic_regs->slic_mcasthigh, 0xFFFFFFFF, FLUSH); } else { /* Commit our multicast mast to the SLIC by writing to the * multicast address mask registers */ slic_reg32_write(&slic_regs->slic_mcastlow, (u32)(adapter->mcastmask & 0xFFFFFFFF), FLUSH); slic_reg32_write(&slic_regs->slic_mcasthigh, (u32)((adapter->mcastmask >> 32) & 0xFFFFFFFF), FLUSH); } } static void slic_timer_ping(ulong dev) { struct adapter *adapter; struct sliccard *card; adapter = netdev_priv((struct net_device *)dev); card = adapter->card; adapter->pingtimer.expires = jiffies + (PING_TIMER_INTERVAL * HZ); add_timer(&adapter->pingtimer); } static void slic_unmap_mmio_space(struct adapter *adapter) { if (adapter->slic_regs) iounmap(adapter->slic_regs); adapter->slic_regs = NULL; } /* * slic_link_config * * Write phy control to configure link duplex/speed * */ static void slic_link_config(struct adapter *adapter, u32 linkspeed, u32 linkduplex) { u32 __iomem *wphy; u32 speed; u32 duplex; u32 phy_config; u32 phy_advreg; u32 phy_gctlreg; if (adapter->state != ADAPT_UP) return; if (linkspeed > LINK_1000MB) linkspeed = LINK_AUTOSPEED; if (linkduplex > LINK_AUTOD) linkduplex = LINK_AUTOD; wphy = &adapter->slic_regs->slic_wphy; if ((linkspeed == LINK_AUTOSPEED) || (linkspeed == LINK_1000MB)) { if (adapter->flags & ADAPT_FLAGS_FIBERMEDIA) { /* We've got a fiber gigabit interface, and register * 4 is different in fiber mode than in copper mode */ /* advertise FD only @1000 Mb */ phy_advreg = (MIICR_REG_4 | (PAR_ADV1000XFD)); /* enable PAUSE frames */ phy_advreg |= PAR_ASYMPAUSE_FIBER; slic_reg32_write(wphy, phy_advreg, FLUSH); if (linkspeed == LINK_AUTOSPEED) { /* reset phy, enable auto-neg */ phy_config = (MIICR_REG_PCR | (PCR_RESET | PCR_AUTONEG | PCR_AUTONEG_RST)); slic_reg32_write(wphy, phy_config, FLUSH); } else { /* forced 1000 Mb FD*/ /* power down phy to break link this may not work) */ phy_config = (MIICR_REG_PCR | PCR_POWERDOWN); slic_reg32_write(wphy, phy_config, FLUSH); /* wait, Marvell says 1 sec, try to get away with 10 ms */ mdelay(10); /* disable auto-neg, set speed/duplex, soft reset phy, powerup */ phy_config = (MIICR_REG_PCR | (PCR_RESET | PCR_SPEED_1000 | PCR_DUPLEX_FULL)); slic_reg32_write(wphy, phy_config, FLUSH); } } else { /* copper gigabit */ /* Auto-Negotiate or 1000 Mb must be auto negotiated * We've got a copper gigabit interface, and * register 4 is different in copper mode than * in fiber mode */ if (linkspeed == LINK_AUTOSPEED) { /* advertise 10/100 Mb modes */ phy_advreg = (MIICR_REG_4 | (PAR_ADV100FD | PAR_ADV100HD | PAR_ADV10FD | PAR_ADV10HD)); } else { /* linkspeed == LINK_1000MB - don't advertise 10/100 Mb modes */ phy_advreg = MIICR_REG_4; } /* enable PAUSE frames */ phy_advreg |= PAR_ASYMPAUSE; /* required by the Cicada PHY */ phy_advreg |= PAR_802_3; slic_reg32_write(wphy, phy_advreg, FLUSH); /* advertise FD only @1000 Mb */ phy_gctlreg = (MIICR_REG_9 | (PGC_ADV1000FD)); slic_reg32_write(wphy, phy_gctlreg, FLUSH); if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) { /* if a Marvell PHY enable auto crossover */ phy_config = (MIICR_REG_16 | (MRV_REG16_XOVERON)); slic_reg32_write(wphy, phy_config, FLUSH); /* reset phy, enable auto-neg */ phy_config = (MIICR_REG_PCR | (PCR_RESET | PCR_AUTONEG | PCR_AUTONEG_RST)); slic_reg32_write(wphy, phy_config, FLUSH); } else { /* it's a Cicada PHY */ /* enable and restart auto-neg (don't reset) */ phy_config = (MIICR_REG_PCR | (PCR_AUTONEG | PCR_AUTONEG_RST)); 
slic_reg32_write(wphy, phy_config, FLUSH); } } } else { /* Forced 10/100 */ if (linkspeed == LINK_10MB) speed = 0; else speed = PCR_SPEED_100; if (linkduplex == LINK_HALFD) duplex = 0; else duplex = PCR_DUPLEX_FULL; if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) { /* if a Marvell PHY disable auto crossover */ phy_config = (MIICR_REG_16 | (MRV_REG16_XOVEROFF)); slic_reg32_write(wphy, phy_config, FLUSH); } /* power down phy to break link (this may not work) */ phy_config = (MIICR_REG_PCR | (PCR_POWERDOWN | speed | duplex)); slic_reg32_write(wphy, phy_config, FLUSH); /* wait, Marvell says 1 sec, try to get away with 10 ms */ mdelay(10); if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) { /* if a Marvell PHY disable auto-neg, set speed, soft reset phy, powerup */ phy_config = (MIICR_REG_PCR | (PCR_RESET | speed | duplex)); slic_reg32_write(wphy, phy_config, FLUSH); } else { /* it's a Cicada PHY */ /* disable auto-neg, set speed, powerup */ phy_config = (MIICR_REG_PCR | (speed | duplex)); slic_reg32_write(wphy, phy_config, FLUSH); } } } static int slic_card_download_gbrcv(struct adapter *adapter) { const struct firmware *fw; const char *file = ""; int ret; __iomem struct slic_regs *slic_regs = adapter->slic_regs; u32 codeaddr; u32 instruction; int index = 0; u32 rcvucodelen = 0; switch (adapter->devid) { case SLIC_2GB_DEVICE_ID: file = "slicoss/oasisrcvucode.sys"; break; case SLIC_1GB_DEVICE_ID: file = "slicoss/gbrcvucode.sys"; break; default: return -ENOENT; } ret = request_firmware(&fw, file, &adapter->pcidev->dev); if (ret) { dev_err(&adapter->pcidev->dev, "SLICOSS: Failed to load firmware %s\n", file); return ret; } rcvucodelen = *(u32 *)(fw->data + index); index += 4; switch (adapter->devid) { case SLIC_2GB_DEVICE_ID: if (rcvucodelen != OasisRcvUCodeLen) { release_firmware(fw); return -EINVAL; } break; case SLIC_1GB_DEVICE_ID: if (rcvucodelen != GBRcvUCodeLen) { release_firmware(fw); return -EINVAL; } break; } /* start download */ slic_reg32_write(&slic_regs->slic_rcv_wcs, SLIC_RCVWCS_BEGIN, FLUSH); /* download the rcv sequencer ucode */ for (codeaddr = 0; codeaddr < rcvucodelen; codeaddr++) { /* write out instruction address */ slic_reg32_write(&slic_regs->slic_rcv_wcs, codeaddr, FLUSH); instruction = *(u32 *)(fw->data + index); index += 4; /* write out the instruction data low addr */ slic_reg32_write(&slic_regs->slic_rcv_wcs, instruction, FLUSH); instruction = *(u8 *)(fw->data + index); index++; /* write out the instruction data high addr */ slic_reg32_write(&slic_regs->slic_rcv_wcs, (u8)instruction, FLUSH); } /* download finished */ release_firmware(fw); slic_reg32_write(&slic_regs->slic_rcv_wcs, SLIC_RCVWCS_FINISH, FLUSH); return 0; } MODULE_FIRMWARE("slicoss/oasisrcvucode.sys"); MODULE_FIRMWARE("slicoss/gbrcvucode.sys"); static int slic_card_download(struct adapter *adapter) { const struct firmware *fw; const char *file = ""; int ret; u32 section; int thissectionsize; int codeaddr; __iomem struct slic_regs *slic_regs = adapter->slic_regs; u32 instruction; u32 baseaddress; u32 i; u32 numsects = 0; u32 sectsize[3]; u32 sectstart[3]; int ucode_start, index = 0; switch (adapter->devid) { case SLIC_2GB_DEVICE_ID: file = "slicoss/oasisdownload.sys"; break; case SLIC_1GB_DEVICE_ID: file = "slicoss/gbdownload.sys"; break; default: return -ENOENT; } ret = request_firmware(&fw, file, &adapter->pcidev->dev); if (ret) { dev_err(&adapter->pcidev->dev, "SLICOSS: Failed to load firmware %s\n", file); return ret; } numsects = *(u32 *)(fw->data + index); index += 4; for (i = 0; i < numsects; 
i++) { sectsize[i] = *(u32 *)(fw->data + index); index += 4; } for (i = 0; i < numsects; i++) { sectstart[i] = *(u32 *)(fw->data + index); index += 4; } ucode_start = index; instruction = *(u32 *)(fw->data + index); index += 4; for (section = 0; section < numsects; section++) { baseaddress = sectstart[section]; thissectionsize = sectsize[section] >> 3; for (codeaddr = 0; codeaddr < thissectionsize; codeaddr++) { /* Write out instruction address */ slic_reg32_write(&slic_regs->slic_wcs, baseaddress + codeaddr, FLUSH); /* Write out instruction to low addr */ slic_reg32_write(&slic_regs->slic_wcs, instruction, FLUSH); instruction = *(u32 *)(fw->data + index); index += 4; /* Write out instruction to high addr */ slic_reg32_write(&slic_regs->slic_wcs, instruction, FLUSH); instruction = *(u32 *)(fw->data + index); index += 4; } } index = ucode_start; for (section = 0; section < numsects; section++) { instruction = *(u32 *)(fw->data + index); baseaddress = sectstart[section]; if (baseaddress < 0x8000) continue; thissectionsize = sectsize[section] >> 3; for (codeaddr = 0; codeaddr < thissectionsize; codeaddr++) { /* Write out instruction address */ slic_reg32_write(&slic_regs->slic_wcs, SLIC_WCS_COMPARE | (baseaddress + codeaddr), FLUSH); /* Write out instruction to low addr */ slic_reg32_write(&slic_regs->slic_wcs, instruction, FLUSH); instruction = *(u32 *)(fw->data + index); index += 4; /* Write out instruction to high addr */ slic_reg32_write(&slic_regs->slic_wcs, instruction, FLUSH); instruction = *(u32 *)(fw->data + index); index += 4; /* Check SRAM location zero. If it is non-zero. Abort.*/ /* failure = readl((u32 __iomem *)&slic_regs->slic_reset); if (failure) { release_firmware(fw); return -EIO; }*/ } } release_firmware(fw); /* Everything OK, kick off the card */ mdelay(10); slic_reg32_write(&slic_regs->slic_wcs, SLIC_WCS_START, FLUSH); /* stall for 20 ms, long enough for ucode to init card and reach mainloop */ mdelay(20); return 0; } MODULE_FIRMWARE("slicoss/oasisdownload.sys"); MODULE_FIRMWARE("slicoss/gbdownload.sys"); static void slic_adapter_set_hwaddr(struct adapter *adapter) { struct sliccard *card = adapter->card; if ((adapter->card) && (card->config_set)) { memcpy(adapter->macaddr, card->config.MacInfo[adapter->functionnumber].macaddrA, sizeof(struct slic_config_mac)); if (!(adapter->currmacaddr[0] || adapter->currmacaddr[1] || adapter->currmacaddr[2] || adapter->currmacaddr[3] || adapter->currmacaddr[4] || adapter->currmacaddr[5])) { memcpy(adapter->currmacaddr, adapter->macaddr, 6); } if (adapter->netdev) { memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6); } } } static void slic_intagg_set(struct adapter *adapter, u32 value) { slic_reg32_write(&adapter->slic_regs->slic_intagg, value, FLUSH); adapter->card->loadlevel_current = value; } static void slic_soft_reset(struct adapter *adapter) { if (adapter->card->state == CARD_UP) { slic_reg32_write(&adapter->slic_regs->slic_quiesce, 0, FLUSH); mdelay(1); } slic_reg32_write(&adapter->slic_regs->slic_reset, SLIC_RESET_MAGIC, FLUSH); mdelay(1); } static void slic_mac_address_config(struct adapter *adapter) { u32 value; u32 value2; __iomem struct slic_regs *slic_regs = adapter->slic_regs; value = *(u32 *) &adapter->currmacaddr[2]; value = ntohl(value); slic_reg32_write(&slic_regs->slic_wraddral, value, FLUSH); slic_reg32_write(&slic_regs->slic_wraddrbl, value, FLUSH); value2 = (u32) ((adapter->currmacaddr[0] << 8 | adapter->currmacaddr[1]) & 0xFFFF); slic_reg32_write(&slic_regs->slic_wraddrah, value2, FLUSH); 
slic_reg32_write(&slic_regs->slic_wraddrbh, value2, FLUSH); /* Write our multicast mask out to the card. This is done */ /* here in addition to the slic_mcast_addr_set routine */ /* because ALL_MCAST may have been enabled or disabled */ slic_mcast_set_mask(adapter); } static void slic_mac_config(struct adapter *adapter) { u32 value; __iomem struct slic_regs *slic_regs = adapter->slic_regs; /* Setup GMAC gaps */ if (adapter->linkspeed == LINK_1000MB) { value = ((GMCR_GAPBB_1000 << GMCR_GAPBB_SHIFT) | (GMCR_GAPR1_1000 << GMCR_GAPR1_SHIFT) | (GMCR_GAPR2_1000 << GMCR_GAPR2_SHIFT)); } else { value = ((GMCR_GAPBB_100 << GMCR_GAPBB_SHIFT) | (GMCR_GAPR1_100 << GMCR_GAPR1_SHIFT) | (GMCR_GAPR2_100 << GMCR_GAPR2_SHIFT)); } /* enable GMII */ if (adapter->linkspeed == LINK_1000MB) value |= GMCR_GBIT; /* enable fullduplex */ if ((adapter->linkduplex == LINK_FULLD) || (adapter->macopts & MAC_LOOPBACK)) { value |= GMCR_FULLD; } /* write mac config */ slic_reg32_write(&slic_regs->slic_wmcfg, value, FLUSH); /* setup mac addresses */ slic_mac_address_config(adapter); } static void slic_config_set(struct adapter *adapter, bool linkchange) { u32 value; u32 RcrReset; __iomem struct slic_regs *slic_regs = adapter->slic_regs; if (linkchange) { /* Setup MAC */ slic_mac_config(adapter); RcrReset = GRCR_RESET; } else { slic_mac_address_config(adapter); RcrReset = 0; } if (adapter->linkduplex == LINK_FULLD) { /* setup xmtcfg */ value = (GXCR_RESET | /* Always reset */ GXCR_XMTEN | /* Enable transmit */ GXCR_PAUSEEN); /* Enable pause */ slic_reg32_write(&slic_regs->slic_wxcfg, value, FLUSH); /* Setup rcvcfg last */ value = (RcrReset | /* Reset, if linkchange */ GRCR_CTLEN | /* Enable CTL frames */ GRCR_ADDRAEN | /* Address A enable */ GRCR_RCVBAD | /* Rcv bad frames */ (GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT)); } else { /* setup xmtcfg */ value = (GXCR_RESET | /* Always reset */ GXCR_XMTEN); /* Enable transmit */ slic_reg32_write(&slic_regs->slic_wxcfg, value, FLUSH); /* Setup rcvcfg last */ value = (RcrReset | /* Reset, if linkchange */ GRCR_ADDRAEN | /* Address A enable */ GRCR_RCVBAD | /* Rcv bad frames */ (GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT)); } if (adapter->state != ADAPT_DOWN) { /* Only enable receive if we are restarting or running */ value |= GRCR_RCVEN; } if (adapter->macopts & MAC_PROMISC) value |= GRCR_RCVALL; slic_reg32_write(&slic_regs->slic_wrcfg, value, FLUSH); } /* * Turn off RCV and XMT, power down PHY */ static void slic_config_clear(struct adapter *adapter) { u32 value; u32 phy_config; __iomem struct slic_regs *slic_regs = adapter->slic_regs; /* Setup xmtcfg */ value = (GXCR_RESET | /* Always reset */ GXCR_PAUSEEN); /* Enable pause */ slic_reg32_write(&slic_regs->slic_wxcfg, value, FLUSH); value = (GRCR_RESET | /* Always reset */ GRCR_CTLEN | /* Enable CTL frames */ GRCR_ADDRAEN | /* Address A enable */ (GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT)); slic_reg32_write(&slic_regs->slic_wrcfg, value, FLUSH); /* power down phy */ phy_config = (MIICR_REG_PCR | (PCR_POWERDOWN)); slic_reg32_write(&slic_regs->slic_wphy, phy_config, FLUSH); } static bool slic_mac_filter(struct adapter *adapter, struct ether_header *ether_frame) { struct net_device *netdev = adapter->netdev; u32 opts = adapter->macopts; u32 *dhost4 = (u32 *)&ether_frame->ether_dhost[0]; u16 *dhost2 = (u16 *)&ether_frame->ether_dhost[4]; if (opts & MAC_PROMISC) return true; if ((*dhost4 == 0xFFFFFFFF) && (*dhost2 == 0xFFFF)) { if (opts & MAC_BCAST) { adapter->rcv_broadcasts++; return true; } else { return false; } } if (ether_frame->ether_dhost[0] & 
0x01) { if (opts & MAC_ALLMCAST) { adapter->rcv_multicasts++; netdev->stats.multicast++; return true; } if (opts & MAC_MCAST) { struct mcast_address *mcaddr = adapter->mcastaddrs; while (mcaddr) { if (!compare_ether_addr(mcaddr->address, ether_frame->ether_dhost)) { adapter->rcv_multicasts++; netdev->stats.multicast++; return true; } mcaddr = mcaddr->next; } return false; } else { return false; } } if (opts & MAC_DIRECTED) { adapter->rcv_unicasts++; return true; } return false; } static int slic_mac_set_address(struct net_device *dev, void *ptr) { struct adapter *adapter = netdev_priv(dev); struct sockaddr *addr = ptr; if (netif_running(dev)) return -EBUSY; if (!adapter) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EINVAL; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len); slic_config_set(adapter, true); return 0; } static void slic_timer_load_check(ulong cardaddr) { struct sliccard *card = (struct sliccard *)cardaddr; struct adapter *adapter = card->master; u32 __iomem *intagg; u32 load = card->events; u32 level = 0; if ((adapter) && (adapter->state == ADAPT_UP) && (card->state == CARD_UP) && (slic_global.dynamic_intagg)) { intagg = &adapter->slic_regs->slic_intagg; if (adapter->devid == SLIC_1GB_DEVICE_ID) { if (adapter->linkspeed == LINK_1000MB) level = 100; else { if (load > SLIC_LOAD_5) level = SLIC_INTAGG_5; else if (load > SLIC_LOAD_4) level = SLIC_INTAGG_4; else if (load > SLIC_LOAD_3) level = SLIC_INTAGG_3; else if (load > SLIC_LOAD_2) level = SLIC_INTAGG_2; else if (load > SLIC_LOAD_1) level = SLIC_INTAGG_1; else level = SLIC_INTAGG_0; } if (card->loadlevel_current != level) { card->loadlevel_current = level; slic_reg32_write(intagg, level, FLUSH); } } else { if (load > SLIC_LOAD_5) level = SLIC_INTAGG_5; else if (load > SLIC_LOAD_4) level = SLIC_INTAGG_4; else if (load > SLIC_LOAD_3) level = SLIC_INTAGG_3; else if (load > SLIC_LOAD_2) level = SLIC_INTAGG_2; else if (load > SLIC_LOAD_1) level = SLIC_INTAGG_1; else level = SLIC_INTAGG_0; if (card->loadlevel_current != level) { card->loadlevel_current = level; slic_reg32_write(intagg, level, FLUSH); } } } card->events = 0; card->loadtimer.expires = jiffies + (SLIC_LOADTIMER_PERIOD * HZ); add_timer(&card->loadtimer); } static int slic_upr_queue_request(struct adapter *adapter, u32 upr_request, u32 upr_data, u32 upr_data_h, u32 upr_buffer, u32 upr_buffer_h) { struct slic_upr *upr; struct slic_upr *uprqueue; upr = kmalloc(sizeof(struct slic_upr), GFP_ATOMIC); if (!upr) return -ENOMEM; upr->adapter = adapter->port; upr->upr_request = upr_request; upr->upr_data = upr_data; upr->upr_buffer = upr_buffer; upr->upr_data_h = upr_data_h; upr->upr_buffer_h = upr_buffer_h; upr->next = NULL; if (adapter->upr_list) { uprqueue = adapter->upr_list; while (uprqueue->next) uprqueue = uprqueue->next; uprqueue->next = upr; } else { adapter->upr_list = upr; } return 0; } static void slic_upr_start(struct adapter *adapter) { struct slic_upr *upr; __iomem struct slic_regs *slic_regs = adapter->slic_regs; /* char * ptr1; char * ptr2; uint cmdoffset; */ upr = adapter->upr_list; if (!upr) return; if (adapter->upr_busy) return; adapter->upr_busy = 1; switch (upr->upr_request) { case SLIC_UPR_STATS: if (upr->upr_data_h == 0) { slic_reg32_write(&slic_regs->slic_stats, upr->upr_data, FLUSH); } else { slic_reg64_write(adapter, &slic_regs->slic_stats64, upr->upr_data, &slic_regs->slic_addr_upper, upr->upr_data_h, FLUSH); } break; case SLIC_UPR_RLSR: slic_reg64_write(adapter, 
&slic_regs->slic_rlsr, upr->upr_data, &slic_regs->slic_addr_upper, upr->upr_data_h, FLUSH); break; case SLIC_UPR_RCONFIG: slic_reg64_write(adapter, &slic_regs->slic_rconfig, upr->upr_data, &slic_regs->slic_addr_upper, upr->upr_data_h, FLUSH); break; case SLIC_UPR_PING: slic_reg32_write(&slic_regs->slic_ping, 1, FLUSH); break; } } static int slic_upr_request(struct adapter *adapter, u32 upr_request, u32 upr_data, u32 upr_data_h, u32 upr_buffer, u32 upr_buffer_h) { int rc; spin_lock_irqsave(&adapter->upr_lock.lock, adapter->upr_lock.flags); rc = slic_upr_queue_request(adapter, upr_request, upr_data, upr_data_h, upr_buffer, upr_buffer_h); if (rc) goto err_unlock_irq; slic_upr_start(adapter); err_unlock_irq: spin_unlock_irqrestore(&adapter->upr_lock.lock, adapter->upr_lock.flags); return rc; } static void slic_link_upr_complete(struct adapter *adapter, u32 isr) { u32 linkstatus = adapter->pshmem->linkstatus; uint linkup; unsigned char linkspeed; unsigned char linkduplex; if ((isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) { struct slic_shmem *pshmem; pshmem = (struct slic_shmem *)(unsigned long) adapter->phys_shmem; #if BITS_PER_LONG == 64 slic_upr_queue_request(adapter, SLIC_UPR_RLSR, SLIC_GET_ADDR_LOW(&pshmem->linkstatus), SLIC_GET_ADDR_HIGH(&pshmem->linkstatus), 0, 0); #else slic_upr_queue_request(adapter, SLIC_UPR_RLSR, (u32) &pshmem->linkstatus, SLIC_GET_ADDR_HIGH(pshmem), 0, 0); #endif return; } if (adapter->state != ADAPT_UP) return; linkup = linkstatus & GIG_LINKUP ? LINK_UP : LINK_DOWN; if (linkstatus & GIG_SPEED_1000) linkspeed = LINK_1000MB; else if (linkstatus & GIG_SPEED_100) linkspeed = LINK_100MB; else linkspeed = LINK_10MB; if (linkstatus & GIG_FULLDUPLEX) linkduplex = LINK_FULLD; else linkduplex = LINK_HALFD; if ((adapter->linkstate == LINK_DOWN) && (linkup == LINK_DOWN)) return; /* link up event, but nothing has changed */ if ((adapter->linkstate == LINK_UP) && (linkup == LINK_UP) && (adapter->linkspeed == linkspeed) && (adapter->linkduplex == linkduplex)) return; /* link has changed at this point */ /* link has gone from up to down */ if (linkup == LINK_DOWN) { adapter->linkstate = LINK_DOWN; return; } /* link has gone from down to up */ adapter->linkspeed = linkspeed; adapter->linkduplex = linkduplex; if (adapter->linkstate != LINK_UP) { /* setup the mac */ slic_config_set(adapter, true); adapter->linkstate = LINK_UP; netif_start_queue(adapter->netdev); } } static void slic_upr_request_complete(struct adapter *adapter, u32 isr) { struct sliccard *card = adapter->card; struct slic_upr *upr; spin_lock_irqsave(&adapter->upr_lock.lock, adapter->upr_lock.flags); upr = adapter->upr_list; if (!upr) { spin_unlock_irqrestore(&adapter->upr_lock.lock, adapter->upr_lock.flags); return; } adapter->upr_list = upr->next; upr->next = NULL; adapter->upr_busy = 0; switch (upr->upr_request) { case SLIC_UPR_STATS: { struct slic_stats *slicstats = (struct slic_stats *) &adapter->pshmem->inicstats; struct slic_stats *newstats = slicstats; struct slic_stats *old = &adapter->inicstats_prev; struct slicnet_stats *stst = &adapter->slic_stats; if (isr & ISR_UPCERR) { dev_err(&adapter->netdev->dev, "SLIC_UPR_STATS command failed isr[%x]\n", isr); break; } UPDATE_STATS_GB(stst->tcp.xmit_tcp_segs, newstats->xmit_tcp_segs_gb, old->xmit_tcp_segs_gb); UPDATE_STATS_GB(stst->tcp.xmit_tcp_bytes, newstats->xmit_tcp_bytes_gb, old->xmit_tcp_bytes_gb); UPDATE_STATS_GB(stst->tcp.rcv_tcp_segs, newstats->rcv_tcp_segs_gb, old->rcv_tcp_segs_gb); UPDATE_STATS_GB(stst->tcp.rcv_tcp_bytes, newstats->rcv_tcp_bytes_gb, 
					old->rcv_tcp_bytes_gb);

		UPDATE_STATS_GB(stst->iface.xmt_bytes,
				newstats->xmit_bytes_gb,
				old->xmit_bytes_gb);

		UPDATE_STATS_GB(stst->iface.xmt_ucast,
				newstats->xmit_unicasts_gb,
				old->xmit_unicasts_gb);

		UPDATE_STATS_GB(stst->iface.rcv_bytes,
				newstats->rcv_bytes_gb,
				old->rcv_bytes_gb);

		UPDATE_STATS_GB(stst->iface.rcv_ucast,
				newstats->rcv_unicasts_gb,
				old->rcv_unicasts_gb);

		UPDATE_STATS_GB(stst->iface.xmt_errors,
				newstats->xmit_collisions_gb,
				old->xmit_collisions_gb);

		UPDATE_STATS_GB(stst->iface.xmt_errors,
				newstats->xmit_excess_collisions_gb,
				old->xmit_excess_collisions_gb);

		UPDATE_STATS_GB(stst->iface.xmt_errors,
				newstats->xmit_other_error_gb,
				old->xmit_other_error_gb);

		UPDATE_STATS_GB(stst->iface.rcv_errors,
				newstats->rcv_other_error_gb,
				old->rcv_other_error_gb);

		UPDATE_STATS_GB(stst->iface.rcv_discards,
				newstats->rcv_drops_gb,
				old->rcv_drops_gb);

		if (newstats->rcv_drops_gb > old->rcv_drops_gb) {
			adapter->rcv_drops += (newstats->rcv_drops_gb -
					       old->rcv_drops_gb);
		}
		memcpy(old, newstats, sizeof(struct slic_stats));
		break;
	}
	case SLIC_UPR_RLSR:
		slic_link_upr_complete(adapter, isr);
		break;
	case SLIC_UPR_RCONFIG:
		break;
	case SLIC_UPR_PING:
		card->pingstatus |= (isr & ISR_PINGDSMASK);
		break;
	}
	kfree(upr);
	slic_upr_start(adapter);
	spin_unlock_irqrestore(&adapter->upr_lock.lock,
				adapter->upr_lock.flags);
}

static void slic_config_get(struct adapter *adapter, u32 config, u32 config_h)
{
	int status;

	status = slic_upr_request(adapter,
				  SLIC_UPR_RCONFIG,
				  (u32) config, (u32) config_h, 0, 0);
}

/*
 * this is here to checksum the eeprom, there is some ucode bug
 * which prevents us from using the ucode result.
 * remove this once ucode is fixed.
 */
static ushort slic_eeprom_cksum(char *m, int len)
{
#define ADDCARRY(x) (x > 65535 ? x -= 65535 : x)
#define REDUCE {l_util.l = sum; sum = l_util.s[0] + l_util.s[1]; ADDCARRY(sum);\
	}

	u16 *w;
	u32 sum = 0;
	u32 byte_swapped = 0;
	u32 w_int;

	union {
		char c[2];
		ushort s;
	} s_util;

	union {
		ushort s[2];
		int l;
	} l_util;

	l_util.l = 0;
	s_util.s = 0;

	w = (u16 *)m;
#if BITS_PER_LONG == 64
	w_int = (u32) ((ulong) w & 0x00000000FFFFFFFF);
#else
	w_int = (u32) (w);
#endif
	if ((1 & w_int) && (len > 0)) {
		REDUCE;
		sum <<= 8;
		s_util.c[0] = *(unsigned char *)w;
		w = (u16 *)((char *)w + 1);
		len--;
		byte_swapped = 1;
	}

	/* Unroll the loop to make overhead from branches &c small.
*/ while ((len -= 32) >= 0) { sum += w[0]; sum += w[1]; sum += w[2]; sum += w[3]; sum += w[4]; sum += w[5]; sum += w[6]; sum += w[7]; sum += w[8]; sum += w[9]; sum += w[10]; sum += w[11]; sum += w[12]; sum += w[13]; sum += w[14]; sum += w[15]; w = (u16 *)((ulong) w + 16); /* verify */ } len += 32; while ((len -= 8) >= 0) { sum += w[0]; sum += w[1]; sum += w[2]; sum += w[3]; w = (u16 *)((ulong) w + 4); /* verify */ } len += 8; if (len != 0 || byte_swapped != 0) { REDUCE; while ((len -= 2) >= 0) sum += *w++; /* verify */ if (byte_swapped) { REDUCE; sum <<= 8; byte_swapped = 0; if (len == -1) { s_util.c[1] = *(char *) w; sum += s_util.s; len = 0; } else { len = -1; } } else if (len == -1) { s_util.c[0] = *(char *) w; } if (len == -1) { s_util.c[1] = 0; sum += s_util.s; } } REDUCE; return (ushort) sum; } static void slic_rspqueue_free(struct adapter *adapter) { int i; struct slic_rspqueue *rspq = &adapter->rspqueue; for (i = 0; i < rspq->num_pages; i++) { if (rspq->vaddr[i]) { pci_free_consistent(adapter->pcidev, PAGE_SIZE, rspq->vaddr[i], rspq->paddr[i]); } rspq->vaddr[i] = NULL; rspq->paddr[i] = 0; } rspq->offset = 0; rspq->pageindex = 0; rspq->rspbuf = NULL; } static int slic_rspqueue_init(struct adapter *adapter) { int i; struct slic_rspqueue *rspq = &adapter->rspqueue; __iomem struct slic_regs *slic_regs = adapter->slic_regs; u32 paddrh = 0; memset(rspq, 0, sizeof(struct slic_rspqueue)); rspq->num_pages = SLIC_RSPQ_PAGES_GB; for (i = 0; i < rspq->num_pages; i++) { rspq->vaddr[i] = pci_alloc_consistent(adapter->pcidev, PAGE_SIZE, &rspq->paddr[i]); if (!rspq->vaddr[i]) { dev_err(&adapter->pcidev->dev, "pci_alloc_consistent failed\n"); slic_rspqueue_free(adapter); return -ENOMEM; } /* FIXME: * do we really need this assertions (4K PAGE_SIZE aligned addr)? 
*/ memset(rspq->vaddr[i], 0, PAGE_SIZE); if (paddrh == 0) { slic_reg32_write(&slic_regs->slic_rbar, (rspq->paddr[i] | SLIC_RSPQ_BUFSINPAGE), DONT_FLUSH); } else { slic_reg64_write(adapter, &slic_regs->slic_rbar64, (rspq->paddr[i] | SLIC_RSPQ_BUFSINPAGE), &slic_regs->slic_addr_upper, paddrh, DONT_FLUSH); } } rspq->offset = 0; rspq->pageindex = 0; rspq->rspbuf = (struct slic_rspbuf *)rspq->vaddr[0]; return 0; } static struct slic_rspbuf *slic_rspqueue_getnext(struct adapter *adapter) { struct slic_rspqueue *rspq = &adapter->rspqueue; struct slic_rspbuf *buf; if (!(rspq->rspbuf->status)) return NULL; buf = rspq->rspbuf; if (++rspq->offset < SLIC_RSPQ_BUFSINPAGE) { rspq->rspbuf++; } else { slic_reg64_write(adapter, &adapter->slic_regs->slic_rbar64, (rspq->paddr[rspq->pageindex] | SLIC_RSPQ_BUFSINPAGE), &adapter->slic_regs->slic_addr_upper, 0, DONT_FLUSH); rspq->pageindex = (rspq->pageindex + 1) % rspq->num_pages; rspq->offset = 0; rspq->rspbuf = (struct slic_rspbuf *) rspq->vaddr[rspq->pageindex]; } return buf; } static void slic_cmdqmem_free(struct adapter *adapter) { struct slic_cmdqmem *cmdqmem = &adapter->cmdqmem; int i; for (i = 0; i < SLIC_CMDQ_MAXPAGES; i++) { if (cmdqmem->pages[i]) { pci_free_consistent(adapter->pcidev, PAGE_SIZE, (void *) cmdqmem->pages[i], cmdqmem->dma_pages[i]); } } memset(cmdqmem, 0, sizeof(struct slic_cmdqmem)); } static u32 *slic_cmdqmem_addpage(struct adapter *adapter) { struct slic_cmdqmem *cmdqmem = &adapter->cmdqmem; u32 *pageaddr; if (cmdqmem->pagecnt >= SLIC_CMDQ_MAXPAGES) return NULL; pageaddr = pci_alloc_consistent(adapter->pcidev, PAGE_SIZE, &cmdqmem->dma_pages[cmdqmem->pagecnt]); if (!pageaddr) return NULL; cmdqmem->pages[cmdqmem->pagecnt] = pageaddr; cmdqmem->pagecnt++; return pageaddr; } static void slic_cmdq_free(struct adapter *adapter) { struct slic_hostcmd *cmd; cmd = adapter->cmdq_all.head; while (cmd) { if (cmd->busy) { struct sk_buff *tempskb; tempskb = cmd->skb; if (tempskb) { cmd->skb = NULL; dev_kfree_skb_irq(tempskb); } } cmd = cmd->next_all; } memset(&adapter->cmdq_all, 0, sizeof(struct slic_cmdqueue)); memset(&adapter->cmdq_free, 0, sizeof(struct slic_cmdqueue)); memset(&adapter->cmdq_done, 0, sizeof(struct slic_cmdqueue)); slic_cmdqmem_free(adapter); } static void slic_cmdq_addcmdpage(struct adapter *adapter, u32 *page) { struct slic_hostcmd *cmd; struct slic_hostcmd *prev; struct slic_hostcmd *tail; struct slic_cmdqueue *cmdq; int cmdcnt; void *cmdaddr; ulong phys_addr; u32 phys_addrl; u32 phys_addrh; struct slic_handle *pslic_handle; cmdaddr = page; cmd = (struct slic_hostcmd *)cmdaddr; cmdcnt = 0; phys_addr = virt_to_bus((void *)page); phys_addrl = SLIC_GET_ADDR_LOW(phys_addr); phys_addrh = SLIC_GET_ADDR_HIGH(phys_addr); prev = NULL; tail = cmd; while ((cmdcnt < SLIC_CMDQ_CMDSINPAGE) && (adapter->slic_handle_ix < 256)) { /* Allocate and initialize a SLIC_HANDLE for this command */ SLIC_GET_SLIC_HANDLE(adapter, pslic_handle); pslic_handle->type = SLIC_HANDLE_CMD; pslic_handle->address = (void *) cmd; pslic_handle->offset = (ushort) adapter->slic_handle_ix++; pslic_handle->other_handle = NULL; pslic_handle->next = NULL; cmd->pslic_handle = pslic_handle; cmd->cmd64.hosthandle = pslic_handle->token.handle_token; cmd->busy = false; cmd->paddrl = phys_addrl; cmd->paddrh = phys_addrh; cmd->next_all = prev; cmd->next = prev; prev = cmd; phys_addrl += SLIC_HOSTCMD_SIZE; cmdaddr += SLIC_HOSTCMD_SIZE; cmd = (struct slic_hostcmd *)cmdaddr; cmdcnt++; } cmdq = &adapter->cmdq_all; cmdq->count += cmdcnt; /* SLIC_CMDQ_CMDSINPAGE; mooktodo */ 
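	/*
	 * Splice the freshly initialized command chain onto the head of the
	 * all-commands list, then onto the free list; only the free list
	 * needs its lock held here.
	 */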
tail->next_all = cmdq->head; cmdq->head = prev; cmdq = &adapter->cmdq_free; spin_lock_irqsave(&cmdq->lock.lock, cmdq->lock.flags); cmdq->count += cmdcnt; /* SLIC_CMDQ_CMDSINPAGE; mooktodo */ tail->next = cmdq->head; cmdq->head = prev; spin_unlock_irqrestore(&cmdq->lock.lock, cmdq->lock.flags); } static int slic_cmdq_init(struct adapter *adapter) { int i; u32 *pageaddr; memset(&adapter->cmdq_all, 0, sizeof(struct slic_cmdqueue)); memset(&adapter->cmdq_free, 0, sizeof(struct slic_cmdqueue)); memset(&adapter->cmdq_done, 0, sizeof(struct slic_cmdqueue)); spin_lock_init(&adapter->cmdq_all.lock.lock); spin_lock_init(&adapter->cmdq_free.lock.lock); spin_lock_init(&adapter->cmdq_done.lock.lock); memset(&adapter->cmdqmem, 0, sizeof(struct slic_cmdqmem)); adapter->slic_handle_ix = 1; for (i = 0; i < SLIC_CMDQ_INITPAGES; i++) { pageaddr = slic_cmdqmem_addpage(adapter); if (!pageaddr) { slic_cmdq_free(adapter); return -ENOMEM; } slic_cmdq_addcmdpage(adapter, pageaddr); } adapter->slic_handle_ix = 1; return 0; } static void slic_cmdq_reset(struct adapter *adapter) { struct slic_hostcmd *hcmd; struct sk_buff *skb; u32 outstanding; spin_lock_irqsave(&adapter->cmdq_free.lock.lock, adapter->cmdq_free.lock.flags); spin_lock_irqsave(&adapter->cmdq_done.lock.lock, adapter->cmdq_done.lock.flags); outstanding = adapter->cmdq_all.count - adapter->cmdq_done.count; outstanding -= adapter->cmdq_free.count; hcmd = adapter->cmdq_all.head; while (hcmd) { if (hcmd->busy) { skb = hcmd->skb; hcmd->busy = 0; hcmd->skb = NULL; dev_kfree_skb_irq(skb); } hcmd = hcmd->next_all; } adapter->cmdq_free.count = 0; adapter->cmdq_free.head = NULL; adapter->cmdq_free.tail = NULL; adapter->cmdq_done.count = 0; adapter->cmdq_done.head = NULL; adapter->cmdq_done.tail = NULL; adapter->cmdq_free.head = adapter->cmdq_all.head; hcmd = adapter->cmdq_all.head; while (hcmd) { adapter->cmdq_free.count++; hcmd->next = hcmd->next_all; hcmd = hcmd->next_all; } if (adapter->cmdq_free.count != adapter->cmdq_all.count) { dev_err(&adapter->netdev->dev, "free_count %d != all count %d\n", adapter->cmdq_free.count, adapter->cmdq_all.count); } spin_unlock_irqrestore(&adapter->cmdq_done.lock.lock, adapter->cmdq_done.lock.flags); spin_unlock_irqrestore(&adapter->cmdq_free.lock.lock, adapter->cmdq_free.lock.flags); } static void slic_cmdq_getdone(struct adapter *adapter) { struct slic_cmdqueue *done_cmdq = &adapter->cmdq_done; struct slic_cmdqueue *free_cmdq = &adapter->cmdq_free; spin_lock_irqsave(&done_cmdq->lock.lock, done_cmdq->lock.flags); free_cmdq->head = done_cmdq->head; free_cmdq->count = done_cmdq->count; done_cmdq->head = NULL; done_cmdq->tail = NULL; done_cmdq->count = 0; spin_unlock_irqrestore(&done_cmdq->lock.lock, done_cmdq->lock.flags); } static struct slic_hostcmd *slic_cmdq_getfree(struct adapter *adapter) { struct slic_cmdqueue *cmdq = &adapter->cmdq_free; struct slic_hostcmd *cmd = NULL; lock_and_retry: spin_lock_irqsave(&cmdq->lock.lock, cmdq->lock.flags); retry: cmd = cmdq->head; if (cmd) { cmdq->head = cmd->next; cmdq->count--; spin_unlock_irqrestore(&cmdq->lock.lock, cmdq->lock.flags); } else { slic_cmdq_getdone(adapter); cmd = cmdq->head; if (cmd) { goto retry; } else { u32 *pageaddr; spin_unlock_irqrestore(&cmdq->lock.lock, cmdq->lock.flags); pageaddr = slic_cmdqmem_addpage(adapter); if (pageaddr) { slic_cmdq_addcmdpage(adapter, pageaddr); goto lock_and_retry; } } } return cmd; } static void slic_cmdq_putdone_irq(struct adapter *adapter, struct slic_hostcmd *cmd) { struct slic_cmdqueue *cmdq = &adapter->cmdq_done; 
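	/*
	 * Runs in interrupt context: completed commands are parked on the
	 * done queue, and slic_cmdq_getfree() later recycles them in bulk
	 * via slic_cmdq_getdone() when the free queue runs dry.
	 */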
spin_lock(&cmdq->lock.lock); cmd->busy = 0; cmd->next = cmdq->head; cmdq->head = cmd; cmdq->count++; if ((adapter->xmitq_full) && (cmdq->count > 10)) netif_wake_queue(adapter->netdev); spin_unlock(&cmdq->lock.lock); } static int slic_rcvqueue_fill(struct adapter *adapter) { void *paddr; u32 paddrl; u32 paddrh; struct slic_rcvqueue *rcvq = &adapter->rcvqueue; int i = 0; struct device *dev = &adapter->netdev->dev; while (i < SLIC_RCVQ_FILLENTRIES) { struct slic_rcvbuf *rcvbuf; struct sk_buff *skb; #ifdef KLUDGE_FOR_4GB_BOUNDARY retry_rcvqfill: #endif skb = alloc_skb(SLIC_RCVQ_RCVBUFSIZE, GFP_ATOMIC); if (skb) { paddr = (void *)(unsigned long) pci_map_single(adapter->pcidev, skb->data, SLIC_RCVQ_RCVBUFSIZE, PCI_DMA_FROMDEVICE); paddrl = SLIC_GET_ADDR_LOW(paddr); paddrh = SLIC_GET_ADDR_HIGH(paddr); skb->len = SLIC_RCVBUF_HEADSIZE; rcvbuf = (struct slic_rcvbuf *)skb->head; rcvbuf->status = 0; skb->next = NULL; #ifdef KLUDGE_FOR_4GB_BOUNDARY if (paddrl == 0) { dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n", __func__); dev_err(dev, "skb[%p] PROBLEM\n", skb); dev_err(dev, " skbdata[%p]\n", skb->data); dev_err(dev, " skblen[%x]\n", skb->len); dev_err(dev, " paddr[%p]\n", paddr); dev_err(dev, " paddrl[%x]\n", paddrl); dev_err(dev, " paddrh[%x]\n", paddrh); dev_err(dev, " rcvq->head[%p]\n", rcvq->head); dev_err(dev, " rcvq->tail[%p]\n", rcvq->tail); dev_err(dev, " rcvq->count[%x]\n", rcvq->count); dev_err(dev, "SKIP THIS SKB!!!!!!!!\n"); goto retry_rcvqfill; } #else if (paddrl == 0) { dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n", __func__); dev_err(dev, "skb[%p] PROBLEM\n", skb); dev_err(dev, " skbdata[%p]\n", skb->data); dev_err(dev, " skblen[%x]\n", skb->len); dev_err(dev, " paddr[%p]\n", paddr); dev_err(dev, " paddrl[%x]\n", paddrl); dev_err(dev, " paddrh[%x]\n", paddrh); dev_err(dev, " rcvq->head[%p]\n", rcvq->head); dev_err(dev, " rcvq->tail[%p]\n", rcvq->tail); dev_err(dev, " rcvq->count[%x]\n", rcvq->count); dev_err(dev, "GIVE TO CARD ANYWAY\n"); } #endif if (paddrh == 0) { slic_reg32_write(&adapter->slic_regs->slic_hbar, (u32)paddrl, DONT_FLUSH); } else { slic_reg64_write(adapter, &adapter->slic_regs->slic_hbar64, paddrl, &adapter->slic_regs->slic_addr_upper, paddrh, DONT_FLUSH); } if (rcvq->head) rcvq->tail->next = skb; else rcvq->head = skb; rcvq->tail = skb; rcvq->count++; i++; } else { dev_err(&adapter->netdev->dev, "slic_rcvqueue_fill could only get [%d] skbuffs\n", i); break; } } return i; } static void slic_rcvqueue_free(struct adapter *adapter) { struct slic_rcvqueue *rcvq = &adapter->rcvqueue; struct sk_buff *skb; while (rcvq->head) { skb = rcvq->head; rcvq->head = rcvq->head->next; dev_kfree_skb(skb); } rcvq->tail = NULL; rcvq->head = NULL; rcvq->count = 0; } static int slic_rcvqueue_init(struct adapter *adapter) { int i, count; struct slic_rcvqueue *rcvq = &adapter->rcvqueue; rcvq->tail = NULL; rcvq->head = NULL; rcvq->size = SLIC_RCVQ_ENTRIES; rcvq->errors = 0; rcvq->count = 0; i = (SLIC_RCVQ_ENTRIES / SLIC_RCVQ_FILLENTRIES); count = 0; while (i) { count += slic_rcvqueue_fill(adapter); i--; } if (rcvq->count < SLIC_RCVQ_MINENTRIES) { slic_rcvqueue_free(adapter); return -ENOMEM; } return 0; } static struct sk_buff *slic_rcvqueue_getnext(struct adapter *adapter) { struct slic_rcvqueue *rcvq = &adapter->rcvqueue; struct sk_buff *skb; struct slic_rcvbuf *rcvbuf; int count; if (rcvq->count) { skb = rcvq->head; rcvbuf = (struct slic_rcvbuf *)skb->head; if (rcvbuf->status & IRHDDR_SVALID) { rcvq->head = rcvq->head->next; skb->next = NULL; rcvq->count--; } else { skb = 
NULL; } } else { dev_err(&adapter->netdev->dev, "RcvQ Empty!! rcvq[%p] count[%x]\n", rcvq, rcvq->count); skb = NULL; } while (rcvq->count < SLIC_RCVQ_FILLTHRESH) { count = slic_rcvqueue_fill(adapter); if (!count) break; } if (skb) rcvq->errors = 0; return skb; } static u32 slic_rcvqueue_reinsert(struct adapter *adapter, struct sk_buff *skb) { struct slic_rcvqueue *rcvq = &adapter->rcvqueue; void *paddr; u32 paddrl; u32 paddrh; struct slic_rcvbuf *rcvbuf = (struct slic_rcvbuf *)skb->head; struct device *dev; paddr = (void *)(unsigned long) pci_map_single(adapter->pcidev, skb->head, SLIC_RCVQ_RCVBUFSIZE, PCI_DMA_FROMDEVICE); rcvbuf->status = 0; skb->next = NULL; paddrl = SLIC_GET_ADDR_LOW(paddr); paddrh = SLIC_GET_ADDR_HIGH(paddr); if (paddrl == 0) { dev = &adapter->netdev->dev; dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n", __func__); dev_err(dev, "skb[%p] PROBLEM\n", skb); dev_err(dev, " skbdata[%p]\n", skb->data); dev_err(dev, " skblen[%x]\n", skb->len); dev_err(dev, " paddr[%p]\n", paddr); dev_err(dev, " paddrl[%x]\n", paddrl); dev_err(dev, " paddrh[%x]\n", paddrh); dev_err(dev, " rcvq->head[%p]\n", rcvq->head); dev_err(dev, " rcvq->tail[%p]\n", rcvq->tail); dev_err(dev, " rcvq->count[%x]\n", rcvq->count); } if (paddrh == 0) { slic_reg32_write(&adapter->slic_regs->slic_hbar, (u32)paddrl, DONT_FLUSH); } else { slic_reg64_write(adapter, &adapter->slic_regs->slic_hbar64, paddrl, &adapter->slic_regs->slic_addr_upper, paddrh, DONT_FLUSH); } if (rcvq->head) rcvq->tail->next = skb; else rcvq->head = skb; rcvq->tail = skb; rcvq->count++; return rcvq->count; } static int slic_debug_card_show(struct seq_file *seq, void *v) { #ifdef MOOKTODO int i; struct sliccard *card = seq->private; struct slic_config *config = &card->config; unsigned char *fru = (unsigned char *)(&card->config.atk_fru); unsigned char *oemfru = (unsigned char *)(&card->config.OemFru); #endif seq_printf(seq, "driver_version : %s\n", slic_proc_version); seq_printf(seq, "Microcode versions: \n"); seq_printf(seq, " Gigabit (gb) : %s %s\n", MOJAVE_UCODE_VERS_STRING, MOJAVE_UCODE_VERS_DATE); seq_printf(seq, " Gigabit Receiver : %s %s\n", GB_RCVUCODE_VERS_STRING, GB_RCVUCODE_VERS_DATE); seq_printf(seq, "Vendor : %s\n", slic_vendor); seq_printf(seq, "Product Name : %s\n", slic_product_name); #ifdef MOOKTODO seq_printf(seq, "VendorId : %4.4X\n", config->VendorId); seq_printf(seq, "DeviceId : %4.4X\n", config->DeviceId); seq_printf(seq, "RevisionId : %2.2x\n", config->RevisionId); seq_printf(seq, "Bus # : %d\n", card->busnumber); seq_printf(seq, "Device # : %d\n", card->slotnumber); seq_printf(seq, "Interfaces : %d\n", card->card_size); seq_printf(seq, " Initialized : %d\n", card->adapters_activated); seq_printf(seq, " Allocated : %d\n", card->adapters_allocated); for (i = 0; i < card->card_size; i++) { seq_printf(seq, " MAC%d : %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n", i, config->macinfo[i].macaddrA[0], config->macinfo[i].macaddrA[1], config->macinfo[i].macaddrA[2], config->macinfo[i].macaddrA[3], config->macinfo[i].macaddrA[4], config->macinfo[i].macaddrA[5]); } seq_printf(seq, " IF Init State Duplex/Speed irq\n"); seq_printf(seq, " -------------------------------\n"); for (i = 0; i < card->adapters_allocated; i++) { struct adapter *adapter; adapter = card->adapter[i]; if (adapter) { seq_printf(seq, " %d %d %s %s %s 0x%X\n", adapter->physport, adapter->state, SLIC_LINKSTATE(adapter->linkstate), SLIC_DUPLEX(adapter->linkduplex), SLIC_SPEED(adapter->linkspeed), (uint) adapter->irq); } } seq_printf(seq, "Generation # : %4.4X\n", 
card->gennumber); seq_printf(seq, "RcvQ max entries : %4.4X\n", SLIC_RCVQ_ENTRIES); seq_printf(seq, "Ping Status : %8.8X\n", card->pingstatus); seq_printf(seq, "Minimum grant : %2.2x\n", config->MinGrant); seq_printf(seq, "Maximum Latency : %2.2x\n", config->MaxLat); seq_printf(seq, "PciStatus : %4.4x\n", config->Pcistatus); seq_printf(seq, "Debug Device Id : %4.4x\n", config->DbgDevId); seq_printf(seq, "DRAM ROM Function : %4.4x\n", config->DramRomFn); seq_printf(seq, "Network interface Pin 1 : %2.2x\n", config->NetIntPin1); seq_printf(seq, "Network interface Pin 2 : %2.2x\n", config->NetIntPin1); seq_printf(seq, "Network interface Pin 3 : %2.2x\n", config->NetIntPin1); seq_printf(seq, "PM capabilities : %4.4X\n", config->PMECapab); seq_printf(seq, "Network Clock Controls : %4.4X\n", config->NwClkCtrls); switch (config->FruFormat) { case ATK_FRU_FORMAT: { seq_printf(seq, "Vendor : Alacritech, Inc.\n"); seq_printf(seq, "Assembly # : %c%c%c%c%c%c\n", fru[0], fru[1], fru[2], fru[3], fru[4], fru[5]); seq_printf(seq, "Revision # : %c%c\n", fru[6], fru[7]); if (config->OEMFruFormat == VENDOR4_FRU_FORMAT) { seq_printf(seq, "Serial # : " "%c%c%c%c%c%c%c%c%c%c%c%c\n", fru[8], fru[9], fru[10], fru[11], fru[12], fru[13], fru[16], fru[17], fru[18], fru[19], fru[20], fru[21]); } else { seq_printf(seq, "Serial # : " "%c%c%c%c%c%c%c%c%c%c%c%c%c%c\n", fru[8], fru[9], fru[10], fru[11], fru[12], fru[13], fru[14], fru[15], fru[16], fru[17], fru[18], fru[19], fru[20], fru[21]); } break; } default: { seq_printf(seq, "Vendor : Alacritech, Inc.\n"); seq_printf(seq, "Serial # : Empty FRU\n"); break; } } switch (config->OEMFruFormat) { case VENDOR1_FRU_FORMAT: { seq_printf(seq, "FRU Information:\n"); seq_printf(seq, " Commodity # : %c\n", oemfru[0]); seq_printf(seq, " Assembly # : %c%c%c%c\n", oemfru[1], oemfru[2], oemfru[3], oemfru[4]); seq_printf(seq, " Revision # : %c%c\n", oemfru[5], oemfru[6]); seq_printf(seq, " Supplier # : %c%c\n", oemfru[7], oemfru[8]); seq_printf(seq, " Date : %c%c\n", oemfru[9], oemfru[10]); seq_sprintf(seq, " Sequence # : %c%c%c\n", oemfru[11], oemfru[12], oemfru[13]); break; } case VENDOR2_FRU_FORMAT: { seq_printf(seq, "FRU Information:\n"); seq_printf(seq, " Part # : " "%c%c%c%c%c%c%c%c\n", oemfru[0], oemfru[1], oemfru[2], oemfru[3], oemfru[4], oemfru[5], oemfru[6], oemfru[7]); seq_printf(seq, " Supplier # : %c%c%c%c%c\n", oemfru[8], oemfru[9], oemfru[10], oemfru[11], oemfru[12]); seq_printf(seq, " Date : %c%c%c\n", oemfru[13], oemfru[14], oemfru[15]); seq_sprintf(seq, " Sequence # : %c%c%c%c\n", oemfru[16], oemfru[17], oemfru[18], oemfru[19]); break; } case VENDOR3_FRU_FORMAT: { seq_printf(seq, "FRU Information:\n"); } case VENDOR4_FRU_FORMAT: { seq_printf(seq, "FRU Information:\n"); seq_printf(seq, " FRU Number : " "%c%c%c%c%c%c%c%c\n", oemfru[0], oemfru[1], oemfru[2], oemfru[3], oemfru[4], oemfru[5], oemfru[6], oemfru[7]); seq_sprintf(seq, " Part Number : " "%c%c%c%c%c%c%c%c\n", oemfru[8], oemfru[9], oemfru[10], oemfru[11], oemfru[12], oemfru[13], oemfru[14], oemfru[15]); seq_printf(seq, " EC Level : " "%c%c%c%c%c%c%c%c\n", oemfru[16], oemfru[17], oemfru[18], oemfru[19], oemfru[20], oemfru[21], oemfru[22], oemfru[23]); break; } default: break; } #endif return 0; } static int slic_debug_adapter_show(struct seq_file *seq, void *v) { struct adapter *adapter = seq->private; struct net_device *netdev = adapter->netdev; seq_printf(seq, "info: interface : %s\n", adapter->netdev->name); seq_printf(seq, "info: status : %s\n", SLIC_LINKSTATE(adapter->linkstate)); seq_printf(seq, "info: port 
: %d\n", adapter->physport); seq_printf(seq, "info: speed : %s\n", SLIC_SPEED(adapter->linkspeed)); seq_printf(seq, "info: duplex : %s\n", SLIC_DUPLEX(adapter->linkduplex)); seq_printf(seq, "info: irq : 0x%X\n", (uint) adapter->irq); seq_printf(seq, "info: Interrupt Agg Delay: %d usec\n", adapter->card->loadlevel_current); seq_printf(seq, "info: RcvQ max entries : %4.4X\n", SLIC_RCVQ_ENTRIES); seq_printf(seq, "info: RcvQ current : %4.4X\n", adapter->rcvqueue.count); seq_printf(seq, "rx stats: packets : %8.8lX\n", netdev->stats.rx_packets); seq_printf(seq, "rx stats: bytes : %8.8lX\n", netdev->stats.rx_bytes); seq_printf(seq, "rx stats: broadcasts : %8.8X\n", adapter->rcv_broadcasts); seq_printf(seq, "rx stats: multicasts : %8.8X\n", adapter->rcv_multicasts); seq_printf(seq, "rx stats: unicasts : %8.8X\n", adapter->rcv_unicasts); seq_printf(seq, "rx stats: errors : %8.8X\n", (u32) adapter->slic_stats.iface.rcv_errors); seq_printf(seq, "rx stats: Missed errors : %8.8X\n", (u32) adapter->slic_stats.iface.rcv_discards); seq_printf(seq, "rx stats: drops : %8.8X\n", (u32) adapter->rcv_drops); seq_printf(seq, "tx stats: packets : %8.8lX\n", netdev->stats.tx_packets); seq_printf(seq, "tx stats: bytes : %8.8lX\n", netdev->stats.tx_bytes); seq_printf(seq, "tx stats: errors : %8.8X\n", (u32) adapter->slic_stats.iface.xmt_errors); seq_printf(seq, "rx stats: multicasts : %8.8lX\n", netdev->stats.multicast); seq_printf(seq, "tx stats: collision errors : %8.8X\n", (u32) adapter->slic_stats.iface.xmit_collisions); seq_printf(seq, "perf: Max rcv frames/isr : %8.8X\n", adapter->max_isr_rcvs); seq_printf(seq, "perf: Rcv interrupt yields : %8.8X\n", adapter->rcv_interrupt_yields); seq_printf(seq, "perf: Max xmit complete/isr : %8.8X\n", adapter->max_isr_xmits); seq_printf(seq, "perf: error interrupts : %8.8X\n", adapter->error_interrupts); seq_printf(seq, "perf: error rmiss interrupts : %8.8X\n", adapter->error_rmiss_interrupts); seq_printf(seq, "perf: rcv interrupts : %8.8X\n", adapter->rcv_interrupts); seq_printf(seq, "perf: xmit interrupts : %8.8X\n", adapter->xmit_interrupts); seq_printf(seq, "perf: link event interrupts : %8.8X\n", adapter->linkevent_interrupts); seq_printf(seq, "perf: UPR interrupts : %8.8X\n", adapter->upr_interrupts); seq_printf(seq, "perf: interrupt count : %8.8X\n", adapter->num_isrs); seq_printf(seq, "perf: false interrupts : %8.8X\n", adapter->false_interrupts); seq_printf(seq, "perf: All register writes : %8.8X\n", adapter->all_reg_writes); seq_printf(seq, "perf: ICR register writes : %8.8X\n", adapter->icr_reg_writes); seq_printf(seq, "perf: ISR register writes : %8.8X\n", adapter->isr_reg_writes); seq_printf(seq, "ifevents: overflow 802 errors : %8.8X\n", adapter->if_events.oflow802); seq_printf(seq, "ifevents: transport overflow errors: %8.8X\n", adapter->if_events.Tprtoflow); seq_printf(seq, "ifevents: underflow errors : %8.8X\n", adapter->if_events.uflow802); seq_printf(seq, "ifevents: receive early : %8.8X\n", adapter->if_events.rcvearly); seq_printf(seq, "ifevents: buffer overflows : %8.8X\n", adapter->if_events.Bufov); seq_printf(seq, "ifevents: carrier errors : %8.8X\n", adapter->if_events.Carre); seq_printf(seq, "ifevents: Long : %8.8X\n", adapter->if_events.Longe); seq_printf(seq, "ifevents: invalid preambles : %8.8X\n", adapter->if_events.Invp); seq_printf(seq, "ifevents: CRC errors : %8.8X\n", adapter->if_events.Crc); seq_printf(seq, "ifevents: dribble nibbles : %8.8X\n", adapter->if_events.Drbl); seq_printf(seq, "ifevents: Code violations : %8.8X\n", 
		   adapter->if_events.Code);
	seq_printf(seq, "ifevents: TCP checksum errors : %8.8X\n",
		   adapter->if_events.TpCsum);
	seq_printf(seq, "ifevents: TCP header short errors : %8.8X\n",
		   adapter->if_events.TpHlen);
	seq_printf(seq, "ifevents: IP checksum errors : %8.8X\n",
		   adapter->if_events.IpCsum);
	seq_printf(seq, "ifevents: IP frame incompletes : %8.8X\n",
		   adapter->if_events.IpLen);
	seq_printf(seq, "ifevents: IP headers shorts : %8.8X\n",
		   adapter->if_events.IpHlen);

	return 0;
}

static int slic_debug_adapter_open(struct inode *inode, struct file *file)
{
	return single_open(file, slic_debug_adapter_show, inode->i_private);
}

static int slic_debug_card_open(struct inode *inode, struct file *file)
{
	return single_open(file, slic_debug_card_show, inode->i_private);
}

static const struct file_operations slic_debug_adapter_fops = {
	.owner = THIS_MODULE,
	.open = slic_debug_adapter_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static const struct file_operations slic_debug_card_fops = {
	.owner = THIS_MODULE,
	.open = slic_debug_card_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void slic_debug_adapter_create(struct adapter *adapter)
{
	struct dentry *d;
	char name[7];
	struct sliccard *card = adapter->card;

	if (!card->debugfs_dir)
		return;

	sprintf(name, "port%d", adapter->port);
	d = debugfs_create_file(name, S_IRUGO,
				card->debugfs_dir, adapter,
				&slic_debug_adapter_fops);
	if (!d || IS_ERR(d))
		pr_info(PFX "%s: debugfs create failed\n", name);
	else
		adapter->debugfs_entry = d;
}

static void slic_debug_adapter_destroy(struct adapter *adapter)
{
	debugfs_remove(adapter->debugfs_entry);
	adapter->debugfs_entry = NULL;
}

static void slic_debug_card_create(struct sliccard *card)
{
	struct dentry *d;
	char name[IFNAMSIZ];

	snprintf(name, sizeof(name), "slic%d", card->cardnum);
	d = debugfs_create_dir(name, slic_debugfs);
	if (!d || IS_ERR(d))
		pr_info(PFX "%s: debugfs create dir failed\n", name);
	else {
		card->debugfs_dir = d;
		d = debugfs_create_file("cardinfo", S_IRUGO,
					slic_debugfs, card,
					&slic_debug_card_fops);
		if (!d || IS_ERR(d))
			pr_info(PFX "%s: debugfs create failed\n", name);
		else
			card->debugfs_cardinfo = d;
	}
}

static void slic_debug_card_destroy(struct sliccard *card)
{
	int i;

	for (i = 0; i < card->card_size; i++) {
		struct adapter *adapter;

		adapter = card->adapter[i];
		if (adapter)
			slic_debug_adapter_destroy(adapter);
	}
	if (card->debugfs_cardinfo) {
		debugfs_remove(card->debugfs_cardinfo);
		card->debugfs_cardinfo = NULL;
	}
	if (card->debugfs_dir) {
		debugfs_remove(card->debugfs_dir);
		card->debugfs_dir = NULL;
	}
}

static void slic_debug_init(void)
{
	struct dentry *ent;

	ent = debugfs_create_dir("slic", NULL);
	if (!ent || IS_ERR(ent)) {
		pr_info(PFX "debugfs create directory failed\n");
		return;
	}

	slic_debugfs = ent;
}

static void slic_debug_cleanup(void)
{
	if (slic_debugfs) {
		debugfs_remove(slic_debugfs);
		slic_debugfs = NULL;
	}
}

/*
 * slic_link_event_handler -
 *
 * Initiate a link configuration sequence. The link configuration begins
 * by issuing a READ_LINK_STATUS command to the Utility Processor on the
 * SLIC. Since the command finishes asynchronously, the slic_upr_complete
 * routine will follow it up with a UP configuration write command, which
 * will also complete asynchronously.
 *
 */
static void slic_link_event_handler(struct adapter *adapter)
{
	int status;
	struct slic_shmem *pshmem;

	if (adapter->state != ADAPT_UP) {
		/* Adapter is not operational. Ignore.
*/ return; } pshmem = (struct slic_shmem *)(unsigned long)adapter->phys_shmem; #if BITS_PER_LONG == 64 status = slic_upr_request(adapter, SLIC_UPR_RLSR, SLIC_GET_ADDR_LOW(&pshmem->linkstatus), SLIC_GET_ADDR_HIGH(&pshmem->linkstatus), 0, 0); #else status = slic_upr_request(adapter, SLIC_UPR_RLSR, (u32) &pshmem->linkstatus, /* no 4GB wrap guaranteed */ 0, 0, 0); #endif } static void slic_init_cleanup(struct adapter *adapter) { if (adapter->intrregistered) { adapter->intrregistered = 0; free_irq(adapter->netdev->irq, adapter->netdev); } if (adapter->pshmem) { pci_free_consistent(adapter->pcidev, sizeof(struct slic_shmem), adapter->pshmem, adapter->phys_shmem); adapter->pshmem = NULL; adapter->phys_shmem = (dma_addr_t)(unsigned long)NULL; } if (adapter->pingtimerset) { adapter->pingtimerset = 0; del_timer(&adapter->pingtimer); } slic_rspqueue_free(adapter); slic_cmdq_free(adapter); slic_rcvqueue_free(adapter); } /* * Allocate a mcast_address structure to hold the multicast address. * Link it in. */ static int slic_mcast_add_list(struct adapter *adapter, char *address) { struct mcast_address *mcaddr, *mlist; /* Check to see if it already exists */ mlist = adapter->mcastaddrs; while (mlist) { if (!compare_ether_addr(mlist->address, address)) return 0; mlist = mlist->next; } /* Doesn't already exist. Allocate a structure to hold it */ mcaddr = kmalloc(sizeof(struct mcast_address), GFP_ATOMIC); if (mcaddr == NULL) return 1; memcpy(mcaddr->address, address, 6); mcaddr->next = adapter->mcastaddrs; adapter->mcastaddrs = mcaddr; return 0; } static void slic_mcast_set_list(struct net_device *dev) { struct adapter *adapter = netdev_priv(dev); int status = 0; char *addresses; struct netdev_hw_addr *ha; netdev_for_each_mc_addr(ha, dev) { addresses = (char *) &ha->addr; status = slic_mcast_add_list(adapter, addresses); if (status != 0) break; slic_mcast_set_bit(adapter, addresses); } if (adapter->devflags_prev != dev->flags) { adapter->macopts = MAC_DIRECTED; if (dev->flags) { if (dev->flags & IFF_BROADCAST) adapter->macopts |= MAC_BCAST; if (dev->flags & IFF_PROMISC) adapter->macopts |= MAC_PROMISC; if (dev->flags & IFF_ALLMULTI) adapter->macopts |= MAC_ALLMCAST; if (dev->flags & IFF_MULTICAST) adapter->macopts |= MAC_MCAST; } adapter->devflags_prev = dev->flags; slic_config_set(adapter, true); } else { if (status == 0) slic_mcast_set_mask(adapter); } } #define XMIT_FAIL_LINK_STATE 1 #define XMIT_FAIL_ZERO_LENGTH 2 #define XMIT_FAIL_HOSTCMD_FAIL 3 static void slic_xmit_build_request(struct adapter *adapter, struct slic_hostcmd *hcmd, struct sk_buff *skb) { struct slic_host64_cmd *ihcmd; ulong phys_addr; ihcmd = &hcmd->cmd64; ihcmd->flags = (adapter->port << IHFLG_IFSHFT); ihcmd->command = IHCMD_XMT_REQ; ihcmd->u.slic_buffers.totlen = skb->len; phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len, PCI_DMA_TODEVICE); ihcmd->u.slic_buffers.bufs[0].paddrl = SLIC_GET_ADDR_LOW(phys_addr); ihcmd->u.slic_buffers.bufs[0].paddrh = SLIC_GET_ADDR_HIGH(phys_addr); ihcmd->u.slic_buffers.bufs[0].length = skb->len; #if BITS_PER_LONG == 64 hcmd->cmdsize = (u32) ((((u64)&ihcmd->u.slic_buffers.bufs[1] - (u64) hcmd) + 31) >> 5); #else hcmd->cmdsize = ((((u32) &ihcmd->u.slic_buffers.bufs[1] - (u32) hcmd) + 31) >> 5); #endif } static void slic_xmit_fail(struct adapter *adapter, struct sk_buff *skb, void *cmd, u32 skbtype, u32 status) { if (adapter->xmitq_full) netif_stop_queue(adapter->netdev); if ((cmd == NULL) && (status <= XMIT_FAIL_HOSTCMD_FAIL)) { switch (status) { case XMIT_FAIL_LINK_STATE: 
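			/* xmit was attempted while the link, adapter, or
			 * card was not up; the skb is logged and dropped
			 * below.
			 */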
dev_err(&adapter->netdev->dev, "reject xmit skb[%p: %x] linkstate[%s] " "adapter[%s:%d] card[%s:%d]\n", skb, skb->pkt_type, SLIC_LINKSTATE(adapter->linkstate), SLIC_ADAPTER_STATE(adapter->state), adapter->state, SLIC_CARD_STATE(adapter->card->state), adapter->card->state); break; case XMIT_FAIL_ZERO_LENGTH: dev_err(&adapter->netdev->dev, "xmit_start skb->len == 0 skb[%p] type[%x]\n", skb, skb->pkt_type); break; case XMIT_FAIL_HOSTCMD_FAIL: dev_err(&adapter->netdev->dev, "xmit_start skb[%p] type[%x] No host commands " "available\n", skb, skb->pkt_type); break; } } dev_kfree_skb(skb); adapter->netdev->stats.tx_dropped++; } static void slic_rcv_handle_error(struct adapter *adapter, struct slic_rcvbuf *rcvbuf) { struct slic_hddr_wds *hdr = (struct slic_hddr_wds *)rcvbuf->data; struct net_device *netdev = adapter->netdev; if (adapter->devid != SLIC_1GB_DEVICE_ID) { if (hdr->frame_status14 & VRHSTAT_802OE) adapter->if_events.oflow802++; if (hdr->frame_status14 & VRHSTAT_TPOFLO) adapter->if_events.Tprtoflow++; if (hdr->frame_status_b14 & VRHSTATB_802UE) adapter->if_events.uflow802++; if (hdr->frame_status_b14 & VRHSTATB_RCVE) { adapter->if_events.rcvearly++; netdev->stats.rx_fifo_errors++; } if (hdr->frame_status_b14 & VRHSTATB_BUFF) { adapter->if_events.Bufov++; netdev->stats.rx_over_errors++; } if (hdr->frame_status_b14 & VRHSTATB_CARRE) { adapter->if_events.Carre++; netdev->stats.tx_carrier_errors++; } if (hdr->frame_status_b14 & VRHSTATB_LONGE) adapter->if_events.Longe++; if (hdr->frame_status_b14 & VRHSTATB_PREA) adapter->if_events.Invp++; if (hdr->frame_status_b14 & VRHSTATB_CRC) { adapter->if_events.Crc++; netdev->stats.rx_crc_errors++; } if (hdr->frame_status_b14 & VRHSTATB_DRBL) adapter->if_events.Drbl++; if (hdr->frame_status_b14 & VRHSTATB_CODE) adapter->if_events.Code++; if (hdr->frame_status_b14 & VRHSTATB_TPCSUM) adapter->if_events.TpCsum++; if (hdr->frame_status_b14 & VRHSTATB_TPHLEN) adapter->if_events.TpHlen++; if (hdr->frame_status_b14 & VRHSTATB_IPCSUM) adapter->if_events.IpCsum++; if (hdr->frame_status_b14 & VRHSTATB_IPLERR) adapter->if_events.IpLen++; if (hdr->frame_status_b14 & VRHSTATB_IPHERR) adapter->if_events.IpHlen++; } else { if (hdr->frame_statusGB & VGBSTAT_XPERR) { u32 xerr = hdr->frame_statusGB >> VGBSTAT_XERRSHFT; if (xerr == VGBSTAT_XCSERR) adapter->if_events.TpCsum++; if (xerr == VGBSTAT_XUFLOW) adapter->if_events.Tprtoflow++; if (xerr == VGBSTAT_XHLEN) adapter->if_events.TpHlen++; } if (hdr->frame_statusGB & VGBSTAT_NETERR) { u32 nerr = (hdr-> frame_statusGB >> VGBSTAT_NERRSHFT) & VGBSTAT_NERRMSK; if (nerr == VGBSTAT_NCSERR) adapter->if_events.IpCsum++; if (nerr == VGBSTAT_NUFLOW) adapter->if_events.IpLen++; if (nerr == VGBSTAT_NHLEN) adapter->if_events.IpHlen++; } if (hdr->frame_statusGB & VGBSTAT_LNKERR) { u32 lerr = hdr->frame_statusGB & VGBSTAT_LERRMSK; if (lerr == VGBSTAT_LDEARLY) adapter->if_events.rcvearly++; if (lerr == VGBSTAT_LBOFLO) adapter->if_events.Bufov++; if (lerr == VGBSTAT_LCODERR) adapter->if_events.Code++; if (lerr == VGBSTAT_LDBLNBL) adapter->if_events.Drbl++; if (lerr == VGBSTAT_LCRCERR) adapter->if_events.Crc++; if (lerr == VGBSTAT_LOFLO) adapter->if_events.oflow802++; if (lerr == VGBSTAT_LUFLO) adapter->if_events.uflow802++; } } return; } #define TCP_OFFLOAD_FRAME_PUSHFLAG 0x10000000 #define M_FAST_PATH 0x0040 static void slic_rcv_handler(struct adapter *adapter) { struct net_device *netdev = adapter->netdev; struct sk_buff *skb; struct slic_rcvbuf *rcvbuf; u32 frames = 0; while ((skb = slic_rcvqueue_getnext(adapter))) { u32 rx_bytes; 
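		/*
		 * Per-frame receive path: frames with hardware-reported
		 * errors or that fail the software MAC filter are returned
		 * to the receive queue; good frames have the SLIC receive
		 * header stripped before being handed to netif_rx().
		 */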
rcvbuf = (struct slic_rcvbuf *)skb->head; adapter->card->events++; if (rcvbuf->status & IRHDDR_ERR) { adapter->rx_errors++; slic_rcv_handle_error(adapter, rcvbuf); slic_rcvqueue_reinsert(adapter, skb); continue; } if (!slic_mac_filter(adapter, (struct ether_header *) rcvbuf->data)) { slic_rcvqueue_reinsert(adapter, skb); continue; } skb_pull(skb, SLIC_RCVBUF_HEADSIZE); rx_bytes = (rcvbuf->length & IRHDDR_FLEN_MSK); skb_put(skb, rx_bytes); netdev->stats.rx_packets++; netdev->stats.rx_bytes += rx_bytes; #if SLIC_OFFLOAD_IP_CHECKSUM skb->ip_summed = CHECKSUM_UNNECESSARY; #endif skb->dev = adapter->netdev; skb->protocol = eth_type_trans(skb, skb->dev); netif_rx(skb); ++frames; #if SLIC_INTERRUPT_PROCESS_LIMIT if (frames >= SLIC_RCVQ_MAX_PROCESS_ISR) { adapter->rcv_interrupt_yields++; break; } #endif } adapter->max_isr_rcvs = max(adapter->max_isr_rcvs, frames); } static void slic_xmit_complete(struct adapter *adapter) { struct slic_hostcmd *hcmd; struct slic_rspbuf *rspbuf; u32 frames = 0; struct slic_handle_word slic_handle_word; do { rspbuf = slic_rspqueue_getnext(adapter); if (!rspbuf) break; adapter->xmit_completes++; adapter->card->events++; /* Get the complete host command buffer */ slic_handle_word.handle_token = rspbuf->hosthandle; hcmd = (struct slic_hostcmd *) adapter->slic_handles[slic_handle_word.handle_index]. address; /* hcmd = (struct slic_hostcmd *) rspbuf->hosthandle; */ if (hcmd->type == SLIC_CMD_DUMB) { if (hcmd->skb) dev_kfree_skb_irq(hcmd->skb); slic_cmdq_putdone_irq(adapter, hcmd); } rspbuf->status = 0; rspbuf->hosthandle = 0; frames++; } while (1); adapter->max_isr_xmits = max(adapter->max_isr_xmits, frames); } static irqreturn_t slic_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct adapter *adapter = netdev_priv(dev); u32 isr; if ((adapter->pshmem) && (adapter->pshmem->isr)) { slic_reg32_write(&adapter->slic_regs->slic_icr, ICR_INT_MASK, FLUSH); isr = adapter->isrcopy = adapter->pshmem->isr; adapter->pshmem->isr = 0; adapter->num_isrs++; switch (adapter->card->state) { case CARD_UP: if (isr & ~ISR_IO) { if (isr & ISR_ERR) { adapter->error_interrupts++; if (isr & ISR_RMISS) { int count; int pre_count; int errors; struct slic_rcvqueue *rcvq = &adapter->rcvqueue; adapter-> error_rmiss_interrupts++; if (!rcvq->errors) rcv_count = rcvq->count; pre_count = rcvq->count; errors = rcvq->errors; while (rcvq->count < SLIC_RCVQ_FILLTHRESH) { count = slic_rcvqueue_fill (adapter); if (!count) break; } } else if (isr & ISR_XDROP) { dev_err(&dev->dev, "isr & ISR_ERR [%x] " "ISR_XDROP \n", isr); } else { dev_err(&dev->dev, "isr & ISR_ERR [%x]\n", isr); } } if (isr & ISR_LEVENT) { adapter->linkevent_interrupts++; slic_link_event_handler(adapter); } if ((isr & ISR_UPC) || (isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) { adapter->upr_interrupts++; slic_upr_request_complete(adapter, isr); } } if (isr & ISR_RCV) { adapter->rcv_interrupts++; slic_rcv_handler(adapter); } if (isr & ISR_CMD) { adapter->xmit_interrupts++; slic_xmit_complete(adapter); } break; case CARD_DOWN: if ((isr & ISR_UPC) || (isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) { adapter->upr_interrupts++; slic_upr_request_complete(adapter, isr); } break; } adapter->isrcopy = 0; adapter->all_reg_writes += 2; adapter->isr_reg_writes++; slic_reg32_write(&adapter->slic_regs->slic_isr, 0, FLUSH); } else { adapter->false_interrupts++; } return IRQ_HANDLED; } #define NORMAL_ETHFRAME 0 static netdev_tx_t slic_xmit_start(struct sk_buff *skb, struct net_device *dev) { struct sliccard *card; struct 
adapter *adapter = netdev_priv(dev); struct slic_hostcmd *hcmd = NULL; u32 status = 0; void *offloadcmd = NULL; card = adapter->card; if ((adapter->linkstate != LINK_UP) || (adapter->state != ADAPT_UP) || (card->state != CARD_UP)) { status = XMIT_FAIL_LINK_STATE; goto xmit_fail; } else if (skb->len == 0) { status = XMIT_FAIL_ZERO_LENGTH; goto xmit_fail; } hcmd = slic_cmdq_getfree(adapter); if (!hcmd) { adapter->xmitq_full = 1; status = XMIT_FAIL_HOSTCMD_FAIL; goto xmit_fail; } hcmd->skb = skb; hcmd->busy = 1; hcmd->type = SLIC_CMD_DUMB; slic_xmit_build_request(adapter, hcmd, skb); dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; #ifdef DEBUG_DUMP if (adapter->kill_card) { struct slic_host64_cmd ihcmd; ihcmd = &hcmd->cmd64; ihcmd->flags |= 0x40; adapter->kill_card = 0; /* only do this once */ } #endif if (hcmd->paddrh == 0) { slic_reg32_write(&adapter->slic_regs->slic_cbar, (hcmd->paddrl | hcmd->cmdsize), DONT_FLUSH); } else { slic_reg64_write(adapter, &adapter->slic_regs->slic_cbar64, (hcmd->paddrl | hcmd->cmdsize), &adapter->slic_regs->slic_addr_upper, hcmd->paddrh, DONT_FLUSH); } xmit_done: return NETDEV_TX_OK; xmit_fail: slic_xmit_fail(adapter, skb, offloadcmd, NORMAL_ETHFRAME, status); goto xmit_done; } static void slic_adapter_freeresources(struct adapter *adapter) { slic_init_cleanup(adapter); adapter->error_interrupts = 0; adapter->rcv_interrupts = 0; adapter->xmit_interrupts = 0; adapter->linkevent_interrupts = 0; adapter->upr_interrupts = 0; adapter->num_isrs = 0; adapter->xmit_completes = 0; adapter->rcv_broadcasts = 0; adapter->rcv_multicasts = 0; adapter->rcv_unicasts = 0; } static int slic_adapter_allocresources(struct adapter *adapter) { if (!adapter->intrregistered) { int retval; spin_unlock_irqrestore(&slic_global.driver_lock.lock, slic_global.driver_lock.flags); retval = request_irq(adapter->netdev->irq, &slic_interrupt, IRQF_SHARED, adapter->netdev->name, adapter->netdev); spin_lock_irqsave(&slic_global.driver_lock.lock, slic_global.driver_lock.flags); if (retval) { dev_err(&adapter->netdev->dev, "request_irq (%s) FAILED [%x]\n", adapter->netdev->name, retval); return retval; } adapter->intrregistered = 1; } return 0; } /* * slic_if_init * * Perform initialization of our slic interface. 
* */ static int slic_if_init(struct adapter *adapter) { struct sliccard *card = adapter->card; struct net_device *dev = adapter->netdev; __iomem struct slic_regs *slic_regs = adapter->slic_regs; struct slic_shmem *pshmem; int rc; /* adapter should be down at this point */ if (adapter->state != ADAPT_DOWN) { dev_err(&dev->dev, "%s: adapter->state != ADAPT_DOWN\n", __func__); rc = -EIO; goto err; } adapter->devflags_prev = dev->flags; adapter->macopts = MAC_DIRECTED; if (dev->flags) { if (dev->flags & IFF_BROADCAST) adapter->macopts |= MAC_BCAST; if (dev->flags & IFF_PROMISC) adapter->macopts |= MAC_PROMISC; if (dev->flags & IFF_ALLMULTI) adapter->macopts |= MAC_ALLMCAST; if (dev->flags & IFF_MULTICAST) adapter->macopts |= MAC_MCAST; } rc = slic_adapter_allocresources(adapter); if (rc) { dev_err(&dev->dev, "%s: slic_adapter_allocresources FAILED %x\n", __func__, rc); slic_adapter_freeresources(adapter); goto err; } if (!adapter->queues_initialized) { rc = slic_rspqueue_init(adapter); if (rc) goto err; rc = slic_cmdq_init(adapter); if (rc) goto err; rc = slic_rcvqueue_init(adapter); if (rc) goto err; adapter->queues_initialized = 1; } slic_reg32_write(&slic_regs->slic_icr, ICR_INT_OFF, FLUSH); mdelay(1); if (!adapter->isp_initialized) { pshmem = (struct slic_shmem *)(unsigned long) adapter->phys_shmem; spin_lock_irqsave(&adapter->bit64reglock.lock, adapter->bit64reglock.flags); #if BITS_PER_LONG == 64 slic_reg32_write(&slic_regs->slic_addr_upper, SLIC_GET_ADDR_HIGH(&pshmem->isr), DONT_FLUSH); slic_reg32_write(&slic_regs->slic_isp, SLIC_GET_ADDR_LOW(&pshmem->isr), FLUSH); #else slic_reg32_write(&slic_regs->slic_addr_upper, 0, DONT_FLUSH); slic_reg32_write(&slic_regs->slic_isp, (u32)&pshmem->isr, FLUSH); #endif spin_unlock_irqrestore(&adapter->bit64reglock.lock, adapter->bit64reglock.flags); adapter->isp_initialized = 1; } adapter->state = ADAPT_UP; if (!card->loadtimerset) { init_timer(&card->loadtimer); card->loadtimer.expires = jiffies + (SLIC_LOADTIMER_PERIOD * HZ); card->loadtimer.data = (ulong) card; card->loadtimer.function = &slic_timer_load_check; add_timer(&card->loadtimer); card->loadtimerset = 1; } if (!adapter->pingtimerset) { init_timer(&adapter->pingtimer); adapter->pingtimer.expires = jiffies + (PING_TIMER_INTERVAL * HZ); adapter->pingtimer.data = (ulong) dev; adapter->pingtimer.function = &slic_timer_ping; add_timer(&adapter->pingtimer); adapter->pingtimerset = 1; adapter->card->pingstatus = ISR_PINGMASK; } /* * clear any pending events, then enable interrupts */ adapter->isrcopy = 0; adapter->pshmem->isr = 0; slic_reg32_write(&slic_regs->slic_isr, 0, FLUSH); slic_reg32_write(&slic_regs->slic_icr, ICR_INT_ON, FLUSH); slic_link_config(adapter, LINK_AUTOSPEED, LINK_AUTOD); slic_link_event_handler(adapter); err: return rc; } static int slic_entry_open(struct net_device *dev) { struct adapter *adapter = netdev_priv(dev); struct sliccard *card = adapter->card; int status; netif_stop_queue(adapter->netdev); spin_lock_irqsave(&slic_global.driver_lock.lock, slic_global.driver_lock.flags); if (!adapter->activated) { card->adapters_activated++; slic_global.num_slic_ports_active++; adapter->activated = 1; } status = slic_if_init(adapter); if (status != 0) { if (adapter->activated) { card->adapters_activated--; slic_global.num_slic_ports_active--; adapter->activated = 0; } goto spin_unlock; } if (!card->master) card->master = adapter; spin_unlock: spin_unlock_irqrestore(&slic_global.driver_lock.lock, slic_global.driver_lock.flags); return status; } static void slic_card_cleanup(struct 
sliccard *card) { if (card->loadtimerset) { card->loadtimerset = 0; del_timer(&card->loadtimer); } slic_debug_card_destroy(card); kfree(card); } static void slic_entry_remove(struct pci_dev *pcidev) { struct net_device *dev = pci_get_drvdata(pcidev); u32 mmio_start = 0; uint mmio_len = 0; struct adapter *adapter = netdev_priv(dev); struct sliccard *card; struct mcast_address *mcaddr, *mlist; slic_adapter_freeresources(adapter); slic_unmap_mmio_space(adapter); unregister_netdev(dev); mmio_start = pci_resource_start(pcidev, 0); mmio_len = pci_resource_len(pcidev, 0); release_mem_region(mmio_start, mmio_len); iounmap((void __iomem *)dev->base_addr); /* free multicast addresses */ mlist = adapter->mcastaddrs; while (mlist) { mcaddr = mlist; mlist = mlist->next; kfree(mcaddr); } card = adapter->card; card->adapters_allocated--; adapter->allocated = 0; if (!card->adapters_allocated) { struct sliccard *curr_card = slic_global.slic_card; if (curr_card == card) { slic_global.slic_card = card->next; } else { while (curr_card->next != card) curr_card = curr_card->next; curr_card->next = card->next; } slic_global.num_slic_cards--; slic_card_cleanup(card); } free_netdev(dev); pci_release_regions(pcidev); pci_disable_device(pcidev); } static int slic_entry_halt(struct net_device *dev) { struct adapter *adapter = netdev_priv(dev); struct sliccard *card = adapter->card; __iomem struct slic_regs *slic_regs = adapter->slic_regs; spin_lock_irqsave(&slic_global.driver_lock.lock, slic_global.driver_lock.flags); netif_stop_queue(adapter->netdev); adapter->state = ADAPT_DOWN; adapter->linkstate = LINK_DOWN; adapter->upr_list = NULL; adapter->upr_busy = 0; adapter->devflags_prev = 0; slic_reg32_write(&slic_regs->slic_icr, ICR_INT_OFF, FLUSH); adapter->all_reg_writes++; adapter->icr_reg_writes++; slic_config_clear(adapter); if (adapter->activated) { card->adapters_activated--; slic_global.num_slic_ports_active--; adapter->activated = 0; } #ifdef AUTOMATIC_RESET slic_reg32_write(&slic_regs->slic_reset_iface, 0, FLUSH); #endif /* * Reset the adapter's cmd queues */ slic_cmdq_reset(adapter); #ifdef AUTOMATIC_RESET if (!card->adapters_activated) slic_card_init(card, adapter); #endif spin_unlock_irqrestore(&slic_global.driver_lock.lock, slic_global.driver_lock.flags); return 0; } static struct net_device_stats *slic_get_stats(struct net_device *dev) { struct adapter *adapter = netdev_priv(dev); dev->stats.collisions = adapter->slic_stats.iface.xmit_collisions; dev->stats.rx_errors = adapter->slic_stats.iface.rcv_errors; dev->stats.tx_errors = adapter->slic_stats.iface.xmt_errors; dev->stats.rx_missed_errors = adapter->slic_stats.iface.rcv_discards; dev->stats.tx_heartbeat_errors = 0; dev->stats.tx_aborted_errors = 0; dev->stats.tx_window_errors = 0; dev->stats.tx_fifo_errors = 0; dev->stats.rx_frame_errors = 0; dev->stats.rx_length_errors = 0; return &dev->stats; } static int slic_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct adapter *adapter = netdev_priv(dev); struct ethtool_cmd edata; struct ethtool_cmd ecmd; u32 data[7]; u32 intagg; switch (cmd) { case SIOCSLICSETINTAGG: if (copy_from_user(data, rq->ifr_data, 28)) return -EFAULT; intagg = data[0]; dev_err(&dev->dev, "%s: set interrupt aggregation to %d\n", __func__, intagg); slic_intagg_set(adapter, intagg); return 0; #ifdef SLIC_TRACE_DUMP_ENABLED case SIOCSLICTRACEDUMP: { u32 value; DBG_IOCTL("slic_ioctl SIOCSLIC_TRACE_DUMP\n"); if (copy_from_user(data, rq->ifr_data, 28)) { PRINT_ERROR ("slic: copy_from_user FAILED getting initial simba 
param\n"); return -EFAULT; } value = data[0]; if (tracemon_request == SLIC_DUMP_DONE) { PRINT_ERROR ("ATK Diagnostic Trace Dump Requested\n"); tracemon_request = SLIC_DUMP_REQUESTED; tracemon_request_type = value; tracemon_timestamp = jiffies; } else if ((tracemon_request == SLIC_DUMP_REQUESTED) || (tracemon_request == SLIC_DUMP_IN_PROGRESS)) { PRINT_ERROR ("ATK Diagnostic Trace Dump Requested but already in progress... ignore\n"); } else { PRINT_ERROR ("ATK Diagnostic Trace Dump Requested\n"); tracemon_request = SLIC_DUMP_REQUESTED; tracemon_request_type = value; tracemon_timestamp = jiffies; } return 0; } #endif case SIOCETHTOOL: if (copy_from_user(&ecmd, rq->ifr_data, sizeof(ecmd))) return -EFAULT; if (ecmd.cmd == ETHTOOL_GSET) { memset(&edata, 0, sizeof(edata)); edata.supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII); edata.port = PORT_MII; edata.transceiver = XCVR_INTERNAL; edata.phy_address = 0; if (adapter->linkspeed == LINK_100MB) edata.speed = SPEED_100; else if (adapter->linkspeed == LINK_10MB) edata.speed = SPEED_10; else edata.speed = 0; if (adapter->linkduplex == LINK_FULLD) edata.duplex = DUPLEX_FULL; else edata.duplex = DUPLEX_HALF; edata.autoneg = AUTONEG_ENABLE; edata.maxtxpkt = 1; edata.maxrxpkt = 1; if (copy_to_user(rq->ifr_data, &edata, sizeof(edata))) return -EFAULT; } else if (ecmd.cmd == ETHTOOL_SSET) { if (!capable(CAP_NET_ADMIN)) return -EPERM; if (adapter->linkspeed == LINK_100MB) edata.speed = SPEED_100; else if (adapter->linkspeed == LINK_10MB) edata.speed = SPEED_10; else edata.speed = 0; if (adapter->linkduplex == LINK_FULLD) edata.duplex = DUPLEX_FULL; else edata.duplex = DUPLEX_HALF; edata.autoneg = AUTONEG_ENABLE; edata.maxtxpkt = 1; edata.maxrxpkt = 1; if ((ecmd.speed != edata.speed) || (ecmd.duplex != edata.duplex)) { u32 speed; u32 duplex; if (ecmd.speed == SPEED_10) speed = 0; else speed = PCR_SPEED_100; if (ecmd.duplex == DUPLEX_FULL) duplex = PCR_DUPLEX_FULL; else duplex = 0; slic_link_config(adapter, speed, duplex); slic_link_event_handler(adapter); } } return 0; default: return -EOPNOTSUPP; } } static void slic_config_pci(struct pci_dev *pcidev) { u16 pci_command; u16 new_command; pci_read_config_word(pcidev, PCI_COMMAND, &pci_command); new_command = pci_command | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_INVALIDATE | PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK; if (pci_command != new_command) pci_write_config_word(pcidev, PCI_COMMAND, new_command); } static int slic_card_init(struct sliccard *card, struct adapter *adapter) { __iomem struct slic_regs *slic_regs = adapter->slic_regs; struct slic_eeprom *peeprom; struct oslic_eeprom *pOeeprom; dma_addr_t phys_config; u32 phys_configh; u32 phys_configl; u32 i = 0; struct slic_shmem *pshmem; int status; uint macaddrs = card->card_size; ushort eecodesize; ushort dramsize; ushort ee_chksum; ushort calc_chksum; struct slic_config_mac *pmac; unsigned char fruformat; unsigned char oemfruformat; struct atk_fru *patkfru; union oemfru *poemfru; /* Reset everything except PCI configuration space */ slic_soft_reset(adapter); /* Download the microcode */ status = slic_card_download(adapter); if (status != 0) { dev_err(&adapter->pcidev->dev, "download failed bus %d slot %d\n", adapter->busnumber, adapter->slotnumber); return status; } if (!card->config_set) { peeprom = pci_alloc_consistent(adapter->pcidev, sizeof(struct slic_eeprom), &phys_config); phys_configl = 
		    SLIC_GET_ADDR_LOW(phys_config);
		phys_configh = SLIC_GET_ADDR_HIGH(phys_config);

		if (!peeprom) {
			dev_err(&adapter->pcidev->dev,
				"eeprom read failed to get memory "
				"bus %d slot %d\n", adapter->busnumber,
				adapter->slotnumber);
			return -ENOMEM;
		} else {
			memset(peeprom, 0, sizeof(struct slic_eeprom));
		}
		slic_reg32_write(&slic_regs->slic_icr, ICR_INT_OFF, FLUSH);
		mdelay(1);
		pshmem = (struct slic_shmem *)(unsigned long)
			 adapter->phys_shmem;

		spin_lock_irqsave(&adapter->bit64reglock.lock,
					adapter->bit64reglock.flags);
		slic_reg32_write(&slic_regs->slic_addr_upper, 0, DONT_FLUSH);
		slic_reg32_write(&slic_regs->slic_isp,
				 SLIC_GET_ADDR_LOW(&pshmem->isr), FLUSH);
		spin_unlock_irqrestore(&adapter->bit64reglock.lock,
					adapter->bit64reglock.flags);

		slic_config_get(adapter, phys_configl, phys_configh);

		for (;;) {
			if (adapter->pshmem->isr) {
				if (adapter->pshmem->isr & ISR_UPC) {
					adapter->pshmem->isr = 0;
					slic_reg64_write(adapter,
						&slic_regs->slic_isp, 0,
						&slic_regs->slic_addr_upper,
						0, FLUSH);
					slic_reg32_write(&slic_regs->slic_isr,
							 0, FLUSH);

					slic_upr_request_complete(adapter, 0);
					break;
				} else {
					adapter->pshmem->isr = 0;
					slic_reg32_write(&slic_regs->slic_isr,
							 0, FLUSH);
				}
			} else {
				mdelay(1);
				i++;
				if (i > 5000) {
					dev_err(&adapter->pcidev->dev,
						"%d config data fetch timed out!\n",
						adapter->port);
					slic_reg64_write(adapter,
						&slic_regs->slic_isp, 0,
						&slic_regs->slic_addr_upper,
						0, FLUSH);
					return -EINVAL;
				}
			}
		}

		switch (adapter->devid) {
		/* Oasis card */
		case SLIC_2GB_DEVICE_ID:
			/* extract EEPROM data and pointers to EEPROM data */
			pOeeprom = (struct oslic_eeprom *) peeprom;
			eecodesize = pOeeprom->EecodeSize;
			dramsize = pOeeprom->DramSize;
			pmac = pOeeprom->MacInfo;
			fruformat = pOeeprom->FruFormat;
			patkfru = &pOeeprom->AtkFru;
			oemfruformat = pOeeprom->OemFruFormat;
			poemfru = &pOeeprom->OemFru;
			macaddrs = 2;
			/* Minor kludge for Oasis card
			   get 2 MAC addresses from the
			   EEPROM to ensure that function 1
			   gets the Port 1 MAC address */
			break;
		default:
			/* extract EEPROM data and pointers to EEPROM data */
			eecodesize = peeprom->EecodeSize;
			dramsize = peeprom->DramSize;
			pmac = peeprom->u2.mac.MacInfo;
			fruformat = peeprom->FruFormat;
			patkfru = &peeprom->AtkFru;
			oemfruformat = peeprom->OemFruFormat;
			poemfru = &peeprom->OemFru;
			break;
		}

		card->config.EepromValid = false;

		/* see if the EEPROM is valid by checking its checksum */
		if ((eecodesize <= MAX_EECODE_SIZE) &&
		    (eecodesize >= MIN_EECODE_SIZE)) {
			ee_chksum =
			    *(u16 *) ((char *) peeprom + (eecodesize - 2));
			/* calculate the EEPROM checksum */
			calc_chksum =
			    ~slic_eeprom_cksum((char *) peeprom,
					       (eecodesize - 2));
			/* if the ucode chksum flag bit worked,
			   we wouldn't need this */
			if (ee_chksum == calc_chksum)
				card->config.EepromValid = true;
		}
		/* copy in the DRAM size */
		card->config.DramSize = dramsize;

		/* copy in the MAC address(es) */
		for (i = 0; i < macaddrs; i++) {
			memcpy(&card->config.MacInfo[i],
			       &pmac[i], sizeof(struct slic_config_mac));
		}

		/* copy the Alacritech FRU information */
		card->config.FruFormat = fruformat;
		memcpy(&card->config.AtkFru, patkfru,
		       sizeof(struct atk_fru));

		pci_free_consistent(adapter->pcidev,
				    sizeof(struct slic_eeprom),
				    peeprom, phys_config);

		if ((!card->config.EepromValid) &&
		    (adapter->reg_params.fail_on_bad_eeprom)) {
			slic_reg64_write(adapter, &slic_regs->slic_isp, 0,
					 &slic_regs->slic_addr_upper,
					 0, FLUSH);
			dev_err(&adapter->pcidev->dev,
				"unsupported CONFIGURATION EEPROM invalid\n");
			return -EINVAL;
		}

		card->config_set = 1;
	}

	if (slic_card_download_gbrcv(adapter)) {
		dev_err(&adapter->pcidev->dev,
			"unable to download GB receive microcode\n");
		return
-EINVAL; } if (slic_global.dynamic_intagg) slic_intagg_set(adapter, 0); else slic_intagg_set(adapter, intagg_delay); /* * Initialize ping status to "ok" */ card->pingstatus = ISR_PINGMASK; /* * Lastly, mark our card state as up and return success */ card->state = CARD_UP; card->reset_in_progress = 0; return 0; } static void slic_init_driver(void) { if (slic_first_init) { slic_first_init = 0; spin_lock_init(&slic_global.driver_lock.lock); slic_debug_init(); } } static void slic_init_adapter(struct net_device *netdev, struct pci_dev *pcidev, const struct pci_device_id *pci_tbl_entry, void __iomem *memaddr, int chip_idx) { ushort index; struct slic_handle *pslic_handle; struct adapter *adapter = netdev_priv(netdev); /* adapter->pcidev = pcidev;*/ adapter->vendid = pci_tbl_entry->vendor; adapter->devid = pci_tbl_entry->device; adapter->subsysid = pci_tbl_entry->subdevice; adapter->busnumber = pcidev->bus->number; adapter->slotnumber = ((pcidev->devfn >> 3) & 0x1F); adapter->functionnumber = (pcidev->devfn & 0x7); adapter->memorylength = pci_resource_len(pcidev, 0); adapter->slic_regs = (__iomem struct slic_regs *)memaddr; adapter->irq = pcidev->irq; /* adapter->netdev = netdev;*/ adapter->next_netdevice = head_netdevice; head_netdevice = netdev; adapter->chipid = chip_idx; adapter->port = 0; /*adapter->functionnumber;*/ adapter->cardindex = adapter->port; adapter->memorybase = memaddr; spin_lock_init(&adapter->upr_lock.lock); spin_lock_init(&adapter->bit64reglock.lock); spin_lock_init(&adapter->adapter_lock.lock); spin_lock_init(&adapter->reset_lock.lock); spin_lock_init(&adapter->handle_lock.lock); adapter->card_size = 1; /* Initialize slic_handle array */ /* Start with 1. 0 is an invalid host handle. */ for (index = 1, pslic_handle = &adapter->slic_handles[1]; index < SLIC_CMDQ_MAXCMDS; index++, pslic_handle++) { pslic_handle->token.handle_index = index; pslic_handle->type = SLIC_HANDLE_FREE; pslic_handle->next = adapter->pfree_slic_handles; adapter->pfree_slic_handles = pslic_handle; } adapter->pshmem = (struct slic_shmem *) pci_alloc_consistent(adapter->pcidev, sizeof(struct slic_shmem), &adapter-> phys_shmem); if (adapter->pshmem) memset(adapter->pshmem, 0, sizeof(struct slic_shmem)); } static const struct net_device_ops slic_netdev_ops = { .ndo_open = slic_entry_open, .ndo_stop = slic_entry_halt, .ndo_start_xmit = slic_xmit_start, .ndo_do_ioctl = slic_ioctl, .ndo_set_mac_address = slic_mac_set_address, .ndo_get_stats = slic_get_stats, .ndo_set_rx_mode = slic_mcast_set_list, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = eth_change_mtu, }; static u32 slic_card_locate(struct adapter *adapter) { struct sliccard *card = slic_global.slic_card; struct physcard *physcard = slic_global.phys_card; ushort card_hostid; u16 __iomem *hostid_reg; uint i; uint rdhostid_offset = 0; switch (adapter->devid) { case SLIC_2GB_DEVICE_ID: rdhostid_offset = SLIC_RDHOSTID_2GB; break; case SLIC_1GB_DEVICE_ID: rdhostid_offset = SLIC_RDHOSTID_1GB; break; default: return -ENODEV; } hostid_reg = (u16 __iomem *) (((u8 __iomem *) (adapter->slic_regs)) + rdhostid_offset); /* read the 16 bit hostid from SRAM */ card_hostid = (ushort) readw(hostid_reg); /* Initialize a new card structure if need be */ if (card_hostid == SLIC_HOSTID_DEFAULT) { card = kzalloc(sizeof(struct sliccard), GFP_KERNEL); if (card == NULL) return -ENOMEM; card->next = slic_global.slic_card; slic_global.slic_card = card; card->busnumber = adapter->busnumber; card->slotnumber = adapter->slotnumber; /* Find an available cardnum */ for (i = 
0; i < SLIC_MAX_CARDS; i++) { if (slic_global.cardnuminuse[i] == 0) { slic_global.cardnuminuse[i] = 1; card->cardnum = i; break; } } slic_global.num_slic_cards++; slic_debug_card_create(card); } else { /* Card exists, find the card this adapter belongs to */ while (card) { if (card->cardnum == card_hostid) break; card = card->next; } } if (!card) return -ENXIO; /* Put the adapter in the card's adapter list */ if (!card->adapter[adapter->port]) { card->adapter[adapter->port] = adapter; adapter->card = card; } card->card_size = 1; /* one port per *logical* card */ while (physcard) { for (i = 0; i < SLIC_MAX_PORTS; i++) { if (physcard->adapter[i]) break; } if (i == SLIC_MAX_PORTS) break; if (physcard->adapter[i]->slotnumber == adapter->slotnumber) break; physcard = physcard->next; } if (!physcard) { /* no structure allocated for this physical card yet */ physcard = kzalloc(sizeof(struct physcard), GFP_ATOMIC); if (!physcard) { if (card_hostid == SLIC_HOSTID_DEFAULT) kfree(card); return -ENOMEM; } physcard->next = slic_global.phys_card; slic_global.phys_card = physcard; physcard->adapters_allocd = 1; } else { physcard->adapters_allocd++; } /* Note - this is ZERO relative */ adapter->physport = physcard->adapters_allocd - 1; physcard->adapter[adapter->physport] = adapter; adapter->physcard = physcard; return 0; } static int slic_entry_probe(struct pci_dev *pcidev, const struct pci_device_id *pci_tbl_entry) { static int cards_found; static int did_version; int err = -ENODEV; struct net_device *netdev; struct adapter *adapter; void __iomem *memmapped_ioaddr = NULL; u32 status = 0; ulong mmio_start = 0; ulong mmio_len = 0; struct sliccard *card = NULL; int pci_using_dac = 0; slic_global.dynamic_intagg = dynamic_intagg; err = pci_enable_device(pcidev); if (err) return err; if (slic_debug > 0 && did_version++ == 0) { printk(KERN_DEBUG "%s\n", slic_banner); printk(KERN_DEBUG "%s\n", slic_proc_version); } if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) { pci_using_dac = 1; if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) { dev_err(&pcidev->dev, "unable to obtain 64-bit DMA for " "consistent allocations\n"); goto err_out_disable_pci; } } else if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(32))) { pci_using_dac = 0; pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32)); } else { dev_err(&pcidev->dev, "no usable DMA configuration\n"); goto err_out_disable_pci; } err = pci_request_regions(pcidev, DRV_NAME); if (err) { dev_err(&pcidev->dev, "can't obtain PCI resources\n"); goto err_out_disable_pci; } pci_set_master(pcidev); netdev = alloc_etherdev(sizeof(struct adapter)); if (!netdev) { err = -ENOMEM; goto err_out_exit_slic_probe; } SET_NETDEV_DEV(netdev, &pcidev->dev); pci_set_drvdata(pcidev, netdev); adapter = netdev_priv(netdev); adapter->netdev = netdev; adapter->pcidev = pcidev; if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; mmio_start = pci_resource_start(pcidev, 0); mmio_len = pci_resource_len(pcidev, 0); /* memmapped_ioaddr = (u32)ioremap_nocache(mmio_start, mmio_len);*/ memmapped_ioaddr = ioremap(mmio_start, mmio_len); if (!memmapped_ioaddr) { dev_err(&pcidev->dev, "cannot remap MMIO region %lx @ %lx\n", mmio_len, mmio_start); goto err_out_free_netdev; } slic_config_pci(pcidev); slic_init_driver(); slic_init_adapter(netdev, pcidev, pci_tbl_entry, memmapped_ioaddr, cards_found); status = slic_card_locate(adapter); if (status) { dev_err(&pcidev->dev, "cannot locate card\n"); goto err_out_free_mmio_region; } card = adapter->card; if (!adapter->allocated) { card->adapters_allocated++; 
adapter->allocated = 1; } status = slic_card_init(card, adapter); if (status != 0) { card->state = CARD_FAIL; adapter->state = ADAPT_FAIL; adapter->linkstate = LINK_DOWN; dev_err(&pcidev->dev, "FAILED status[%x]\n", status); } else { slic_adapter_set_hwaddr(adapter); } netdev->base_addr = (unsigned long)adapter->memorybase; netdev->irq = adapter->irq; netdev->netdev_ops = &slic_netdev_ops; slic_debug_adapter_create(adapter); strcpy(netdev->name, "eth%d"); err = register_netdev(netdev); if (err) { dev_err(&pcidev->dev, "Cannot register net device, aborting.\n"); goto err_out_unmap; } cards_found++; return status; err_out_unmap: iounmap(memmapped_ioaddr); err_out_free_mmio_region: release_mem_region(mmio_start, mmio_len); err_out_free_netdev: free_netdev(netdev); err_out_exit_slic_probe: pci_release_regions(pcidev); err_out_disable_pci: pci_disable_device(pcidev); return err; } static struct pci_driver slic_driver = { .name = DRV_NAME, .id_table = slic_pci_tbl, .probe = slic_entry_probe, .remove = slic_entry_remove, }; static int __init slic_module_init(void) { slic_init_driver(); if (debug >= 0 && slic_debug != debug) printk(KERN_DEBUG KBUILD_MODNAME ": debug level is %d.\n", debug); if (debug >= 0) slic_debug = debug; return pci_register_driver(&slic_driver); } static void __exit slic_module_cleanup(void) { pci_unregister_driver(&slic_driver); slic_debug_cleanup(); } module_init(slic_module_init); module_exit(slic_module_cleanup);
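/*
 * Editor's illustration, not part of the original slicoss sources: a minimal
 * user-space sketch of how an ETHTOOL_GSET request reaches the SIOCETHTOOL
 * case handled by slic_ioctl() above.  The function name and the error
 * handling are assumptions made for this example only.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int ethtool_gset_example(const char *ifname)
{
	struct ifreq ifr;
	struct ethtool_cmd ecmd;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	memset(&ecmd, 0, sizeof(ecmd));
	ecmd.cmd = ETHTOOL_GSET;	/* driver fills in speed/duplex and copies it back */
	ifr.ifr_data = (void *)&ecmd;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("speed %u, duplex %u\n", ecmd.speed, ecmd.duplex);

	close(fd);
	return 0;
}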
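/*
 * Editor's illustration, not part of the original driver: the legacy PCI
 * coherent-DMA allocation pattern that slic_card_init() and
 * slic_init_adapter() rely on, reduced to its minimal shape.  The function
 * name, the 256-byte size and "pdev" are placeholders; <linux/pci.h> is
 * assumed to be available, as it is in the driver above.
 */
static int coherent_buffer_example(struct pci_dev *pdev)
{
	dma_addr_t bus_addr;
	void *cpu_addr;

	/* one buffer, two views: a CPU virtual address and a bus address */
	cpu_addr = pci_alloc_consistent(pdev, 256, &bus_addr);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... program bus_addr into the device, read results via cpu_addr ... */

	pci_free_consistent(pdev, 256, cpu_addr, bus_addr);
	return 0;
}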
gpl-2.0
THEindian/glacier_kernel
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
2597
17294
/************************************************************************** * * Copyright © 2007 David Airlie * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ #include "drmP.h" #include "vmwgfx_drv.h" #include "ttm/ttm_placement.h" #define VMW_DIRTY_DELAY (HZ / 30) struct vmw_fb_par { struct vmw_private *vmw_priv; void *vmalloc; struct vmw_dma_buffer *vmw_bo; struct ttm_bo_kmap_obj map; u32 pseudo_palette[17]; unsigned depth; unsigned bpp; unsigned max_width; unsigned max_height; void *bo_ptr; unsigned bo_size; bool bo_iowrite; struct { spinlock_t lock; bool active; unsigned x1; unsigned y1; unsigned x2; unsigned y2; } dirty; }; static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info) { struct vmw_fb_par *par = info->par; u32 *pal = par->pseudo_palette; if (regno > 15) { DRM_ERROR("Bad regno %u.\n", regno); return 1; } switch (par->depth) { case 24: case 32: pal[regno] = ((red & 0xff00) << 8) | (green & 0xff00) | ((blue & 0xff00) >> 8); break; default: DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp); return 1; } return 0; } static int vmw_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { int depth = var->bits_per_pixel; struct vmw_fb_par *par = info->par; struct vmw_private *vmw_priv = par->vmw_priv; switch (var->bits_per_pixel) { case 32: depth = (var->transp.length > 0) ? 
32 : 24; break; default: DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel); return -EINVAL; } switch (depth) { case 24: var->red.offset = 16; var->green.offset = 8; var->blue.offset = 0; var->red.length = 8; var->green.length = 8; var->blue.length = 8; var->transp.length = 0; var->transp.offset = 0; break; case 32: var->red.offset = 16; var->green.offset = 8; var->blue.offset = 0; var->red.length = 8; var->green.length = 8; var->blue.length = 8; var->transp.length = 8; var->transp.offset = 24; break; default: DRM_ERROR("Bad depth %u.\n", depth); return -EINVAL; } if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) && (var->xoffset != 0 || var->yoffset != 0)) { DRM_ERROR("Can not handle panning without display topology\n"); return -EINVAL; } if ((var->xoffset + var->xres) > par->max_width || (var->yoffset + var->yres) > par->max_height) { DRM_ERROR("Requested geom can not fit in framebuffer\n"); return -EINVAL; } if (!vmw_kms_validate_mode_vram(vmw_priv, info->fix.line_length, var->yoffset + var->yres)) { DRM_ERROR("Requested geom can not fit in framebuffer\n"); return -EINVAL; } return 0; } static int vmw_fb_set_par(struct fb_info *info) { struct vmw_fb_par *par = info->par; struct vmw_private *vmw_priv = par->vmw_priv; vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres, info->fix.line_length, par->bpp, par->depth); if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) { /* TODO check if pitch and offset changes */ vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1); vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0); vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true); vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset); vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset); vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres); vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres); vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); } /* This is really helpful since if this fails the user * can probably not see anything on the screen. 
*/ WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0); return 0; } static int vmw_fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { return 0; } static int vmw_fb_blank(int blank, struct fb_info *info) { return 0; } /* * Dirty code */ static void vmw_fb_dirty_flush(struct vmw_fb_par *par) { struct vmw_private *vmw_priv = par->vmw_priv; struct fb_info *info = vmw_priv->fb_info; int stride = (info->fix.line_length / 4); int *src = (int *)info->screen_base; __le32 __iomem *vram_mem = par->bo_ptr; unsigned long flags; unsigned x, y, w, h; int i, k; struct { uint32_t header; SVGAFifoCmdUpdate body; } *cmd; if (vmw_priv->suspended) return; spin_lock_irqsave(&par->dirty.lock, flags); if (!par->dirty.active) { spin_unlock_irqrestore(&par->dirty.lock, flags); return; } x = par->dirty.x1; y = par->dirty.y1; w = min(par->dirty.x2, info->var.xres) - x; h = min(par->dirty.y2, info->var.yres) - y; par->dirty.x1 = par->dirty.x2 = 0; par->dirty.y1 = par->dirty.y2 = 0; spin_unlock_irqrestore(&par->dirty.lock, flags); for (i = y * stride; i < info->fix.smem_len / 4; i += stride) { for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++) iowrite32(src[k], vram_mem + k); } #if 0 DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h); #endif cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) { DRM_ERROR("Fifo reserve failed.\n"); return; } cmd->header = cpu_to_le32(SVGA_CMD_UPDATE); cmd->body.x = cpu_to_le32(x); cmd->body.y = cpu_to_le32(y); cmd->body.width = cpu_to_le32(w); cmd->body.height = cpu_to_le32(h); vmw_fifo_commit(vmw_priv, sizeof(*cmd)); } static void vmw_fb_dirty_mark(struct vmw_fb_par *par, unsigned x1, unsigned y1, unsigned width, unsigned height) { struct fb_info *info = par->vmw_priv->fb_info; unsigned long flags; unsigned x2 = x1 + width; unsigned y2 = y1 + height; spin_lock_irqsave(&par->dirty.lock, flags); if (par->dirty.x1 == par->dirty.x2) { par->dirty.x1 = x1; par->dirty.y1 = y1; par->dirty.x2 = x2; par->dirty.y2 = y2; /* if we are active start the dirty work * we share the work with the defio system */ if (par->dirty.active) schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY); } else { if (x1 < par->dirty.x1) par->dirty.x1 = x1; if (y1 < par->dirty.y1) par->dirty.y1 = y1; if (x2 > par->dirty.x2) par->dirty.x2 = x2; if (y2 > par->dirty.y2) par->dirty.y2 = y2; } spin_unlock_irqrestore(&par->dirty.lock, flags); } static void vmw_deferred_io(struct fb_info *info, struct list_head *pagelist) { struct vmw_fb_par *par = info->par; unsigned long start, end, min, max; unsigned long flags; struct page *page; int y1, y2; min = ULONG_MAX; max = 0; list_for_each_entry(page, pagelist, lru) { start = page->index << PAGE_SHIFT; end = start + PAGE_SIZE - 1; min = min(min, start); max = max(max, end); } if (min < max) { y1 = min / info->fix.line_length; y2 = (max / info->fix.line_length) + 1; spin_lock_irqsave(&par->dirty.lock, flags); par->dirty.x1 = 0; par->dirty.y1 = y1; par->dirty.x2 = info->var.xres; par->dirty.y2 = y2; spin_unlock_irqrestore(&par->dirty.lock, flags); } vmw_fb_dirty_flush(par); }; struct fb_deferred_io vmw_defio = { .delay = VMW_DIRTY_DELAY, .deferred_io = vmw_deferred_io, }; /* * Draw code */ static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { cfb_fillrect(info, rect); vmw_fb_dirty_mark(info->par, rect->dx, rect->dy, rect->width, rect->height); } static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region) { cfb_copyarea(info, region); vmw_fb_dirty_mark(info->par, 
region->dx, region->dy, region->width, region->height); } static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image) { cfb_imageblit(info, image); vmw_fb_dirty_mark(info->par, image->dx, image->dy, image->width, image->height); } /* * Bring up code */ static struct fb_ops vmw_fb_ops = { .owner = THIS_MODULE, .fb_check_var = vmw_fb_check_var, .fb_set_par = vmw_fb_set_par, .fb_setcolreg = vmw_fb_setcolreg, .fb_fillrect = vmw_fb_fillrect, .fb_copyarea = vmw_fb_copyarea, .fb_imageblit = vmw_fb_imageblit, .fb_pan_display = vmw_fb_pan_display, .fb_blank = vmw_fb_blank, }; static int vmw_fb_create_bo(struct vmw_private *vmw_priv, size_t size, struct vmw_dma_buffer **out) { struct vmw_dma_buffer *vmw_bo; struct ttm_placement ne_placement = vmw_vram_ne_placement; int ret; ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; /* interuptable? */ ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false); if (unlikely(ret != 0)) return ret; vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL); if (!vmw_bo) goto err_unlock; ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size, &ne_placement, false, &vmw_dmabuf_bo_free); if (unlikely(ret != 0)) goto err_unlock; /* init frees the buffer on failure */ *out = vmw_bo; ttm_write_unlock(&vmw_priv->fbdev_master.lock); return 0; err_unlock: ttm_write_unlock(&vmw_priv->fbdev_master.lock); return ret; } int vmw_fb_init(struct vmw_private *vmw_priv) { struct device *device = &vmw_priv->dev->pdev->dev; struct vmw_fb_par *par; struct fb_info *info; unsigned initial_width, initial_height; unsigned fb_width, fb_height; unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size; int ret; /* XXX These shouldn't be hardcoded. */ initial_width = 800; initial_height = 600; fb_bbp = 32; fb_depth = 24; /* XXX As shouldn't these be as well. 
*/ fb_width = min(vmw_priv->fb_max_width, (unsigned)2048); fb_height = min(vmw_priv->fb_max_height, (unsigned)2048); initial_width = min(fb_width, initial_width); initial_height = min(fb_height, initial_height); fb_pitch = fb_width * fb_bbp / 8; fb_size = fb_pitch * fb_height; fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET); info = framebuffer_alloc(sizeof(*par), device); if (!info) return -ENOMEM; /* * Par */ vmw_priv->fb_info = info; par = info->par; par->vmw_priv = vmw_priv; par->depth = fb_depth; par->bpp = fb_bbp; par->vmalloc = NULL; par->max_width = fb_width; par->max_height = fb_height; /* * Create buffers and alloc memory */ par->vmalloc = vmalloc(fb_size); if (unlikely(par->vmalloc == NULL)) { ret = -ENOMEM; goto err_free; } ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo); if (unlikely(ret != 0)) goto err_free; ret = ttm_bo_kmap(&par->vmw_bo->base, 0, par->vmw_bo->base.num_pages, &par->map); if (unlikely(ret != 0)) goto err_unref; par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite); par->bo_size = fb_size; /* * Fixed and var */ strcpy(info->fix.id, "svgadrmfb"); info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.visual = FB_VISUAL_TRUECOLOR; info->fix.type_aux = 0; info->fix.xpanstep = 1; /* doing it in hw */ info->fix.ypanstep = 1; /* doing it in hw */ info->fix.ywrapstep = 0; info->fix.accel = FB_ACCEL_NONE; info->fix.line_length = fb_pitch; info->fix.smem_start = 0; info->fix.smem_len = fb_size; info->pseudo_palette = par->pseudo_palette; info->screen_base = par->vmalloc; info->screen_size = fb_size; info->flags = FBINFO_DEFAULT; info->fbops = &vmw_fb_ops; /* 24 depth per default */ info->var.red.offset = 16; info->var.green.offset = 8; info->var.blue.offset = 0; info->var.red.length = 8; info->var.green.length = 8; info->var.blue.length = 8; info->var.transp.offset = 0; info->var.transp.length = 0; info->var.xres_virtual = fb_width; info->var.yres_virtual = fb_height; info->var.bits_per_pixel = par->bpp; info->var.xoffset = 0; info->var.yoffset = 0; info->var.activate = FB_ACTIVATE_NOW; info->var.height = -1; info->var.width = -1; info->var.xres = initial_width; info->var.yres = initial_height; #if 0 info->pixmap.size = 64*1024; info->pixmap.buf_align = 8; info->pixmap.access_align = 32; info->pixmap.flags = FB_PIXMAP_SYSTEM; info->pixmap.scan_align = 1; #else info->pixmap.size = 0; info->pixmap.buf_align = 8; info->pixmap.access_align = 32; info->pixmap.flags = FB_PIXMAP_SYSTEM; info->pixmap.scan_align = 1; #endif info->apertures = alloc_apertures(1); if (!info->apertures) { ret = -ENOMEM; goto err_aper; } info->apertures->ranges[0].base = vmw_priv->vram_start; info->apertures->ranges[0].size = vmw_priv->vram_size; /* * Dirty & Deferred IO */ par->dirty.x1 = par->dirty.x2 = 0; par->dirty.y1 = par->dirty.y2 = 0; par->dirty.active = true; spin_lock_init(&par->dirty.lock); info->fbdefio = &vmw_defio; fb_deferred_io_init(info); ret = register_framebuffer(info); if (unlikely(ret != 0)) goto err_defio; return 0; err_defio: fb_deferred_io_cleanup(info); err_aper: ttm_bo_kunmap(&par->map); err_unref: ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo); err_free: vfree(par->vmalloc); framebuffer_release(info); vmw_priv->fb_info = NULL; return ret; } int vmw_fb_close(struct vmw_private *vmw_priv) { struct fb_info *info; struct vmw_fb_par *par; struct ttm_buffer_object *bo; if (!vmw_priv->fb_info) return 0; info = vmw_priv->fb_info; par = info->par; bo = &par->vmw_bo->base; par->vmw_bo = NULL; /* ??? 
order */ fb_deferred_io_cleanup(info); unregister_framebuffer(info); ttm_bo_kunmap(&par->map); ttm_bo_unref(&bo); vfree(par->vmalloc); framebuffer_release(info); return 0; } int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv, struct vmw_dma_buffer *vmw_bo) { struct ttm_buffer_object *bo = &vmw_bo->base; int ret = 0; ret = ttm_bo_reserve(bo, false, false, false, 0); if (unlikely(ret != 0)) return ret; ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false); ttm_bo_unreserve(bo); return ret; } int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv, struct vmw_dma_buffer *vmw_bo) { struct ttm_buffer_object *bo = &vmw_bo->base; struct ttm_placement ne_placement = vmw_vram_ne_placement; int ret = 0; ne_placement.lpfn = bo->num_pages; /* interuptable? */ ret = ttm_write_lock(&vmw_priv->active_master->lock, false); if (unlikely(ret != 0)) return ret; ret = ttm_bo_reserve(bo, false, false, false, 0); if (unlikely(ret != 0)) goto err_unlock; if (bo->mem.mem_type == TTM_PL_VRAM && bo->mem.start < bo->num_pages && bo->mem.start > 0) (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false, false); ret = ttm_bo_validate(bo, &ne_placement, false, false, false); /* Could probably bug on */ WARN_ON(bo->offset != 0); ttm_bo_unreserve(bo); err_unlock: ttm_write_unlock(&vmw_priv->active_master->lock); return ret; } int vmw_fb_off(struct vmw_private *vmw_priv) { struct fb_info *info; struct vmw_fb_par *par; unsigned long flags; if (!vmw_priv->fb_info) return -EINVAL; info = vmw_priv->fb_info; par = info->par; spin_lock_irqsave(&par->dirty.lock, flags); par->dirty.active = false; spin_unlock_irqrestore(&par->dirty.lock, flags); flush_delayed_work_sync(&info->deferred_work); par->bo_ptr = NULL; ttm_bo_kunmap(&par->map); vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo); return 0; } int vmw_fb_on(struct vmw_private *vmw_priv) { struct fb_info *info; struct vmw_fb_par *par; unsigned long flags; bool dummy; int ret; if (!vmw_priv->fb_info) return -EINVAL; info = vmw_priv->fb_info; par = info->par; /* we are already active */ if (par->bo_ptr != NULL) return 0; /* Make sure that all overlays are stoped when we take over */ vmw_overlay_stop_all(vmw_priv); ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo); if (unlikely(ret != 0)) { DRM_ERROR("could not move buffer to start of VRAM\n"); goto err_no_buffer; } ret = ttm_bo_kmap(&par->vmw_bo->base, 0, par->vmw_bo->base.num_pages, &par->map); BUG_ON(ret != 0); par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy); spin_lock_irqsave(&par->dirty.lock, flags); par->dirty.active = true; spin_unlock_irqrestore(&par->dirty.lock, flags); err_no_buffer: vmw_fb_set_par(info); vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres); /* If there already was stuff dirty we wont * schedule a new work, so lets do it now */ schedule_delayed_work(&info->deferred_work, 0); return 0; }
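/*
 * Editor's illustration, not part of vmwgfx: the bounding-box merge rule
 * used by vmw_fb_dirty_mark() above, restated on a plain structure so the
 * arithmetic is easy to follow.  The struct and function names are invented
 * for this sketch.
 */
struct dirty_rect {
	unsigned x1, y1, x2, y2;	/* x1 == x2 means "nothing dirty yet" */
};

static void dirty_rect_add(struct dirty_rect *d,
			   unsigned x, unsigned y, unsigned w, unsigned h)
{
	unsigned x2 = x + w, y2 = y + h;

	if (d->x1 == d->x2) {
		/* first rectangle: take it as-is */
		d->x1 = x;  d->y1 = y;
		d->x2 = x2; d->y2 = y2;
		return;
	}
	/* otherwise grow the existing box just enough to cover the new area */
	if (x < d->x1)  d->x1 = x;
	if (y < d->y1)  d->y1 = y;
	if (x2 > d->x2) d->x2 = x2;
	if (y2 > d->y2) d->y2 = y2;
}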
gpl-2.0
Hadramos/android_sony_xperiaz_kernel_sources
net/bridge/br_if.c
2853
9423
/* * Userspace interface * Linux ethernet bridge * * Authors: * Lennert Buytenhek <buytenh@gnu.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/netpoll.h> #include <linux/ethtool.h> #include <linux/if_arp.h> #include <linux/module.h> #include <linux/init.h> #include <linux/rtnetlink.h> #include <linux/if_ether.h> #include <linux/slab.h> #include <net/sock.h> #include "br_private.h" /* * Determine initial path cost based on speed. * using recommendations from 802.1d standard * * Since driver might sleep need to not be holding any locks. */ static int port_cost(struct net_device *dev) { struct ethtool_cmd ecmd; if (!__ethtool_get_settings(dev, &ecmd)) { switch (ethtool_cmd_speed(&ecmd)) { case SPEED_10000: return 2; case SPEED_1000: return 4; case SPEED_100: return 19; case SPEED_10: return 100; } } /* Old silly heuristics based on name */ if (!strncmp(dev->name, "lec", 3)) return 7; if (!strncmp(dev->name, "plip", 4)) return 2500; return 100; /* assume old 10Mbps */ } /* Check for port carrier transistions. */ void br_port_carrier_check(struct net_bridge_port *p) { struct net_device *dev = p->dev; struct net_bridge *br = p->br; if (netif_running(dev) && netif_carrier_ok(dev)) p->path_cost = port_cost(dev); if (!netif_running(br->dev)) return; spin_lock_bh(&br->lock); if (netif_running(dev) && netif_carrier_ok(dev)) { if (p->state == BR_STATE_DISABLED) br_stp_enable_port(p); } else { if (p->state != BR_STATE_DISABLED) br_stp_disable_port(p); } spin_unlock_bh(&br->lock); } static void release_nbp(struct kobject *kobj) { struct net_bridge_port *p = container_of(kobj, struct net_bridge_port, kobj); kfree(p); } static struct kobj_type brport_ktype = { #ifdef CONFIG_SYSFS .sysfs_ops = &brport_sysfs_ops, #endif .release = release_nbp, }; static void destroy_nbp(struct net_bridge_port *p) { struct net_device *dev = p->dev; p->br = NULL; p->dev = NULL; dev_put(dev); kobject_put(&p->kobj); } static void destroy_nbp_rcu(struct rcu_head *head) { struct net_bridge_port *p = container_of(head, struct net_bridge_port, rcu); destroy_nbp(p); } /* Delete port(interface) from bridge is done in two steps. * via RCU. First step, marks device as down. That deletes * all the timers and stops new packets from flowing through. * * Final cleanup doesn't occur until after all CPU's finished * processing packets. 
* * Protected from multiple admin operations by RTNL mutex */ static void del_nbp(struct net_bridge_port *p) { struct net_bridge *br = p->br; struct net_device *dev = p->dev; sysfs_remove_link(br->ifobj, p->dev->name); dev_set_promiscuity(dev, -1); spin_lock_bh(&br->lock); br_stp_disable_port(p); spin_unlock_bh(&br->lock); br_ifinfo_notify(RTM_DELLINK, p); br_fdb_delete_by_port(br, p, 1); list_del_rcu(&p->list); dev->priv_flags &= ~IFF_BRIDGE_PORT; netdev_rx_handler_unregister(dev); synchronize_net(); netdev_set_master(dev, NULL); br_multicast_del_port(p); kobject_uevent(&p->kobj, KOBJ_REMOVE); kobject_del(&p->kobj); br_netpoll_disable(p); call_rcu(&p->rcu, destroy_nbp_rcu); } /* Delete bridge device */ void br_dev_delete(struct net_device *dev, struct list_head *head) { struct net_bridge *br = netdev_priv(dev); struct net_bridge_port *p, *n; list_for_each_entry_safe(p, n, &br->port_list, list) { del_nbp(p); } del_timer_sync(&br->gc_timer); br_sysfs_delbr(br->dev); unregister_netdevice_queue(br->dev, head); } /* find an available port number */ static int find_portno(struct net_bridge *br) { int index; struct net_bridge_port *p; unsigned long *inuse; inuse = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long), GFP_KERNEL); if (!inuse) return -ENOMEM; set_bit(0, inuse); /* zero is reserved */ list_for_each_entry(p, &br->port_list, list) { set_bit(p->port_no, inuse); } index = find_first_zero_bit(inuse, BR_MAX_PORTS); kfree(inuse); return (index >= BR_MAX_PORTS) ? -EXFULL : index; } /* called with RTNL but without bridge lock */ static struct net_bridge_port *new_nbp(struct net_bridge *br, struct net_device *dev) { int index; struct net_bridge_port *p; index = find_portno(br); if (index < 0) return ERR_PTR(index); p = kzalloc(sizeof(*p), GFP_KERNEL); if (p == NULL) return ERR_PTR(-ENOMEM); p->br = br; dev_hold(dev); p->dev = dev; p->path_cost = port_cost(dev); p->priority = 0x8000 >> BR_PORT_BITS; p->port_no = index; p->flags = 0; br_init_port(p); p->state = BR_STATE_DISABLED; br_stp_port_timer_init(p); br_multicast_add_port(p); return p; } int br_add_bridge(struct net *net, const char *name) { struct net_device *dev; int res; dev = alloc_netdev(sizeof(struct net_bridge), name, br_dev_setup); if (!dev) return -ENOMEM; dev_net_set(dev, net); res = register_netdev(dev); if (res) free_netdev(dev); return res; } int br_del_bridge(struct net *net, const char *name) { struct net_device *dev; int ret = 0; rtnl_lock(); dev = __dev_get_by_name(net, name); if (dev == NULL) ret = -ENXIO; /* Could not find device */ else if (!(dev->priv_flags & IFF_EBRIDGE)) { /* Attempt to delete non bridge device! */ ret = -EPERM; } else if (dev->flags & IFF_UP) { /* Not shutdown yet. 
*/ ret = -EBUSY; } else br_dev_delete(dev, NULL); rtnl_unlock(); return ret; } /* MTU of the bridge pseudo-device: ETH_DATA_LEN or the minimum of the ports */ int br_min_mtu(const struct net_bridge *br) { const struct net_bridge_port *p; int mtu = 0; ASSERT_RTNL(); if (list_empty(&br->port_list)) mtu = ETH_DATA_LEN; else { list_for_each_entry(p, &br->port_list, list) { if (!mtu || p->dev->mtu < mtu) mtu = p->dev->mtu; } } return mtu; } /* * Recomputes features using slave's features */ netdev_features_t br_features_recompute(struct net_bridge *br, netdev_features_t features) { struct net_bridge_port *p; netdev_features_t mask; if (list_empty(&br->port_list)) return features; mask = features; features &= ~NETIF_F_ONE_FOR_ALL; list_for_each_entry(p, &br->port_list, list) { features = netdev_increment_features(features, p->dev->features, mask); } return features; } /* called with RTNL */ int br_add_if(struct net_bridge *br, struct net_device *dev) { struct net_bridge_port *p; int err = 0; bool changed_addr; /* Don't allow bridging non-ethernet like devices */ if ((dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN || !is_valid_ether_addr(dev->dev_addr)) return -EINVAL; /* No bridging of bridges */ if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit) return -ELOOP; /* Device is already being bridged */ if (br_port_exists(dev)) return -EBUSY; /* No bridging devices that dislike that (e.g. wireless) */ if (dev->priv_flags & IFF_DONT_BRIDGE) return -EOPNOTSUPP; p = new_nbp(br, dev); if (IS_ERR(p)) return PTR_ERR(p); call_netdevice_notifiers(NETDEV_JOIN, dev); err = dev_set_promiscuity(dev, 1); if (err) goto put_back; err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj), SYSFS_BRIDGE_PORT_ATTR); if (err) goto err1; err = br_sysfs_addif(p); if (err) goto err2; if (br_netpoll_info(br) && ((err = br_netpoll_enable(p)))) goto err3; err = netdev_set_master(dev, br->dev); if (err) goto err3; err = netdev_rx_handler_register(dev, br_handle_frame, p); if (err) goto err4; dev->priv_flags |= IFF_BRIDGE_PORT; dev_disable_lro(dev); list_add_rcu(&p->list, &br->port_list); netdev_update_features(br->dev); spin_lock_bh(&br->lock); changed_addr = br_stp_recalculate_bridge_id(br); if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) && (br->dev->flags & IFF_UP)) br_stp_enable_port(p); spin_unlock_bh(&br->lock); br_ifinfo_notify(RTM_NEWLINK, p); if (changed_addr) call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev); dev_set_mtu(br->dev, br_min_mtu(br)); if (br_fdb_insert(br, p, dev->dev_addr)) netdev_err(dev, "failed insert local address bridge forwarding table\n"); kobject_uevent(&p->kobj, KOBJ_ADD); return 0; err4: netdev_set_master(dev, NULL); err3: sysfs_remove_link(br->ifobj, p->dev->name); err2: kobject_put(&p->kobj); p = NULL; /* kobject_put frees */ err1: dev_set_promiscuity(dev, -1); put_back: dev_put(dev); kfree(p); return err; } /* called with RTNL */ int br_del_if(struct net_bridge *br, struct net_device *dev) { struct net_bridge_port *p; bool changed_addr; p = br_port_get_rtnl(dev); if (!p || p->br != br) return -EINVAL; del_nbp(p); spin_lock_bh(&br->lock); changed_addr = br_stp_recalculate_bridge_id(br); spin_unlock_bh(&br->lock); if (changed_addr) call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev); netdev_update_features(br->dev); return 0; } void __net_exit br_net_exit(struct net *net) { struct net_device *dev; LIST_HEAD(list); rtnl_lock(); for_each_netdev(net, dev) if (dev->priv_flags & IFF_EBRIDGE) br_dev_delete(dev, &list); 
	unregister_netdevice_many(&list);
	rtnl_unlock();
}
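/*
 * Editor's illustration, not part of br_if.c: the generic two-step RCU
 * removal pattern that del_nbp() follows above - unlink first so new
 * readers cannot find the object, free only after existing readers are
 * done.  "struct item" and the function names are invented for this sketch;
 * <linux/rculist.h> and <linux/slab.h> are assumed, as in the file above.
 */
struct item {
	struct list_head list;
	struct rcu_head rcu;
};

static void item_free_rcu(struct rcu_head *head)
{
	/* step 2: runs only after every pre-existing RCU reader has finished */
	kfree(container_of(head, struct item, rcu));
}

static void item_del(struct item *it)
{
	/* step 1: unlink, so readers traversing the list no longer see it */
	list_del_rcu(&it->list);
	call_rcu(&it->rcu, item_free_rcu);
}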
gpl-2.0
agat63/E4GT_ICS_kernel
arch/powerpc/platforms/52xx/lite5200_pm.c
3109
6358
#include <linux/init.h> #include <linux/suspend.h> #include <asm/io.h> #include <asm/time.h> #include <asm/mpc52xx.h> /* defined in lite5200_sleep.S and only used here */ extern void lite5200_low_power(void __iomem *sram, void __iomem *mbar); static struct mpc52xx_cdm __iomem *cdm; static struct mpc52xx_intr __iomem *pic; static struct mpc52xx_sdma __iomem *bes; static struct mpc52xx_xlb __iomem *xlb; static struct mpc52xx_gpio __iomem *gps; static struct mpc52xx_gpio_wkup __iomem *gpw; static void __iomem *pci; static void __iomem *sram; static const int sram_size = 0x4000; /* 16 kBytes */ static void __iomem *mbar; static suspend_state_t lite5200_pm_target_state; static int lite5200_pm_valid(suspend_state_t state) { switch (state) { case PM_SUSPEND_STANDBY: case PM_SUSPEND_MEM: return 1; default: return 0; } } static int lite5200_pm_begin(suspend_state_t state) { if (lite5200_pm_valid(state)) { lite5200_pm_target_state = state; return 0; } return -EINVAL; } static int lite5200_pm_prepare(void) { struct device_node *np; const struct of_device_id immr_ids[] = { { .compatible = "fsl,mpc5200-immr", }, { .compatible = "fsl,mpc5200b-immr", }, { .type = "soc", .compatible = "mpc5200", }, /* lite5200 */ { .type = "builtin", .compatible = "mpc5200", }, /* efika */ {} }; u64 regaddr64 = 0; const u32 *regaddr_p; /* deep sleep? let mpc52xx code handle that */ if (lite5200_pm_target_state == PM_SUSPEND_STANDBY) return mpc52xx_pm_prepare(); if (lite5200_pm_target_state != PM_SUSPEND_MEM) return -EINVAL; /* map registers */ np = of_find_matching_node(NULL, immr_ids); regaddr_p = of_get_address(np, 0, NULL, NULL); if (regaddr_p) regaddr64 = of_translate_address(np, regaddr_p); of_node_put(np); mbar = ioremap((u32) regaddr64, 0xC000); if (!mbar) { printk(KERN_ERR "%s:%i Error mapping registers\n", __func__, __LINE__); return -ENOSYS; } cdm = mbar + 0x200; pic = mbar + 0x500; gps = mbar + 0xb00; gpw = mbar + 0xc00; pci = mbar + 0xd00; bes = mbar + 0x1200; xlb = mbar + 0x1f00; sram = mbar + 0x8000; return 0; } /* save and restore registers not bound to any real devices */ static struct mpc52xx_cdm scdm; static struct mpc52xx_intr spic; static struct mpc52xx_sdma sbes; static struct mpc52xx_xlb sxlb; static struct mpc52xx_gpio sgps; static struct mpc52xx_gpio_wkup sgpw; static char spci[0x200]; static void lite5200_save_regs(void) { _memcpy_fromio(&spic, pic, sizeof(*pic)); _memcpy_fromio(&sbes, bes, sizeof(*bes)); _memcpy_fromio(&scdm, cdm, sizeof(*cdm)); _memcpy_fromio(&sxlb, xlb, sizeof(*xlb)); _memcpy_fromio(&sgps, gps, sizeof(*gps)); _memcpy_fromio(&sgpw, gpw, sizeof(*gpw)); _memcpy_fromio(spci, pci, 0x200); _memcpy_fromio(saved_sram, sram, sram_size); } static void lite5200_restore_regs(void) { int i; _memcpy_toio(sram, saved_sram, sram_size); /* PCI Configuration */ _memcpy_toio(pci, spci, 0x200); /* * GPIOs. Interrupt Master Enable has higher address then other * registers, so just memcpy is ok. 
*/ _memcpy_toio(gpw, &sgpw, sizeof(*gpw)); _memcpy_toio(gps, &sgps, sizeof(*gps)); /* XLB Arbitrer */ out_be32(&xlb->snoop_window, sxlb.snoop_window); out_be32(&xlb->master_priority, sxlb.master_priority); out_be32(&xlb->master_pri_enable, sxlb.master_pri_enable); /* enable */ out_be32(&xlb->int_enable, sxlb.int_enable); out_be32(&xlb->config, sxlb.config); /* CDM - Clock Distribution Module */ out_8(&cdm->ipb_clk_sel, scdm.ipb_clk_sel); out_8(&cdm->pci_clk_sel, scdm.pci_clk_sel); out_8(&cdm->ext_48mhz_en, scdm.ext_48mhz_en); out_8(&cdm->fd_enable, scdm.fd_enable); out_be16(&cdm->fd_counters, scdm.fd_counters); out_be32(&cdm->clk_enables, scdm.clk_enables); out_8(&cdm->osc_disable, scdm.osc_disable); out_be16(&cdm->mclken_div_psc1, scdm.mclken_div_psc1); out_be16(&cdm->mclken_div_psc2, scdm.mclken_div_psc2); out_be16(&cdm->mclken_div_psc3, scdm.mclken_div_psc3); out_be16(&cdm->mclken_div_psc6, scdm.mclken_div_psc6); /* BESTCOMM */ out_be32(&bes->taskBar, sbes.taskBar); out_be32(&bes->currentPointer, sbes.currentPointer); out_be32(&bes->endPointer, sbes.endPointer); out_be32(&bes->variablePointer, sbes.variablePointer); out_8(&bes->IntVect1, sbes.IntVect1); out_8(&bes->IntVect2, sbes.IntVect2); out_be16(&bes->PtdCntrl, sbes.PtdCntrl); for (i=0; i<32; i++) out_8(&bes->ipr[i], sbes.ipr[i]); out_be32(&bes->cReqSelect, sbes.cReqSelect); out_be32(&bes->task_size0, sbes.task_size0); out_be32(&bes->task_size1, sbes.task_size1); out_be32(&bes->MDEDebug, sbes.MDEDebug); out_be32(&bes->ADSDebug, sbes.ADSDebug); out_be32(&bes->Value1, sbes.Value1); out_be32(&bes->Value2, sbes.Value2); out_be32(&bes->Control, sbes.Control); out_be32(&bes->Status, sbes.Status); out_be32(&bes->PTDDebug, sbes.PTDDebug); /* restore tasks */ for (i=0; i<16; i++) out_be16(&bes->tcr[i], sbes.tcr[i]); /* enable interrupts */ out_be32(&bes->IntPend, sbes.IntPend); out_be32(&bes->IntMask, sbes.IntMask); /* PIC */ out_be32(&pic->per_pri1, spic.per_pri1); out_be32(&pic->per_pri2, spic.per_pri2); out_be32(&pic->per_pri3, spic.per_pri3); out_be32(&pic->main_pri1, spic.main_pri1); out_be32(&pic->main_pri2, spic.main_pri2); out_be32(&pic->enc_status, spic.enc_status); /* unmask and enable interrupts */ out_be32(&pic->per_mask, spic.per_mask); out_be32(&pic->main_mask, spic.main_mask); out_be32(&pic->ctrl, spic.ctrl); } static int lite5200_pm_enter(suspend_state_t state) { /* deep sleep? let mpc52xx code handle that */ if (state == PM_SUSPEND_STANDBY) { return mpc52xx_pm_enter(state); } lite5200_save_regs(); /* effectively save FP regs */ enable_kernel_fp(); lite5200_low_power(sram, mbar); lite5200_restore_regs(); iounmap(mbar); return 0; } static void lite5200_pm_finish(void) { /* deep sleep? let mpc52xx code handle that */ if (lite5200_pm_target_state == PM_SUSPEND_STANDBY) mpc52xx_pm_finish(); } static void lite5200_pm_end(void) { lite5200_pm_target_state = PM_SUSPEND_ON; } static const struct platform_suspend_ops lite5200_pm_ops = { .valid = lite5200_pm_valid, .begin = lite5200_pm_begin, .prepare = lite5200_pm_prepare, .enter = lite5200_pm_enter, .finish = lite5200_pm_finish, .end = lite5200_pm_end, }; int __init lite5200_pm_init(void) { suspend_set_ops(&lite5200_pm_ops); return 0; }
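/*
 * Editor's illustration, not part of the Lite5200 code: the minimal shape
 * of a platform_suspend_ops registration, mirroring what lite5200_pm_init()
 * does above with a much larger ops table.  The "myboard_" names are
 * invented; <linux/suspend.h> is assumed, as in the file above.
 */
static int myboard_pm_valid(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM;
}

static int myboard_pm_enter(suspend_state_t state)
{
	/* save device state, enter the low-power mode, restore on wakeup */
	return 0;
}

static const struct platform_suspend_ops myboard_pm_ops = {
	.valid = myboard_pm_valid,
	.enter = myboard_pm_enter,
};

static int __init myboard_pm_init(void)
{
	suspend_set_ops(&myboard_pm_ops);
	return 0;
}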
gpl-2.0
omnirom/android_kernel_samsung_n1
net/sctp/sm_statetable.c
7973
32141
/* SCTP kernel implementation * (C) Copyright IBM Corp. 2001, 2004 * Copyright (c) 1999-2000 Cisco, Inc. * Copyright (c) 1999-2001 Motorola, Inc. * Copyright (c) 2001 Intel Corp. * Copyright (c) 2001 Nokia, Inc. * * This file is part of the SCTP kernel implementation * * These are the state tables for the SCTP state machine. * * This SCTP implementation is free software; * you can redistribute it and/or modify it under the terms of * the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This SCTP implementation is distributed in the hope that it * will be useful, but WITHOUT ANY WARRANTY; without even the implied * ************************ * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU CC; see the file COPYING. If not, write to * the Free Software Foundation, 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. * * Please send any bug reports or fixes you make to the * email address(es): * lksctp developers <lksctp-developers@lists.sourceforge.net> * * Or submit a bug report through the following website: * http://www.sf.net/projects/lksctp * * Written or modified by: * La Monte H.P. Yarroll <piggy@acm.org> * Karl Knutson <karl@athena.chicago.il.us> * Jon Grimm <jgrimm@us.ibm.com> * Hui Huang <hui.huang@nokia.com> * Daisy Chang <daisyc@us.ibm.com> * Ardelle Fan <ardelle.fan@intel.com> * Sridhar Samudrala <sri@us.ibm.com> * * Any bugs reported given to us we will try to fix... any fixes shared will * be incorporated into the next SCTP release. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/skbuff.h> #include <net/sctp/sctp.h> #include <net/sctp/sm.h> static const sctp_sm_table_entry_t primitive_event_table[SCTP_NUM_PRIMITIVE_TYPES][SCTP_STATE_NUM_STATES]; static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_STATE_NUM_STATES]; static const sctp_sm_table_entry_t timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES]; static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid, sctp_state_t state); static const sctp_sm_table_entry_t bug = { .fn = sctp_sf_bug, .name = "sctp_sf_bug" }; #define DO_LOOKUP(_max, _type, _table) \ ({ \ const sctp_sm_table_entry_t *rtn; \ \ if ((event_subtype._type > (_max))) { \ pr_warn("table %p possible attack: event %d exceeds max %d\n", \ _table, event_subtype._type, _max); \ rtn = &bug; \ } else \ rtn = &_table[event_subtype._type][(int)state]; \ \ rtn; \ }) const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type, sctp_state_t state, sctp_subtype_t event_subtype) { switch (event_type) { case SCTP_EVENT_T_CHUNK: return sctp_chunk_event_lookup(event_subtype.chunk, state); case SCTP_EVENT_T_TIMEOUT: return DO_LOOKUP(SCTP_EVENT_TIMEOUT_MAX, timeout, timeout_event_table); case SCTP_EVENT_T_OTHER: return DO_LOOKUP(SCTP_EVENT_OTHER_MAX, other, other_event_table); case SCTP_EVENT_T_PRIMITIVE: return DO_LOOKUP(SCTP_EVENT_PRIMITIVE_MAX, primitive, primitive_event_table); default: /* Yikes! We got an illegal event type. 
*/ return &bug; } } #define TYPE_SCTP_FUNC(func) {.fn = func, .name = #func} #define TYPE_SCTP_DATA { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_ootb), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_eat_data_6_2), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_eat_data_6_2), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_eat_data_fast_4_4), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ } /* TYPE_SCTP_DATA */ #define TYPE_SCTP_INIT { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_do_5_1B_init), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_do_5_2_1_siminit), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_do_5_2_1_siminit), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_do_5_2_2_dupinit), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_do_5_2_2_dupinit), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_do_5_2_2_dupinit), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_do_5_2_2_dupinit), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_do_9_2_reshutack), \ } /* TYPE_SCTP_INIT */ #define TYPE_SCTP_INIT_ACK { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_do_5_2_3_initack), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_do_5_1C_ack), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ } /* TYPE_SCTP_INIT_ACK */ #define TYPE_SCTP_SACK { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_ootb), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_eat_sack_6_2), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_eat_sack_6_2), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_eat_sack_6_2), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_eat_sack_6_2), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ } /* TYPE_SCTP_SACK */ #define TYPE_SCTP_HEARTBEAT { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_ootb), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ /* This should not happen, but we are nice. 
*/ \ TYPE_SCTP_FUNC(sctp_sf_beat_8_3), \ } /* TYPE_SCTP_HEARTBEAT */ #define TYPE_SCTP_HEARTBEAT_ACK { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_ootb), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_violation), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_backbeat_8_3), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_backbeat_8_3), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_backbeat_8_3), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_backbeat_8_3), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ } /* TYPE_SCTP_HEARTBEAT_ACK */ #define TYPE_SCTP_ABORT { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_pdiscard), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_cookie_wait_abort), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_cookie_echoed_abort), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_do_9_1_abort), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_shutdown_pending_abort), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_shutdown_sent_abort), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_do_9_1_abort), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_shutdown_ack_sent_abort), \ } /* TYPE_SCTP_ABORT */ #define TYPE_SCTP_SHUTDOWN { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_ootb), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_do_9_2_shutdown), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_do_9_2_shutdown), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_do_9_2_shutdown_ack), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_do_9_2_shut_ctsn), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ } /* TYPE_SCTP_SHUTDOWN */ #define TYPE_SCTP_SHUTDOWN_ACK { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_ootb), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_do_8_5_1_E_sa), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_do_8_5_1_E_sa), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_violation), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_violation), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_do_9_2_final), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_violation), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_do_9_2_final), \ } /* TYPE_SCTP_SHUTDOWN_ACK */ #define TYPE_SCTP_ERROR { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_ootb), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_cookie_echoed_err), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_operr_notify), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_operr_notify), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_operr_notify), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ } /* TYPE_SCTP_ERROR */ #define TYPE_SCTP_COOKIE_ECHO { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_do_5_1D_ce), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \ /* SCTP_STATE_COOKIE_ECHOED */ \ 
TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_do_5_2_4_dupcook), \ } /* TYPE_SCTP_COOKIE_ECHO */ #define TYPE_SCTP_COOKIE_ACK { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_do_5_1E_ca), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ } /* TYPE_SCTP_COOKIE_ACK */ #define TYPE_SCTP_ECN_ECNE { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_do_ecne), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_do_ecne), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_do_ecne), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_do_ecne), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_do_ecne), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ } /* TYPE_SCTP_ECN_ECNE */ #define TYPE_SCTP_ECN_CWR { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_do_ecn_cwr), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_do_ecn_cwr), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_do_ecn_cwr), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ } /* TYPE_SCTP_ECN_CWR */ #define TYPE_SCTP_SHUTDOWN_COMPLETE { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_do_4_C), \ } /* TYPE_SCTP_SHUTDOWN_COMPLETE */ /* The primary index for this table is the chunk type. * The secondary index for this table is the state. * * For base protocol (RFC 2960). 
*/ static const sctp_sm_table_entry_t chunk_event_table[SCTP_NUM_BASE_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = { TYPE_SCTP_DATA, TYPE_SCTP_INIT, TYPE_SCTP_INIT_ACK, TYPE_SCTP_SACK, TYPE_SCTP_HEARTBEAT, TYPE_SCTP_HEARTBEAT_ACK, TYPE_SCTP_ABORT, TYPE_SCTP_SHUTDOWN, TYPE_SCTP_SHUTDOWN_ACK, TYPE_SCTP_ERROR, TYPE_SCTP_COOKIE_ECHO, TYPE_SCTP_COOKIE_ACK, TYPE_SCTP_ECN_ECNE, TYPE_SCTP_ECN_CWR, TYPE_SCTP_SHUTDOWN_COMPLETE, }; /* state_fn_t chunk_event_table[][] */ #define TYPE_SCTP_ASCONF { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_do_asconf), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_do_asconf), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_do_asconf), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_do_asconf), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ } /* TYPE_SCTP_ASCONF */ #define TYPE_SCTP_ASCONF_ACK { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_do_asconf_ack), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_do_asconf_ack), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_do_asconf_ack), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_do_asconf_ack), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ } /* TYPE_SCTP_ASCONF_ACK */ /* The primary index for this table is the chunk type. * The secondary index for this table is the state. */ static const sctp_sm_table_entry_t addip_chunk_event_table[SCTP_NUM_ADDIP_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = { TYPE_SCTP_ASCONF, TYPE_SCTP_ASCONF_ACK, }; /*state_fn_t addip_chunk_event_table[][] */ #define TYPE_SCTP_FWD_TSN { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_ootb), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_eat_fwd_tsn), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_eat_fwd_tsn), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_eat_fwd_tsn_fast), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ } /* TYPE_SCTP_FWD_TSN */ /* The primary index for this table is the chunk type. * The secondary index for this table is the state. 
*/ static const sctp_sm_table_entry_t prsctp_chunk_event_table[SCTP_NUM_PRSCTP_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = { TYPE_SCTP_FWD_TSN, }; /*state_fn_t prsctp_chunk_event_table[][] */ #define TYPE_SCTP_AUTH { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_ootb), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_eat_auth), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_eat_auth), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_eat_auth), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_eat_auth), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_eat_auth), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_eat_auth), \ } /* TYPE_SCTP_AUTH */ /* The primary index for this table is the chunk type. * The secondary index for this table is the state. */ static const sctp_sm_table_entry_t auth_chunk_event_table[SCTP_NUM_AUTH_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = { TYPE_SCTP_AUTH, }; /*state_fn_t auth_chunk_event_table[][] */ static const sctp_sm_table_entry_t chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = { /* SCTP_STATE_CLOSED */ TYPE_SCTP_FUNC(sctp_sf_ootb), /* SCTP_STATE_COOKIE_WAIT */ TYPE_SCTP_FUNC(sctp_sf_unk_chunk), /* SCTP_STATE_COOKIE_ECHOED */ TYPE_SCTP_FUNC(sctp_sf_unk_chunk), /* SCTP_STATE_ESTABLISHED */ TYPE_SCTP_FUNC(sctp_sf_unk_chunk), /* SCTP_STATE_SHUTDOWN_PENDING */ TYPE_SCTP_FUNC(sctp_sf_unk_chunk), /* SCTP_STATE_SHUTDOWN_SENT */ TYPE_SCTP_FUNC(sctp_sf_unk_chunk), /* SCTP_STATE_SHUTDOWN_RECEIVED */ TYPE_SCTP_FUNC(sctp_sf_unk_chunk), /* SCTP_STATE_SHUTDOWN_ACK_SENT */ TYPE_SCTP_FUNC(sctp_sf_unk_chunk), }; /* chunk unknown */ #define TYPE_SCTP_PRIMITIVE_ASSOCIATE { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_do_prm_asoc), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_not_impl), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_not_impl), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_not_impl), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_not_impl), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_not_impl), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_not_impl), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_not_impl), \ } /* TYPE_SCTP_PRIMITIVE_ASSOCIATE */ #define TYPE_SCTP_PRIMITIVE_SHUTDOWN { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_error_closed), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_cookie_wait_prm_shutdown), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_cookie_echoed_prm_shutdown),\ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_do_9_2_prm_shutdown), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_ignore_primitive), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_ignore_primitive), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_ignore_primitive), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_ignore_primitive), \ } /* TYPE_SCTP_PRIMITIVE_SHUTDOWN */ #define TYPE_SCTP_PRIMITIVE_ABORT { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_error_closed), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_cookie_wait_prm_abort), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_cookie_echoed_prm_abort), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_do_9_1_prm_abort), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_shutdown_pending_prm_abort), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ 
TYPE_SCTP_FUNC(sctp_sf_shutdown_sent_prm_abort), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_do_9_1_prm_abort), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_shutdown_ack_sent_prm_abort), \ } /* TYPE_SCTP_PRIMITIVE_ABORT */ #define TYPE_SCTP_PRIMITIVE_SEND { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_error_closed), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_do_prm_send), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_do_prm_send), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_do_prm_send), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \ } /* TYPE_SCTP_PRIMITIVE_SEND */ #define TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_error_closed), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_do_prm_requestheartbeat), \ } /* TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT */ #define TYPE_SCTP_PRIMITIVE_ASCONF { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_error_closed), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_error_closed), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_error_closed), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_do_prm_asconf), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_do_prm_asconf), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_do_prm_asconf), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_do_prm_asconf), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \ } /* TYPE_SCTP_PRIMITIVE_ASCONF */ /* The primary index for this table is the primitive type. * The secondary index for this table is the state. 
*/ static const sctp_sm_table_entry_t primitive_event_table[SCTP_NUM_PRIMITIVE_TYPES][SCTP_STATE_NUM_STATES] = { TYPE_SCTP_PRIMITIVE_ASSOCIATE, TYPE_SCTP_PRIMITIVE_SHUTDOWN, TYPE_SCTP_PRIMITIVE_ABORT, TYPE_SCTP_PRIMITIVE_SEND, TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT, TYPE_SCTP_PRIMITIVE_ASCONF, }; #define TYPE_SCTP_OTHER_NO_PENDING_TSN { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_do_no_pending_tsn), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_do_9_2_start_shutdown), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_do_9_2_shutdown_ack), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ } #define TYPE_SCTP_OTHER_ICMP_PROTO_UNREACH { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_cookie_wait_icmp_abort), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ } static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_STATE_NUM_STATES] = { TYPE_SCTP_OTHER_NO_PENDING_TSN, TYPE_SCTP_OTHER_ICMP_PROTO_UNREACH, }; #define TYPE_SCTP_EVENT_TIMEOUT_NONE { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_bug), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_bug), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_bug), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_bug), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_bug), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_bug), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_bug), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_bug), \ } #define TYPE_SCTP_EVENT_TIMEOUT_T1_COOKIE { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_bug), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_t1_cookie_timer_expire), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ } #define TYPE_SCTP_EVENT_TIMEOUT_T1_INIT { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_t1_init_timer_expire), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ 
TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ } #define TYPE_SCTP_EVENT_TIMEOUT_T2_SHUTDOWN { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_t2_timer_expire), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_t2_timer_expire), \ } #define TYPE_SCTP_EVENT_TIMEOUT_T3_RTX { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_do_6_3_3_rtx), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_do_6_3_3_rtx), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_do_6_3_3_rtx), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_do_6_3_3_rtx), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ } #define TYPE_SCTP_EVENT_TIMEOUT_T4_RTO { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_t4_timer_expire), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ } #define TYPE_SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ } #define TYPE_SCTP_EVENT_TIMEOUT_HEARTBEAT { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_sendbeat_8_3), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_sendbeat_8_3), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_sendbeat_8_3), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ } #define TYPE_SCTP_EVENT_TIMEOUT_SACK { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_do_6_2_sack), \ /* 
SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_do_6_2_sack), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_do_6_2_sack), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ } #define TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE { \ /* SCTP_STATE_CLOSED */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_COOKIE_WAIT */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_COOKIE_ECHOED */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_ESTABLISHED */ \ TYPE_SCTP_FUNC(sctp_sf_autoclose_timer_expire), \ /* SCTP_STATE_SHUTDOWN_PENDING */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_SHUTDOWN_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ } static const sctp_sm_table_entry_t timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES] = { TYPE_SCTP_EVENT_TIMEOUT_NONE, TYPE_SCTP_EVENT_TIMEOUT_T1_COOKIE, TYPE_SCTP_EVENT_TIMEOUT_T1_INIT, TYPE_SCTP_EVENT_TIMEOUT_T2_SHUTDOWN, TYPE_SCTP_EVENT_TIMEOUT_T3_RTX, TYPE_SCTP_EVENT_TIMEOUT_T4_RTO, TYPE_SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD, TYPE_SCTP_EVENT_TIMEOUT_HEARTBEAT, TYPE_SCTP_EVENT_TIMEOUT_SACK, TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE, }; static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid, sctp_state_t state) { if (state > SCTP_STATE_MAX) return &bug; if (cid <= SCTP_CID_BASE_MAX) return &chunk_event_table[cid][state]; if (sctp_prsctp_enable) { if (cid == SCTP_CID_FWD_TSN) return &prsctp_chunk_event_table[0][state]; } if (sctp_addip_enable) { if (cid == SCTP_CID_ASCONF) return &addip_chunk_event_table[0][state]; if (cid == SCTP_CID_ASCONF_ACK) return &addip_chunk_event_table[1][state]; } if (sctp_auth_enable) { if (cid == SCTP_CID_AUTH) return &auth_chunk_event_table[0][state]; } return &chunk_event_table_unknown[state]; }
gpl-2.0
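The sm_statetable.c content above is built almost entirely from table-driven dispatch: each event class (chunk, primitive, timeout, other) has a two-dimensional array whose primary index is the event type and whose secondary index is the association state, and sctp_chunk_event_lookup() falls back to chunk_event_table_unknown for anything it does not recognize. The standalone sketch below shows that pattern in miniature; every name in it (demo_*, ST_*, EV_*) is illustrative and is not part of the kernel source above.

/* Minimal sketch of the table-driven dispatch pattern used above.
 * All names here are hypothetical; they are not kernel API. */
#include <stdio.h>

enum demo_state { ST_CLOSED, ST_OPEN, ST_MAX };	/* secondary index */
enum demo_event { EV_PING, EV_DATA, EV_MAX };	/* primary index   */

typedef int (*demo_state_fn_t)(void);

static int demo_discard(void) { return 0; }
static int demo_accept(void)  { return 1; }

/* event x state -> handler, mirroring chunk_event_table[cid][state] */
static const demo_state_fn_t demo_table[EV_MAX][ST_MAX] = {
	[EV_PING] = { [ST_CLOSED] = demo_discard, [ST_OPEN] = demo_accept },
	[EV_DATA] = { [ST_CLOSED] = demo_discard, [ST_OPEN] = demo_accept },
};

static demo_state_fn_t demo_lookup(enum demo_event ev, enum demo_state st)
{
	if (ev >= EV_MAX || st >= ST_MAX)
		return demo_discard;	/* unknown event/state -> safe default */
	return demo_table[ev][st];
}

int main(void)
{
	/* dispatch one event and print the handler's verdict (prints 1) */
	printf("%d\n", demo_lookup(EV_DATA, ST_OPEN)());
	return 0;
}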
G2Mini-DevTeam/android_kernel_lge_msm8226
arch/parisc/math-emu/fpudispatch.c
8741
39225
/* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC * * File: * @(#) pa/fp/fpudispatch.c $Revision: 1.1 $ * * Purpose: * <<please update with a synopsis of the functionality provided by this file>> * * External Interfaces: * <<the following list was autogenerated, please review>> * emfpudispatch(ir, dummy1, dummy2, fpregs) * fpudispatch(ir, excp_code, holder, fpregs) * * Internal Interfaces: * <<the following list was autogenerated, please review>> * static u_int decode_06(u_int, u_int *) * static u_int decode_0c(u_int, u_int, u_int, u_int *) * static u_int decode_0e(u_int, u_int, u_int, u_int *) * static u_int decode_26(u_int, u_int *) * static u_int decode_2e(u_int, u_int *) * static void update_status_cbit(u_int *, u_int, u_int, u_int) * * Theory: * <<please update with a overview of the operation of this file>> * * END_DESC */ #define FPUDEBUG 0 #include "float.h" #include <linux/bug.h> #include <linux/kernel.h> #include <asm/processor.h> /* #include <sys/debug.h> */ /* #include <machine/sys/mdep_private.h> */ #define COPR_INST 0x30000000 /* * definition of extru macro. If pos and len are constants, the compiler * will generate an extru instruction when optimized */ #define extru(r,pos,len) (((r) >> (31-(pos))) & (( 1 << (len)) - 1)) /* definitions of bit field locations in the instruction */ #define fpmajorpos 5 #define fpr1pos 10 #define fpr2pos 15 #define fptpos 31 #define fpsubpos 18 #define fpclass1subpos 16 #define fpclasspos 22 #define fpfmtpos 20 #define fpdfpos 18 #define fpnulpos 26 /* * the following are the extra bits for the 0E major op */ #define fpxr1pos 24 #define fpxr2pos 19 #define fpxtpos 25 #define fpxpos 23 #define fp0efmtpos 20 /* * the following are for the multi-ops */ #define fprm1pos 10 #define fprm2pos 15 #define fptmpos 31 #define fprapos 25 #define fptapos 20 #define fpmultifmt 26 /* * the following are for the fused FP instructions */ /* fprm1pos 10 */ /* fprm2pos 15 */ #define fpraupos 18 #define fpxrm2pos 19 /* fpfmtpos 20 */ #define fpralpos 23 #define fpxrm1pos 24 /* fpxtpos 25 */ #define fpfusedsubop 26 /* fptpos 31 */ /* * offset to constant zero in the FP emulation registers */ #define fpzeroreg (32*sizeof(double)/sizeof(u_int)) /* * extract the major opcode from the instruction */ #define get_major(op) extru(op,fpmajorpos,6) /* * extract the two bit class field from the FP instruction. The class is at bit * positions 21-22 */ #define get_class(op) extru(op,fpclasspos,2) /* * extract the 3 bit subop field. For all but class 1 instructions, it is * located at bit positions 16-18 */ #define get_subop(op) extru(op,fpsubpos,3) /* * extract the 2 or 3 bit subop field from class 1 instructions. 
It is located * at bit positions 15-16 (PA1.1) or 14-16 (PA2.0) */ #define get_subop1_PA1_1(op) extru(op,fpclass1subpos,2) /* PA89 (1.1) fmt */ #define get_subop1_PA2_0(op) extru(op,fpclass1subpos,3) /* PA 2.0 fmt */ /* definitions of unimplemented exceptions */ #define MAJOR_0C_EXCP 0x09 #define MAJOR_0E_EXCP 0x0b #define MAJOR_06_EXCP 0x03 #define MAJOR_26_EXCP 0x23 #define MAJOR_2E_EXCP 0x2b #define PA83_UNIMP_EXCP 0x01 /* * Special Defines for TIMEX specific code */ #define FPU_TYPE_FLAG_POS (EM_FPU_TYPE_OFFSET>>2) #define TIMEX_ROLEX_FPU_MASK (TIMEX_EXTEN_FLAG|ROLEX_EXTEN_FLAG) /* * Static function definitions */ #define _PROTOTYPES #if defined(_PROTOTYPES) || defined(_lint) static u_int decode_0c(u_int, u_int, u_int, u_int *); static u_int decode_0e(u_int, u_int, u_int, u_int *); static u_int decode_06(u_int, u_int *); static u_int decode_26(u_int, u_int *); static u_int decode_2e(u_int, u_int *); static void update_status_cbit(u_int *, u_int, u_int, u_int); #else /* !_PROTOTYPES&&!_lint */ static u_int decode_0c(); static u_int decode_0e(); static u_int decode_06(); static u_int decode_26(); static u_int decode_2e(); static void update_status_cbit(); #endif /* _PROTOTYPES&&!_lint */ #define VASSERT(x) static void parisc_linux_get_fpu_type(u_int fpregs[]) { /* on pa-linux the fpu type is not filled in by the * caller; it is constructed here */ if (boot_cpu_data.cpu_type == pcxs) fpregs[FPU_TYPE_FLAG_POS] = TIMEX_EXTEN_FLAG; else if (boot_cpu_data.cpu_type == pcxt || boot_cpu_data.cpu_type == pcxt_) fpregs[FPU_TYPE_FLAG_POS] = ROLEX_EXTEN_FLAG; else if (boot_cpu_data.cpu_type >= pcxu) fpregs[FPU_TYPE_FLAG_POS] = PA2_0_FPU_FLAG; } /* * this routine will decode the excepting floating point instruction and * call the approiate emulation routine. * It is called by decode_fpu with the following parameters: * fpudispatch(current_ir, unimplemented_code, 0, &Fpu_register) * where current_ir is the instruction to be emulated, * unimplemented_code is the exception_code that the hardware generated * and &Fpu_register is the address of emulated FP reg 0. */ u_int fpudispatch(u_int ir, u_int excp_code, u_int holder, u_int fpregs[]) { u_int class, subop; u_int fpu_type_flags; /* All FP emulation code assumes that ints are 4-bytes in length */ VASSERT(sizeof(int) == 4); parisc_linux_get_fpu_type(fpregs); fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS]; /* get fpu type flags */ class = get_class(ir); if (class == 1) { if (fpu_type_flags & PA2_0_FPU_FLAG) subop = get_subop1_PA2_0(ir); else subop = get_subop1_PA1_1(ir); } else subop = get_subop(ir); if (FPUDEBUG) printk("class %d subop %d\n", class, subop); switch (excp_code) { case MAJOR_0C_EXCP: case PA83_UNIMP_EXCP: return(decode_0c(ir,class,subop,fpregs)); case MAJOR_0E_EXCP: return(decode_0e(ir,class,subop,fpregs)); case MAJOR_06_EXCP: return(decode_06(ir,fpregs)); case MAJOR_26_EXCP: return(decode_26(ir,fpregs)); case MAJOR_2E_EXCP: return(decode_2e(ir,fpregs)); default: /* "crashme Night Gallery painting nr 2. (asm_crash.s). * This was fixed for multi-user kernels, but * workstation kernels had a panic here. This allowed * any arbitrary user to panic the kernel by executing * setting the FP exception registers to strange values * and generating an emulation trap. The emulation and * exception code must never be able to panic the * kernel. 
*/ return(UNIMPLEMENTEDEXCEPTION); } } /* * this routine is called by $emulation_trap to emulate a coprocessor * instruction if one doesn't exist */ u_int emfpudispatch(u_int ir, u_int dummy1, u_int dummy2, u_int fpregs[]) { u_int class, subop, major; u_int fpu_type_flags; /* All FP emulation code assumes that ints are 4-bytes in length */ VASSERT(sizeof(int) == 4); fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS]; /* get fpu type flags */ major = get_major(ir); class = get_class(ir); if (class == 1) { if (fpu_type_flags & PA2_0_FPU_FLAG) subop = get_subop1_PA2_0(ir); else subop = get_subop1_PA1_1(ir); } else subop = get_subop(ir); switch (major) { case 0x0C: return(decode_0c(ir,class,subop,fpregs)); case 0x0E: return(decode_0e(ir,class,subop,fpregs)); case 0x06: return(decode_06(ir,fpregs)); case 0x26: return(decode_26(ir,fpregs)); case 0x2E: return(decode_2e(ir,fpregs)); default: return(PA83_UNIMP_EXCP); } } static u_int decode_0c(u_int ir, u_int class, u_int subop, u_int fpregs[]) { u_int r1,r2,t; /* operand register offsets */ u_int fmt; /* also sf for class 1 conversions */ u_int df; /* for class 1 conversions */ u_int *status; u_int retval, local_status; u_int fpu_type_flags; if (ir == COPR_INST) { fpregs[0] = EMULATION_VERSION << 11; return(NOEXCEPTION); } status = &fpregs[0]; /* fp status register */ local_status = fpregs[0]; /* and local copy */ r1 = extru(ir,fpr1pos,5) * sizeof(double)/sizeof(u_int); if (r1 == 0) /* map fr0 source to constant zero */ r1 = fpzeroreg; t = extru(ir,fptpos,5) * sizeof(double)/sizeof(u_int); if (t == 0 && class != 2) /* don't allow fr0 as a dest */ return(MAJOR_0C_EXCP); fmt = extru(ir,fpfmtpos,2); /* get fmt completer */ switch (class) { case 0: switch (subop) { case 0: /* COPR 0,0 emulated above*/ case 1: return(MAJOR_0C_EXCP); case 2: /* FCPY */ switch (fmt) { case 2: /* illegal */ return(MAJOR_0C_EXCP); case 3: /* quad */ t &= ~3; /* force to even reg #s */ r1 &= ~3; fpregs[t+3] = fpregs[r1+3]; fpregs[t+2] = fpregs[r1+2]; case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; case 0: /* single */ fpregs[t] = fpregs[r1]; return(NOEXCEPTION); } case 3: /* FABS */ switch (fmt) { case 2: /* illegal */ return(MAJOR_0C_EXCP); case 3: /* quad */ t &= ~3; /* force to even reg #s */ r1 &= ~3; fpregs[t+3] = fpregs[r1+3]; fpregs[t+2] = fpregs[r1+2]; case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; case 0: /* single */ /* copy and clear sign bit */ fpregs[t] = fpregs[r1] & 0x7fffffff; return(NOEXCEPTION); } case 6: /* FNEG */ switch (fmt) { case 2: /* illegal */ return(MAJOR_0C_EXCP); case 3: /* quad */ t &= ~3; /* force to even reg #s */ r1 &= ~3; fpregs[t+3] = fpregs[r1+3]; fpregs[t+2] = fpregs[r1+2]; case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; case 0: /* single */ /* copy and invert sign bit */ fpregs[t] = fpregs[r1] ^ 0x80000000; return(NOEXCEPTION); } case 7: /* FNEGABS */ switch (fmt) { case 2: /* illegal */ return(MAJOR_0C_EXCP); case 3: /* quad */ t &= ~3; /* force to even reg #s */ r1 &= ~3; fpregs[t+3] = fpregs[r1+3]; fpregs[t+2] = fpregs[r1+2]; case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; case 0: /* single */ /* copy and set sign bit */ fpregs[t] = fpregs[r1] | 0x80000000; return(NOEXCEPTION); } case 4: /* FSQRT */ switch (fmt) { case 0: return(sgl_fsqrt(&fpregs[r1],0, &fpregs[t],status)); case 1: return(dbl_fsqrt(&fpregs[r1],0, &fpregs[t],status)); case 2: case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } case 5: /* FRND */ switch (fmt) { case 0: return(sgl_frnd(&fpregs[r1],0, &fpregs[t],status)); case 1: return(dbl_frnd(&fpregs[r1],0, 
&fpregs[t],status)); case 2: case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } } /* end of switch (subop) */ case 1: /* class 1 */ df = extru(ir,fpdfpos,2); /* get dest format */ if ((df & 2) || (fmt & 2)) { /* * fmt's 2 and 3 are illegal of not implemented * quad conversions */ return(MAJOR_0C_EXCP); } /* * encode source and dest formats into 2 bits. * high bit is source, low bit is dest. * bit = 1 --> double precision */ fmt = (fmt << 1) | df; switch (subop) { case 0: /* FCNVFF */ switch(fmt) { case 0: /* sgl/sgl */ return(MAJOR_0C_EXCP); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvff(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvff(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(MAJOR_0C_EXCP); } case 1: /* FCNVXF */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvxf(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvxf(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvxf(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvxf(&fpregs[r1],0, &fpregs[t],status)); } case 2: /* FCNVFX */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvfx(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvfx(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvfx(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvfx(&fpregs[r1],0, &fpregs[t],status)); } case 3: /* FCNVFXT */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvfxt(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvfxt(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvfxt(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvfxt(&fpregs[r1],0, &fpregs[t],status)); } case 5: /* FCNVUF (PA2.0 only) */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvuf(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvuf(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvuf(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvuf(&fpregs[r1],0, &fpregs[t],status)); } case 6: /* FCNVFU (PA2.0 only) */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvfu(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvfu(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvfu(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvfu(&fpregs[r1],0, &fpregs[t],status)); } case 7: /* FCNVFUT (PA2.0 only) */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvfut(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvfut(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvfut(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvfut(&fpregs[r1],0, &fpregs[t],status)); } case 4: /* undefined */ return(MAJOR_0C_EXCP); } /* end of switch subop */ case 2: /* class 2 */ fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS]; r2 = extru(ir, fpr2pos, 5) * sizeof(double)/sizeof(u_int); if (r2 == 0) r2 = fpzeroreg; if (fpu_type_flags & PA2_0_FPU_FLAG) { /* FTEST if nullify bit set, otherwise FCMP */ if (extru(ir, fpnulpos, 1)) { /* FTEST */ switch (fmt) { case 0: /* * arg0 is not used * second param is the t field used for * ftest,acc and ftest,rej * third param is the subop (y-field) */ BUG(); /* 
Unsupported * return(ftest(0L,extru(ir,fptpos,5), * &fpregs[0],subop)); */ case 1: case 2: case 3: return(MAJOR_0C_EXCP); } } else { /* FCMP */ switch (fmt) { case 0: retval = sgl_fcmp(&fpregs[r1], &fpregs[r2],extru(ir,fptpos,5), &local_status); update_status_cbit(status,local_status, fpu_type_flags, subop); return(retval); case 1: retval = dbl_fcmp(&fpregs[r1], &fpregs[r2],extru(ir,fptpos,5), &local_status); update_status_cbit(status,local_status, fpu_type_flags, subop); return(retval); case 2: /* illegal */ case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } } } /* end of if for PA2.0 */ else { /* PA1.0 & PA1.1 */ switch (subop) { case 2: case 3: case 4: case 5: case 6: case 7: return(MAJOR_0C_EXCP); case 0: /* FCMP */ switch (fmt) { case 0: retval = sgl_fcmp(&fpregs[r1], &fpregs[r2],extru(ir,fptpos,5), &local_status); update_status_cbit(status,local_status, fpu_type_flags, subop); return(retval); case 1: retval = dbl_fcmp(&fpregs[r1], &fpregs[r2],extru(ir,fptpos,5), &local_status); update_status_cbit(status,local_status, fpu_type_flags, subop); return(retval); case 2: /* illegal */ case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } case 1: /* FTEST */ switch (fmt) { case 0: /* * arg0 is not used * second param is the t field used for * ftest,acc and ftest,rej * third param is the subop (y-field) */ BUG(); /* unsupported * return(ftest(0L,extru(ir,fptpos,5), * &fpregs[0],subop)); */ case 1: case 2: case 3: return(MAJOR_0C_EXCP); } } /* end of switch subop */ } /* end of else for PA1.0 & PA1.1 */ case 3: /* class 3 */ r2 = extru(ir,fpr2pos,5) * sizeof(double)/sizeof(u_int); if (r2 == 0) r2 = fpzeroreg; switch (subop) { case 5: case 6: case 7: return(MAJOR_0C_EXCP); case 0: /* FADD */ switch (fmt) { case 0: return(sgl_fadd(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 1: return(dbl_fadd(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 2: /* illegal */ case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } case 1: /* FSUB */ switch (fmt) { case 0: return(sgl_fsub(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 1: return(dbl_fsub(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 2: /* illegal */ case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } case 2: /* FMPY */ switch (fmt) { case 0: return(sgl_fmpy(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 1: return(dbl_fmpy(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 2: /* illegal */ case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } case 3: /* FDIV */ switch (fmt) { case 0: return(sgl_fdiv(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 1: return(dbl_fdiv(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 2: /* illegal */ case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } case 4: /* FREM */ switch (fmt) { case 0: return(sgl_frem(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 1: return(dbl_frem(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 2: /* illegal */ case 3: /* quad not implemented */ return(MAJOR_0C_EXCP); } } /* end of class 3 switch */ } /* end of switch(class) */ /* If we get here, something is really wrong! 
*/ return(MAJOR_0C_EXCP); } static u_int decode_0e(ir,class,subop,fpregs) u_int ir,class,subop; u_int fpregs[]; { u_int r1,r2,t; /* operand register offsets */ u_int fmt; /* also sf for class 1 conversions */ u_int df; /* dest format for class 1 conversions */ u_int *status; u_int retval, local_status; u_int fpu_type_flags; status = &fpregs[0]; local_status = fpregs[0]; r1 = ((extru(ir,fpr1pos,5)<<1)|(extru(ir,fpxr1pos,1))); if (r1 == 0) r1 = fpzeroreg; t = ((extru(ir,fptpos,5)<<1)|(extru(ir,fpxtpos,1))); if (t == 0 && class != 2) return(MAJOR_0E_EXCP); if (class < 2) /* class 0 or 1 has 2 bit fmt */ fmt = extru(ir,fpfmtpos,2); else /* class 2 and 3 have 1 bit fmt */ fmt = extru(ir,fp0efmtpos,1); /* * An undefined combination, double precision accessing the * right half of a FPR, can get us into trouble. * Let's just force proper alignment on it. */ if (fmt == DBL) { r1 &= ~1; if (class != 1) t &= ~1; } switch (class) { case 0: switch (subop) { case 0: /* unimplemented */ case 1: return(MAJOR_0E_EXCP); case 2: /* FCPY */ switch (fmt) { case 2: case 3: return(MAJOR_0E_EXCP); case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; case 0: /* single */ fpregs[t] = fpregs[r1]; return(NOEXCEPTION); } case 3: /* FABS */ switch (fmt) { case 2: case 3: return(MAJOR_0E_EXCP); case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; case 0: /* single */ fpregs[t] = fpregs[r1] & 0x7fffffff; return(NOEXCEPTION); } case 6: /* FNEG */ switch (fmt) { case 2: case 3: return(MAJOR_0E_EXCP); case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; case 0: /* single */ fpregs[t] = fpregs[r1] ^ 0x80000000; return(NOEXCEPTION); } case 7: /* FNEGABS */ switch (fmt) { case 2: case 3: return(MAJOR_0E_EXCP); case 1: /* double */ fpregs[t+1] = fpregs[r1+1]; case 0: /* single */ fpregs[t] = fpregs[r1] | 0x80000000; return(NOEXCEPTION); } case 4: /* FSQRT */ switch (fmt) { case 0: return(sgl_fsqrt(&fpregs[r1],0, &fpregs[t], status)); case 1: return(dbl_fsqrt(&fpregs[r1],0, &fpregs[t], status)); case 2: case 3: return(MAJOR_0E_EXCP); } case 5: /* FRMD */ switch (fmt) { case 0: return(sgl_frnd(&fpregs[r1],0, &fpregs[t], status)); case 1: return(dbl_frnd(&fpregs[r1],0, &fpregs[t], status)); case 2: case 3: return(MAJOR_0E_EXCP); } } /* end of switch (subop */ case 1: /* class 1 */ df = extru(ir,fpdfpos,2); /* get dest format */ /* * Fix Crashme problem (writing to 31R in double precision) * here too. 
*/ if (df == DBL) { t &= ~1; } if ((df & 2) || (fmt & 2)) return(MAJOR_0E_EXCP); fmt = (fmt << 1) | df; switch (subop) { case 0: /* FCNVFF */ switch(fmt) { case 0: /* sgl/sgl */ return(MAJOR_0E_EXCP); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvff(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvff(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(MAJOR_0E_EXCP); } case 1: /* FCNVXF */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvxf(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvxf(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvxf(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvxf(&fpregs[r1],0, &fpregs[t],status)); } case 2: /* FCNVFX */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvfx(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvfx(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvfx(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvfx(&fpregs[r1],0, &fpregs[t],status)); } case 3: /* FCNVFXT */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvfxt(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvfxt(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvfxt(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvfxt(&fpregs[r1],0, &fpregs[t],status)); } case 5: /* FCNVUF (PA2.0 only) */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvuf(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvuf(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvuf(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvuf(&fpregs[r1],0, &fpregs[t],status)); } case 6: /* FCNVFU (PA2.0 only) */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvfu(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvfu(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvfu(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvfu(&fpregs[r1],0, &fpregs[t],status)); } case 7: /* FCNVFUT (PA2.0 only) */ switch(fmt) { case 0: /* sgl/sgl */ return(sgl_to_sgl_fcnvfut(&fpregs[r1],0, &fpregs[t],status)); case 1: /* sgl/dbl */ return(sgl_to_dbl_fcnvfut(&fpregs[r1],0, &fpregs[t],status)); case 2: /* dbl/sgl */ return(dbl_to_sgl_fcnvfut(&fpregs[r1],0, &fpregs[t],status)); case 3: /* dbl/dbl */ return(dbl_to_dbl_fcnvfut(&fpregs[r1],0, &fpregs[t],status)); } case 4: /* undefined */ return(MAJOR_0C_EXCP); } /* end of switch subop */ case 2: /* class 2 */ /* * Be careful out there. * Crashme can generate cases where FR31R is specified * as the source or target of a double precision operation. * Since we just pass the address of the floating-point * register to the emulation routines, this can cause * corruption of fpzeroreg. 
*/ if (fmt == DBL) r2 = (extru(ir,fpr2pos,5)<<1); else r2 = ((extru(ir,fpr2pos,5)<<1)|(extru(ir,fpxr2pos,1))); fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS]; if (r2 == 0) r2 = fpzeroreg; if (fpu_type_flags & PA2_0_FPU_FLAG) { /* FTEST if nullify bit set, otherwise FCMP */ if (extru(ir, fpnulpos, 1)) { /* FTEST */ /* not legal */ return(MAJOR_0E_EXCP); } else { /* FCMP */ switch (fmt) { /* * fmt is only 1 bit long */ case 0: retval = sgl_fcmp(&fpregs[r1], &fpregs[r2],extru(ir,fptpos,5), &local_status); update_status_cbit(status,local_status, fpu_type_flags, subop); return(retval); case 1: retval = dbl_fcmp(&fpregs[r1], &fpregs[r2],extru(ir,fptpos,5), &local_status); update_status_cbit(status,local_status, fpu_type_flags, subop); return(retval); } } } /* end of if for PA2.0 */ else { /* PA1.0 & PA1.1 */ switch (subop) { case 1: case 2: case 3: case 4: case 5: case 6: case 7: return(MAJOR_0E_EXCP); case 0: /* FCMP */ switch (fmt) { /* * fmt is only 1 bit long */ case 0: retval = sgl_fcmp(&fpregs[r1], &fpregs[r2],extru(ir,fptpos,5), &local_status); update_status_cbit(status,local_status, fpu_type_flags, subop); return(retval); case 1: retval = dbl_fcmp(&fpregs[r1], &fpregs[r2],extru(ir,fptpos,5), &local_status); update_status_cbit(status,local_status, fpu_type_flags, subop); return(retval); } } /* end of switch subop */ } /* end of else for PA1.0 & PA1.1 */ case 3: /* class 3 */ /* * Be careful out there. * Crashme can generate cases where FR31R is specified * as the source or target of a double precision operation. * Since we just pass the address of the floating-point * register to the emulation routines, this can cause * corruption of fpzeroreg. */ if (fmt == DBL) r2 = (extru(ir,fpr2pos,5)<<1); else r2 = ((extru(ir,fpr2pos,5)<<1)|(extru(ir,fpxr2pos,1))); if (r2 == 0) r2 = fpzeroreg; switch (subop) { case 5: case 6: case 7: return(MAJOR_0E_EXCP); /* * Note that fmt is only 1 bit for class 3 */ case 0: /* FADD */ switch (fmt) { case 0: return(sgl_fadd(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 1: return(dbl_fadd(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); } case 1: /* FSUB */ switch (fmt) { case 0: return(sgl_fsub(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 1: return(dbl_fsub(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); } case 2: /* FMPY or XMPYU */ /* * check for integer multiply (x bit set) */ if (extru(ir,fpxpos,1)) { /* * emulate XMPYU */ switch (fmt) { case 0: /* * bad instruction if t specifies * the right half of a register */ if (t & 1) return(MAJOR_0E_EXCP); BUG(); /* unsupported * impyu(&fpregs[r1],&fpregs[r2], * &fpregs[t]); */ return(NOEXCEPTION); case 1: return(MAJOR_0E_EXCP); } } else { /* FMPY */ switch (fmt) { case 0: return(sgl_fmpy(&fpregs[r1], &fpregs[r2],&fpregs[t],status)); case 1: return(dbl_fmpy(&fpregs[r1], &fpregs[r2],&fpregs[t],status)); } } case 3: /* FDIV */ switch (fmt) { case 0: return(sgl_fdiv(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 1: return(dbl_fdiv(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); } case 4: /* FREM */ switch (fmt) { case 0: return(sgl_frem(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); case 1: return(dbl_frem(&fpregs[r1],&fpregs[r2], &fpregs[t],status)); } } /* end of class 3 switch */ } /* end of switch(class) */ /* If we get here, something is really wrong! 
*/ return(MAJOR_0E_EXCP); } /* * routine to decode the 06 (FMPYADD and FMPYCFXT) instruction */ static u_int decode_06(ir,fpregs) u_int ir; u_int fpregs[]; { u_int rm1, rm2, tm, ra, ta; /* operands */ u_int fmt; u_int error = 0; u_int status; u_int fpu_type_flags; union { double dbl; float flt; struct { u_int i1; u_int i2; } ints; } mtmp, atmp; status = fpregs[0]; /* use a local copy of status reg */ fpu_type_flags=fpregs[FPU_TYPE_FLAG_POS]; /* get fpu type flags */ fmt = extru(ir, fpmultifmt, 1); /* get sgl/dbl flag */ if (fmt == 0) { /* DBL */ rm1 = extru(ir, fprm1pos, 5) * sizeof(double)/sizeof(u_int); if (rm1 == 0) rm1 = fpzeroreg; rm2 = extru(ir, fprm2pos, 5) * sizeof(double)/sizeof(u_int); if (rm2 == 0) rm2 = fpzeroreg; tm = extru(ir, fptmpos, 5) * sizeof(double)/sizeof(u_int); if (tm == 0) return(MAJOR_06_EXCP); ra = extru(ir, fprapos, 5) * sizeof(double)/sizeof(u_int); ta = extru(ir, fptapos, 5) * sizeof(double)/sizeof(u_int); if (ta == 0) return(MAJOR_06_EXCP); if (fpu_type_flags & TIMEX_ROLEX_FPU_MASK) { if (ra == 0) { /* special case FMPYCFXT, see sgl case below */ if (dbl_fmpy(&fpregs[rm1],&fpregs[rm2], &mtmp.ints.i1,&status)) error = 1; if (dbl_to_sgl_fcnvfxt(&fpregs[ta], &atmp.ints.i1,&atmp.ints.i1,&status)) error = 1; } else { if (dbl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1, &status)) error = 1; if (dbl_fadd(&fpregs[ta], &fpregs[ra], &atmp.ints.i1, &status)) error = 1; } } else { if (ra == 0) ra = fpzeroreg; if (dbl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1, &status)) error = 1; if (dbl_fadd(&fpregs[ta], &fpregs[ra], &atmp.ints.i1, &status)) error = 1; } if (error) return(MAJOR_06_EXCP); else { /* copy results */ fpregs[tm] = mtmp.ints.i1; fpregs[tm+1] = mtmp.ints.i2; fpregs[ta] = atmp.ints.i1; fpregs[ta+1] = atmp.ints.i2; fpregs[0] = status; return(NOEXCEPTION); } } else { /* SGL */ /* * calculate offsets for single precision numbers * See table 6-14 in PA-89 architecture for mapping */ rm1 = (extru(ir,fprm1pos,4) | 0x10 ) << 1; /* get offset */ rm1 |= extru(ir,fprm1pos-4,1); /* add right word offset */ rm2 = (extru(ir,fprm2pos,4) | 0x10 ) << 1; /* get offset */ rm2 |= extru(ir,fprm2pos-4,1); /* add right word offset */ tm = (extru(ir,fptmpos,4) | 0x10 ) << 1; /* get offset */ tm |= extru(ir,fptmpos-4,1); /* add right word offset */ ra = (extru(ir,fprapos,4) | 0x10 ) << 1; /* get offset */ ra |= extru(ir,fprapos-4,1); /* add right word offset */ ta = (extru(ir,fptapos,4) | 0x10 ) << 1; /* get offset */ ta |= extru(ir,fptapos-4,1); /* add right word offset */ if (ra == 0x20 &&(fpu_type_flags & TIMEX_ROLEX_FPU_MASK)) { /* special case FMPYCFXT (really 0) * This instruction is only present on the Timex and * Rolex fpu's in so if it is the special case and * one of these fpu's we run the FMPYCFXT instruction */ if (sgl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1, &status)) error = 1; if (sgl_to_sgl_fcnvfxt(&fpregs[ta],&atmp.ints.i1, &atmp.ints.i1,&status)) error = 1; } else { if (sgl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1, &status)) error = 1; if (sgl_fadd(&fpregs[ta], &fpregs[ra], &atmp.ints.i1, &status)) error = 1; } if (error) return(MAJOR_06_EXCP); else { /* copy results */ fpregs[tm] = mtmp.ints.i1; fpregs[ta] = atmp.ints.i1; fpregs[0] = status; return(NOEXCEPTION); } } } /* * routine to decode the 26 (FMPYSUB) instruction */ static u_int decode_26(ir,fpregs) u_int ir; u_int fpregs[]; { u_int rm1, rm2, tm, ra, ta; /* operands */ u_int fmt; u_int error = 0; u_int status; union { double dbl; float flt; struct { u_int i1; u_int i2; } ints; } mtmp, atmp; status 
= fpregs[0]; fmt = extru(ir, fpmultifmt, 1); /* get sgl/dbl flag */ if (fmt == 0) { /* DBL */ rm1 = extru(ir, fprm1pos, 5) * sizeof(double)/sizeof(u_int); if (rm1 == 0) rm1 = fpzeroreg; rm2 = extru(ir, fprm2pos, 5) * sizeof(double)/sizeof(u_int); if (rm2 == 0) rm2 = fpzeroreg; tm = extru(ir, fptmpos, 5) * sizeof(double)/sizeof(u_int); if (tm == 0) return(MAJOR_26_EXCP); ra = extru(ir, fprapos, 5) * sizeof(double)/sizeof(u_int); if (ra == 0) return(MAJOR_26_EXCP); ta = extru(ir, fptapos, 5) * sizeof(double)/sizeof(u_int); if (ta == 0) return(MAJOR_26_EXCP); if (dbl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1,&status)) error = 1; if (dbl_fsub(&fpregs[ta], &fpregs[ra], &atmp.ints.i1,&status)) error = 1; if (error) return(MAJOR_26_EXCP); else { /* copy results */ fpregs[tm] = mtmp.ints.i1; fpregs[tm+1] = mtmp.ints.i2; fpregs[ta] = atmp.ints.i1; fpregs[ta+1] = atmp.ints.i2; fpregs[0] = status; return(NOEXCEPTION); } } else { /* SGL */ /* * calculate offsets for single precision numbers * See table 6-14 in PA-89 architecture for mapping */ rm1 = (extru(ir,fprm1pos,4) | 0x10 ) << 1; /* get offset */ rm1 |= extru(ir,fprm1pos-4,1); /* add right word offset */ rm2 = (extru(ir,fprm2pos,4) | 0x10 ) << 1; /* get offset */ rm2 |= extru(ir,fprm2pos-4,1); /* add right word offset */ tm = (extru(ir,fptmpos,4) | 0x10 ) << 1; /* get offset */ tm |= extru(ir,fptmpos-4,1); /* add right word offset */ ra = (extru(ir,fprapos,4) | 0x10 ) << 1; /* get offset */ ra |= extru(ir,fprapos-4,1); /* add right word offset */ ta = (extru(ir,fptapos,4) | 0x10 ) << 1; /* get offset */ ta |= extru(ir,fptapos-4,1); /* add right word offset */ if (sgl_fmpy(&fpregs[rm1],&fpregs[rm2],&mtmp.ints.i1,&status)) error = 1; if (sgl_fsub(&fpregs[ta], &fpregs[ra], &atmp.ints.i1,&status)) error = 1; if (error) return(MAJOR_26_EXCP); else { /* copy results */ fpregs[tm] = mtmp.ints.i1; fpregs[ta] = atmp.ints.i1; fpregs[0] = status; return(NOEXCEPTION); } } } /* * routine to decode the 2E (FMPYFADD,FMPYNFADD) instructions */ static u_int decode_2e(ir,fpregs) u_int ir; u_int fpregs[]; { u_int rm1, rm2, ra, t; /* operands */ u_int fmt; fmt = extru(ir,fpfmtpos,1); /* get fmt completer */ if (fmt == DBL) { /* DBL */ rm1 = extru(ir,fprm1pos,5) * sizeof(double)/sizeof(u_int); if (rm1 == 0) rm1 = fpzeroreg; rm2 = extru(ir,fprm2pos,5) * sizeof(double)/sizeof(u_int); if (rm2 == 0) rm2 = fpzeroreg; ra = ((extru(ir,fpraupos,3)<<2)|(extru(ir,fpralpos,3)>>1)) * sizeof(double)/sizeof(u_int); if (ra == 0) ra = fpzeroreg; t = extru(ir,fptpos,5) * sizeof(double)/sizeof(u_int); if (t == 0) return(MAJOR_2E_EXCP); if (extru(ir,fpfusedsubop,1)) { /* fmpyfadd or fmpynfadd? */ return(dbl_fmpynfadd(&fpregs[rm1], &fpregs[rm2], &fpregs[ra], &fpregs[0], &fpregs[t])); } else { return(dbl_fmpyfadd(&fpregs[rm1], &fpregs[rm2], &fpregs[ra], &fpregs[0], &fpregs[t])); } } /* end DBL */ else { /* SGL */ rm1 = (extru(ir,fprm1pos,5)<<1)|(extru(ir,fpxrm1pos,1)); if (rm1 == 0) rm1 = fpzeroreg; rm2 = (extru(ir,fprm2pos,5)<<1)|(extru(ir,fpxrm2pos,1)); if (rm2 == 0) rm2 = fpzeroreg; ra = (extru(ir,fpraupos,3)<<3)|extru(ir,fpralpos,3); if (ra == 0) ra = fpzeroreg; t = ((extru(ir,fptpos,5)<<1)|(extru(ir,fpxtpos,1))); if (t == 0) return(MAJOR_2E_EXCP); if (extru(ir,fpfusedsubop,1)) { /* fmpyfadd or fmpynfadd? 
*/ return(sgl_fmpynfadd(&fpregs[rm1], &fpregs[rm2], &fpregs[ra], &fpregs[0], &fpregs[t])); } else { return(sgl_fmpyfadd(&fpregs[rm1], &fpregs[rm2], &fpregs[ra], &fpregs[0], &fpregs[t])); } } /* end SGL */ } /* * update_status_cbit * * This routine returns the correct FP status register value in * *status, based on the C-bit & V-bit returned by the FCMP * emulation routine in new_status. The architecture type * (PA83, PA89 or PA2.0) is available in fpu_type. The y_field * and the architecture type are used to determine what flavor * of FCMP is being emulated. */ static void update_status_cbit(status, new_status, fpu_type, y_field) u_int *status, new_status; u_int fpu_type; u_int y_field; { /* * For PA89 FPU's which implement the Compare Queue and * for PA2.0 FPU's, update the Compare Queue if the y-field = 0, * otherwise update the specified bit in the Compare Array. * Note that the y-field will always be 0 for non-PA2.0 FPU's. */ if ((fpu_type & TIMEX_EXTEN_FLAG) || (fpu_type & ROLEX_EXTEN_FLAG) || (fpu_type & PA2_0_FPU_FLAG)) { if (y_field == 0) { *status = ((*status & 0x04000000) >> 5) | /* old Cbit */ ((*status & 0x003ff000) >> 1) | /* old CQ */ (new_status & 0xffc007ff); /* all other bits*/ } else { *status = (*status & 0x04000000) | /* old Cbit */ ((new_status & 0x04000000) >> (y_field+4)) | (new_status & ~0x04000000 & /* other bits */ ~(0x04000000 >> (y_field+4))); } } /* if PA83, just update the C-bit */ else { *status = new_status; } }
gpl-2.0
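A recurring building block in the fpudispatch.c content above is the extru() macro, which pulls a bit field out of the instruction word using PA-RISC bit numbering (bit 0 is the most significant bit, bit 31 the least), so a field ending at bit pos with width len is recovered by shifting right by (31 - pos) and masking. The short standalone program below reproduces that extraction for a made-up instruction word; the value of ir is hypothetical, and only the macro and the *pos constants are taken from the file above.

/* Standalone sketch of extru()-style field extraction. */
#include <stdio.h>

typedef unsigned int u_int;

#define extru(r, pos, len)	(((r) >> (31 - (pos))) & ((1 << (len)) - 1))

#define fpmajorpos 5
#define fpr1pos    10
#define fptpos     31

int main(void)
{
	/* Hypothetical instruction word, not a real PA-RISC encoding. */
	u_int ir = 0x30000a07;

	printf("major=%u r1=%u t=%u\n",
	       extru(ir, fpmajorpos, 6),	/* bits 0..5   */
	       extru(ir, fpr1pos, 5),	/* bits 6..10  */
	       extru(ir, fptpos, 5));	/* bits 27..31 */
	return 0;
}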
zhuguihua/linux
net/ipv4/tcp_offload.c
38
7869
/* * IPV4 GSO/GRO offload support * Linux INET implementation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * TCPv4 GSO/GRO support */ #include <linux/skbuff.h> #include <net/tcp.h> #include <net/protocol.h> static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq, unsigned int seq, unsigned int mss) { while (skb) { if (before(ts_seq, seq + mss)) { skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP; skb_shinfo(skb)->tskey = ts_seq; return; } skb = skb->next; seq += mss; } } static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb, netdev_features_t features) { if (!pskb_may_pull(skb, sizeof(struct tcphdr))) return ERR_PTR(-EINVAL); if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { const struct iphdr *iph = ip_hdr(skb); struct tcphdr *th = tcp_hdr(skb); /* Set up checksum pseudo header, usually expect stack to * have done this already. */ th->check = 0; skb->ip_summed = CHECKSUM_PARTIAL; __tcp_v4_send_check(skb, iph->saddr, iph->daddr); } return tcp_gso_segment(skb, features); } struct sk_buff *tcp_gso_segment(struct sk_buff *skb, netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); unsigned int sum_truesize = 0; struct tcphdr *th; unsigned int thlen; unsigned int seq; __be32 delta; unsigned int oldlen; unsigned int mss; struct sk_buff *gso_skb = skb; __sum16 newcheck; bool ooo_okay, copy_destructor; th = tcp_hdr(skb); thlen = th->doff * 4; if (thlen < sizeof(*th)) goto out; if (!pskb_may_pull(skb, thlen)) goto out; oldlen = (u16)~skb->len; __skb_pull(skb, thlen); mss = skb_shinfo(skb)->gso_size; if (unlikely(skb->len <= mss)) goto out; if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { /* Packet is from an untrusted source, reset gso_segs. 
*/ int type = skb_shinfo(skb)->gso_type; if (unlikely(type & ~(SKB_GSO_TCPV4 | SKB_GSO_DODGY | SKB_GSO_TCP_ECN | SKB_GSO_TCPV6 | SKB_GSO_GRE | SKB_GSO_GRE_CSUM | SKB_GSO_IPIP | SKB_GSO_SIT | SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM | SKB_GSO_TUNNEL_REMCSUM | 0) || !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))) goto out; skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); segs = NULL; goto out; } copy_destructor = gso_skb->destructor == tcp_wfree; ooo_okay = gso_skb->ooo_okay; /* All segments but the first should have ooo_okay cleared */ skb->ooo_okay = 0; segs = skb_segment(skb, features); if (IS_ERR(segs)) goto out; /* Only first segment might have ooo_okay set */ segs->ooo_okay = ooo_okay; delta = htonl(oldlen + (thlen + mss)); skb = segs; th = tcp_hdr(skb); seq = ntohl(th->seq); if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP)) tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss); newcheck = ~csum_fold((__force __wsum)((__force u32)th->check + (__force u32)delta)); do { th->fin = th->psh = 0; th->check = newcheck; if (skb->ip_summed == CHECKSUM_PARTIAL) gso_reset_checksum(skb, ~th->check); else th->check = gso_make_checksum(skb, ~th->check); seq += mss; if (copy_destructor) { skb->destructor = gso_skb->destructor; skb->sk = gso_skb->sk; sum_truesize += skb->truesize; } skb = skb->next; th = tcp_hdr(skb); th->seq = htonl(seq); th->cwr = 0; } while (skb->next); /* Following permits TCP Small Queues to work well with GSO : * The callback to TCP stack will be called at the time last frag * is freed at TX completion, and not right now when gso_skb * is freed by GSO engine */ if (copy_destructor) { swap(gso_skb->sk, skb->sk); swap(gso_skb->destructor, skb->destructor); sum_truesize += skb->truesize; atomic_add(sum_truesize - gso_skb->truesize, &skb->sk->sk_wmem_alloc); } delta = htonl(oldlen + (skb_tail_pointer(skb) - skb_transport_header(skb)) + skb->data_len); th->check = ~csum_fold((__force __wsum)((__force u32)th->check + (__force u32)delta)); if (skb->ip_summed == CHECKSUM_PARTIAL) gso_reset_checksum(skb, ~th->check); else th->check = gso_make_checksum(skb, ~th->check); out: return segs; } struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) { struct sk_buff **pp = NULL; struct sk_buff *p; struct tcphdr *th; struct tcphdr *th2; unsigned int len; unsigned int thlen; __be32 flags; unsigned int mss = 1; unsigned int hlen; unsigned int off; int flush = 1; int i; off = skb_gro_offset(skb); hlen = off + sizeof(*th); th = skb_gro_header_fast(skb, off); if (skb_gro_header_hard(skb, hlen)) { th = skb_gro_header_slow(skb, hlen, off); if (unlikely(!th)) goto out; } thlen = th->doff * 4; if (thlen < sizeof(*th)) goto out; hlen = off + thlen; if (skb_gro_header_hard(skb, hlen)) { th = skb_gro_header_slow(skb, hlen, off); if (unlikely(!th)) goto out; } skb_gro_pull(skb, thlen); len = skb_gro_len(skb); flags = tcp_flag_word(th); for (; (p = *head); head = &p->next) { if (!NAPI_GRO_CB(p)->same_flow) continue; th2 = tcp_hdr(p); if (*(u32 *)&th->source ^ *(u32 *)&th2->source) { NAPI_GRO_CB(p)->same_flow = 0; continue; } goto found; } goto out_check_final; found: /* Include the IP ID check below from the inner most IP hdr */ flush = NAPI_GRO_CB(p)->flush | NAPI_GRO_CB(p)->flush_id; flush |= (__force int)(flags & TCP_FLAG_CWR); flush |= (__force int)((flags ^ tcp_flag_word(th2)) & ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH)); flush |= (__force int)(th->ack_seq ^ th2->ack_seq); for (i = sizeof(*th); i < thlen; i += 4) flush |= *(u32 *)((u8 *)th + i) ^ *(u32 
*)((u8 *)th2 + i); mss = skb_shinfo(p)->gso_size; flush |= (len - 1) >= mss; flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq); if (flush || skb_gro_receive(head, skb)) { mss = 1; goto out_check_final; } p = *head; th2 = tcp_hdr(p); tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH); out_check_final: flush = len < mss; flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST | TCP_FLAG_SYN | TCP_FLAG_FIN)); if (p && (!NAPI_GRO_CB(skb)->same_flow || flush)) pp = head; out: NAPI_GRO_CB(skb)->flush |= (flush != 0); return pp; } int tcp_gro_complete(struct sk_buff *skb) { struct tcphdr *th = tcp_hdr(skb); skb->csum_start = (unsigned char *)th - skb->head; skb->csum_offset = offsetof(struct tcphdr, check); skb->ip_summed = CHECKSUM_PARTIAL; skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; if (th->cwr) skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; return 0; } EXPORT_SYMBOL(tcp_gro_complete); static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb) { /* Don't bother verifying checksum if we're going to flush anyway. */ if (!NAPI_GRO_CB(skb)->flush && skb_gro_checksum_validate(skb, IPPROTO_TCP, inet_gro_compute_pseudo)) { NAPI_GRO_CB(skb)->flush = 1; return NULL; } return tcp_gro_receive(head, skb); } static int tcp4_gro_complete(struct sk_buff *skb, int thoff) { const struct iphdr *iph = ip_hdr(skb); struct tcphdr *th = tcp_hdr(skb); th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr, iph->daddr, 0); skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; return tcp_gro_complete(skb); } static const struct net_offload tcpv4_offload = { .callbacks = { .gso_segment = tcp4_gso_segment, .gro_receive = tcp4_gro_receive, .gro_complete = tcp4_gro_complete, }, }; int __init tcpv4_offload_init(void) { return inet_add_offload(&tcpv4_offload, IPPROTO_TCP); }
gpl-2.0
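tcp_gso_segment() in the content above avoids summing the whole TCP segment again for every piece it emits: it derives a delta from the change in length and folds it into the existing checksum with csum_fold()-style one's-complement arithmetic. The standalone demo below illustrates the same incremental-update idea in portable C following RFC 1624; it deliberately does not use the kernel's __wsum/csum_fold API, and all names and data values in it are illustrative.

/* Incremental one's-complement checksum update vs. full recomputation. */
#include <stdio.h>
#include <stdint.h>

static uint16_t csum16(const uint16_t *w, int n)
{
	uint32_t sum = 0;

	while (n--)
		sum += *w++;
	while (sum >> 16)		/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint16_t data[4] = { 0x1234, 0x5678, 0x9abc, 0x0100 };
	uint16_t old = csum16(data, 4);

	/* change one 16-bit word, then update: HC' = ~(~HC + ~m + m') */
	uint16_t m = data[3], m_new = 0x0200;
	uint32_t sum = (uint16_t)~old + (uint16_t)~m + m_new;
	uint16_t incremental;

	data[3] = m_new;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	incremental = (uint16_t)~sum;

	/* both prints show the same value (0xfa96 for this data) */
	printf("full=0x%04x incremental=0x%04x\n", csum16(data, 4), incremental);
	return 0;
}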
uniphier/linux-unph
drivers/media/platform/mx2_emmaprp.c
38
25789
/* * Support eMMa-PrP through mem2mem framework. * * eMMa-PrP is a piece of HW that allows fetching buffers * from one memory location and do several operations on * them such as scaling or format conversion giving, as a result * a new processed buffer in another memory location. * * Based on mem2mem_testdev.c by Pawel Osciak. * * Copyright (c) 2011 Vista Silicon S.L. * Javier Martin <javier.martin@vista-silicon.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the * License, or (at your option) any later version */ #include <linux/module.h> #include <linux/clk.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/platform_device.h> #include <media/v4l2-mem2mem.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/videobuf2-dma-contig.h> #include <linux/sizes.h> #define EMMAPRP_MODULE_NAME "mem2mem-emmaprp" MODULE_DESCRIPTION("Mem-to-mem device which supports eMMa-PrP present in mx2 SoCs"); MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com"); MODULE_LICENSE("GPL"); MODULE_VERSION("0.0.1"); static bool debug; module_param(debug, bool, 0644); #define MIN_W 32 #define MIN_H 32 #define MAX_W 2040 #define MAX_H 2046 #define S_ALIGN 1 /* multiple of 2 */ #define W_ALIGN_YUV420 3 /* multiple of 8 */ #define W_ALIGN_OTHERS 2 /* multiple of 4 */ #define H_ALIGN 1 /* multiple of 2 */ /* Flags that indicate a format can be used for capture/output */ #define MEM2MEM_CAPTURE (1 << 0) #define MEM2MEM_OUTPUT (1 << 1) #define MEM2MEM_NAME "m2m-emmaprp" /* In bytes, per queue */ #define MEM2MEM_VID_MEM_LIMIT SZ_16M #define dprintk(dev, fmt, arg...) 
\ v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg) /* EMMA PrP */ #define PRP_CNTL 0x00 #define PRP_INTR_CNTL 0x04 #define PRP_INTRSTATUS 0x08 #define PRP_SOURCE_Y_PTR 0x0c #define PRP_SOURCE_CB_PTR 0x10 #define PRP_SOURCE_CR_PTR 0x14 #define PRP_DEST_RGB1_PTR 0x18 #define PRP_DEST_RGB2_PTR 0x1c #define PRP_DEST_Y_PTR 0x20 #define PRP_DEST_CB_PTR 0x24 #define PRP_DEST_CR_PTR 0x28 #define PRP_SRC_FRAME_SIZE 0x2c #define PRP_DEST_CH1_LINE_STRIDE 0x30 #define PRP_SRC_PIXEL_FORMAT_CNTL 0x34 #define PRP_CH1_PIXEL_FORMAT_CNTL 0x38 #define PRP_CH1_OUT_IMAGE_SIZE 0x3c #define PRP_CH2_OUT_IMAGE_SIZE 0x40 #define PRP_SRC_LINE_STRIDE 0x44 #define PRP_CSC_COEF_012 0x48 #define PRP_CSC_COEF_345 0x4c #define PRP_CSC_COEF_678 0x50 #define PRP_CH1_RZ_HORI_COEF1 0x54 #define PRP_CH1_RZ_HORI_COEF2 0x58 #define PRP_CH1_RZ_HORI_VALID 0x5c #define PRP_CH1_RZ_VERT_COEF1 0x60 #define PRP_CH1_RZ_VERT_COEF2 0x64 #define PRP_CH1_RZ_VERT_VALID 0x68 #define PRP_CH2_RZ_HORI_COEF1 0x6c #define PRP_CH2_RZ_HORI_COEF2 0x70 #define PRP_CH2_RZ_HORI_VALID 0x74 #define PRP_CH2_RZ_VERT_COEF1 0x78 #define PRP_CH2_RZ_VERT_COEF2 0x7c #define PRP_CH2_RZ_VERT_VALID 0x80 #define PRP_CNTL_CH1EN (1 << 0) #define PRP_CNTL_CH2EN (1 << 1) #define PRP_CNTL_CSIEN (1 << 2) #define PRP_CNTL_DATA_IN_YUV420 (0 << 3) #define PRP_CNTL_DATA_IN_YUV422 (1 << 3) #define PRP_CNTL_DATA_IN_RGB16 (2 << 3) #define PRP_CNTL_DATA_IN_RGB32 (3 << 3) #define PRP_CNTL_CH1_OUT_RGB8 (0 << 5) #define PRP_CNTL_CH1_OUT_RGB16 (1 << 5) #define PRP_CNTL_CH1_OUT_RGB32 (2 << 5) #define PRP_CNTL_CH1_OUT_YUV422 (3 << 5) #define PRP_CNTL_CH2_OUT_YUV420 (0 << 7) #define PRP_CNTL_CH2_OUT_YUV422 (1 << 7) #define PRP_CNTL_CH2_OUT_YUV444 (2 << 7) #define PRP_CNTL_CH1_LEN (1 << 9) #define PRP_CNTL_CH2_LEN (1 << 10) #define PRP_CNTL_SKIP_FRAME (1 << 11) #define PRP_CNTL_SWRST (1 << 12) #define PRP_CNTL_CLKEN (1 << 13) #define PRP_CNTL_WEN (1 << 14) #define PRP_CNTL_CH1BYP (1 << 15) #define PRP_CNTL_IN_TSKIP(x) ((x) << 16) #define PRP_CNTL_CH1_TSKIP(x) ((x) << 19) #define PRP_CNTL_CH2_TSKIP(x) ((x) << 22) #define PRP_CNTL_INPUT_FIFO_LEVEL(x) ((x) << 25) #define PRP_CNTL_RZ_FIFO_LEVEL(x) ((x) << 27) #define PRP_CNTL_CH2B1EN (1 << 29) #define PRP_CNTL_CH2B2EN (1 << 30) #define PRP_CNTL_CH2FEN (1 << 31) #define PRP_SIZE_HEIGHT(x) (x) #define PRP_SIZE_WIDTH(x) ((x) << 16) /* IRQ Enable and status register */ #define PRP_INTR_RDERR (1 << 0) #define PRP_INTR_CH1WERR (1 << 1) #define PRP_INTR_CH2WERR (1 << 2) #define PRP_INTR_CH1FC (1 << 3) #define PRP_INTR_CH2FC (1 << 5) #define PRP_INTR_LBOVF (1 << 7) #define PRP_INTR_CH2OVF (1 << 8) #define PRP_INTR_ST_RDERR (1 << 0) #define PRP_INTR_ST_CH1WERR (1 << 1) #define PRP_INTR_ST_CH2WERR (1 << 2) #define PRP_INTR_ST_CH2B2CI (1 << 3) #define PRP_INTR_ST_CH2B1CI (1 << 4) #define PRP_INTR_ST_CH1B2CI (1 << 5) #define PRP_INTR_ST_CH1B1CI (1 << 6) #define PRP_INTR_ST_LBOVF (1 << 7) #define PRP_INTR_ST_CH2OVF (1 << 8) struct emmaprp_fmt { char *name; u32 fourcc; /* Types the format can be used for */ u32 types; }; static struct emmaprp_fmt formats[] = { { .name = "YUV 4:2:0 Planar", .fourcc = V4L2_PIX_FMT_YUV420, .types = MEM2MEM_CAPTURE, }, { .name = "4:2:2, packed, YUYV", .fourcc = V4L2_PIX_FMT_YUYV, .types = MEM2MEM_OUTPUT, }, }; /* Per-queue, driver-specific private data */ struct emmaprp_q_data { unsigned int width; unsigned int height; unsigned int sizeimage; struct emmaprp_fmt *fmt; }; enum { V4L2_M2M_SRC = 0, V4L2_M2M_DST = 1, }; #define NUM_FORMATS ARRAY_SIZE(formats) static struct emmaprp_fmt *find_format(struct 
v4l2_format *f) { struct emmaprp_fmt *fmt; unsigned int k; for (k = 0; k < NUM_FORMATS; k++) { fmt = &formats[k]; if (fmt->fourcc == f->fmt.pix.pixelformat) break; } if (k == NUM_FORMATS) return NULL; return &formats[k]; } struct emmaprp_dev { struct v4l2_device v4l2_dev; struct video_device *vfd; struct mutex dev_mutex; spinlock_t irqlock; void __iomem *base_emma; struct clk *clk_emma_ahb, *clk_emma_ipg; struct v4l2_m2m_dev *m2m_dev; }; struct emmaprp_ctx { struct emmaprp_dev *dev; /* Abort requested by m2m */ int aborting; struct emmaprp_q_data q_data[2]; struct v4l2_m2m_ctx *m2m_ctx; }; static struct emmaprp_q_data *get_q_data(struct emmaprp_ctx *ctx, enum v4l2_buf_type type) { switch (type) { case V4L2_BUF_TYPE_VIDEO_OUTPUT: return &(ctx->q_data[V4L2_M2M_SRC]); case V4L2_BUF_TYPE_VIDEO_CAPTURE: return &(ctx->q_data[V4L2_M2M_DST]); default: BUG(); } return NULL; } /* * mem2mem callbacks */ static void emmaprp_job_abort(void *priv) { struct emmaprp_ctx *ctx = priv; struct emmaprp_dev *pcdev = ctx->dev; ctx->aborting = 1; dprintk(pcdev, "Aborting task\n"); v4l2_m2m_job_finish(pcdev->m2m_dev, ctx->m2m_ctx); } static inline void emmaprp_dump_regs(struct emmaprp_dev *pcdev) { dprintk(pcdev, "eMMa-PrP Registers:\n" " SOURCE_Y_PTR = 0x%08X\n" " SRC_FRAME_SIZE = 0x%08X\n" " DEST_Y_PTR = 0x%08X\n" " DEST_CR_PTR = 0x%08X\n" " DEST_CB_PTR = 0x%08X\n" " CH2_OUT_IMAGE_SIZE = 0x%08X\n" " CNTL = 0x%08X\n", readl(pcdev->base_emma + PRP_SOURCE_Y_PTR), readl(pcdev->base_emma + PRP_SRC_FRAME_SIZE), readl(pcdev->base_emma + PRP_DEST_Y_PTR), readl(pcdev->base_emma + PRP_DEST_CR_PTR), readl(pcdev->base_emma + PRP_DEST_CB_PTR), readl(pcdev->base_emma + PRP_CH2_OUT_IMAGE_SIZE), readl(pcdev->base_emma + PRP_CNTL)); } static void emmaprp_device_run(void *priv) { struct emmaprp_ctx *ctx = priv; struct emmaprp_q_data *s_q_data, *d_q_data; struct vb2_buffer *src_buf, *dst_buf; struct emmaprp_dev *pcdev = ctx->dev; unsigned int s_width, s_height; unsigned int d_width, d_height; unsigned int d_size; dma_addr_t p_in, p_out; u32 tmp; src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx); dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx); s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); s_width = s_q_data->width; s_height = s_q_data->height; d_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); d_width = d_q_data->width; d_height = d_q_data->height; d_size = d_width * d_height; p_in = vb2_dma_contig_plane_dma_addr(src_buf, 0); p_out = vb2_dma_contig_plane_dma_addr(dst_buf, 0); if (!p_in || !p_out) { v4l2_err(&pcdev->v4l2_dev, "Acquiring kernel pointers to buffers failed\n"); return; } /* Input frame parameters */ writel(p_in, pcdev->base_emma + PRP_SOURCE_Y_PTR); writel(PRP_SIZE_WIDTH(s_width) | PRP_SIZE_HEIGHT(s_height), pcdev->base_emma + PRP_SRC_FRAME_SIZE); /* Output frame parameters */ writel(p_out, pcdev->base_emma + PRP_DEST_Y_PTR); writel(p_out + d_size, pcdev->base_emma + PRP_DEST_CB_PTR); writel(p_out + d_size + (d_size >> 2), pcdev->base_emma + PRP_DEST_CR_PTR); writel(PRP_SIZE_WIDTH(d_width) | PRP_SIZE_HEIGHT(d_height), pcdev->base_emma + PRP_CH2_OUT_IMAGE_SIZE); /* IRQ configuration */ tmp = readl(pcdev->base_emma + PRP_INTR_CNTL); writel(tmp | PRP_INTR_RDERR | PRP_INTR_CH2WERR | PRP_INTR_CH2FC, pcdev->base_emma + PRP_INTR_CNTL); emmaprp_dump_regs(pcdev); /* Enable transfer */ tmp = readl(pcdev->base_emma + PRP_CNTL); writel(tmp | PRP_CNTL_CH2_OUT_YUV420 | PRP_CNTL_DATA_IN_YUV422 | PRP_CNTL_CH2EN, pcdev->base_emma + PRP_CNTL); } static irqreturn_t emmaprp_irq(int irq_emma, void *data) { struct emmaprp_dev 
*pcdev = data; struct emmaprp_ctx *curr_ctx; struct vb2_v4l2_buffer *src_vb, *dst_vb; unsigned long flags; u32 irqst; /* Check irq flags and clear irq */ irqst = readl(pcdev->base_emma + PRP_INTRSTATUS); writel(irqst, pcdev->base_emma + PRP_INTRSTATUS); dprintk(pcdev, "irqst = 0x%08x\n", irqst); curr_ctx = v4l2_m2m_get_curr_priv(pcdev->m2m_dev); if (curr_ctx == NULL) { pr_err("Instance released before the end of transaction\n"); return IRQ_HANDLED; } if (!curr_ctx->aborting) { if ((irqst & PRP_INTR_ST_RDERR) || (irqst & PRP_INTR_ST_CH2WERR)) { pr_err("PrP bus error occurred, this transfer is probably corrupted\n"); writel(PRP_CNTL_SWRST, pcdev->base_emma + PRP_CNTL); } else if (irqst & PRP_INTR_ST_CH2B1CI) { /* buffer ready */ src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx); dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx); dst_vb->vb2_buf.timestamp = src_vb->vb2_buf.timestamp; dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK; dst_vb->flags |= src_vb->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK; dst_vb->timecode = src_vb->timecode; spin_lock_irqsave(&pcdev->irqlock, flags); v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE); v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE); spin_unlock_irqrestore(&pcdev->irqlock, flags); } } v4l2_m2m_job_finish(pcdev->m2m_dev, curr_ctx->m2m_ctx); return IRQ_HANDLED; } /* * video ioctls */ static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1); strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1); cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING; cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } static int enum_fmt(struct v4l2_fmtdesc *f, u32 type) { int i, num; struct emmaprp_fmt *fmt; num = 0; for (i = 0; i < NUM_FORMATS; ++i) { if (formats[i].types & type) { /* index-th format of type type found ? 
*/ if (num == f->index) break; /* Correct type but haven't reached our index yet, * just increment per-type index */ ++num; } } if (i < NUM_FORMATS) { /* Format found */ fmt = &formats[i]; strscpy(f->description, fmt->name, sizeof(f->description) - 1); f->pixelformat = fmt->fourcc; return 0; } /* Format not found */ return -EINVAL; } static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { return enum_fmt(f, MEM2MEM_CAPTURE); } static int vidioc_enum_fmt_vid_out(struct file *file, void *priv, struct v4l2_fmtdesc *f) { return enum_fmt(f, MEM2MEM_OUTPUT); } static int vidioc_g_fmt(struct emmaprp_ctx *ctx, struct v4l2_format *f) { struct vb2_queue *vq; struct emmaprp_q_data *q_data; vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); if (!vq) return -EINVAL; q_data = get_q_data(ctx, f->type); f->fmt.pix.width = q_data->width; f->fmt.pix.height = q_data->height; f->fmt.pix.field = V4L2_FIELD_NONE; f->fmt.pix.pixelformat = q_data->fmt->fourcc; if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUV420) f->fmt.pix.bytesperline = q_data->width * 3 / 2; else /* YUYV */ f->fmt.pix.bytesperline = q_data->width * 2; f->fmt.pix.sizeimage = q_data->sizeimage; return 0; } static int vidioc_g_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { return vidioc_g_fmt(priv, f); } static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { return vidioc_g_fmt(priv, f); } static int vidioc_try_fmt(struct v4l2_format *f) { enum v4l2_field field; if (!find_format(f)) return -EINVAL; field = f->fmt.pix.field; if (field == V4L2_FIELD_ANY) field = V4L2_FIELD_NONE; else if (V4L2_FIELD_NONE != field) return -EINVAL; /* V4L2 specification suggests the driver corrects the format struct * if any of the dimensions is unsupported */ f->fmt.pix.field = field; if (f->fmt.pix.pixelformat == V4L2_PIX_FMT_YUV420) { v4l_bound_align_image(&f->fmt.pix.width, MIN_W, MAX_W, W_ALIGN_YUV420, &f->fmt.pix.height, MIN_H, MAX_H, H_ALIGN, S_ALIGN); f->fmt.pix.bytesperline = f->fmt.pix.width * 3 / 2; } else { v4l_bound_align_image(&f->fmt.pix.width, MIN_W, MAX_W, W_ALIGN_OTHERS, &f->fmt.pix.height, MIN_H, MAX_H, H_ALIGN, S_ALIGN); f->fmt.pix.bytesperline = f->fmt.pix.width * 2; } f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline; return 0; } static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct emmaprp_fmt *fmt; struct emmaprp_ctx *ctx = priv; fmt = find_format(f); if (!fmt || !(fmt->types & MEM2MEM_CAPTURE)) { v4l2_err(&ctx->dev->v4l2_dev, "Fourcc format (0x%08x) invalid.\n", f->fmt.pix.pixelformat); return -EINVAL; } return vidioc_try_fmt(f); } static int vidioc_try_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { struct emmaprp_fmt *fmt; struct emmaprp_ctx *ctx = priv; fmt = find_format(f); if (!fmt || !(fmt->types & MEM2MEM_OUTPUT)) { v4l2_err(&ctx->dev->v4l2_dev, "Fourcc format (0x%08x) invalid.\n", f->fmt.pix.pixelformat); return -EINVAL; } return vidioc_try_fmt(f); } static int vidioc_s_fmt(struct emmaprp_ctx *ctx, struct v4l2_format *f) { struct emmaprp_q_data *q_data; struct vb2_queue *vq; int ret; vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); if (!vq) return -EINVAL; q_data = get_q_data(ctx, f->type); if (!q_data) return -EINVAL; if (vb2_is_busy(vq)) { v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__); return -EBUSY; } ret = vidioc_try_fmt(f); if (ret) return ret; q_data->fmt = find_format(f); q_data->width = f->fmt.pix.width; q_data->height = f->fmt.pix.height; if 
(q_data->fmt->fourcc == V4L2_PIX_FMT_YUV420) q_data->sizeimage = q_data->width * q_data->height * 3 / 2; else /* YUYV */ q_data->sizeimage = q_data->width * q_data->height * 2; dprintk(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d\n", f->type, q_data->width, q_data->height, q_data->fmt->fourcc); return 0; } static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { int ret; ret = vidioc_try_fmt_vid_cap(file, priv, f); if (ret) return ret; return vidioc_s_fmt(priv, f); } static int vidioc_s_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *f) { int ret; ret = vidioc_try_fmt_vid_out(file, priv, f); if (ret) return ret; return vidioc_s_fmt(priv, f); } static int vidioc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *reqbufs) { struct emmaprp_ctx *ctx = priv; return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs); } static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct emmaprp_ctx *ctx = priv; return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf); } static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct emmaprp_ctx *ctx = priv; return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf); } static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct emmaprp_ctx *ctx = priv; return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf); } static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { struct emmaprp_ctx *ctx = priv; return v4l2_m2m_streamon(file, ctx->m2m_ctx, type); } static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type type) { struct emmaprp_ctx *ctx = priv; return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type); } static const struct v4l2_ioctl_ops emmaprp_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out, .vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out, .vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out, .vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out, .vidioc_reqbufs = vidioc_reqbufs, .vidioc_querybuf = vidioc_querybuf, .vidioc_qbuf = vidioc_qbuf, .vidioc_dqbuf = vidioc_dqbuf, .vidioc_streamon = vidioc_streamon, .vidioc_streamoff = vidioc_streamoff, }; /* * Queue operations */ static int emmaprp_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], struct device *alloc_devs[]) { struct emmaprp_ctx *ctx = vb2_get_drv_priv(vq); struct emmaprp_q_data *q_data; unsigned int size, count = *nbuffers; q_data = get_q_data(ctx, vq->type); if (q_data->fmt->fourcc == V4L2_PIX_FMT_YUV420) size = q_data->width * q_data->height * 3 / 2; else size = q_data->width * q_data->height * 2; while (size * count > MEM2MEM_VID_MEM_LIMIT) (count)--; *nplanes = 1; *nbuffers = count; sizes[0] = size; dprintk(ctx->dev, "get %d buffer(s) of size %d each.\n", count, size); return 0; } static int emmaprp_buf_prepare(struct vb2_buffer *vb) { struct emmaprp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); struct emmaprp_q_data *q_data; dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type); q_data = get_q_data(ctx, vb->vb2_queue->type); if (vb2_plane_size(vb, 0) < q_data->sizeimage) { dprintk(ctx->dev, "%s data will not fit into plane(%lu < %lu)\n", __func__, vb2_plane_size(vb, 0), (long)q_data->sizeimage); return -EINVAL; } vb2_set_plane_payload(vb, 0, 
q_data->sizeimage); return 0; } static void emmaprp_buf_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct emmaprp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf); } static const struct vb2_ops emmaprp_qops = { .queue_setup = emmaprp_queue_setup, .buf_prepare = emmaprp_buf_prepare, .buf_queue = emmaprp_buf_queue, .wait_prepare = vb2_ops_wait_prepare, .wait_finish = vb2_ops_wait_finish, }; static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq) { struct emmaprp_ctx *ctx = priv; int ret; src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; src_vq->io_modes = VB2_MMAP | VB2_USERPTR; src_vq->drv_priv = ctx; src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); src_vq->ops = &emmaprp_qops; src_vq->mem_ops = &vb2_dma_contig_memops; src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; src_vq->dev = ctx->dev->v4l2_dev.dev; src_vq->lock = &ctx->dev->dev_mutex; ret = vb2_queue_init(src_vq); if (ret) return ret; dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; dst_vq->io_modes = VB2_MMAP | VB2_USERPTR; dst_vq->drv_priv = ctx; dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer); dst_vq->ops = &emmaprp_qops; dst_vq->mem_ops = &vb2_dma_contig_memops; dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; dst_vq->dev = ctx->dev->v4l2_dev.dev; dst_vq->lock = &ctx->dev->dev_mutex; return vb2_queue_init(dst_vq); } /* * File operations */ static int emmaprp_open(struct file *file) { struct emmaprp_dev *pcdev = video_drvdata(file); struct emmaprp_ctx *ctx; ctx = kzalloc(sizeof *ctx, GFP_KERNEL); if (!ctx) return -ENOMEM; file->private_data = ctx; ctx->dev = pcdev; if (mutex_lock_interruptible(&pcdev->dev_mutex)) { kfree(ctx); return -ERESTARTSYS; } ctx->m2m_ctx = v4l2_m2m_ctx_init(pcdev->m2m_dev, ctx, &queue_init); if (IS_ERR(ctx->m2m_ctx)) { int ret = PTR_ERR(ctx->m2m_ctx); mutex_unlock(&pcdev->dev_mutex); kfree(ctx); return ret; } clk_prepare_enable(pcdev->clk_emma_ipg); clk_prepare_enable(pcdev->clk_emma_ahb); ctx->q_data[V4L2_M2M_SRC].fmt = &formats[1]; ctx->q_data[V4L2_M2M_DST].fmt = &formats[0]; mutex_unlock(&pcdev->dev_mutex); dprintk(pcdev, "Created instance %p, m2m_ctx: %p\n", ctx, ctx->m2m_ctx); return 0; } static int emmaprp_release(struct file *file) { struct emmaprp_dev *pcdev = video_drvdata(file); struct emmaprp_ctx *ctx = file->private_data; dprintk(pcdev, "Releasing instance %p\n", ctx); mutex_lock(&pcdev->dev_mutex); clk_disable_unprepare(pcdev->clk_emma_ahb); clk_disable_unprepare(pcdev->clk_emma_ipg); v4l2_m2m_ctx_release(ctx->m2m_ctx); mutex_unlock(&pcdev->dev_mutex); kfree(ctx); return 0; } static __poll_t emmaprp_poll(struct file *file, struct poll_table_struct *wait) { struct emmaprp_dev *pcdev = video_drvdata(file); struct emmaprp_ctx *ctx = file->private_data; __poll_t res; mutex_lock(&pcdev->dev_mutex); res = v4l2_m2m_poll(file, ctx->m2m_ctx, wait); mutex_unlock(&pcdev->dev_mutex); return res; } static int emmaprp_mmap(struct file *file, struct vm_area_struct *vma) { struct emmaprp_dev *pcdev = video_drvdata(file); struct emmaprp_ctx *ctx = file->private_data; int ret; if (mutex_lock_interruptible(&pcdev->dev_mutex)) return -ERESTARTSYS; ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma); mutex_unlock(&pcdev->dev_mutex); return ret; } static const struct v4l2_file_operations emmaprp_fops = { .owner = THIS_MODULE, .open = emmaprp_open, .release = emmaprp_release, .poll = emmaprp_poll, .unlocked_ioctl = video_ioctl2, .mmap = emmaprp_mmap, }; static const struct video_device 
emmaprp_videodev = { .name = MEM2MEM_NAME, .fops = &emmaprp_fops, .ioctl_ops = &emmaprp_ioctl_ops, .minor = -1, .release = video_device_release, .vfl_dir = VFL_DIR_M2M, }; static const struct v4l2_m2m_ops m2m_ops = { .device_run = emmaprp_device_run, .job_abort = emmaprp_job_abort, }; static int emmaprp_probe(struct platform_device *pdev) { struct emmaprp_dev *pcdev; struct video_device *vfd; struct resource *res; int irq, ret; pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL); if (!pcdev) return -ENOMEM; spin_lock_init(&pcdev->irqlock); pcdev->clk_emma_ipg = devm_clk_get(&pdev->dev, "ipg"); if (IS_ERR(pcdev->clk_emma_ipg)) { return PTR_ERR(pcdev->clk_emma_ipg); } pcdev->clk_emma_ahb = devm_clk_get(&pdev->dev, "ahb"); if (IS_ERR(pcdev->clk_emma_ahb)) return PTR_ERR(pcdev->clk_emma_ahb); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); pcdev->base_emma = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(pcdev->base_emma)) return PTR_ERR(pcdev->base_emma); ret = v4l2_device_register(&pdev->dev, &pcdev->v4l2_dev); if (ret) return ret; mutex_init(&pcdev->dev_mutex); vfd = video_device_alloc(); if (!vfd) { v4l2_err(&pcdev->v4l2_dev, "Failed to allocate video device\n"); ret = -ENOMEM; goto unreg_dev; } *vfd = emmaprp_videodev; vfd->lock = &pcdev->dev_mutex; vfd->v4l2_dev = &pcdev->v4l2_dev; video_set_drvdata(vfd, pcdev); pcdev->vfd = vfd; v4l2_info(&pcdev->v4l2_dev, EMMAPRP_MODULE_NAME " Device registered as /dev/video%d\n", vfd->num); platform_set_drvdata(pdev, pcdev); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; ret = devm_request_irq(&pdev->dev, irq, emmaprp_irq, 0, dev_name(&pdev->dev), pcdev); if (ret) goto rel_vdev; pcdev->m2m_dev = v4l2_m2m_init(&m2m_ops); if (IS_ERR(pcdev->m2m_dev)) { v4l2_err(&pcdev->v4l2_dev, "Failed to init mem2mem device\n"); ret = PTR_ERR(pcdev->m2m_dev); goto rel_vdev; } ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); if (ret) { v4l2_err(&pcdev->v4l2_dev, "Failed to register video device\n"); goto rel_m2m; } return 0; rel_m2m: v4l2_m2m_release(pcdev->m2m_dev); rel_vdev: video_device_release(vfd); unreg_dev: v4l2_device_unregister(&pcdev->v4l2_dev); mutex_destroy(&pcdev->dev_mutex); return ret; } static int emmaprp_remove(struct platform_device *pdev) { struct emmaprp_dev *pcdev = platform_get_drvdata(pdev); v4l2_info(&pcdev->v4l2_dev, "Removing " EMMAPRP_MODULE_NAME); video_unregister_device(pcdev->vfd); v4l2_m2m_release(pcdev->m2m_dev); v4l2_device_unregister(&pcdev->v4l2_dev); mutex_destroy(&pcdev->dev_mutex); return 0; } static struct platform_driver emmaprp_pdrv = { .probe = emmaprp_probe, .remove = emmaprp_remove, .driver = { .name = MEM2MEM_NAME, }, }; module_platform_driver(emmaprp_pdrv);
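/*
 * A minimal userspace sketch (not part of the driver) showing how a client
 * could negotiate the two queues this mem2mem device exposes: packed YUYV on
 * the OUTPUT side and planar YUV420 on the CAPTURE side. The function name
 * and the absence of error reporting are assumptions for illustration only;
 * the snippet is kept inside a comment so the file itself is unchanged as C.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/videodev2.h>
 *
 *	static int emmaprp_set_formats(int fd, unsigned int w, unsigned int h)
 *	{
 *		struct v4l2_format fmt;
 *
 *		memset(&fmt, 0, sizeof(fmt));
 *		fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
 *		fmt.fmt.pix.width = w;
 *		fmt.fmt.pix.height = h;
 *		fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
 *		if (ioctl(fd, VIDIOC_S_FMT, &fmt) < 0)
 *			return -1;
 *
 *		memset(&fmt, 0, sizeof(fmt));
 *		fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *		fmt.fmt.pix.width = w;
 *		fmt.fmt.pix.height = h;
 *		fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
 *		return ioctl(fd, VIDIOC_S_FMT, &fmt);
 *	}
 *
 * After both formats are set, the usual VIDIOC_REQBUFS/QBUF/STREAMON sequence
 * on each queue starts the conversion; every dequeued CAPTURE buffer
 * corresponds to one processed OUTPUT buffer.
 */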
gpl-2.0
nanming/linux2.6
drivers/scsi/dpt_i2o.c
38
87919
/*************************************************************************** dpti.c - description ------------------- begin : Thu Sep 7 2000 copyright : (C) 2000 by Adaptec July 30, 2001 First version being submitted for inclusion in the kernel. V2.4 See Documentation/scsi/dpti.txt for history, notes, license info and credits ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ /*************************************************************************** * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp> - Support 2.6 kernel and DMA-mapping - ioctl fix for raid tools - use schedule_timeout in long long loop **************************************************************************/ /*#define DEBUG 1 */ /*#define UARTDELAY 1 */ /* On the real kernel ADDR32 should always be zero for 2.4. GFP_HIGH allocates high pages. Keep the macro around because of the broken unmerged ia64 tree */ #define ADDR32 (0) #include <linux/module.h> MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn"); MODULE_DESCRIPTION("Adaptec I2O RAID Driver"); //////////////////////////////////////////////////////////////// #include <linux/ioctl.h> /* For SCSI-Passthrough */ #include <asm/uaccess.h> #include <linux/stat.h> #include <linux/slab.h> /* for kmalloc() */ #include <linux/pci.h> /* for PCI support */ #include <linux/proc_fs.h> #include <linux/blkdev.h> #include <linux/delay.h> /* for udelay */ #include <linux/interrupt.h> #include <linux/kernel.h> /* for printk */ #include <linux/sched.h> #include <linux/reboot.h> #include <linux/spinlock.h> #include <linux/dma-mapping.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/mutex.h> #include <asm/processor.h> /* for boot_cpu_data */ #include <asm/pgtable.h> #include <asm/io.h> /* for virt_to_bus, etc. 
*/ #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include "dpt/dptsig.h" #include "dpti.h" /*============================================================================ * Create a binary signature - this is read by dptsig * Needed for our management apps *============================================================================ */ static dpt_sig_S DPTI_sig = { {'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, #ifdef __i386__ PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, #elif defined(__ia64__) PROC_INTEL, PROC_IA64, #elif defined(__sparc__) PROC_ULTRASPARC, PROC_ULTRASPARC, #elif defined(__alpha__) PROC_ALPHA, PROC_ALPHA, #else (-1),(-1), #endif FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL, ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION, DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver" }; /*============================================================================ * Globals *============================================================================ */ static DEFINE_MUTEX(adpt_configuration_lock); static struct i2o_sys_tbl *sys_tbl = NULL; static int sys_tbl_ind = 0; static int sys_tbl_len = 0; static adpt_hba* hba_chain = NULL; static int hba_count = 0; static const struct file_operations adpt_fops = { .ioctl = adpt_ioctl, .open = adpt_open, .release = adpt_close }; #ifdef REBOOT_NOTIFIER static struct notifier_block adpt_reboot_notifier = { adpt_reboot_event, NULL, 0 }; #endif /* Structures and definitions for synchronous message posting. * See adpt_i2o_post_wait() for description * */ struct adpt_i2o_post_wait_data { int status; u32 id; adpt_wait_queue_head_t *wq; struct adpt_i2o_post_wait_data *next; }; static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL; static u32 adpt_post_wait_id = 0; static DEFINE_SPINLOCK(adpt_post_wait_lock); /*============================================================================ * Functions *============================================================================ */ static u8 adpt_read_blink_led(adpt_hba* host) { if(host->FwDebugBLEDflag_P != 0) { if( readb(host->FwDebugBLEDflag_P) == 0xbc ){ return readb(host->FwDebugBLEDvalue_P); } } return 0; } /*============================================================================ * Scsi host template interface functions *============================================================================ */ static struct pci_device_id dptids[] = { { PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, { PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,}, { 0, } }; MODULE_DEVICE_TABLE(pci,dptids); static int adpt_detect(struct scsi_host_template* sht) { struct pci_dev *pDev = NULL; adpt_hba* pHba; adpt_init(); PINFO("Detecting Adaptec I2O RAID controllers...\n"); /* search for all Adatpec I2O RAID cards */ while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) { if(pDev->device == PCI_DPT_DEVICE_ID || pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){ if(adpt_install_hba(sht, pDev) ){ PERROR("Could not Init an I2O RAID device\n"); PERROR("Will not try to detect others.\n"); return hba_count-1; } pci_dev_get(pDev); } } /* In INIT state, Activate IOPs */ for (pHba = hba_chain; pHba; pHba = pHba->next) { // Activate does get status , init outbound, and get hrt if (adpt_i2o_activate_hba(pHba) < 0) { adpt_i2o_delete_hba(pHba); } } /* Active IOPs in HOLD state */ rebuild_sys_tab: if (hba_chain == NULL) return 0; /* * If 
build_sys_table fails, we kill everything and bail * as we can't init the IOPs w/o a system table */ if (adpt_i2o_build_sys_table() < 0) { adpt_i2o_sys_shutdown(); return 0; } PDEBUG("HBA's in HOLD state\n"); /* If IOP don't get online, we need to rebuild the System table */ for (pHba = hba_chain; pHba; pHba = pHba->next) { if (adpt_i2o_online_hba(pHba) < 0) { adpt_i2o_delete_hba(pHba); goto rebuild_sys_tab; } } /* Active IOPs now in OPERATIONAL state */ PDEBUG("HBA's in OPERATIONAL state\n"); printk("dpti: If you have a lot of devices this could take a few minutes.\n"); for (pHba = hba_chain; pHba; pHba = pHba->next) { printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name); if (adpt_i2o_lct_get(pHba) < 0){ adpt_i2o_delete_hba(pHba); continue; } if (adpt_i2o_parse_lct(pHba) < 0){ adpt_i2o_delete_hba(pHba); continue; } adpt_inquiry(pHba); } for (pHba = hba_chain; pHba; pHba = pHba->next) { if( adpt_scsi_register(pHba,sht) < 0){ adpt_i2o_delete_hba(pHba); continue; } pHba->initialized = TRUE; pHba->state &= ~DPTI_STATE_RESET; } // Register our control device node // nodes will need to be created in /dev to access this // the nodes can not be created from within the driver if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) { adpt_i2o_sys_shutdown(); return 0; } return hba_count; } /* * scsi_unregister will be called AFTER we return. */ static int adpt_release(struct Scsi_Host *host) { adpt_hba* pHba = (adpt_hba*) host->hostdata[0]; // adpt_i2o_quiesce_hba(pHba); adpt_i2o_delete_hba(pHba); scsi_unregister(host); return 0; } static void adpt_inquiry(adpt_hba* pHba) { u32 msg[14]; u32 *mptr; u32 *lenptr; int direction; int scsidir; u32 len; u32 reqlen; u8* buf; u8 scb[16]; s32 rcode; memset(msg, 0, sizeof(msg)); buf = kmalloc(80,GFP_KERNEL|ADDR32); if(!buf){ printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name); return; } memset((void*)buf, 0, 36); len = 36; direction = 0x00000000; scsidir =0x40000000; // DATA IN (iop<--dev) reqlen = 14; // SINGLE SGE /* Stick the headers on */ msg[0] = reqlen<<16 | SGL_OFFSET_12; msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID); msg[2] = 0; msg[3] = 0; // Adaptec/DPT Private stuff msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16; msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/; /* Direction, disconnect ok | sense data | simple queue , CDBLen */ // I2O_SCB_FLAG_ENABLE_DISCONNECT | // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG | // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE; msg[6] = scsidir|0x20a00000| 6 /* cmd len*/; mptr=msg+7; memset(scb, 0, sizeof(scb)); // Write SCSI command into the message - always 16 byte block scb[0] = INQUIRY; scb[1] = 0; scb[2] = 0; scb[3] = 0; scb[4] = 36; scb[5] = 0; // Don't care about the rest of scb memcpy(mptr, scb, sizeof(scb)); mptr+=4; lenptr=mptr++; /* Remember me - fill in when we know */ /* Now fill in the SGList and command */ *lenptr = len; *mptr++ = 0xD0000000|direction|len; *mptr++ = virt_to_bus(buf); // Send it on it's way rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120); if (rcode != 0) { sprintf(pHba->detail, "Adaptec I2O RAID"); printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode); if (rcode != -ETIME && rcode != -EINTR) kfree(buf); } else { memset(pHba->detail, 0, sizeof(pHba->detail)); memcpy(&(pHba->detail), "Vendor: Adaptec ", 16); memcpy(&(pHba->detail[16]), " Model: ", 8); memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16); memcpy(&(pHba->detail[40]), " FW: ", 4); memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4); pHba->detail[48] = '\0'; /* precautionary */ kfree(buf); } 
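	/*
	 * The 36-byte standard INQUIRY response parsed above carries the
	 * product identification in bytes 16-31 and the firmware revision in
	 * bytes 32-35; those are the slices copied into pHba->detail as the
	 * "Model:" and "FW:" fields reported through adpt_info() and /proc.
	 */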
adpt_i2o_status_get(pHba); return ; } static int adpt_slave_configure(struct scsi_device * device) { struct Scsi_Host *host = device->host; adpt_hba* pHba; pHba = (adpt_hba *) host->hostdata[0]; if (host->can_queue && device->tagged_supported) { scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG, host->can_queue - 1); } else { scsi_adjust_queue_depth(device, 0, 1); } return 0; } static int adpt_queue(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *)) { adpt_hba* pHba = NULL; struct adpt_device* pDev = NULL; /* dpt per device information */ cmd->scsi_done = done; /* * SCSI REQUEST_SENSE commands will be executed automatically by the * Host Adapter for any errors, so they should not be executed * explicitly unless the Sense Data is zero indicating that no error * occurred. */ if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) { cmd->result = (DID_OK << 16); cmd->scsi_done(cmd); return 0; } pHba = (adpt_hba*)cmd->device->host->hostdata[0]; if (!pHba) { return FAILED; } rmb(); /* * TODO: I need to block here if I am processing ioctl cmds * but if the outstanding cmds all finish before the ioctl, * the scsi-core will not know to start sending cmds to me again. * I need to a way to restart the scsi-cores queues or should I block * calling scsi_done on the outstanding cmds instead * for now we don't set the IOCTL state */ if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) { pHba->host->last_reset = jiffies; pHba->host->resetting = 1; return 1; } // TODO if the cmd->device if offline then I may need to issue a bus rescan // followed by a get_lct to see if the device is there anymore if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) { /* * First command request for this device. Set up a pointer * to the device structure. This should be a TEST_UNIT_READY * command from scan_scsis_single. */ if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) { // TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response // with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue. cmd->result = (DID_NO_CONNECT << 16); cmd->scsi_done(cmd); return 0; } cmd->device->hostdata = pDev; } pDev->pScsi_dev = cmd->device; /* * If we are being called from when the device is being reset, * delay processing of the command until later. 
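	 * (DPTI_DEV_RESET is set in adpt_device_reset() while the reset
	 * message is outstanding and restored when adpt_i2o_post_wait()
	 * returns.)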
*/ if (pDev->state & DPTI_DEV_RESET ) { return FAILED; } return adpt_scsi_to_i2o(pHba, cmd, pDev); } static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev, sector_t capacity, int geom[]) { int heads=-1; int sectors=-1; int cylinders=-1; // *** First lets set the default geometry **** // If the capacity is less than ox2000 if (capacity < 0x2000 ) { // floppy heads = 18; sectors = 2; } // else if between 0x2000 and 0x20000 else if (capacity < 0x20000) { heads = 64; sectors = 32; } // else if between 0x20000 and 0x40000 else if (capacity < 0x40000) { heads = 65; sectors = 63; } // else if between 0x4000 and 0x80000 else if (capacity < 0x80000) { heads = 128; sectors = 63; } // else if greater than 0x80000 else { heads = 255; sectors = 63; } cylinders = sector_div(capacity, heads * sectors); // Special case if CDROM if(sdev->type == 5) { // CDROM heads = 252; sectors = 63; cylinders = 1111; } geom[0] = heads; geom[1] = sectors; geom[2] = cylinders; PDEBUG("adpt_bios_param: exit\n"); return 0; } static const char *adpt_info(struct Scsi_Host *host) { adpt_hba* pHba; pHba = (adpt_hba *) host->hostdata[0]; return (char *) (pHba->detail); } static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int inout) { struct adpt_device* d; int id; int chan; int len = 0; int begin = 0; int pos = 0; adpt_hba* pHba; int unit; *start = buffer; if (inout == TRUE) { /* * The user has done a write and wants us to take the * data in the buffer and do something with it. * proc_scsiwrite calls us with inout = 1 * * Read data from buffer (writing to us) - NOT SUPPORTED */ return -EINVAL; } /* * inout = 0 means the user has done a read and wants information * returned, so we write information about the cards into the buffer * proc_scsiread() calls us with inout = 0 */ // Find HBA (host bus adapter) we are looking for mutex_lock(&adpt_configuration_lock); for (pHba = hba_chain; pHba; pHba = pHba->next) { if (pHba->host == host) { break; /* found adapter */ } } mutex_unlock(&adpt_configuration_lock); if (pHba == NULL) { return 0; } host = pHba->host; len = sprintf(buffer , "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION); len += sprintf(buffer+len, "%s\n", pHba->detail); len += sprintf(buffer+len, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n", pHba->host->host_no, pHba->name, host->irq); len += sprintf(buffer+len, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n", host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize); pos = begin + len; /* CHECKPOINT */ if(pos > offset + length) { goto stop_output; } if(pos <= offset) { /* * If we haven't even written to where we last left * off (the last time we were called), reset the * beginning pointer. */ len = 0; begin = pos; } len += sprintf(buffer+len, "Devices:\n"); for(chan = 0; chan < MAX_CHANNEL; chan++) { for(id = 0; id < MAX_ID; id++) { d = pHba->channel[chan].device[id]; while(d){ len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor); len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev); pos = begin + len; /* CHECKPOINT */ if(pos > offset + length) { goto stop_output; } if(pos <= offset) { len = 0; begin = pos; } unit = d->pI2o_dev->lct_data.tid; len += sprintf(buffer+len, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d) (%s)\n\n", unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun, scsi_device_online(d->pScsi_dev)? 
"online":"offline"); pos = begin + len; /* CHECKPOINT */ if(pos > offset + length) { goto stop_output; } if(pos <= offset) { len = 0; begin = pos; } d = d->next_lun; } } } /* * begin is where we last checked our position with regards to offset * begin is always less than offset. len is relative to begin. It * is the number of bytes written past begin * */ stop_output: /* stop the output and calculate the correct length */ *(buffer + len) = '\0'; *start = buffer + (offset - begin); /* Start of wanted data */ len -= (offset - begin); if(len > length) { len = length; } else if(len < 0){ len = 0; **start = '\0'; } return len; } /*=========================================================================== * Error Handling routines *=========================================================================== */ static int adpt_abort(struct scsi_cmnd * cmd) { adpt_hba* pHba = NULL; /* host bus adapter structure */ struct adpt_device* dptdevice; /* dpt per device information */ u32 msg[5]; int rcode; if(cmd->serial_number == 0){ return FAILED; } pHba = (adpt_hba*) cmd->device->host->hostdata[0]; printk(KERN_INFO"%s: Trying to Abort cmd=%ld\n",pHba->name, cmd->serial_number); if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) { printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name); return FAILED; } memset(msg, 0, sizeof(msg)); msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0; msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid; msg[2] = 0; msg[3]= 0; msg[4] = (u32)cmd; if (pHba->host) spin_lock_irq(pHba->host->host_lock); rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER); if (pHba->host) spin_unlock_irq(pHba->host->host_lock); if (rcode != 0) { if(rcode == -EOPNOTSUPP ){ printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name); return FAILED; } printk(KERN_INFO"%s: Abort cmd=%ld failed.\n",pHba->name, cmd->serial_number); return FAILED; } printk(KERN_INFO"%s: Abort cmd=%ld complete.\n",pHba->name, cmd->serial_number); return SUCCESS; } #define I2O_DEVICE_RESET 0x27 // This is the same for BLK and SCSI devices // NOTE this is wrong in the i2o.h definitions // This is not currently supported by our adapter but we issue it anyway static int adpt_device_reset(struct scsi_cmnd* cmd) { adpt_hba* pHba; u32 msg[4]; u32 rcode; int old_state; struct adpt_device* d = cmd->device->hostdata; pHba = (void*) cmd->device->host->hostdata[0]; printk(KERN_INFO"%s: Trying to reset device\n",pHba->name); if (!d) { printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name); return FAILED; } memset(msg, 0, sizeof(msg)); msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0; msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid); msg[2] = 0; msg[3] = 0; if (pHba->host) spin_lock_irq(pHba->host->host_lock); old_state = d->state; d->state |= DPTI_DEV_RESET; rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER); d->state = old_state; if (pHba->host) spin_unlock_irq(pHba->host->host_lock); if (rcode != 0) { if(rcode == -EOPNOTSUPP ){ printk(KERN_INFO"%s: Device reset not supported\n",pHba->name); return FAILED; } printk(KERN_INFO"%s: Device reset failed\n",pHba->name); return FAILED; } else { printk(KERN_INFO"%s: Device reset successful\n",pHba->name); return SUCCESS; } } #define I2O_HBA_BUS_RESET 0x87 // This version of bus reset is called by the eh_error handler static int adpt_bus_reset(struct scsi_cmnd* cmd) { adpt_hba* pHba; u32 msg[4]; u32 rcode; pHba = (adpt_hba*)cmd->device->host->hostdata[0]; memset(msg, 0, sizeof(msg)); printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: 
%d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid ); msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0; msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid); msg[2] = 0; msg[3] = 0; if (pHba->host) spin_lock_irq(pHba->host->host_lock); rcode = adpt_i2o_post_wait(pHba, msg,sizeof(msg), FOREVER); if (pHba->host) spin_unlock_irq(pHba->host->host_lock); if (rcode != 0) { printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name); return FAILED; } else { printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name); return SUCCESS; } } // This version of reset is called by the eh_error_handler static int __adpt_reset(struct scsi_cmnd* cmd) { adpt_hba* pHba; int rcode; pHba = (adpt_hba*)cmd->device->host->hostdata[0]; printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid ); rcode = adpt_hba_reset(pHba); if(rcode == 0){ printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name); return SUCCESS; } else { printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode); return FAILED; } } static int adpt_reset(struct scsi_cmnd* cmd) { int rc; spin_lock_irq(cmd->device->host->host_lock); rc = __adpt_reset(cmd); spin_unlock_irq(cmd->device->host->host_lock); return rc; } // This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset static int adpt_hba_reset(adpt_hba* pHba) { int rcode; pHba->state |= DPTI_STATE_RESET; // Activate does get status , init outbound, and get hrt if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) { printk(KERN_ERR "%s: Could not activate\n", pHba->name); adpt_i2o_delete_hba(pHba); return rcode; } if ((rcode=adpt_i2o_build_sys_table()) < 0) { adpt_i2o_delete_hba(pHba); return rcode; } PDEBUG("%s: in HOLD state\n",pHba->name); if ((rcode=adpt_i2o_online_hba(pHba)) < 0) { adpt_i2o_delete_hba(pHba); return rcode; } PDEBUG("%s: in OPERATIONAL state\n",pHba->name); if ((rcode=adpt_i2o_lct_get(pHba)) < 0){ adpt_i2o_delete_hba(pHba); return rcode; } if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){ adpt_i2o_delete_hba(pHba); return rcode; } pHba->state &= ~DPTI_STATE_RESET; adpt_fail_posted_scbs(pHba); return 0; /* return success */ } /*=========================================================================== * *=========================================================================== */ static void adpt_i2o_sys_shutdown(void) { adpt_hba *pHba, *pNext; struct adpt_i2o_post_wait_data *p1, *old; printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n"); printk(KERN_INFO" This could take a few minutes if there are many devices attached\n"); /* Delete all IOPs from the controller chain */ /* They should have already been released by the * scsi-core */ for (pHba = hba_chain; pHba; pHba = pNext) { pNext = pHba->next; adpt_i2o_delete_hba(pHba); } /* Remove any timedout entries from the wait queue. */ // spin_lock_irqsave(&adpt_post_wait_lock, flags); /* Nothing should be outstanding at this point so just * free them */ for(p1 = adpt_post_wait_queue; p1;) { old = p1; p1 = p1->next; kfree(old); } // spin_unlock_irqrestore(&adpt_post_wait_lock, flags); adpt_post_wait_queue = NULL; printk(KERN_INFO "Adaptec I2O controllers down.\n"); } /* * reboot/shutdown notification. 
* * - Quiesce each IOP in the system * */ #ifdef REBOOT_NOTIFIER static int adpt_reboot_event(struct notifier_block *n, ulong code, void *p) { if(code != SYS_RESTART && code != SYS_HALT && code != SYS_POWER_OFF) return NOTIFY_DONE; adpt_i2o_sys_shutdown(); return NOTIFY_DONE; } #endif static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev) { adpt_hba* pHba = NULL; adpt_hba* p = NULL; ulong base_addr0_phys = 0; ulong base_addr1_phys = 0; u32 hba_map0_area_size = 0; u32 hba_map1_area_size = 0; void __iomem *base_addr_virt = NULL; void __iomem *msg_addr_virt = NULL; int raptorFlag = FALSE; if(pci_enable_device(pDev)) { return -EINVAL; } if (pci_request_regions(pDev, "dpt_i2o")) { PERROR("dpti: adpt_config_hba: pci request region failed\n"); return -EINVAL; } pci_set_master(pDev); if (pci_set_dma_mask(pDev, DMA_64BIT_MASK) && pci_set_dma_mask(pDev, DMA_32BIT_MASK)) return -EINVAL; base_addr0_phys = pci_resource_start(pDev,0); hba_map0_area_size = pci_resource_len(pDev,0); // Check if standard PCI card or single BAR Raptor if(pDev->device == PCI_DPT_DEVICE_ID){ if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){ // Raptor card with this device id needs 4M hba_map0_area_size = 0x400000; } else { // Not Raptor - it is a PCI card if(hba_map0_area_size > 0x100000 ){ hba_map0_area_size = 0x100000; } } } else {// Raptor split BAR config // Use BAR1 in this configuration base_addr1_phys = pci_resource_start(pDev,1); hba_map1_area_size = pci_resource_len(pDev,1); raptorFlag = TRUE; } base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size); if (!base_addr_virt) { pci_release_regions(pDev); PERROR("dpti: adpt_config_hba: io remap failed\n"); return -EINVAL; } if(raptorFlag == TRUE) { msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size ); if (!msg_addr_virt) { PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n"); iounmap(base_addr_virt); pci_release_regions(pDev); return -EINVAL; } } else { msg_addr_virt = base_addr_virt; } // Allocate and zero the data structure pHba = kmalloc(sizeof(adpt_hba), GFP_KERNEL); if( pHba == NULL) { if(msg_addr_virt != base_addr_virt){ iounmap(msg_addr_virt); } iounmap(base_addr_virt); pci_release_regions(pDev); return -ENOMEM; } memset(pHba, 0, sizeof(adpt_hba)); mutex_lock(&adpt_configuration_lock); if(hba_chain != NULL){ for(p = hba_chain; p->next; p = p->next); p->next = pHba; } else { hba_chain = pHba; } pHba->next = NULL; pHba->unit = hba_count; sprintf(pHba->name, "dpti%d", hba_count); hba_count++; mutex_unlock(&adpt_configuration_lock); pHba->pDev = pDev; pHba->base_addr_phys = base_addr0_phys; // Set up the Virtual Base Address of the I2O Device pHba->base_addr_virt = base_addr_virt; pHba->msg_addr_virt = msg_addr_virt; pHba->irq_mask = base_addr_virt+0x30; pHba->post_port = base_addr_virt+0x40; pHba->reply_port = base_addr_virt+0x44; pHba->hrt = NULL; pHba->lct = NULL; pHba->lct_size = 0; pHba->status_block = NULL; pHba->post_count = 0; pHba->state = DPTI_STATE_RESET; pHba->pDev = pDev; pHba->devices = NULL; // Initializing the spinlocks spin_lock_init(&pHba->state_lock); spin_lock_init(&adpt_post_wait_lock); if(raptorFlag == 0){ printk(KERN_INFO"Adaptec I2O RAID controller %d at %p size=%x irq=%d\n", hba_count-1, base_addr_virt, hba_map0_area_size, pDev->irq); } else { printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d\n",hba_count-1, pDev->irq); printk(KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size); printk(KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size); } 
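	/*
	 * The window mapped above provides the registers the rest of the
	 * driver relies on: the interrupt mask at base + 0x30, the inbound
	 * post FIFO at base + 0x40 and the outbound reply FIFO at base + 0x44.
	 * On split-BAR Raptor boards the message frames themselves are reached
	 * through the separate BAR1 mapping kept in msg_addr_virt.
	 */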
if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) { printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq); adpt_i2o_delete_hba(pHba); return -EINVAL; } return 0; } static void adpt_i2o_delete_hba(adpt_hba* pHba) { adpt_hba* p1; adpt_hba* p2; struct i2o_device* d; struct i2o_device* next; int i; int j; struct adpt_device* pDev; struct adpt_device* pNext; mutex_lock(&adpt_configuration_lock); // scsi_unregister calls our adpt_release which // does a quiese if(pHba->host){ free_irq(pHba->host->irq, pHba); } p2 = NULL; for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){ if(p1 == pHba) { if(p2) { p2->next = p1->next; } else { hba_chain = p1->next; } break; } } hba_count--; mutex_unlock(&adpt_configuration_lock); iounmap(pHba->base_addr_virt); pci_release_regions(pHba->pDev); if(pHba->msg_addr_virt != pHba->base_addr_virt){ iounmap(pHba->msg_addr_virt); } kfree(pHba->hrt); kfree(pHba->lct); kfree(pHba->status_block); kfree(pHba->reply_pool); for(d = pHba->devices; d ; d = next){ next = d->next; kfree(d); } for(i = 0 ; i < pHba->top_scsi_channel ; i++){ for(j = 0; j < MAX_ID; j++){ if(pHba->channel[i].device[j] != NULL){ for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){ pNext = pDev->next_lun; kfree(pDev); } } } } pci_dev_put(pHba->pDev); kfree(pHba); if(hba_count <= 0){ unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER); } } static int adpt_init(void) { printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n"); #ifdef REBOOT_NOTIFIER register_reboot_notifier(&adpt_reboot_notifier); #endif return 0; } static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun) { struct adpt_device* d; if(chan < 0 || chan >= MAX_CHANNEL) return NULL; if( pHba->channel[chan].device == NULL){ printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n"); return NULL; } d = pHba->channel[chan].device[id]; if(!d || d->tid == 0) { return NULL; } /* If it is the only lun at that address then this should match*/ if(d->scsi_lun == lun){ return d; } /* else we need to look through all the luns */ for(d=d->next_lun ; d ; d = d->next_lun){ if(d->scsi_lun == lun){ return d; } } return NULL; } static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout) { // I used my own version of the WAIT_QUEUE_HEAD // to handle some version differences // When embedded in the kernel this could go back to the vanilla one ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post); int status = 0; ulong flags = 0; struct adpt_i2o_post_wait_data *p1, *p2; struct adpt_i2o_post_wait_data *wait_data = kmalloc(sizeof(struct adpt_i2o_post_wait_data),GFP_KERNEL); DECLARE_WAITQUEUE(wait, current); if (!wait_data) return -ENOMEM; /* * The spin locking is needed to keep anyone from playing * with the queue pointers and id while we do the same */ spin_lock_irqsave(&adpt_post_wait_lock, flags); // TODO we need a MORE unique way of getting ids // to support async LCT get wait_data->next = adpt_post_wait_queue; adpt_post_wait_queue = wait_data; adpt_post_wait_id++; adpt_post_wait_id &= 0x7fff; wait_data->id = adpt_post_wait_id; spin_unlock_irqrestore(&adpt_post_wait_lock, flags); wait_data->wq = &adpt_wq_i2o_post; wait_data->status = -ETIMEDOUT; add_wait_queue(&adpt_wq_i2o_post, &wait); msg[2] |= 0x80000000 | ((u32)wait_data->id); timeout *= HZ; if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){ set_current_state(TASK_INTERRUPTIBLE); if(pHba->host) spin_unlock_irq(pHba->host->host_lock); if (!timeout) schedule(); else{ timeout = 
schedule_timeout(timeout); if (timeout == 0) { // I/O issued, but cannot get result in // specified time. Freeing resorces is // dangerous. status = -ETIME; } } if(pHba->host) spin_lock_irq(pHba->host->host_lock); } remove_wait_queue(&adpt_wq_i2o_post, &wait); if(status == -ETIMEDOUT){ printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit); // We will have to free the wait_data memory during shutdown return status; } /* Remove the entry from the queue. */ p2 = NULL; spin_lock_irqsave(&adpt_post_wait_lock, flags); for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) { if(p1 == wait_data) { if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) { status = -EOPNOTSUPP; } if(p2) { p2->next = p1->next; } else { adpt_post_wait_queue = p1->next; } break; } } spin_unlock_irqrestore(&adpt_post_wait_lock, flags); kfree(wait_data); return status; } static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len) { u32 m = EMPTY_QUEUE; u32 __iomem *msg; ulong timeout = jiffies + 30*HZ; do { rmb(); m = readl(pHba->post_port); if (m != EMPTY_QUEUE) { break; } if(time_after(jiffies,timeout)){ printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit); return -ETIMEDOUT; } schedule_timeout_uninterruptible(1); } while(m == EMPTY_QUEUE); msg = pHba->msg_addr_virt + m; memcpy_toio(msg, data, len); wmb(); //post message writel(m, pHba->post_port); wmb(); return 0; } static void adpt_i2o_post_wait_complete(u32 context, int status) { struct adpt_i2o_post_wait_data *p1 = NULL; /* * We need to search through the adpt_post_wait * queue to see if the given message is still * outstanding. If not, it means that the IOP * took longer to respond to the message than we * had allowed and timer has already expired. * Not much we can do about that except log * it for debug purposes, increase timeout, and recompile * * Lock needed to keep anyone from moving queue pointers * around while we're looking through them. 
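	 * (Only the low 15 bits of the reply context hold the post-wait id
	 * assigned in adpt_i2o_post_wait(), which is why the context is masked
	 * with 0x7fff below before being compared against each queued p1->id.)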
*/ context &= 0x7fff; spin_lock(&adpt_post_wait_lock); for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) { if(p1->id == context) { p1->status = status; spin_unlock(&adpt_post_wait_lock); wake_up_interruptible(p1->wq); return; } } spin_unlock(&adpt_post_wait_lock); // If this happens we lose commands that probably really completed printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context); printk(KERN_DEBUG" Tasks in wait queue:\n"); for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) { printk(KERN_DEBUG" %d\n",p1->id); } return; } static s32 adpt_i2o_reset_hba(adpt_hba* pHba) { u32 msg[8]; u8* status; u32 m = EMPTY_QUEUE ; ulong timeout = jiffies + (TMOUT_IOPRESET*HZ); if(pHba->initialized == FALSE) { // First time reset should be quick timeout = jiffies + (25*HZ); } else { adpt_i2o_quiesce_hba(pHba); } do { rmb(); m = readl(pHba->post_port); if (m != EMPTY_QUEUE) { break; } if(time_after(jiffies,timeout)){ printk(KERN_WARNING"Timeout waiting for message!\n"); return -ETIMEDOUT; } schedule_timeout_uninterruptible(1); } while (m == EMPTY_QUEUE); status = kzalloc(4, GFP_KERNEL|ADDR32); if(status == NULL) { adpt_send_nop(pHba, m); printk(KERN_ERR"IOP reset failed - no free memory.\n"); return -ENOMEM; } msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0; msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID; msg[2]=0; msg[3]=0; msg[4]=0; msg[5]=0; msg[6]=virt_to_bus(status); msg[7]=0; memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg)); wmb(); writel(m, pHba->post_port); wmb(); while(*status == 0){ if(time_after(jiffies,timeout)){ printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name); kfree(status); return -ETIMEDOUT; } rmb(); schedule_timeout_uninterruptible(1); } if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) { PDEBUG("%s: Reset in progress...\n", pHba->name); // Here we wait for message frame to become available // indicated that reset has finished do { rmb(); m = readl(pHba->post_port); if (m != EMPTY_QUEUE) { break; } if(time_after(jiffies,timeout)){ printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name); return -ETIMEDOUT; } schedule_timeout_uninterruptible(1); } while (m == EMPTY_QUEUE); // Flush the offset adpt_send_nop(pHba, m); } adpt_i2o_status_get(pHba); if(*status == 0x02 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) { printk(KERN_WARNING"%s: Reset reject, trying to clear\n", pHba->name); } else { PDEBUG("%s: Reset completed.\n", pHba->name); } kfree(status); #ifdef UARTDELAY // This delay is to allow someone attached to the card through the debug UART to // set up the dump levels that they want before the rest of the initialization sequence adpt_delay(20000); #endif return 0; } static int adpt_i2o_parse_lct(adpt_hba* pHba) { int i; int max; int tid; struct i2o_device *d; i2o_lct *lct = pHba->lct; u8 bus_no = 0; s16 scsi_id; s16 scsi_lun; u32 buf[10]; // larger than 7, or 8 ... struct adpt_device* pDev; if (lct == NULL) { printk(KERN_ERR "%s: LCT is empty???\n",pHba->name); return -1; } max = lct->table_size; max -= 3; max /= 9; for(i=0;i<max;i++) { if( lct->lct_entry[i].user_tid != 0xfff){ /* * If we have hidden devices, we need to inform the upper layers about * the possible maximum id reference to handle device access when * an array is disassembled. This code has no other purpose but to * allow us future access to devices that are currently hidden * behind arrays, hotspares or have not been configured (JBOD mode). 
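		 * (Such hidden entries only raise top_scsi_channel,
		 * top_scsi_id and top_scsi_lun below and are then skipped;
		 * no adpt_device node is created for them here.)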
*/ if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE && lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL && lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){ continue; } tid = lct->lct_entry[i].tid; // I2O_DPT_DEVICE_INFO_GROUP_NO; if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) { continue; } bus_no = buf[0]>>16; scsi_id = buf[1]; scsi_lun = (buf[2]>>8 )&0xff; if(bus_no >= MAX_CHANNEL) { // Something wrong skip it printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no); continue; } if (scsi_id >= MAX_ID){ printk(KERN_WARNING"%s: SCSI ID %d out of range \n", pHba->name, bus_no); continue; } if(bus_no > pHba->top_scsi_channel){ pHba->top_scsi_channel = bus_no; } if(scsi_id > pHba->top_scsi_id){ pHba->top_scsi_id = scsi_id; } if(scsi_lun > pHba->top_scsi_lun){ pHba->top_scsi_lun = scsi_lun; } continue; } d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL); if(d==NULL) { printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name); return -ENOMEM; } d->controller = pHba; d->next = NULL; memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry)); d->flags = 0; tid = d->lct_data.tid; adpt_i2o_report_hba_unit(pHba, d); adpt_i2o_install_device(pHba, d); } bus_no = 0; for(d = pHba->devices; d ; d = d->next) { if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT || d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){ tid = d->lct_data.tid; // TODO get the bus_no from hrt-but for now they are in order //bus_no = if(bus_no > pHba->top_scsi_channel){ pHba->top_scsi_channel = bus_no; } pHba->channel[bus_no].type = d->lct_data.class_id; pHba->channel[bus_no].tid = tid; if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0) { pHba->channel[bus_no].scsi_id = buf[1]; PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]); } // TODO remove - this is just until we get from hrt bus_no++; if(bus_no >= MAX_CHANNEL) { // Something wrong skip it printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no); break; } } } // Setup adpt_device table for(d = pHba->devices; d ; d = d->next) { if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE || d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL || d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){ tid = d->lct_data.tid; scsi_id = -1; // I2O_DPT_DEVICE_INFO_GROUP_NO; if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) { bus_no = buf[0]>>16; scsi_id = buf[1]; scsi_lun = (buf[2]>>8 )&0xff; if(bus_no >= MAX_CHANNEL) { // Something wrong skip it continue; } if (scsi_id >= MAX_ID) { continue; } if( pHba->channel[bus_no].device[scsi_id] == NULL){ pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL); if(pDev == NULL) { return -ENOMEM; } pHba->channel[bus_no].device[scsi_id] = pDev; } else { for( pDev = pHba->channel[bus_no].device[scsi_id]; pDev->next_lun; pDev = pDev->next_lun){ } pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL); if(pDev->next_lun == NULL) { return -ENOMEM; } pDev = pDev->next_lun; } pDev->tid = tid; pDev->scsi_channel = bus_no; pDev->scsi_id = scsi_id; pDev->scsi_lun = scsi_lun; pDev->pI2o_dev = d; d->owner = pDev; pDev->type = (buf[0])&0xff; pDev->flags = (buf[0]>>8)&0xff; if(scsi_id > pHba->top_scsi_id){ pHba->top_scsi_id = scsi_id; } if(scsi_lun > pHba->top_scsi_lun){ pHba->top_scsi_lun = scsi_lun; } } if(scsi_id == -1){ printk(KERN_WARNING"Could not find SCSI ID for %s\n", d->lct_data.identity_tag); } } } return 0; } /* * Each I2O controller has a chain of devices on it - these match * the useful 
parts of the LCT of the board. */ static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d) { mutex_lock(&adpt_configuration_lock); d->controller=pHba; d->owner=NULL; d->next=pHba->devices; d->prev=NULL; if (pHba->devices != NULL){ pHba->devices->prev=d; } pHba->devices=d; *d->dev_name = 0; mutex_unlock(&adpt_configuration_lock); return 0; } static int adpt_open(struct inode *inode, struct file *file) { int minor; adpt_hba* pHba; //TODO check for root access // minor = iminor(inode); if (minor >= hba_count) { return -ENXIO; } mutex_lock(&adpt_configuration_lock); for (pHba = hba_chain; pHba; pHba = pHba->next) { if (pHba->unit == minor) { break; /* found adapter */ } } if (pHba == NULL) { mutex_unlock(&adpt_configuration_lock); return -ENXIO; } // if(pHba->in_use){ // mutex_unlock(&adpt_configuration_lock); // return -EBUSY; // } pHba->in_use = 1; mutex_unlock(&adpt_configuration_lock); return 0; } static int adpt_close(struct inode *inode, struct file *file) { int minor; adpt_hba* pHba; minor = iminor(inode); if (minor >= hba_count) { return -ENXIO; } mutex_lock(&adpt_configuration_lock); for (pHba = hba_chain; pHba; pHba = pHba->next) { if (pHba->unit == minor) { break; /* found adapter */ } } mutex_unlock(&adpt_configuration_lock); if (pHba == NULL) { return -ENXIO; } pHba->in_use = 0; return 0; } static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg) { u32 msg[MAX_MESSAGE_SIZE]; u32* reply = NULL; u32 size = 0; u32 reply_size = 0; u32 __user *user_msg = arg; u32 __user * user_reply = NULL; void *sg_list[pHba->sg_tablesize]; u32 sg_offset = 0; u32 sg_count = 0; int sg_index = 0; u32 i = 0; u32 rcode = 0; void *p = NULL; ulong flags = 0; memset(&msg, 0, MAX_MESSAGE_SIZE*4); // get user msg size in u32s if(get_user(size, &user_msg[0])){ return -EFAULT; } size = size>>16; user_reply = &user_msg[size]; if(size > MAX_MESSAGE_SIZE){ return -EFAULT; } size *= 4; // Convert to bytes /* Copy in the user's I2O command */ if(copy_from_user(msg, user_msg, size)) { return -EFAULT; } get_user(reply_size, &user_reply[0]); reply_size = reply_size>>16; if(reply_size > REPLY_FRAME_SIZE){ reply_size = REPLY_FRAME_SIZE; } reply_size *= 4; reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL); if(reply == NULL) { printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name); return -ENOMEM; } sg_offset = (msg[0]>>4)&0xf; msg[2] = 0x40000000; // IOCTL context msg[3] = (u32)reply; memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize); if(sg_offset) { // TODO 64bit fix struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset); sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element); if (sg_count > pHba->sg_tablesize){ printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count); kfree (reply); return -EINVAL; } for(i = 0; i < sg_count; i++) { int sg_size; if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) { printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count); rcode = -EINVAL; goto cleanup; } sg_size = sg[i].flag_count & 0xffffff; /* Allocate memory for the transfer */ p = kmalloc(sg_size, GFP_KERNEL|ADDR32); if(!p) { printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n", pHba->name,sg_size,i,sg_count); rcode = -ENOMEM; goto cleanup; } sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame. 
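	/* Added note (descriptive only): each sg_list[] entry holds one kernel
	 * bounce buffer for an SG element of the user's pass-through message;
	 * elements with the direction flag set are filled from user space before
	 * posting, the rest are copied back to user space after completion, and
	 * every buffer is released in the cleanup path below. */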
/* Copy in the user's SG buffer if necessary */ if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) { // TODO 64bit fix if (copy_from_user(p,(void __user *)sg[i].addr_bus, sg_size)) { printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i); rcode = -EFAULT; goto cleanup; } } //TODO 64bit fix sg[i].addr_bus = (u32)virt_to_bus(p); } } do { if(pHba->host) spin_lock_irqsave(pHba->host->host_lock, flags); // This state stops any new commands from enterring the // controller while processing the ioctl // pHba->state |= DPTI_STATE_IOCTL; // We can't set this now - The scsi subsystem sets host_blocked and // the queue empties and stops. We need a way to restart the queue rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER); if (rcode != 0) printk("adpt_i2o_passthru: post wait failed %d %p\n", rcode, reply); // pHba->state &= ~DPTI_STATE_IOCTL; if(pHba->host) spin_unlock_irqrestore(pHba->host->host_lock, flags); } while(rcode == -ETIMEDOUT); if(rcode){ goto cleanup; } if(sg_offset) { /* Copy back the Scatter Gather buffers back to user space */ u32 j; // TODO 64bit fix struct sg_simple_element* sg; int sg_size; // re-acquire the original message to handle correctly the sg copy operation memset(&msg, 0, MAX_MESSAGE_SIZE*4); // get user msg size in u32s if(get_user(size, &user_msg[0])){ rcode = -EFAULT; goto cleanup; } size = size>>16; size *= 4; /* Copy in the user's I2O command */ if (copy_from_user (msg, user_msg, size)) { rcode = -EFAULT; goto cleanup; } sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element); // TODO 64bit fix sg = (struct sg_simple_element*)(msg + sg_offset); for (j = 0; j < sg_count; j++) { /* Copy out the SG list to user's buffer if necessary */ if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) { sg_size = sg[j].flag_count & 0xffffff; // TODO 64bit fix if (copy_to_user((void __user *)sg[j].addr_bus,sg_list[j], sg_size)) { printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus); rcode = -EFAULT; goto cleanup; } } } } /* Copy back the reply to user space */ if (reply_size) { // we wrote our own values for context - now restore the user supplied ones if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) { printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name); rcode = -EFAULT; } if(copy_to_user(user_reply, reply, reply_size)) { printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name); rcode = -EFAULT; } } cleanup: if (rcode != -ETIME && rcode != -EINTR) kfree (reply); while(sg_index) { if(sg_list[--sg_index]) { if (rcode != -ETIME && rcode != -EINTR) kfree(sg_list[sg_index]); } } return rcode; } /* * This routine returns information about the system. This does not effect * any logic and if the info is wrong - it doesn't matter. 
 */
/* Get all the info we cannot get from kernel services */
static int adpt_system_info(void __user *buffer)
{
	sysInfo_S si;

	memset(&si, 0, sizeof(si));

	si.osType = OS_LINUX;
	si.osMajorVersion = 0;
	si.osMinorVersion = 0;
	si.osRevision = 0;
	si.busType = SI_PCI_BUS;
	si.processorFamily = DPTI_sig.dsProcessorFamily;

#if defined __i386__
	adpt_i386_info(&si);
#elif defined (__ia64__)
	adpt_ia64_info(&si);
#elif defined(__sparc__)
	adpt_sparc_info(&si);
#elif defined (__alpha__)
	adpt_alpha_info(&si);
#else
	si.processorType = 0xff;
#endif
	if(copy_to_user(buffer, &si, sizeof(si))){
		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
		return -EFAULT;
	}

	return 0;
}

#if defined __ia64__
static void adpt_ia64_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_IA64;
}
#endif

#if defined __sparc__
static void adpt_sparc_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ULTRASPARC;
}
#endif

#if defined __alpha__
static void adpt_alpha_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ALPHA;
}
#endif

#if defined __i386__
static void adpt_i386_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	switch (boot_cpu_data.x86) {
	case CPU_386:
		si->processorType = PROC_386;
		break;
	case CPU_486:
		si->processorType = PROC_486;
		break;
	case CPU_586:
		si->processorType = PROC_PENTIUM;
		break;
	default:  // Just in case
		si->processorType = PROC_PENTIUM;
		break;
	}
}
#endif

static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
{
	int minor;
	int error = 0;
	adpt_hba* pHba;
	ulong flags = 0;
	void __user *argp = (void __user *)arg;

	minor = iminor(inode);
	if (minor >= DPTI_MAX_HBA){
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if(pHba == NULL){
		return -ENXIO;
	}

	while((volatile u32) pHba->state & DPTI_STATE_RESET )
		schedule_timeout_uninterruptible(2);

	switch (cmd) {
	// TODO: handle 3 cases
	case DPT_SIGNATURE:
		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
			return -EFAULT;
		}
		break;
	case I2OUSRCMD:
		return adpt_i2o_passthru(pHba, argp);

	case DPT_CTRLINFO:{
		drvrHBAinfo_S HbaInfo;

#define FLG_OSD_PCI_VALID 0x0001
#define FLG_OSD_DMA	  0x0002
#define FLG_OSD_I2O	  0x0004
		memset(&HbaInfo, 0, sizeof(HbaInfo));
		HbaInfo.drvrHBAnum = pHba->unit;
		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
		HbaInfo.blinkState = adpt_read_blink_led(pHba);
		HbaInfo.pciBusNum = pHba->pDev->bus->number;
		HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
		HbaInfo.Interrupt = pHba->pDev->irq;
		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
			return -EFAULT;
		}
		break;
		}
	case DPT_SYSINFO:
		return adpt_system_info(argp);
	case DPT_BLINKLED:{
		u32 value;
		value = (u32)adpt_read_blink_led(pHba);
		if (copy_to_user(argp, &value, sizeof(value))) {
			return -EFAULT;
		}
		break;
		}
	case I2ORESETCMD:
		if(pHba->host)
			spin_lock_irqsave(pHba->host->host_lock, flags);
		adpt_hba_reset(pHba);
		if(pHba->host)
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
		break;
	case
I2ORESCANCMD: adpt_rescan(pHba); break; default: return -EINVAL; } return error; } static irqreturn_t adpt_isr(int irq, void *dev_id) { struct scsi_cmnd* cmd; adpt_hba* pHba = dev_id; u32 m; void __iomem *reply; u32 status=0; u32 context; ulong flags = 0; int handled = 0; if (pHba == NULL){ printk(KERN_WARNING"adpt_isr: NULL dev_id\n"); return IRQ_NONE; } if(pHba->host) spin_lock_irqsave(pHba->host->host_lock, flags); while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) { m = readl(pHba->reply_port); if(m == EMPTY_QUEUE){ // Try twice then give up rmb(); m = readl(pHba->reply_port); if(m == EMPTY_QUEUE){ // This really should not happen printk(KERN_ERR"dpti: Could not get reply frame\n"); goto out; } } reply = bus_to_virt(m); if (readl(reply) & MSG_FAIL) { u32 old_m = readl(reply+28); void __iomem *msg; u32 old_context; PDEBUG("%s: Failed message\n",pHba->name); if(old_m >= 0x100000){ printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m); writel(m,pHba->reply_port); continue; } // Transaction context is 0 in failed reply frame msg = pHba->msg_addr_virt + old_m; old_context = readl(msg+12); writel(old_context, reply+12); adpt_send_nop(pHba, old_m); } context = readl(reply+8); if(context & 0x40000000){ // IOCTL void *p = (void *)readl(reply+12); if( p != NULL) { memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4); } // All IOCTLs will also be post wait } if(context & 0x80000000){ // Post wait message status = readl(reply+16); if(status >> 24){ status &= 0xffff; /* Get detail status */ } else { status = I2O_POST_WAIT_OK; } if(!(context & 0x40000000)) { cmd = (struct scsi_cmnd*) readl(reply+12); if(cmd != NULL) { printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context); } } adpt_i2o_post_wait_complete(context, status); } else { // SCSI message cmd = (struct scsi_cmnd*) readl(reply+12); if(cmd != NULL){ if(cmd->serial_number != 0) { // If not timedout adpt_i2o_to_scsi(reply, cmd); } } } writel(m, pHba->reply_port); wmb(); rmb(); } handled = 1; out: if(pHba->host) spin_unlock_irqrestore(pHba->host->host_lock, flags); return IRQ_RETVAL(handled); } static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d) { int i; u32 msg[MAX_MESSAGE_SIZE]; u32* mptr; u32 *lenptr; int direction; int scsidir; u32 len; u32 reqlen; s32 rcode; memset(msg, 0 , sizeof(msg)); len = cmd->request_bufflen; direction = 0x00000000; scsidir = 0x00000000; // DATA NO XFER if(len) { /* * Set SCBFlags to indicate if data is being transferred * in or out, or no data transfer * Note: Do not have to verify index is less than 0 since * cmd->cmnd[0] is an unsigned char */ switch(cmd->sc_data_direction){ case DMA_FROM_DEVICE: scsidir =0x40000000; // DATA IN (iop<--dev) break; case DMA_TO_DEVICE: direction=0x04000000; // SGL OUT scsidir =0x80000000; // DATA OUT (iop-->dev) break; case DMA_NONE: break; case DMA_BIDIRECTIONAL: scsidir =0x40000000; // DATA IN (iop<--dev) // Assume In - and continue; break; default: printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n", pHba->name, cmd->cmnd[0]); cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8); cmd->scsi_done(cmd); return 0; } } // msg[0] is set later // I2O_CMD_SCSI_EXEC msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid); msg[2] = 0; msg[3] = (u32)cmd; /* We want the SCSI control block back */ // Our cards use the transaction context as the tag for queueing // Adaptec/DPT Private stuff msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16); msg[5] = d->tid; /* Direction, 
disconnect ok | sense data | simple queue , CDBLen */ // I2O_SCB_FLAG_ENABLE_DISCONNECT | // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG | // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE; msg[6] = scsidir|0x20a00000|cmd->cmd_len; mptr=msg+7; // Write SCSI command into the message - always 16 byte block memset(mptr, 0, 16); memcpy(mptr, cmd->cmnd, cmd->cmd_len); mptr+=4; lenptr=mptr++; /* Remember me - fill in when we know */ reqlen = 14; // SINGLE SGE /* Now fill in the SGList and command */ if(cmd->use_sg) { struct scatterlist *sg = (struct scatterlist *)cmd->request_buffer; int sg_count = pci_map_sg(pHba->pDev, sg, cmd->use_sg, cmd->sc_data_direction); len = 0; for(i = 0 ; i < sg_count; i++) { *mptr++ = direction|0x10000000|sg_dma_len(sg); len+=sg_dma_len(sg); *mptr++ = sg_dma_address(sg); sg++; } /* Make this an end of list */ mptr[-2] = direction|0xD0000000|sg_dma_len(sg-1); reqlen = mptr - msg; *lenptr = len; if(cmd->underflow && len != cmd->underflow){ printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n", len, cmd->underflow); } } else { *lenptr = len = cmd->request_bufflen; if(len == 0) { reqlen = 12; } else { *mptr++ = 0xD0000000|direction|cmd->request_bufflen; *mptr++ = pci_map_single(pHba->pDev, cmd->request_buffer, cmd->request_bufflen, cmd->sc_data_direction); } } /* Stick the headers on */ msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0); // Send it on it's way rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2); if (rcode == 0) { return 0; } return rcode; } static s32 adpt_scsi_register(adpt_hba* pHba,struct scsi_host_template * sht) { struct Scsi_Host *host = NULL; host = scsi_register(sht, sizeof(adpt_hba*)); if (host == NULL) { printk ("%s: scsi_register returned NULL\n",pHba->name); return -1; } host->hostdata[0] = (unsigned long)pHba; pHba->host = host; host->irq = pHba->pDev->irq; /* no IO ports, so don't have to set host->io_port and * host->n_io_port */ host->io_port = 0; host->n_io_port = 0; /* see comments in scsi_host.h */ host->max_id = 16; host->max_lun = 256; host->max_channel = pHba->top_scsi_channel + 1; host->cmd_per_lun = 1; host->unique_id = (uint) pHba; host->sg_tablesize = pHba->sg_tablesize; host->can_queue = pHba->post_fifo_size; return 0; } static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd) { adpt_hba* pHba; u32 hba_status; u32 dev_status; u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits // I know this would look cleaner if I just read bytes // but the model I have been using for all the rest of the // io is in 4 byte words - so I keep that model u16 detailed_status = readl(reply+16) &0xffff; dev_status = (detailed_status & 0xff); hba_status = detailed_status >> 8; // calculate resid for sg cmd->resid = cmd->request_bufflen - readl(reply+5); pHba = (adpt_hba*) cmd->device->host->hostdata[0]; cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false if(!(reply_flags & MSG_FAIL)) { switch(detailed_status & I2O_SCSI_DSC_MASK) { case I2O_SCSI_DSC_SUCCESS: cmd->result = (DID_OK << 16); // handle underflow if(readl(reply+5) < cmd->underflow ) { cmd->result = (DID_ERROR <<16); printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name); } break; case I2O_SCSI_DSC_REQUEST_ABORTED: cmd->result = (DID_ABORT << 16); break; case I2O_SCSI_DSC_PATH_INVALID: case I2O_SCSI_DSC_DEVICE_NOT_PRESENT: case I2O_SCSI_DSC_SELECTION_TIMEOUT: case I2O_SCSI_DSC_COMMAND_TIMEOUT: case I2O_SCSI_DSC_NO_ADAPTER: case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE: printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev 
status=0x%x, cmd=0x%x\n", pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]); cmd->result = (DID_TIME_OUT << 16); break; case I2O_SCSI_DSC_ADAPTER_BUSY: case I2O_SCSI_DSC_BUS_BUSY: cmd->result = (DID_BUS_BUSY << 16); break; case I2O_SCSI_DSC_SCSI_BUS_RESET: case I2O_SCSI_DSC_BDR_MESSAGE_SENT: cmd->result = (DID_RESET << 16); break; case I2O_SCSI_DSC_PARITY_ERROR_FAILURE: printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name); cmd->result = (DID_PARITY << 16); break; case I2O_SCSI_DSC_UNABLE_TO_ABORT: case I2O_SCSI_DSC_COMPLETE_WITH_ERROR: case I2O_SCSI_DSC_UNABLE_TO_TERMINATE: case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED: case I2O_SCSI_DSC_AUTOSENSE_FAILED: case I2O_SCSI_DSC_DATA_OVERRUN: case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE: case I2O_SCSI_DSC_SEQUENCE_FAILURE: case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR: case I2O_SCSI_DSC_PROVIDE_FAILURE: case I2O_SCSI_DSC_REQUEST_TERMINATED: case I2O_SCSI_DSC_IDE_MESSAGE_SENT: case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT: case I2O_SCSI_DSC_MESSAGE_RECEIVED: case I2O_SCSI_DSC_INVALID_CDB: case I2O_SCSI_DSC_LUN_INVALID: case I2O_SCSI_DSC_SCSI_TID_INVALID: case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE: case I2O_SCSI_DSC_NO_NEXUS: case I2O_SCSI_DSC_CDB_RECEIVED: case I2O_SCSI_DSC_LUN_ALREADY_ENABLED: case I2O_SCSI_DSC_QUEUE_FROZEN: case I2O_SCSI_DSC_REQUEST_INVALID: default: printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n", pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]); cmd->result = (DID_ERROR << 16); break; } // copy over the request sense data if it was a check // condition status if(dev_status == 0x02 /*CHECK_CONDITION*/) { u32 len = sizeof(cmd->sense_buffer); len = (len > 40) ? 40 : len; // Copy over the sense data memcpy_fromio(cmd->sense_buffer, (reply+28) , len); if(cmd->sense_buffer[0] == 0x70 /* class 7 */ && cmd->sense_buffer[2] == DATA_PROTECT ){ /* This is to handle an array failed */ cmd->result = (DID_TIME_OUT << 16); printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n", pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]); } } } else { /* In this condtion we could not talk to the tid * the card rejected it. We should signal a retry * for a limitted number of retries. 
*/ cmd->result = (DID_TIME_OUT << 16); printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n", pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]); } cmd->result |= (dev_status); if(cmd->scsi_done != NULL){ cmd->scsi_done(cmd); } return cmd->result; } static s32 adpt_rescan(adpt_hba* pHba) { s32 rcode; ulong flags = 0; if(pHba->host) spin_lock_irqsave(pHba->host->host_lock, flags); if ((rcode=adpt_i2o_lct_get(pHba)) < 0) goto out; if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0) goto out; rcode = 0; out: if(pHba->host) spin_unlock_irqrestore(pHba->host->host_lock, flags); return rcode; } static s32 adpt_i2o_reparse_lct(adpt_hba* pHba) { int i; int max; int tid; struct i2o_device *d; i2o_lct *lct = pHba->lct; u8 bus_no = 0; s16 scsi_id; s16 scsi_lun; u32 buf[10]; // at least 8 u32's struct adpt_device* pDev = NULL; struct i2o_device* pI2o_dev = NULL; if (lct == NULL) { printk(KERN_ERR "%s: LCT is empty???\n",pHba->name); return -1; } max = lct->table_size; max -= 3; max /= 9; // Mark each drive as unscanned for (d = pHba->devices; d; d = d->next) { pDev =(struct adpt_device*) d->owner; if(!pDev){ continue; } pDev->state |= DPTI_DEV_UNSCANNED; } printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max); for(i=0;i<max;i++) { if( lct->lct_entry[i].user_tid != 0xfff){ continue; } if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE || lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL || lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){ tid = lct->lct_entry[i].tid; if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) { printk(KERN_ERR"%s: Could not query device\n",pHba->name); continue; } bus_no = buf[0]>>16; scsi_id = buf[1]; scsi_lun = (buf[2]>>8 )&0xff; pDev = pHba->channel[bus_no].device[scsi_id]; /* da lun */ while(pDev) { if(pDev->scsi_lun == scsi_lun) { break; } pDev = pDev->next_lun; } if(!pDev ) { // Something new add it d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL); if(d==NULL) { printk(KERN_CRIT "Out of memory for I2O device data.\n"); return -ENOMEM; } d->controller = pHba; d->next = NULL; memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry)); d->flags = 0; adpt_i2o_report_hba_unit(pHba, d); adpt_i2o_install_device(pHba, d); if(bus_no >= MAX_CHANNEL) { // Something wrong skip it printk(KERN_WARNING"%s: Channel number %d out of range \n", pHba->name, bus_no); continue; } pDev = pHba->channel[bus_no].device[scsi_id]; if( pDev == NULL){ pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL); if(pDev == NULL) { return -ENOMEM; } pHba->channel[bus_no].device[scsi_id] = pDev; } else { while (pDev->next_lun) { pDev = pDev->next_lun; } pDev = pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL); if(pDev == NULL) { return -ENOMEM; } } pDev->tid = d->lct_data.tid; pDev->scsi_channel = bus_no; pDev->scsi_id = scsi_id; pDev->scsi_lun = scsi_lun; pDev->pI2o_dev = d; d->owner = pDev; pDev->type = (buf[0])&0xff; pDev->flags = (buf[0]>>8)&0xff; // Too late, SCSI system has made up it's mind, but what the hey ... 
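	/* Added note (descriptive only): update the running high-water marks for
	 * the largest SCSI ID and LUN seen so far before moving on to the next
	 * LCT entry. */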
if(scsi_id > pHba->top_scsi_id){ pHba->top_scsi_id = scsi_id; } if(scsi_lun > pHba->top_scsi_lun){ pHba->top_scsi_lun = scsi_lun; } continue; } // end of new i2o device // We found an old device - check it while(pDev) { if(pDev->scsi_lun == scsi_lun) { if(!scsi_device_online(pDev->pScsi_dev)) { printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n", pHba->name,bus_no,scsi_id,scsi_lun); if (pDev->pScsi_dev) { scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING); } } d = pDev->pI2o_dev; if(d->lct_data.tid != tid) { // something changed pDev->tid = tid; memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry)); if (pDev->pScsi_dev) { pDev->pScsi_dev->changed = TRUE; pDev->pScsi_dev->removable = TRUE; } } // Found it - mark it scanned pDev->state = DPTI_DEV_ONLINE; break; } pDev = pDev->next_lun; } } } for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) { pDev =(struct adpt_device*) pI2o_dev->owner; if(!pDev){ continue; } // Drive offline drives that previously existed but could not be found // in the LCT table if (pDev->state & DPTI_DEV_UNSCANNED){ pDev->state = DPTI_DEV_OFFLINE; printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun); if (pDev->pScsi_dev) { scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE); } } } return 0; } static void adpt_fail_posted_scbs(adpt_hba* pHba) { struct scsi_cmnd* cmd = NULL; struct scsi_device* d = NULL; shost_for_each_device(d, pHba->host) { unsigned long flags; spin_lock_irqsave(&d->list_lock, flags); list_for_each_entry(cmd, &d->cmd_list, list) { if(cmd->serial_number == 0){ continue; } cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1); cmd->scsi_done(cmd); } spin_unlock_irqrestore(&d->list_lock, flags); } } /*============================================================================ * Routines from i2o subsystem *============================================================================ */ /* * Bring an I2O controller into HOLD state. See the spec. */ static int adpt_i2o_activate_hba(adpt_hba* pHba) { int rcode; if(pHba->initialized ) { if (adpt_i2o_status_get(pHba) < 0) { if((rcode = adpt_i2o_reset_hba(pHba)) != 0){ printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name); return rcode; } if (adpt_i2o_status_get(pHba) < 0) { printk(KERN_INFO "HBA not responding.\n"); return -1; } } if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) { printk(KERN_CRIT "%s: hardware fault\n", pHba->name); return -1; } if (pHba->status_block->iop_state == ADAPTER_STATE_READY || pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL || pHba->status_block->iop_state == ADAPTER_STATE_HOLD || pHba->status_block->iop_state == ADAPTER_STATE_FAILED) { adpt_i2o_reset_hba(pHba); if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) { printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name); return -1; } } } else { if((rcode = adpt_i2o_reset_hba(pHba)) != 0){ printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name); return rcode; } } if (adpt_i2o_init_outbound_q(pHba) < 0) { return -1; } /* In HOLD state */ if (adpt_i2o_hrt_get(pHba) < 0) { return -1; } return 0; } /* * Bring a controller online into OPERATIONAL state. 
*/ static int adpt_i2o_online_hba(adpt_hba* pHba) { if (adpt_i2o_systab_send(pHba) < 0) { adpt_i2o_delete_hba(pHba); return -1; } /* In READY state */ if (adpt_i2o_enable_hba(pHba) < 0) { adpt_i2o_delete_hba(pHba); return -1; } /* In OPERATIONAL state */ return 0; } static s32 adpt_send_nop(adpt_hba*pHba,u32 m) { u32 __iomem *msg; ulong timeout = jiffies + 5*HZ; while(m == EMPTY_QUEUE){ rmb(); m = readl(pHba->post_port); if(m != EMPTY_QUEUE){ break; } if(time_after(jiffies,timeout)){ printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name); return 2; } schedule_timeout_uninterruptible(1); } msg = (u32 __iomem *)(pHba->msg_addr_virt + m); writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]); writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]); writel( 0,&msg[2]); wmb(); writel(m, pHba->post_port); wmb(); return 0; } static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba) { u8 *status; u32 __iomem *msg = NULL; int i; ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ; u32* ptr; u32 outbound_frame; // This had to be a 32 bit address u32 m; do { rmb(); m = readl(pHba->post_port); if (m != EMPTY_QUEUE) { break; } if(time_after(jiffies,timeout)){ printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name); return -ETIMEDOUT; } schedule_timeout_uninterruptible(1); } while(m == EMPTY_QUEUE); msg=(u32 __iomem *)(pHba->msg_addr_virt+m); status = kmalloc(4,GFP_KERNEL|ADDR32); if (status==NULL) { adpt_send_nop(pHba, m); printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n", pHba->name); return -ENOMEM; } memset(status, 0, 4); writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]); writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]); writel(0, &msg[2]); writel(0x0106, &msg[3]); /* Transaction context */ writel(4096, &msg[4]); /* Host page frame size */ writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */ writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */ writel(virt_to_bus(status), &msg[7]); writel(m, pHba->post_port); wmb(); // Wait for the reply status to come back do { if (*status) { if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) { break; } } rmb(); if(time_after(jiffies,timeout)){ printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name); return -ETIMEDOUT; } schedule_timeout_uninterruptible(1); } while (1); // If the command was successful, fill the fifo with our reply // message packets if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) { kfree(status); return -2; } kfree(status); kfree(pHba->reply_pool); pHba->reply_pool = kmalloc(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4, GFP_KERNEL|ADDR32); if(!pHba->reply_pool){ printk(KERN_ERR"%s: Could not allocate reply pool\n",pHba->name); return -1; } memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4); ptr = pHba->reply_pool; for(i = 0; i < pHba->reply_fifo_size; i++) { outbound_frame = (u32)virt_to_bus(ptr); writel(outbound_frame, pHba->reply_port); wmb(); ptr += REPLY_FRAME_SIZE; } adpt_i2o_status_get(pHba); return 0; } /* * I2O System Table. Contains information about * all the IOPs in the system. Used to inform IOPs * about each other's existence. * * sys_tbl_ver is the CurrentChangeIndicator that is * used by IOPs to track changes. 
*/ static s32 adpt_i2o_status_get(adpt_hba* pHba) { ulong timeout; u32 m; u32 __iomem *msg; u8 *status_block=NULL; ulong status_block_bus; if(pHba->status_block == NULL) { pHba->status_block = (i2o_status_block*) kmalloc(sizeof(i2o_status_block),GFP_KERNEL|ADDR32); if(pHba->status_block == NULL) { printk(KERN_ERR "dpti%d: Get Status Block failed; Out of memory. \n", pHba->unit); return -ENOMEM; } } memset(pHba->status_block, 0, sizeof(i2o_status_block)); status_block = (u8*)(pHba->status_block); status_block_bus = virt_to_bus(pHba->status_block); timeout = jiffies+TMOUT_GETSTATUS*HZ; do { rmb(); m = readl(pHba->post_port); if (m != EMPTY_QUEUE) { break; } if(time_after(jiffies,timeout)){ printk(KERN_ERR "%s: Timeout waiting for message !\n", pHba->name); return -ETIMEDOUT; } schedule_timeout_uninterruptible(1); } while(m==EMPTY_QUEUE); msg=(u32 __iomem *)(pHba->msg_addr_virt+m); writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]); writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]); writel(1, &msg[2]); writel(0, &msg[3]); writel(0, &msg[4]); writel(0, &msg[5]); writel(((u32)status_block_bus)&0xffffffff, &msg[6]); writel(0, &msg[7]); writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes //post message writel(m, pHba->post_port); wmb(); while(status_block[87]!=0xff){ if(time_after(jiffies,timeout)){ printk(KERN_ERR"dpti%d: Get status timeout.\n", pHba->unit); return -ETIMEDOUT; } rmb(); schedule_timeout_uninterruptible(1); } // Set up our number of outbound and inbound messages pHba->post_fifo_size = pHba->status_block->max_inbound_frames; if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) { pHba->post_fifo_size = MAX_TO_IOP_MESSAGES; } pHba->reply_fifo_size = pHba->status_block->max_outbound_frames; if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) { pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES; } // Calculate the Scatter Gather list size pHba->sg_tablesize = (pHba->status_block->inbound_frame_size * 4 -40)/ sizeof(struct sg_simple_element); if (pHba->sg_tablesize > SG_LIST_ELEMENTS) { pHba->sg_tablesize = SG_LIST_ELEMENTS; } #ifdef DEBUG printk("dpti%d: State = ",pHba->unit); switch(pHba->status_block->iop_state) { case 0x01: printk("INIT\n"); break; case 0x02: printk("RESET\n"); break; case 0x04: printk("HOLD\n"); break; case 0x05: printk("READY\n"); break; case 0x08: printk("OPERATIONAL\n"); break; case 0x10: printk("FAILED\n"); break; case 0x11: printk("FAULTED\n"); break; default: printk("%x (unknown!!)\n",pHba->status_block->iop_state); } #endif return 0; } /* * Get the IOP's Logical Configuration Table */ static int adpt_i2o_lct_get(adpt_hba* pHba) { u32 msg[8]; int ret; u32 buf[16]; if ((pHba->lct_size == 0) || (pHba->lct == NULL)){ pHba->lct_size = pHba->status_block->expected_lct_size; } do { if (pHba->lct == NULL) { pHba->lct = kmalloc(pHba->lct_size, GFP_KERNEL|ADDR32); if(pHba->lct == NULL) { printk(KERN_CRIT "%s: Lct Get failed. 
Out of memory.\n", pHba->name); return -ENOMEM; } } memset(pHba->lct, 0, pHba->lct_size); msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6; msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID; msg[2] = 0; msg[3] = 0; msg[4] = 0xFFFFFFFF; /* All devices */ msg[5] = 0x00000000; /* Report now */ msg[6] = 0xD0000000|pHba->lct_size; msg[7] = virt_to_bus(pHba->lct); if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) { printk(KERN_ERR "%s: LCT Get failed (status=%#10x.\n", pHba->name, ret); printk(KERN_ERR"Adaptec: Error Reading Hardware.\n"); return ret; } if ((pHba->lct->table_size << 2) > pHba->lct_size) { pHba->lct_size = pHba->lct->table_size << 2; kfree(pHba->lct); pHba->lct = NULL; } } while (pHba->lct == NULL); PDEBUG("%s: Hardware resource table read.\n", pHba->name); // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO; if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) { pHba->FwDebugBufferSize = buf[1]; pHba->FwDebugBuffer_P = pHba->base_addr_virt + buf[0]; pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P + FW_DEBUG_FLAGS_OFFSET; pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P + FW_DEBUG_BLED_OFFSET; pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1; pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P + FW_DEBUG_STR_LENGTH_OFFSET; pHba->FwDebugBuffer_P += buf[2]; pHba->FwDebugFlags = 0; } return 0; } static int adpt_i2o_build_sys_table(void) { adpt_hba* pHba = NULL; int count = 0; sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs (hba_count) * sizeof(struct i2o_sys_tbl_entry); kfree(sys_tbl); sys_tbl = kmalloc(sys_tbl_len, GFP_KERNEL|ADDR32); if(!sys_tbl) { printk(KERN_WARNING "SysTab Set failed. Out of memory.\n"); return -ENOMEM; } memset(sys_tbl, 0, sys_tbl_len); sys_tbl->num_entries = hba_count; sys_tbl->version = I2OVERSION; sys_tbl->change_ind = sys_tbl_ind++; for(pHba = hba_chain; pHba; pHba = pHba->next) { // Get updated Status Block so we have the latest information if (adpt_i2o_status_get(pHba)) { sys_tbl->num_entries--; continue; // try next one } sys_tbl->iops[count].org_id = pHba->status_block->org_id; sys_tbl->iops[count].iop_id = pHba->unit + 2; sys_tbl->iops[count].seg_num = 0; sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version; sys_tbl->iops[count].iop_state = pHba->status_block->iop_state; sys_tbl->iops[count].msg_type = pHba->status_block->msg_type; sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size; sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ?? 
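	/* Added note (descriptive only): the remaining per-IOP fields publish
	 * this controller's capabilities and the bus address of its inbound
	 * (post) FIFO in the system table, which is how IOPs learn how to reach
	 * each other. */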
sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities; sys_tbl->iops[count].inbound_low = (u32)virt_to_bus(pHba->post_port); sys_tbl->iops[count].inbound_high = (u32)((u64)virt_to_bus(pHba->post_port)>>32); count++; } #ifdef DEBUG { u32 *table = (u32*)sys_tbl; printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2)); for(count = 0; count < (sys_tbl_len >>2); count++) { printk(KERN_INFO "sys_tbl[%d] = %0#10x\n", count, table[count]); } } #endif return 0; } /* * Dump the information block associated with a given unit (TID) */ static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d) { char buf[64]; int unit = d->lct_data.tid; printk(KERN_INFO "TID %3.3d ", unit); if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0) { buf[16]=0; printk(" Vendor: %-12.12s", buf); } if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0) { buf[16]=0; printk(" Device: %-12.12s", buf); } if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0) { buf[8]=0; printk(" Rev: %-12.12s\n", buf); } #ifdef DEBUG printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id)); printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class); printk(KERN_INFO "\tFlags: "); if(d->lct_data.device_flags&(1<<0)) printk("C"); // ConfigDialog requested if(d->lct_data.device_flags&(1<<1)) printk("U"); // Multi-user capable if(!(d->lct_data.device_flags&(1<<4))) printk("P"); // Peer service enabled! if(!(d->lct_data.device_flags&(1<<5))) printk("M"); // Mgmt service enabled! printk("\n"); #endif } #ifdef DEBUG /* * Do i2o class name lookup */ static const char *adpt_i2o_get_class_name(int class) { int idx = 16; static char *i2o_class_name[] = { "Executive", "Device Driver Module", "Block Device", "Tape Device", "LAN Interface", "WAN Interface", "Fibre Channel Port", "Fibre Channel Device", "SCSI Device", "ATE Port", "ATE Device", "Floppy Controller", "Floppy Device", "Secondary Bus Port", "Peer Transport Agent", "Peer Transport", "Unknown" }; switch(class&0xFFF) { case I2O_CLASS_EXECUTIVE: idx = 0; break; case I2O_CLASS_DDM: idx = 1; break; case I2O_CLASS_RANDOM_BLOCK_STORAGE: idx = 2; break; case I2O_CLASS_SEQUENTIAL_STORAGE: idx = 3; break; case I2O_CLASS_LAN: idx = 4; break; case I2O_CLASS_WAN: idx = 5; break; case I2O_CLASS_FIBRE_CHANNEL_PORT: idx = 6; break; case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL: idx = 7; break; case I2O_CLASS_SCSI_PERIPHERAL: idx = 8; break; case I2O_CLASS_ATE_PORT: idx = 9; break; case I2O_CLASS_ATE_PERIPHERAL: idx = 10; break; case I2O_CLASS_FLOPPY_CONTROLLER: idx = 11; break; case I2O_CLASS_FLOPPY_DEVICE: idx = 12; break; case I2O_CLASS_BUS_ADAPTER_PORT: idx = 13; break; case I2O_CLASS_PEER_TRANSPORT_AGENT: idx = 14; break; case I2O_CLASS_PEER_TRANSPORT: idx = 15; break; } return i2o_class_name[idx]; } #endif static s32 adpt_i2o_hrt_get(adpt_hba* pHba) { u32 msg[6]; int ret, size = sizeof(i2o_hrt); do { if (pHba->hrt == NULL) { pHba->hrt=kmalloc(size, GFP_KERNEL|ADDR32); if (pHba->hrt == NULL) { printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name); return -ENOMEM; } } msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4; msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID; msg[2]= 0; msg[3]= 0; msg[4]= (0xD0000000 | size); /* Simple transaction */ msg[5]= virt_to_bus(pHba->hrt); /* Dump it here */ if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) { printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret); return ret; } if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) { 
size = pHba->hrt->num_entries * pHba->hrt->entry_len << 2; kfree(pHba->hrt); pHba->hrt = NULL; } } while(pHba->hrt == NULL); return 0; } /* * Query one scalar group value or a whole scalar group. */ static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid, int group, int field, void *buf, int buflen) { u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field }; u8 *resblk; int size; /* 8 bytes for header */ resblk = kmalloc(sizeof(u8) * (8+buflen), GFP_KERNEL|ADDR32); if (resblk == NULL) { printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name); return -ENOMEM; } if (field == -1) /* whole group */ opblk[4] = -1; size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid, opblk, sizeof(opblk), resblk, sizeof(u8)*(8+buflen)); if (size == -ETIME) { printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name); return -ETIME; } else if (size == -EINTR) { printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name); return -EINTR; } memcpy(buf, resblk+8, buflen); /* cut off header */ kfree(resblk); if (size < 0) return size; return buflen; } /* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET * * This function can be used for all UtilParamsGet/Set operations. * The OperationBlock is given in opblk-buffer, * and results are returned in resblk-buffer. * Note that the minimum sized resblk is 8 bytes and contains * ResultCount, ErrorInfoSize, BlockStatus and BlockSize. */ static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid, void *opblk, int oplen, void *resblk, int reslen) { u32 msg[9]; u32 *res = (u32 *)resblk; int wait_status; msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5; msg[1] = cmd << 24 | HOST_TID << 12 | tid; msg[2] = 0; msg[3] = 0; msg[4] = 0; msg[5] = 0x54000000 | oplen; /* OperationBlock */ msg[6] = virt_to_bus(opblk); msg[7] = 0xD0000000 | reslen; /* ResultBlock */ msg[8] = virt_to_bus(resblk); if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) { printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk); return wait_status; /* -DetailedStatus */ } if (res[1]&0x00FF0000) { /* BlockStatus != SUCCESS */ printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, " "BlockStatus = 0x%02x, BlockSize = 0x%04x\n", pHba->name, (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET" : "PARAMS_GET", res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF); return -((res[1] >> 16) & 0xFF); /* -BlockStatus */ } return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */ } static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba) { u32 msg[4]; int ret; adpt_i2o_status_get(pHba); /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */ if((pHba->status_block->iop_state != ADAPTER_STATE_READY) && (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){ return 0; } msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0; msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID; msg[2] = 0; msg[3] = 0; if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) { printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n", pHba->unit, -ret); } else { printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit); } adpt_i2o_status_get(pHba); return ret; } /* * Enable IOP. Allows the IOP to resume external operations. 
 */
static int adpt_i2o_enable_hba(adpt_hba* pHba)
{
	u32 msg[4];
	int ret;

	adpt_i2o_status_get(pHba);
	if(!pHba->status_block){
		return -ENOMEM;
	}
	/* Enable only allowed on READY state */
	if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
		return 0;

	if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
		return -EINVAL;

	msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]= 0;
	msg[3]= 0;

	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
			pHba->name, ret);
	} else {
		PDEBUG("%s: Enabled.\n", pHba->name);
	}

	adpt_i2o_status_get(pHba);
	return ret;
}

static int adpt_i2o_systab_send(adpt_hba* pHba)
{
	u32 msg[12];
	int ret;

	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
	msg[5] = 0;				   /* Segment 0 */

	/*
	 * Provide three SGL-elements:
	 * System table (SysTab), Private memory space declaration and
	 * Private i/o space declaration
	 */
	msg[6] = 0x54000000 | sys_tbl_len;
	msg[7] = virt_to_phys(sys_tbl);
	msg[8] = 0x54000000 | 0;
	msg[9] = 0;
	msg[10] = 0xD4000000 | 0;
	msg[11] = 0;

	if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
			pHba->name, ret);
	}
#ifdef DEBUG
	else {
		PINFO("%s: SysTab set.\n", pHba->name);
	}
#endif

	return ret;
}


/*============================================================================
 *
 *============================================================================
 */

#ifdef UARTDELAY

static void adpt_delay(int millisec)
{
	int i;
	for (i = 0; i < millisec; i++) {
		udelay(1000);	/* delay for one millisecond */
	}
}

#endif

static struct scsi_host_template driver_template = {
	.name			= "dpt_i2o",
	.proc_name		= "dpt_i2o",
	.proc_info		= adpt_proc_info,
	.detect			= adpt_detect,
	.release		= adpt_release,
	.info			= adpt_info,
	.queuecommand		= adpt_queue,
	.eh_abort_handler	= adpt_abort,
	.eh_device_reset_handler = adpt_device_reset,
	.eh_bus_reset_handler	= adpt_bus_reset,
	.eh_host_reset_handler	= adpt_reset,
	.bios_param		= adpt_bios_param,
	.slave_configure	= adpt_slave_configure,
	.can_queue		= MAX_TO_IOP_MESSAGES,
	.this_id		= 7,
	.cmd_per_lun		= 1,
	.use_clustering		= ENABLE_CLUSTERING,
};
#include "scsi_module.c"
MODULE_LICENSE("GPL");
gpl-2.0
Talustus/android_kernel_samsung_msm8660-common
arch/arm/mach-msm/board-8930-display.c
38
18583
/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/init.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/bootmem.h> #include <asm/mach-types.h> #include <mach/msm_bus_board.h> #include <mach/msm_memtypes.h> #include <mach/board.h> #include <mach/gpio.h> #include <mach/gpiomux.h> #include <mach/socinfo.h> #include <linux/ion.h> #include <mach/ion.h> #include "devices.h" /* TODO: Remove this once PM8038 physically becomes * available. */ #ifndef MSM8930_PHASE_2 #include "board-8960.h" #else #include "board-8930.h" #endif #ifdef CONFIG_FB_MSM_TRIPLE_BUFFER #define MSM_FB_PRIM_BUF_SIZE (1376 * 768 * 4 * 3) /* 4 bpp x 3 pages */ #else #define MSM_FB_PRIM_BUF_SIZE (1376 * 768 * 4 * 2) /* 4 bpp x 2 pages */ #endif #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL #define MSM_FB_EXT_BUF_SIZE (1920 * 1088 * 2 * 1) /* 2 bpp x 1 page */ #elif defined(CONFIG_FB_MSM_TVOUT) #define MSM_FB_EXT_BUF_SIZE (720 * 576 * 2 * 2) /* 2 bpp x 2 pages */ #else #define MSM_FB_EXT_BUF_SIZE 0 #endif #ifdef CONFIG_FB_MSM_HDMI_AS_PRIMARY /* 4 bpp x 2 page HDMI case */ #define MSM_FB_SIZE roundup((1920 * 1088 * 4 * 2), 4096) #else /* Note: must be multiple of 4096 */ #define MSM_FB_SIZE roundup(MSM_FB_PRIM_BUF_SIZE + MSM_FB_EXT_BUF_SIZE, 4096) #endif #ifdef CONFIG_FB_MSM_OVERLAY0_WRITEBACK #define MSM_FB_OVERLAY0_WRITEBACK_SIZE roundup((1376 * 768 * 3 * 2), 4096) #else #define MSM_FB_OVERLAY0_WRITEBACK_SIZE (0) #endif /* CONFIG_FB_MSM_OVERLAY0_WRITEBACK */ #ifdef CONFIG_FB_MSM_OVERLAY1_WRITEBACK #define MSM_FB_OVERLAY1_WRITEBACK_SIZE roundup((1920 * 1088 * 3 * 2), 4096) #else #define MSM_FB_OVERLAY1_WRITEBACK_SIZE (0) #endif /* CONFIG_FB_MSM_OVERLAY1_WRITEBACK */ #define MDP_VSYNC_GPIO 0 #define PANEL_NAME_MAX_LEN 30 #define MIPI_CMD_NOVATEK_QHD_PANEL_NAME "mipi_cmd_novatek_qhd" #define MIPI_VIDEO_NOVATEK_QHD_PANEL_NAME "mipi_video_novatek_qhd" #define MIPI_VIDEO_TOSHIBA_WSVGA_PANEL_NAME "mipi_video_toshiba_wsvga" #define MIPI_VIDEO_CHIMEI_WXGA_PANEL_NAME "mipi_video_chimei_wxga" #define MIPI_VIDEO_SIMULATOR_VGA_PANEL_NAME "mipi_video_simulator_vga" #define MIPI_CMD_RENESAS_FWVGA_PANEL_NAME "mipi_cmd_renesas_fwvga" #define HDMI_PANEL_NAME "hdmi_msm" #define TVOUT_PANEL_NAME "tvout_msm" static struct resource msm_fb_resources[] = { { .flags = IORESOURCE_DMA, } }; static int msm_fb_detect_panel(const char *name) { if (!strncmp(name, MIPI_CMD_NOVATEK_QHD_PANEL_NAME, strnlen(MIPI_CMD_NOVATEK_QHD_PANEL_NAME, PANEL_NAME_MAX_LEN))) return 0; #ifndef CONFIG_FB_MSM_MIPI_PANEL_DETECT if (!strncmp(name, MIPI_VIDEO_NOVATEK_QHD_PANEL_NAME, strnlen(MIPI_VIDEO_NOVATEK_QHD_PANEL_NAME, PANEL_NAME_MAX_LEN))) return 0; if (!strncmp(name, MIPI_VIDEO_TOSHIBA_WSVGA_PANEL_NAME, strnlen(MIPI_VIDEO_TOSHIBA_WSVGA_PANEL_NAME, PANEL_NAME_MAX_LEN))) return 0; if (!strncmp(name, MIPI_VIDEO_SIMULATOR_VGA_PANEL_NAME, strnlen(MIPI_VIDEO_SIMULATOR_VGA_PANEL_NAME, PANEL_NAME_MAX_LEN))) return 0; if (!strncmp(name, MIPI_CMD_RENESAS_FWVGA_PANEL_NAME, strnlen(MIPI_CMD_RENESAS_FWVGA_PANEL_NAME, PANEL_NAME_MAX_LEN))) return 0; #endif if (!strncmp(name, 
HDMI_PANEL_NAME, strnlen(HDMI_PANEL_NAME, PANEL_NAME_MAX_LEN))) return 0; if (!strncmp(name, TVOUT_PANEL_NAME, strnlen(TVOUT_PANEL_NAME, PANEL_NAME_MAX_LEN))) return 0; pr_warning("%s: not supported '%s'", __func__, name); return -ENODEV; } static struct msm_fb_platform_data msm_fb_pdata = { .detect_client = msm_fb_detect_panel, }; static struct platform_device msm_fb_device = { .name = "msm_fb", .id = 0, .num_resources = ARRAY_SIZE(msm_fb_resources), .resource = msm_fb_resources, .dev.platform_data = &msm_fb_pdata, }; static bool dsi_power_on; /* * TODO: When physical 8930/PM8038 hardware becomes * available, replace mipi_dsi_cdp_panel_power with * appropriate function. */ #define DISP_RST_GPIO 58 static int mipi_dsi_cdp_panel_power(int on) { static struct regulator *reg_l8, *reg_l23, *reg_l2; int rc; pr_debug("%s: state : %d\n", __func__, on); if (!dsi_power_on) { reg_l8 = regulator_get(&msm_mipi_dsi1_device.dev, "dsi_vdc"); if (IS_ERR(reg_l8)) { pr_err("could not get 8038_l8, rc = %ld\n", PTR_ERR(reg_l8)); return -ENODEV; } reg_l23 = regulator_get(&msm_mipi_dsi1_device.dev, "dsi_vddio"); if (IS_ERR(reg_l23)) { pr_err("could not get 8038_l23, rc = %ld\n", PTR_ERR(reg_l23)); return -ENODEV; } reg_l2 = regulator_get(&msm_mipi_dsi1_device.dev, "dsi_vdda"); if (IS_ERR(reg_l2)) { pr_err("could not get 8038_l2, rc = %ld\n", PTR_ERR(reg_l2)); return -ENODEV; } rc = regulator_set_voltage(reg_l8, 2800000, 3000000); if (rc) { pr_err("set_voltage l8 failed, rc=%d\n", rc); return -EINVAL; } rc = regulator_set_voltage(reg_l23, 1800000, 1800000); if (rc) { pr_err("set_voltage l23 failed, rc=%d\n", rc); return -EINVAL; } rc = regulator_set_voltage(reg_l2, 1200000, 1200000); if (rc) { pr_err("set_voltage l2 failed, rc=%d\n", rc); return -EINVAL; } rc = gpio_request(DISP_RST_GPIO, "disp_rst_n"); if (rc) { pr_err("request gpio DISP_RST_GPIO failed, rc=%d\n", rc); gpio_free(DISP_RST_GPIO); return -ENODEV; } dsi_power_on = true; } if (on) { rc = regulator_set_optimum_mode(reg_l8, 100000); if (rc < 0) { pr_err("set_optimum_mode l8 failed, rc=%d\n", rc); return -EINVAL; } rc = regulator_set_optimum_mode(reg_l23, 100000); if (rc < 0) { pr_err("set_optimum_mode l23 failed, rc=%d\n", rc); return -EINVAL; } rc = regulator_set_optimum_mode(reg_l2, 100000); if (rc < 0) { pr_err("set_optimum_mode l2 failed, rc=%d\n", rc); return -EINVAL; } rc = regulator_enable(reg_l8); if (rc) { pr_err("enable l8 failed, rc=%d\n", rc); return -ENODEV; } rc = regulator_enable(reg_l23); if (rc) { pr_err("enable l8 failed, rc=%d\n", rc); return -ENODEV; } rc = regulator_enable(reg_l2); if (rc) { pr_err("enable l2 failed, rc=%d\n", rc); return -ENODEV; } gpio_set_value(DISP_RST_GPIO, 1); } else { gpio_set_value(DISP_RST_GPIO, 0); rc = regulator_disable(reg_l2); if (rc) { pr_err("disable reg_l2 failed, rc=%d\n", rc); return -ENODEV; } rc = regulator_disable(reg_l8); if (rc) { pr_err("disable reg_l8 failed, rc=%d\n", rc); return -ENODEV; } rc = regulator_disable(reg_l23); if (rc) { pr_err("disable reg_l23 failed, rc=%d\n", rc); return -ENODEV; } rc = regulator_set_optimum_mode(reg_l8, 100); if (rc < 0) { pr_err("set_optimum_mode l8 failed, rc=%d\n", rc); return -EINVAL; } rc = regulator_set_optimum_mode(reg_l23, 100); if (rc < 0) { pr_err("set_optimum_mode l23 failed, rc=%d\n", rc); return -EINVAL; } rc = regulator_set_optimum_mode(reg_l2, 100); if (rc < 0) { pr_err("set_optimum_mode l2 failed, rc=%d\n", rc); return -EINVAL; } } return 0; } static int mipi_dsi_panel_power(int on) { pr_debug("%s: on=%d\n", __func__, on); return 
mipi_dsi_cdp_panel_power(on); } static struct mipi_dsi_platform_data mipi_dsi_pdata = { .vsync_gpio = MDP_VSYNC_GPIO, .dsi_power_save = mipi_dsi_panel_power, }; #ifdef CONFIG_MSM_BUS_SCALING static struct msm_bus_vectors mdp_init_vectors[] = { { .src = MSM_BUS_MASTER_MDP_PORT0, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, }; #ifdef CONFIG_FB_MSM_HDMI_AS_PRIMARY static struct msm_bus_vectors hdmi_as_primary_vectors[] = { /* If HDMI is used as primary */ { .src = MSM_BUS_MASTER_MDP_PORT0, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 2000000000, .ib = 2000000000, }, }; static struct msm_bus_paths mdp_bus_scale_usecases[] = { { ARRAY_SIZE(mdp_init_vectors), mdp_init_vectors, }, { ARRAY_SIZE(hdmi_as_primary_vectors), hdmi_as_primary_vectors, }, { ARRAY_SIZE(hdmi_as_primary_vectors), hdmi_as_primary_vectors, }, { ARRAY_SIZE(hdmi_as_primary_vectors), hdmi_as_primary_vectors, }, { ARRAY_SIZE(hdmi_as_primary_vectors), hdmi_as_primary_vectors, }, { ARRAY_SIZE(hdmi_as_primary_vectors), hdmi_as_primary_vectors, }, }; #else static struct msm_bus_vectors mdp_ui_vectors[] = { { .src = MSM_BUS_MASTER_MDP_PORT0, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 216000000 * 2, .ib = 270000000 * 2, }, }; static struct msm_bus_vectors mdp_vga_vectors[] = { /* VGA and less video */ { .src = MSM_BUS_MASTER_MDP_PORT0, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 216000000 * 2, .ib = 270000000 * 2, }, }; static struct msm_bus_vectors mdp_720p_vectors[] = { /* 720p and less video */ { .src = MSM_BUS_MASTER_MDP_PORT0, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 230400000 * 2, .ib = 288000000 * 2, }, }; static struct msm_bus_vectors mdp_1080p_vectors[] = { /* 1080p and less video */ { .src = MSM_BUS_MASTER_MDP_PORT0, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 334080000 * 2, .ib = 417600000 * 2, }, }; static struct msm_bus_paths mdp_bus_scale_usecases[] = { { ARRAY_SIZE(mdp_init_vectors), mdp_init_vectors, }, { ARRAY_SIZE(mdp_ui_vectors), mdp_ui_vectors, }, { ARRAY_SIZE(mdp_ui_vectors), mdp_ui_vectors, }, { ARRAY_SIZE(mdp_vga_vectors), mdp_vga_vectors, }, { ARRAY_SIZE(mdp_720p_vectors), mdp_720p_vectors, }, { ARRAY_SIZE(mdp_1080p_vectors), mdp_1080p_vectors, }, }; #endif static struct msm_bus_scale_pdata mdp_bus_scale_pdata = { mdp_bus_scale_usecases, ARRAY_SIZE(mdp_bus_scale_usecases), .name = "mdp", }; #endif #ifdef CONFIG_FB_MSM_HDMI_AS_PRIMARY static int mdp_core_clk_rate_table[] = { 200000000, 200000000, 200000000, 200000000, }; #else static int mdp_core_clk_rate_table[] = { 85330000, 85330000, 160000000, 200000000, }; #endif static struct msm_panel_common_pdata mdp_pdata = { .gpio = MDP_VSYNC_GPIO, #ifdef CONFIG_FB_MSM_HDMI_AS_PRIMARY .mdp_core_clk_rate = 200000000, #else .mdp_core_clk_rate = 85330000, #endif .mdp_core_clk_table = mdp_core_clk_rate_table, .num_mdp_clk = ARRAY_SIZE(mdp_core_clk_rate_table), #ifdef CONFIG_MSM_BUS_SCALING .mdp_bus_scale_table = &mdp_bus_scale_pdata, #endif .mdp_rev = MDP_REV_42, #ifdef CONFIG_MSM_MULTIMEDIA_USE_ION .mem_hid = ION_CP_MM_HEAP_ID, #else .mem_hid = MEMTYPE_EBI1, #endif }; void __init msm8930_mdp_writeback(struct memtype_reserve* reserve_table) { mdp_pdata.ov0_wb_size = MSM_FB_OVERLAY0_WRITEBACK_SIZE; mdp_pdata.ov1_wb_size = MSM_FB_OVERLAY1_WRITEBACK_SIZE; #if defined(CONFIG_ANDROID_PMEM) && !defined(CONFIG_MSM_MULTIMEDIA_USE_ION) reserve_table[mdp_pdata.mem_hid].size += mdp_pdata.ov0_wb_size; reserve_table[mdp_pdata.mem_hid].size += mdp_pdata.ov1_wb_size; #endif } #define LPM_CHANNEL0 0 static int toshiba_gpio[] = {LPM_CHANNEL0}; static struct mipi_dsi_panel_platform_data toshiba_pdata = { .gpio = 
toshiba_gpio, }; static struct platform_device mipi_dsi_toshiba_panel_device = { .name = "mipi_toshiba", .id = 0, .dev = { .platform_data = &toshiba_pdata, } }; #define FPGA_3D_GPIO_CONFIG_ADDR 0xB5 static struct mipi_dsi_phy_ctrl dsi_novatek_cmd_mode_phy_db = { /* DSI_BIT_CLK at 500MHz, 2 lane, RGB888 */ {0x0F, 0x0a, 0x04, 0x00, 0x20}, /* regulator */ /* timing */ {0xab, 0x8a, 0x18, 0x00, 0x92, 0x97, 0x1b, 0x8c, 0x0c, 0x03, 0x04, 0xa0}, {0x5f, 0x00, 0x00, 0x10}, /* phy ctrl */ {0xff, 0x00, 0x06, 0x00}, /* strength */ /* pll control */ {0x40, 0xf9, 0x30, 0xda, 0x00, 0x40, 0x03, 0x62, 0x40, 0x07, 0x03, 0x00, 0x1a, 0x00, 0x00, 0x02, 0x00, 0x20, 0x00, 0x01}, }; static struct mipi_dsi_panel_platform_data novatek_pdata = { .fpga_3d_config_addr = FPGA_3D_GPIO_CONFIG_ADDR, .fpga_ctrl_mode = FPGA_SPI_INTF, .phy_ctrl_settings = &dsi_novatek_cmd_mode_phy_db, }; static struct platform_device mipi_dsi_novatek_panel_device = { .name = "mipi_novatek", .id = 0, .dev = { .platform_data = &novatek_pdata, } }; #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL static struct resource hdmi_msm_resources[] = { { .name = "hdmi_msm_qfprom_addr", .start = 0x00700000, .end = 0x007060FF, .flags = IORESOURCE_MEM, }, { .name = "hdmi_msm_hdmi_addr", .start = 0x04A00000, .end = 0x04A00FFF, .flags = IORESOURCE_MEM, }, { .name = "hdmi_msm_irq", .start = HDMI_IRQ, .end = HDMI_IRQ, .flags = IORESOURCE_IRQ, }, }; static int hdmi_enable_5v(int on); static int hdmi_core_power(int on, int show); static int hdmi_cec_power(int on); static struct msm_hdmi_platform_data hdmi_msm_data = { .irq = HDMI_IRQ, .enable_5v = hdmi_enable_5v, .core_power = hdmi_core_power, .cec_power = hdmi_cec_power, }; static struct platform_device hdmi_msm_device = { .name = "hdmi_msm", .id = 0, .num_resources = ARRAY_SIZE(hdmi_msm_resources), .resource = hdmi_msm_resources, .dev.platform_data = &hdmi_msm_data, }; #endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL */ #ifdef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL static struct platform_device wfd_panel_device = { .name = "wfd_panel", .id = 0, .dev.platform_data = NULL, }; static struct platform_device wfd_device = { .name = "msm_wfd", .id = -1, }; #endif #ifdef CONFIG_MSM_BUS_SCALING static struct msm_bus_vectors dtv_bus_init_vectors[] = { { .src = MSM_BUS_MASTER_MDP_PORT0, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, }; #ifdef CONFIG_FB_MSM_HDMI_AS_PRIMARY static struct msm_bus_vectors dtv_bus_def_vectors[] = { { .src = MSM_BUS_MASTER_MDP_PORT0, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 2000000000, .ib = 2000000000, }, }; #else static struct msm_bus_vectors dtv_bus_def_vectors[] = { { .src = MSM_BUS_MASTER_MDP_PORT0, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 566092800 * 2, .ib = 707616000 * 2, }, }; #endif static struct msm_bus_paths dtv_bus_scale_usecases[] = { { ARRAY_SIZE(dtv_bus_init_vectors), dtv_bus_init_vectors, }, { ARRAY_SIZE(dtv_bus_def_vectors), dtv_bus_def_vectors, }, }; static struct msm_bus_scale_pdata dtv_bus_scale_pdata = { dtv_bus_scale_usecases, ARRAY_SIZE(dtv_bus_scale_usecases), .name = "dtv", }; static struct lcdc_platform_data dtv_pdata = { .bus_scale_table = &dtv_bus_scale_pdata, }; #endif #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL static int hdmi_enable_5v(int on) { static struct regulator *reg_ext_5v; /* HDMI_5V */ static int prev_on; int rc; if (on == prev_on) return 0; if (!reg_ext_5v) reg_ext_5v = regulator_get(&hdmi_msm_device.dev, "hdmi_mvs"); if (on) { rc = regulator_enable(reg_ext_5v); if (rc) { pr_err("'%s' regulator enable failed, rc=%d\n", "reg_ext_5v", rc); return rc; } pr_debug("%s(on): success\n", __func__); } 
else { rc = regulator_disable(reg_ext_5v); if (rc) pr_warning("'%s' regulator disable failed, rc=%d\n", "reg_ext_5v", rc); pr_debug("%s(off): success\n", __func__); } prev_on = on; return 0; } static int hdmi_core_power(int on, int show) { /* Both HDMI "avdd" and "vcc" are powered by 8038_l23 regulator */ static struct regulator *reg_8038_l23; static int prev_on; int rc; if (on == prev_on) return 0; if (!reg_8038_l23) { reg_8038_l23 = regulator_get(&hdmi_msm_device.dev, "hdmi_avdd"); if (IS_ERR(reg_8038_l23)) { pr_err("could not get reg_8038_l23, rc = %ld\n", PTR_ERR(reg_8038_l23)); return -ENODEV; } rc = regulator_set_voltage(reg_8038_l23, 1800000, 1800000); if (rc) { pr_err("set_voltage failed for 8921_l23, rc=%d\n", rc); return -EINVAL; } } if (on) { rc = regulator_set_optimum_mode(reg_8038_l23, 100000); if (rc < 0) { pr_err("set_optimum_mode l23 failed, rc=%d\n", rc); return -EINVAL; } rc = regulator_enable(reg_8038_l23); if (rc) { pr_err("'%s' regulator enable failed, rc=%d\n", "hdmi_avdd", rc); return rc; } rc = gpio_request(100, "HDMI_DDC_CLK"); if (rc) { pr_err("'%s'(%d) gpio_request failed, rc=%d\n", "HDMI_DDC_CLK", 100, rc); goto error1; } rc = gpio_request(101, "HDMI_DDC_DATA"); if (rc) { pr_err("'%s'(%d) gpio_request failed, rc=%d\n", "HDMI_DDC_DATA", 101, rc); goto error2; } rc = gpio_request(102, "HDMI_HPD"); if (rc) { pr_err("'%s'(%d) gpio_request failed, rc=%d\n", "HDMI_HPD", 102, rc); goto error3; } pr_debug("%s(on): success\n", __func__); } else { gpio_free(100); gpio_free(101); gpio_free(102); rc = regulator_disable(reg_8038_l23); if (rc) { pr_err("disable reg_8038_l23 failed, rc=%d\n", rc); return -ENODEV; } rc = regulator_set_optimum_mode(reg_8038_l23, 100); if (rc < 0) { pr_err("set_optimum_mode l23 failed, rc=%d\n", rc); return -EINVAL; } pr_debug("%s(off): success\n", __func__); } prev_on = on; return 0; error3: gpio_free(101); error2: gpio_free(100); error1: regulator_disable(reg_8038_l23); return rc; } static int hdmi_cec_power(int on) { static int prev_on; int rc; if (on == prev_on) return 0; if (on) { rc = gpio_request(99, "HDMI_CEC_VAR"); if (rc) { pr_err("'%s'(%d) gpio_request failed, rc=%d\n", "HDMI_CEC_VAR", 99, rc); goto error; } pr_debug("%s(on): success\n", __func__); } else { gpio_free(99); pr_debug("%s(off): success\n", __func__); } prev_on = on; return 0; error: return rc; } #endif /* CONFIG_FB_MSM_HDMI_MSM_PANEL */ void __init msm8930_init_fb(void) { platform_device_register(&msm_fb_device); #ifdef CONFIG_FB_MSM_WRITEBACK_MSM_PANEL platform_device_register(&wfd_panel_device); platform_device_register(&wfd_device); #endif platform_device_register(&mipi_dsi_novatek_panel_device); #ifdef CONFIG_FB_MSM_HDMI_MSM_PANEL if (!cpu_is_msm8930()) platform_device_register(&hdmi_msm_device); #endif platform_device_register(&mipi_dsi_toshiba_panel_device); msm_fb_register_device("mdp", &mdp_pdata); msm_fb_register_device("mipi_dsi", &mipi_dsi_pdata); #ifdef CONFIG_MSM_BUS_SCALING msm_fb_register_device("dtv", &dtv_pdata); #endif } void __init msm8930_allocate_fb_region(void) { void *addr; unsigned long size; size = MSM_FB_SIZE; addr = alloc_bootmem_align(size, 0x1000); msm_fb_resources[0].start = __pa(addr); msm_fb_resources[0].end = msm_fb_resources[0].start + size - 1; pr_info("allocating %lu bytes at %p (%lx physical) for fb\n", size, addr, __pa(addr)); }
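/*
 * Editor's illustrative sketch (not part of the original board file): the
 * HDMI power helpers above all follow the same shape -- a lazily acquired
 * regulator, a prev_on guard so repeated calls are idempotent, and an error
 * path that undoes partial setup.  The helper below restates that pattern in
 * isolation; "example_supply" and the device pointer are hypothetical
 * placeholders, not names used by this board file.
 */
static int example_supply_power(struct device *dev, int on)
{
	static struct regulator *reg;	/* acquired once, reused afterwards */
	static int prev_on;
	int rc;

	if (on == prev_on)
		return 0;		/* already in the requested state */

	if (!reg) {
		reg = regulator_get(dev, "example_supply");
		if (IS_ERR(reg)) {
			rc = PTR_ERR(reg);
			reg = NULL;	/* retry on the next call */
			return rc;
		}
	}

	if (on) {
		rc = regulator_enable(reg);
		if (rc) {
			pr_err("example_supply enable failed, rc=%d\n", rc);
			return rc;
		}
	} else {
		rc = regulator_disable(reg);
		if (rc)
			pr_warning("example_supply disable failed, rc=%d\n", rc);
	}

	prev_on = on;
	return 0;
}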
gpl-2.0
TeamEpsilon/linux-3.8-test_context
drivers/pci/pci-driver.c
38
31948
/* * drivers/pci/pci-driver.c * * (C) Copyright 2002-2004, 2007 Greg Kroah-Hartman <greg@kroah.com> * (C) Copyright 2007 Novell Inc. * * Released under the GPL v2 only. * */ #include <linux/pci.h> #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/mempolicy.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/cpu.h> #include <linux/pm_runtime.h> #include <linux/suspend.h> #include "pci.h" struct pci_dynid { struct list_head node; struct pci_device_id id; }; /** * pci_add_dynid - add a new PCI device ID to this driver and re-probe devices * @drv: target pci driver * @vendor: PCI vendor ID * @device: PCI device ID * @subvendor: PCI subvendor ID * @subdevice: PCI subdevice ID * @class: PCI class * @class_mask: PCI class mask * @driver_data: private driver data * * Adds a new dynamic pci device ID to this driver and causes the * driver to probe for all devices again. @drv must have been * registered prior to calling this function. * * CONTEXT: * Does GFP_KERNEL allocation. * * RETURNS: * 0 on success, -errno on failure. */ int pci_add_dynid(struct pci_driver *drv, unsigned int vendor, unsigned int device, unsigned int subvendor, unsigned int subdevice, unsigned int class, unsigned int class_mask, unsigned long driver_data) { struct pci_dynid *dynid; int retval; dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); if (!dynid) return -ENOMEM; dynid->id.vendor = vendor; dynid->id.device = device; dynid->id.subvendor = subvendor; dynid->id.subdevice = subdevice; dynid->id.class = class; dynid->id.class_mask = class_mask; dynid->id.driver_data = driver_data; spin_lock(&drv->dynids.lock); list_add_tail(&dynid->node, &drv->dynids.list); spin_unlock(&drv->dynids.lock); retval = driver_attach(&drv->driver); return retval; } static void pci_free_dynids(struct pci_driver *drv) { struct pci_dynid *dynid, *n; spin_lock(&drv->dynids.lock); list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) { list_del(&dynid->node); kfree(dynid); } spin_unlock(&drv->dynids.lock); } /** * store_new_id - sysfs frontend to pci_add_dynid() * @driver: target device driver * @buf: buffer for scanning device ID data * @count: input size * * Allow PCI IDs to be added to an existing driver via sysfs. */ static ssize_t store_new_id(struct device_driver *driver, const char *buf, size_t count) { struct pci_driver *pdrv = to_pci_driver(driver); const struct pci_device_id *ids = pdrv->id_table; __u32 vendor, device, subvendor=PCI_ANY_ID, subdevice=PCI_ANY_ID, class=0, class_mask=0; unsigned long driver_data=0; int fields=0; int retval; fields = sscanf(buf, "%x %x %x %x %x %x %lx", &vendor, &device, &subvendor, &subdevice, &class, &class_mask, &driver_data); if (fields < 2) return -EINVAL; /* Only accept driver_data values that match an existing id_table entry */ if (ids) { retval = -EINVAL; while (ids->vendor || ids->subvendor || ids->class_mask) { if (driver_data == ids->driver_data) { retval = 0; break; } ids++; } if (retval) /* No match */ return retval; } retval = pci_add_dynid(pdrv, vendor, device, subvendor, subdevice, class, class_mask, driver_data); if (retval) return retval; return count; } /** * store_remove_id - remove a PCI device ID from this driver * @driver: target device driver * @buf: buffer for scanning device ID data * @count: input size * * Removes a dynamic pci device ID to this driver. 
*/ static ssize_t store_remove_id(struct device_driver *driver, const char *buf, size_t count) { struct pci_dynid *dynid, *n; struct pci_driver *pdrv = to_pci_driver(driver); __u32 vendor, device, subvendor = PCI_ANY_ID, subdevice = PCI_ANY_ID, class = 0, class_mask = 0; int fields = 0; int retval = -ENODEV; fields = sscanf(buf, "%x %x %x %x %x %x", &vendor, &device, &subvendor, &subdevice, &class, &class_mask); if (fields < 2) return -EINVAL; spin_lock(&pdrv->dynids.lock); list_for_each_entry_safe(dynid, n, &pdrv->dynids.list, node) { struct pci_device_id *id = &dynid->id; if ((id->vendor == vendor) && (id->device == device) && (subvendor == PCI_ANY_ID || id->subvendor == subvendor) && (subdevice == PCI_ANY_ID || id->subdevice == subdevice) && !((id->class ^ class) & class_mask)) { list_del(&dynid->node); kfree(dynid); retval = 0; break; } } spin_unlock(&pdrv->dynids.lock); if (retval) return retval; return count; } static struct driver_attribute pci_drv_attrs[] = { __ATTR(new_id, S_IWUSR, NULL, store_new_id), __ATTR(remove_id, S_IWUSR, NULL, store_remove_id), __ATTR_NULL, }; /** * pci_match_id - See if a pci device matches a given pci_id table * @ids: array of PCI device id structures to search in * @dev: the PCI device structure to match against. * * Used by a driver to check whether a PCI device present in the * system is in its list of supported devices. Returns the matching * pci_device_id structure or %NULL if there is no match. * * Deprecated, don't use this as it will not catch any dynamic ids * that a driver might want to check for. */ const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, struct pci_dev *dev) { if (ids) { while (ids->vendor || ids->subvendor || ids->class_mask) { if (pci_match_one_device(ids, dev)) return ids; ids++; } } return NULL; } /** * pci_match_device - Tell if a PCI device structure has a matching PCI device id structure * @drv: the PCI driver to match against * @dev: the PCI device structure to match against * * Used by a driver to check whether a PCI device present in the * system is in its list of supported devices. Returns the matching * pci_device_id structure or %NULL if there is no match. */ static const struct pci_device_id *pci_match_device(struct pci_driver *drv, struct pci_dev *dev) { struct pci_dynid *dynid; /* Look at the dynamic ids first, before the static ones */ spin_lock(&drv->dynids.lock); list_for_each_entry(dynid, &drv->dynids.list, node) { if (pci_match_one_device(&dynid->id, dev)) { spin_unlock(&drv->dynids.lock); return &dynid->id; } } spin_unlock(&drv->dynids.lock); return pci_match_id(drv->id_table, dev); } struct drv_dev_and_id { struct pci_driver *drv; struct pci_dev *dev; const struct pci_device_id *id; }; static long local_pci_probe(void *_ddi) { struct drv_dev_and_id *ddi = _ddi; struct pci_dev *pci_dev = ddi->dev; struct pci_driver *pci_drv = ddi->drv; struct device *dev = &pci_dev->dev; int rc; /* * Unbound PCI devices are always put in D0, regardless of * runtime PM status. During probe, the device is set to * active and the usage count is incremented. If the driver * supports runtime PM, it should call pm_runtime_put_noidle() * in its probe routine and pm_runtime_get_noresume() in its * remove routine. 
*/ pm_runtime_get_sync(dev); pci_dev->driver = pci_drv; rc = pci_drv->probe(pci_dev, ddi->id); if (rc) { pci_dev->driver = NULL; pm_runtime_put_sync(dev); } return rc; } static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, const struct pci_device_id *id) { int error, node; struct drv_dev_and_id ddi = { drv, dev, id }; /* Execute driver initialization on node where the device's bus is attached to. This way the driver likely allocates its local memory on the right node without any need to change it. */ node = dev_to_node(&dev->dev); if (node >= 0) { int cpu; get_online_cpus(); cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask); if (cpu < nr_cpu_ids) error = work_on_cpu(cpu, local_pci_probe, &ddi); else error = local_pci_probe(&ddi); put_online_cpus(); } else error = local_pci_probe(&ddi); return error; } /** * __pci_device_probe - check if a driver wants to claim a specific PCI device * @drv: driver to call to check if it wants the PCI device * @pci_dev: PCI device being probed * * returns 0 on success, else error. * side-effect: pci_dev->driver is set to drv when drv claims pci_dev. */ static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev) { const struct pci_device_id *id; int error = 0; if (!pci_dev->driver && drv->probe) { error = -ENODEV; id = pci_match_device(drv, pci_dev); if (id) error = pci_call_probe(drv, pci_dev, id); if (error >= 0) error = 0; } return error; } static int pci_device_probe(struct device * dev) { int error = 0; struct pci_driver *drv; struct pci_dev *pci_dev; drv = to_pci_driver(dev->driver); pci_dev = to_pci_dev(dev); pci_dev_get(pci_dev); error = __pci_device_probe(drv, pci_dev); if (error) pci_dev_put(pci_dev); return error; } static int pci_device_remove(struct device * dev) { struct pci_dev * pci_dev = to_pci_dev(dev); struct pci_driver * drv = pci_dev->driver; if (drv) { if (drv->remove) { pm_runtime_get_sync(dev); drv->remove(pci_dev); pm_runtime_put_noidle(dev); } pci_dev->driver = NULL; } /* Undo the runtime PM settings in local_pci_probe() */ pm_runtime_put_sync(dev); /* * If the device is still on, set the power state as "unknown", * since it might change by the next time we load the driver. */ if (pci_dev->current_state == PCI_D0) pci_dev->current_state = PCI_UNKNOWN; /* * We would love to complain here if pci_dev->is_enabled is set, that * the driver should have called pci_disable_device(), but the * unfortunate fact is there are too many odd BIOS and bridge setups * that don't like drivers doing that all of the time. * Oh well, we can dream of sane hardware when we sleep, no matter how * horrible the crap we have to deal with is when we are awake... */ pci_dev_put(pci_dev); return 0; } static void pci_device_shutdown(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct pci_driver *drv = pci_dev->driver; pm_runtime_resume(dev); if (drv && drv->shutdown) drv->shutdown(pci_dev); pci_msi_shutdown(pci_dev); pci_msix_shutdown(pci_dev); /* * Turn off Bus Master bit on the device to tell it to not * continue to do DMA */ pci_disable_device(pci_dev); } #ifdef CONFIG_PM /* Auxiliary functions used for system resume and run-time resume. 
*/ /** * pci_restore_standard_config - restore standard config registers of PCI device * @pci_dev: PCI device to handle */ static int pci_restore_standard_config(struct pci_dev *pci_dev) { pci_update_current_state(pci_dev, PCI_UNKNOWN); if (pci_dev->current_state != PCI_D0) { int error = pci_set_power_state(pci_dev, PCI_D0); if (error) return error; } pci_restore_state(pci_dev); return 0; } #endif #ifdef CONFIG_PM_SLEEP static void pci_pm_default_resume_early(struct pci_dev *pci_dev) { pci_power_up(pci_dev); pci_restore_state(pci_dev); pci_fixup_device(pci_fixup_resume_early, pci_dev); } /* * Default "suspend" method for devices that have no driver provided suspend, * or not even a driver at all (second part). */ static void pci_pm_set_unknown_state(struct pci_dev *pci_dev) { /* * mark its power state as "unknown", since we don't know if * e.g. the BIOS will change its device state when we suspend. */ if (pci_dev->current_state == PCI_D0) pci_dev->current_state = PCI_UNKNOWN; } /* * Default "resume" method for devices that have no driver provided resume, * or not even a driver at all (second part). */ static int pci_pm_reenable_device(struct pci_dev *pci_dev) { int retval; /* if the device was enabled before suspend, reenable */ retval = pci_reenable_device(pci_dev); /* * if the device was busmaster before the suspend, make it busmaster * again */ if (pci_dev->is_busmaster) pci_set_master(pci_dev); return retval; } static int pci_legacy_suspend(struct device *dev, pm_message_t state) { struct pci_dev * pci_dev = to_pci_dev(dev); struct pci_driver * drv = pci_dev->driver; if (drv && drv->suspend) { pci_power_t prev = pci_dev->current_state; int error; error = drv->suspend(pci_dev, state); suspend_report_result(drv->suspend, error); if (error) return error; if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0 && pci_dev->current_state != PCI_UNKNOWN) { WARN_ONCE(pci_dev->current_state != prev, "PCI PM: Device state not saved by %pF\n", drv->suspend); } } pci_fixup_device(pci_fixup_suspend, pci_dev); return 0; } static int pci_legacy_suspend_late(struct device *dev, pm_message_t state) { struct pci_dev * pci_dev = to_pci_dev(dev); struct pci_driver * drv = pci_dev->driver; if (drv && drv->suspend_late) { pci_power_t prev = pci_dev->current_state; int error; error = drv->suspend_late(pci_dev, state); suspend_report_result(drv->suspend_late, error); if (error) return error; if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0 && pci_dev->current_state != PCI_UNKNOWN) { WARN_ONCE(pci_dev->current_state != prev, "PCI PM: Device state not saved by %pF\n", drv->suspend_late); return 0; } } if (!pci_dev->state_saved) pci_save_state(pci_dev); pci_pm_set_unknown_state(pci_dev); return 0; } static int pci_legacy_resume_early(struct device *dev) { struct pci_dev * pci_dev = to_pci_dev(dev); struct pci_driver * drv = pci_dev->driver; return drv && drv->resume_early ? drv->resume_early(pci_dev) : 0; } static int pci_legacy_resume(struct device *dev) { struct pci_dev * pci_dev = to_pci_dev(dev); struct pci_driver * drv = pci_dev->driver; pci_fixup_device(pci_fixup_resume, pci_dev); return drv && drv->resume ? 
drv->resume(pci_dev) : pci_pm_reenable_device(pci_dev); } /* Auxiliary functions used by the new power management framework */ static void pci_pm_default_resume(struct pci_dev *pci_dev) { pci_fixup_device(pci_fixup_resume, pci_dev); if (!pci_is_bridge(pci_dev)) pci_enable_wake(pci_dev, PCI_D0, false); } static void pci_pm_default_suspend(struct pci_dev *pci_dev) { /* Disable non-bridge devices without PM support */ if (!pci_is_bridge(pci_dev)) pci_disable_enabled_device(pci_dev); } static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev) { struct pci_driver *drv = pci_dev->driver; bool ret = drv && (drv->suspend || drv->suspend_late || drv->resume || drv->resume_early); /* * Legacy PM support is used by default, so warn if the new framework is * supported as well. Drivers are supposed to support either the * former, or the latter, but not both at the same time. */ WARN(ret && drv->driver.pm, "driver %s device %04x:%04x\n", drv->name, pci_dev->vendor, pci_dev->device); return ret; } /* New power management framework */ static int pci_pm_prepare(struct device *dev) { struct device_driver *drv = dev->driver; int error = 0; /* * PCI devices suspended at run time need to be resumed at this * point, because in general it is necessary to reconfigure them for * system suspend. Namely, if the device is supposed to wake up the * system from the sleep state, we may need to reconfigure it for this * purpose. In turn, if the device is not supposed to wake up the * system from the sleep state, we'll have to prevent it from signaling * wake-up. */ pm_runtime_resume(dev); if (drv && drv->pm && drv->pm->prepare) error = drv->pm->prepare(dev); return error; } static void pci_pm_complete(struct device *dev) { struct device_driver *drv = dev->driver; if (drv && drv->pm && drv->pm->complete) drv->pm->complete(dev); } #else /* !CONFIG_PM_SLEEP */ #define pci_pm_prepare NULL #define pci_pm_complete NULL #endif /* !CONFIG_PM_SLEEP */ #ifdef CONFIG_SUSPEND static int pci_pm_suspend(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_suspend(dev, PMSG_SUSPEND); if (!pm) { pci_pm_default_suspend(pci_dev); goto Fixup; } pci_dev->state_saved = false; if (pm->suspend) { pci_power_t prev = pci_dev->current_state; int error; error = pm->suspend(dev); suspend_report_result(pm->suspend, error); if (error) return error; if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0 && pci_dev->current_state != PCI_UNKNOWN) { WARN_ONCE(pci_dev->current_state != prev, "PCI PM: State of device not saved by %pF\n", pm->suspend); } } Fixup: pci_fixup_device(pci_fixup_suspend, pci_dev); return 0; } static int pci_pm_suspend_noirq(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_suspend_late(dev, PMSG_SUSPEND); if (!pm) { pci_save_state(pci_dev); return 0; } if (pm->suspend_noirq) { pci_power_t prev = pci_dev->current_state; int error; error = pm->suspend_noirq(dev); suspend_report_result(pm->suspend_noirq, error); if (error) return error; if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0 && pci_dev->current_state != PCI_UNKNOWN) { WARN_ONCE(pci_dev->current_state != prev, "PCI PM: State of device not saved by %pF\n", pm->suspend_noirq); return 0; } } if (!pci_dev->state_saved) { pci_save_state(pci_dev); if (!pci_is_bridge(pci_dev)) pci_prepare_to_sleep(pci_dev); } pci_pm_set_unknown_state(pci_dev); /* * Some BIOSes from ASUS have a bug: If a USB EHCI host controller's * PCI COMMAND register isn't 0, the BIOS assumes that the controller * hasn't been quiesced and tries to turn it off. If the controller * is already in D3, this can hang or cause memory corruption. * * Since the value of the COMMAND register doesn't matter once the * device has been suspended, we can safely set it to 0 here. */ if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI) pci_write_config_word(pci_dev, PCI_COMMAND, 0); return 0; } static int pci_pm_resume_noirq(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct device_driver *drv = dev->driver; int error = 0; pci_pm_default_resume_early(pci_dev); if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_resume_early(dev); if (drv && drv->pm && drv->pm->resume_noirq) error = drv->pm->resume_noirq(dev); return error; } static int pci_pm_resume(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; int error = 0; /* * This is necessary for the suspend error path in which resume is * called without restoring the standard config registers of the device. */ if (pci_dev->state_saved) pci_restore_standard_config(pci_dev); if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_resume(dev); pci_pm_default_resume(pci_dev); if (pm) { if (pm->resume) error = pm->resume(dev); } else { pci_pm_reenable_device(pci_dev); } return error; } #else /* !CONFIG_SUSPEND */ #define pci_pm_suspend NULL #define pci_pm_suspend_noirq NULL #define pci_pm_resume NULL #define pci_pm_resume_noirq NULL #endif /* !CONFIG_SUSPEND */ #ifdef CONFIG_HIBERNATE_CALLBACKS static int pci_pm_freeze(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_suspend(dev, PMSG_FREEZE); if (!pm) { pci_pm_default_suspend(pci_dev); return 0; } pci_dev->state_saved = false; if (pm->freeze) { int error; error = pm->freeze(dev); suspend_report_result(pm->freeze, error); if (error) return error; } return 0; } static int pci_pm_freeze_noirq(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct device_driver *drv = dev->driver; if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_suspend_late(dev, PMSG_FREEZE); if (drv && drv->pm && drv->pm->freeze_noirq) { int error; error = drv->pm->freeze_noirq(dev); suspend_report_result(drv->pm->freeze_noirq, error); if (error) return error; } if (!pci_dev->state_saved) pci_save_state(pci_dev); pci_pm_set_unknown_state(pci_dev); return 0; } static int pci_pm_thaw_noirq(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct device_driver *drv = dev->driver; int error = 0; if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_resume_early(dev); pci_update_current_state(pci_dev, PCI_D0); if (drv && drv->pm && drv->pm->thaw_noirq) error = drv->pm->thaw_noirq(dev); return error; } static int pci_pm_thaw(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; int error = 0; if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_resume(dev); if (pm) { if (pm->thaw) error = pm->thaw(dev); } else { pci_pm_reenable_device(pci_dev); } pci_dev->state_saved = false; return error; } static int pci_pm_poweroff(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_suspend(dev, PMSG_HIBERNATE); if (!pm) { pci_pm_default_suspend(pci_dev); goto Fixup; } pci_dev->state_saved = false; if (pm->poweroff) { int error; error = pm->poweroff(dev); suspend_report_result(pm->poweroff, error); if (error) return error; } Fixup: pci_fixup_device(pci_fixup_suspend, pci_dev); return 0; } static int pci_pm_poweroff_noirq(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct device_driver *drv = dev->driver; if (pci_has_legacy_pm_support(to_pci_dev(dev))) return pci_legacy_suspend_late(dev, PMSG_HIBERNATE); if (!drv || !drv->pm) return 0; if (drv->pm->poweroff_noirq) { int error; error = drv->pm->poweroff_noirq(dev); suspend_report_result(drv->pm->poweroff_noirq, error); if (error) return error; } if (!pci_dev->state_saved && !pci_is_bridge(pci_dev)) pci_prepare_to_sleep(pci_dev); /* * The reason for doing this here is the same as for the analogous code * in pci_pm_suspend_noirq(). */ if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI) pci_write_config_word(pci_dev, PCI_COMMAND, 0); return 0; } static int pci_pm_restore_noirq(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct device_driver *drv = dev->driver; int error = 0; pci_pm_default_resume_early(pci_dev); if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_resume_early(dev); if (drv && drv->pm && drv->pm->restore_noirq) error = drv->pm->restore_noirq(dev); return error; } static int pci_pm_restore(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; int error = 0; /* * This is necessary for the hibernation error path in which restore is * called without restoring the standard config registers of the device. 
*/ if (pci_dev->state_saved) pci_restore_standard_config(pci_dev); if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_resume(dev); pci_pm_default_resume(pci_dev); if (pm) { if (pm->restore) error = pm->restore(dev); } else { pci_pm_reenable_device(pci_dev); } return error; } #else /* !CONFIG_HIBERNATE_CALLBACKS */ #define pci_pm_freeze NULL #define pci_pm_freeze_noirq NULL #define pci_pm_thaw NULL #define pci_pm_thaw_noirq NULL #define pci_pm_poweroff NULL #define pci_pm_poweroff_noirq NULL #define pci_pm_restore NULL #define pci_pm_restore_noirq NULL #endif /* !CONFIG_HIBERNATE_CALLBACKS */ #ifdef CONFIG_PM_RUNTIME static int pci_pm_runtime_suspend(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; pci_power_t prev = pci_dev->current_state; int error; /* * If pci_dev->driver is not set (unbound), the device should * always remain in D0 regardless of the runtime PM status */ if (!pci_dev->driver) return 0; if (!pm || !pm->runtime_suspend) return -ENOSYS; pci_dev->state_saved = false; pci_dev->no_d3cold = false; error = pm->runtime_suspend(dev); suspend_report_result(pm->runtime_suspend, error); if (error) return error; if (!pci_dev->d3cold_allowed) pci_dev->no_d3cold = true; pci_fixup_device(pci_fixup_suspend, pci_dev); if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0 && pci_dev->current_state != PCI_UNKNOWN) { WARN_ONCE(pci_dev->current_state != prev, "PCI PM: State of device not saved by %pF\n", pm->runtime_suspend); return 0; } if (!pci_dev->state_saved) { pci_save_state(pci_dev); pci_finish_runtime_suspend(pci_dev); } return 0; } static int pci_pm_runtime_resume(struct device *dev) { int rc; struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; /* * If pci_dev->driver is not set (unbound), the device should * always remain in D0 regardless of the runtime PM status */ if (!pci_dev->driver) return 0; if (!pm || !pm->runtime_resume) return -ENOSYS; pci_restore_standard_config(pci_dev); pci_fixup_device(pci_fixup_resume_early, pci_dev); __pci_enable_wake(pci_dev, PCI_D0, true, false); pci_fixup_device(pci_fixup_resume, pci_dev); rc = pm->runtime_resume(dev); pci_dev->runtime_d3cold = false; return rc; } static int pci_pm_runtime_idle(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; /* * If pci_dev->driver is not set (unbound), the device should * always remain in D0 regardless of the runtime PM status */ if (!pci_dev->driver) goto out; if (!pm) return -ENOSYS; if (pm->runtime_idle) { int ret = pm->runtime_idle(dev); if (ret) return ret; } out: pm_runtime_suspend(dev); return 0; } #else /* !CONFIG_PM_RUNTIME */ #define pci_pm_runtime_suspend NULL #define pci_pm_runtime_resume NULL #define pci_pm_runtime_idle NULL #endif /* !CONFIG_PM_RUNTIME */ #ifdef CONFIG_PM const struct dev_pm_ops pci_dev_pm_ops = { .prepare = pci_pm_prepare, .complete = pci_pm_complete, .suspend = pci_pm_suspend, .resume = pci_pm_resume, .freeze = pci_pm_freeze, .thaw = pci_pm_thaw, .poweroff = pci_pm_poweroff, .restore = pci_pm_restore, .suspend_noirq = pci_pm_suspend_noirq, .resume_noirq = pci_pm_resume_noirq, .freeze_noirq = pci_pm_freeze_noirq, .thaw_noirq = pci_pm_thaw_noirq, .poweroff_noirq = pci_pm_poweroff_noirq, .restore_noirq = pci_pm_restore_noirq, .runtime_suspend = pci_pm_runtime_suspend, .runtime_resume = pci_pm_runtime_resume, .runtime_idle = pci_pm_runtime_idle, }; #define PCI_PM_OPS_PTR (&pci_dev_pm_ops) #else /* !COMFIG_PM_OPS */ #define PCI_PM_OPS_PTR NULL #endif /* !COMFIG_PM_OPS */ /** * __pci_register_driver - register a new pci driver * @drv: the driver structure to register * @owner: owner module of drv * @mod_name: module name string * * Adds the driver structure to the list of registered drivers. * Returns a negative value on error, otherwise 0. * If no error occurred, the driver remains registered even if * no device was claimed during registration. */ int __pci_register_driver(struct pci_driver *drv, struct module *owner, const char *mod_name) { /* initialize common driver fields */ drv->driver.name = drv->name; drv->driver.bus = &pci_bus_type; drv->driver.owner = owner; drv->driver.mod_name = mod_name; spin_lock_init(&drv->dynids.lock); INIT_LIST_HEAD(&drv->dynids.list); /* register with core */ return driver_register(&drv->driver); } /** * pci_unregister_driver - unregister a pci driver * @drv: the driver structure to unregister * * Deletes the driver structure from the list of registered PCI drivers, * gives it a chance to clean up by calling its remove() function for * each device it was responsible for, and marks those devices as * driverless. */ void pci_unregister_driver(struct pci_driver *drv) { driver_unregister(&drv->driver); pci_free_dynids(drv); } static struct pci_driver pci_compat_driver = { .name = "compat" }; /** * pci_dev_driver - get the pci_driver of a device * @dev: the device to query * * Returns the appropriate pci_driver structure or %NULL if there is no * registered driver for the device. */ struct pci_driver * pci_dev_driver(const struct pci_dev *dev) { if (dev->driver) return dev->driver; else { int i; for(i=0; i<=PCI_ROM_RESOURCE; i++) if (dev->resource[i].flags & IORESOURCE_BUSY) return &pci_compat_driver; } return NULL; } /** * pci_bus_match - Tell if a PCI device structure has a matching PCI device id structure * @dev: the PCI device structure to match against * @drv: the device driver to search for matching PCI device id structures * * Used by a driver to check whether a PCI device present in the * system is in its list of supported devices. Returns the matching * pci_device_id structure or %NULL if there is no match. 
*/ static int pci_bus_match(struct device *dev, struct device_driver *drv) { struct pci_dev *pci_dev = to_pci_dev(dev); struct pci_driver *pci_drv = to_pci_driver(drv); const struct pci_device_id *found_id; found_id = pci_match_device(pci_drv, pci_dev); if (found_id) return 1; return 0; } /** * pci_dev_get - increments the reference count of the pci device structure * @dev: the device being referenced * * Each live reference to a device should be refcounted. * * Drivers for PCI devices should normally record such references in * their probe() methods, when they bind to a device, and release * them by calling pci_dev_put(), in their disconnect() methods. * * A pointer to the device with the incremented reference counter is returned. */ struct pci_dev *pci_dev_get(struct pci_dev *dev) { if (dev) get_device(&dev->dev); return dev; } /** * pci_dev_put - release a use of the pci device structure * @dev: device that's been disconnected * * Must be called when a user of a device is finished with it. When the last * user of the device calls this function, the memory of the device is freed. */ void pci_dev_put(struct pci_dev *dev) { if (dev) put_device(&dev->dev); } static int pci_uevent(struct device *dev, struct kobj_uevent_env *env) { struct pci_dev *pdev; if (!dev) return -ENODEV; pdev = to_pci_dev(dev); if (!pdev) return -ENODEV; if (add_uevent_var(env, "PCI_CLASS=%04X", pdev->class)) return -ENOMEM; if (add_uevent_var(env, "PCI_ID=%04X:%04X", pdev->vendor, pdev->device)) return -ENOMEM; if (add_uevent_var(env, "PCI_SUBSYS_ID=%04X:%04X", pdev->subsystem_vendor, pdev->subsystem_device)) return -ENOMEM; if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev))) return -ENOMEM; if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x", pdev->vendor, pdev->device, pdev->subsystem_vendor, pdev->subsystem_device, (u8)(pdev->class >> 16), (u8)(pdev->class >> 8), (u8)(pdev->class))) return -ENOMEM; return 0; } struct bus_type pci_bus_type = { .name = "pci", .match = pci_bus_match, .uevent = pci_uevent, .probe = pci_device_probe, .remove = pci_device_remove, .shutdown = pci_device_shutdown, .dev_attrs = pci_dev_attrs, .bus_attrs = pci_bus_attrs, .drv_attrs = pci_drv_attrs, .pm = PCI_PM_OPS_PTR, }; static int __init pci_driver_init(void) { return bus_register(&pci_bus_type); } postcore_initcall(pci_driver_init); EXPORT_SYMBOL_GPL(pci_add_dynid); EXPORT_SYMBOL(pci_match_id); EXPORT_SYMBOL(__pci_register_driver); EXPORT_SYMBOL(pci_unregister_driver); EXPORT_SYMBOL(pci_dev_driver); EXPORT_SYMBOL(pci_bus_type); EXPORT_SYMBOL(pci_dev_get); EXPORT_SYMBOL(pci_dev_put);
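/*
 * Editor's illustrative sketch (not part of pci-driver.c): how a client
 * driver ties into the interfaces above.  pci_bus_match()/pci_match_device()
 * match against the driver's static id_table, store_new_id() accepts extra
 * IDs written to sysfs at run time, and local_pci_probe() documents the
 * runtime-PM hand-off a probe/remove pair is expected to honour
 * (pm_runtime_put_noidle() in probe, pm_runtime_get_noresume() in remove).
 * All names and IDs below ("example", 0x1234/0x5678) are made up.
 */
static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* consulted by pci_match_device() */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_ids);

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc = pci_enable_device(pdev);

	if (rc)
		return rc;
	/*
	 * local_pci_probe() took a runtime-PM reference before calling us;
	 * drop it without idling so the device may runtime-suspend later.
	 */
	pm_runtime_put_noidle(&pdev->dev);
	return 0;
}

static void example_remove(struct pci_dev *pdev)
{
	/* Re-take the reference so pci_device_remove()'s final put balances. */
	pm_runtime_get_noresume(&pdev->dev);
	pci_disable_device(pdev);
}

static struct pci_driver example_driver = {
	.name		= "example",
	.id_table	= example_ids,
	.probe		= example_probe,
	.remove		= example_remove,
};
module_pci_driver(example_driver);

/*
 * A device absent from example_ids can still be bound later through
 * store_new_id() -> pci_add_dynid(), e.g.:
 *     echo "1234 abcd" > /sys/bus/pci/drivers/example/new_id
 */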
gpl-2.0
aristeu/linux-2.6
drivers/watchdog/renesas_wdt.c
38
8090
// SPDX-License-Identifier: GPL-2.0 /* * Watchdog driver for Renesas WDT watchdog * * Copyright (C) 2015-17 Wolfram Sang, Sang Engineering <wsa@sang-engineering.com> * Copyright (C) 2015-17 Renesas Electronics Corporation */ #include <linux/bitops.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/smp.h> #include <linux/sys_soc.h> #include <linux/watchdog.h> #define RWTCNT 0 #define RWTCSRA 4 #define RWTCSRA_WOVF BIT(4) #define RWTCSRA_WRFLG BIT(5) #define RWTCSRA_TME BIT(7) #define RWTCSRB 8 #define RWDT_DEFAULT_TIMEOUT 60U /* * In probe, clk_rate is checked to be not more than 16 bit * biggest clock * divider (12 bits). d is only a factor to fully utilize the WDT counter and * will not exceed its 16 bits. Thus, no overflow, we stay below 32 bits. */ #define MUL_BY_CLKS_PER_SEC(p, d) \ DIV_ROUND_UP((d) * (p)->clk_rate, clk_divs[(p)->cks]) /* d is 16 bit, clk_divs 12 bit -> no 32 bit overflow */ #define DIV_BY_CLKS_PER_SEC(p, d) ((d) * clk_divs[(p)->cks] / (p)->clk_rate) static const unsigned int clk_divs[] = { 1, 4, 16, 32, 64, 128, 1024, 4096 }; static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); struct rwdt_priv { void __iomem *base; struct watchdog_device wdev; unsigned long clk_rate; u8 cks; }; static void rwdt_write(struct rwdt_priv *priv, u32 val, unsigned int reg) { if (reg == RWTCNT) val |= 0x5a5a0000; else val |= 0xa5a5a500; writel_relaxed(val, priv->base + reg); } static int rwdt_init_timeout(struct watchdog_device *wdev) { struct rwdt_priv *priv = watchdog_get_drvdata(wdev); rwdt_write(priv, 65536 - MUL_BY_CLKS_PER_SEC(priv, wdev->timeout), RWTCNT); return 0; } static void rwdt_wait_cycles(struct rwdt_priv *priv, unsigned int cycles) { unsigned int delay; delay = DIV_ROUND_UP(cycles * 1000000, priv->clk_rate); usleep_range(delay, 2 * delay); } static int rwdt_start(struct watchdog_device *wdev) { struct rwdt_priv *priv = watchdog_get_drvdata(wdev); u8 val; pm_runtime_get_sync(wdev->parent); /* Stop the timer before we modify any register */ val = readb_relaxed(priv->base + RWTCSRA) & ~RWTCSRA_TME; rwdt_write(priv, val, RWTCSRA); /* Delay 2 cycles before setting watchdog counter */ rwdt_wait_cycles(priv, 2); rwdt_init_timeout(wdev); rwdt_write(priv, priv->cks, RWTCSRA); rwdt_write(priv, 0, RWTCSRB); while (readb_relaxed(priv->base + RWTCSRA) & RWTCSRA_WRFLG) cpu_relax(); rwdt_write(priv, priv->cks | RWTCSRA_TME, RWTCSRA); return 0; } static int rwdt_stop(struct watchdog_device *wdev) { struct rwdt_priv *priv = watchdog_get_drvdata(wdev); rwdt_write(priv, priv->cks, RWTCSRA); /* Delay 3 cycles before disabling module clock */ rwdt_wait_cycles(priv, 3); pm_runtime_put(wdev->parent); return 0; } static unsigned int rwdt_get_timeleft(struct watchdog_device *wdev) { struct rwdt_priv *priv = watchdog_get_drvdata(wdev); u16 val = readw_relaxed(priv->base + RWTCNT); return DIV_BY_CLKS_PER_SEC(priv, 65536 - val); } static int rwdt_restart(struct watchdog_device *wdev, unsigned long action, void *data) { struct rwdt_priv *priv = watchdog_get_drvdata(wdev); rwdt_start(wdev); rwdt_write(priv, 0xffff, RWTCNT); return 0; } static const struct watchdog_info rwdt_ident = { .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_CARDRESET, .identity = "Renesas 
WDT Watchdog", }; static const struct watchdog_ops rwdt_ops = { .owner = THIS_MODULE, .start = rwdt_start, .stop = rwdt_stop, .ping = rwdt_init_timeout, .get_timeleft = rwdt_get_timeleft, .restart = rwdt_restart, }; #if defined(CONFIG_ARCH_RCAR_GEN2) && defined(CONFIG_SMP) /* * Watchdog-reset integration is broken on early revisions of R-Car Gen2 SoCs */ static const struct soc_device_attribute rwdt_quirks_match[] = { { .soc_id = "r8a7790", .revision = "ES1.*", .data = (void *)1, /* needs single CPU */ }, { .soc_id = "r8a7791", .revision = "ES1.*", .data = (void *)1, /* needs single CPU */ }, { .soc_id = "r8a7792", .data = (void *)0, /* needs SMP disabled */ }, { /* sentinel */ } }; static bool rwdt_blacklisted(struct device *dev) { const struct soc_device_attribute *attr; attr = soc_device_match(rwdt_quirks_match); if (attr && setup_max_cpus > (uintptr_t)attr->data) { dev_info(dev, "Watchdog blacklisted on %s %s\n", attr->soc_id, attr->revision); return true; } return false; } #else /* !CONFIG_ARCH_RCAR_GEN2 || !CONFIG_SMP */ static inline bool rwdt_blacklisted(struct device *dev) { return false; } #endif /* !CONFIG_ARCH_RCAR_GEN2 || !CONFIG_SMP */ static int rwdt_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct rwdt_priv *priv; struct clk *clk; unsigned long clks_per_sec; int ret, i; u8 csra; if (rwdt_blacklisted(dev)) return -ENODEV; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); clk = devm_clk_get(dev, NULL); if (IS_ERR(clk)) return PTR_ERR(clk); pm_runtime_enable(dev); pm_runtime_get_sync(dev); priv->clk_rate = clk_get_rate(clk); csra = readb_relaxed(priv->base + RWTCSRA); priv->wdev.bootstatus = csra & RWTCSRA_WOVF ? 
WDIOF_CARDRESET : 0; pm_runtime_put(dev); if (!priv->clk_rate) { ret = -ENOENT; goto out_pm_disable; } for (i = ARRAY_SIZE(clk_divs) - 1; i >= 0; i--) { clks_per_sec = priv->clk_rate / clk_divs[i]; if (clks_per_sec && clks_per_sec < 65536) { priv->cks = i; break; } } if (i < 0) { dev_err(dev, "Can't find suitable clock divider\n"); ret = -ERANGE; goto out_pm_disable; } priv->wdev.info = &rwdt_ident; priv->wdev.ops = &rwdt_ops; priv->wdev.parent = dev; priv->wdev.min_timeout = 1; priv->wdev.max_timeout = DIV_BY_CLKS_PER_SEC(priv, 65536); priv->wdev.timeout = min(priv->wdev.max_timeout, RWDT_DEFAULT_TIMEOUT); platform_set_drvdata(pdev, priv); watchdog_set_drvdata(&priv->wdev, priv); watchdog_set_nowayout(&priv->wdev, nowayout); watchdog_set_restart_priority(&priv->wdev, 0); watchdog_stop_on_unregister(&priv->wdev); /* This overrides the default timeout only if DT configuration was found */ watchdog_init_timeout(&priv->wdev, 0, dev); /* Check if FW enabled the watchdog */ if (csra & RWTCSRA_TME) { /* Ensure properly initialized dividers */ rwdt_start(&priv->wdev); set_bit(WDOG_HW_RUNNING, &priv->wdev.status); } ret = watchdog_register_device(&priv->wdev); if (ret < 0) goto out_pm_disable; return 0; out_pm_disable: pm_runtime_disable(dev); return ret; } static int rwdt_remove(struct platform_device *pdev) { struct rwdt_priv *priv = platform_get_drvdata(pdev); watchdog_unregister_device(&priv->wdev); pm_runtime_disable(&pdev->dev); return 0; } static int __maybe_unused rwdt_suspend(struct device *dev) { struct rwdt_priv *priv = dev_get_drvdata(dev); if (watchdog_active(&priv->wdev)) rwdt_stop(&priv->wdev); return 0; } static int __maybe_unused rwdt_resume(struct device *dev) { struct rwdt_priv *priv = dev_get_drvdata(dev); if (watchdog_active(&priv->wdev)) rwdt_start(&priv->wdev); return 0; } static SIMPLE_DEV_PM_OPS(rwdt_pm_ops, rwdt_suspend, rwdt_resume); static const struct of_device_id rwdt_ids[] = { { .compatible = "renesas,rcar-gen2-wdt", }, { .compatible = "renesas,rcar-gen3-wdt", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, rwdt_ids); static struct platform_driver rwdt_driver = { .driver = { .name = "renesas_wdt", .of_match_table = rwdt_ids, .pm = &rwdt_pm_ops, }, .probe = rwdt_probe, .remove = rwdt_remove, }; module_platform_driver(rwdt_driver); MODULE_DESCRIPTION("Renesas WDT Watchdog Driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Wolfram Sang <wsa@sang-engineering.com>");
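/*
 * Editor's illustrative sketch (not part of renesas_wdt.c): the divider
 * selection in rwdt_probe() and the RWTCNT arithmetic are easier to follow
 * with concrete numbers.  The 32768 Hz input clock is an assumed example
 * value only; the real rate comes from clk_get_rate() in probe.
 *
 *   probe scans clk_divs[] from the largest divider down and keeps the
 *   first one giving 0 < clk_rate / div < 65536:
 *       32768 / 4096 = 8 counts per second  ->  cks = 7 (divider 4096)
 *
 *   max_timeout = DIV_BY_CLKS_PER_SEC(priv, 65536)
 *               = 65536 * 4096 / 32768 = 8192 s
 *   timeout     = min(max_timeout, RWDT_DEFAULT_TIMEOUT) = 60 s
 *
 *   rwdt_init_timeout() preloads the up-counter so it overflows (and the
 *   chip resets) after the requested time:
 *       RWTCNT = 65536 - DIV_ROUND_UP(60 * 32768, 4096)
 *              = 65536 - 480 = 65056        (480 ticks at 8 Hz = 60 s)
 *
 *   rwdt_get_timeleft() inverts this: (65536 - RWTCNT) * 4096 / 32768.
 */
static inline u16 example_rwdt_preload(unsigned long clk_rate,
				       unsigned int div, unsigned int secs)
{
	/* Same math as MUL_BY_CLKS_PER_SEC(); returns 65056 for the example. */
	return 65536 - DIV_ROUND_UP(secs * clk_rate, div);
}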
gpl-2.0
x13thangelx/droid2we-kernel
drivers/net/wireless/b43/main.c
294
135842
/* Broadcom B43 wireless driver Copyright (c) 2005 Martin Langer <martin-langer@gmx.de> Copyright (c) 2005 Stefano Brivio <stefano.brivio@polimi.it> Copyright (c) 2005-2009 Michael Buesch <mb@bu3sch.de> Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org> Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch> SDIO support Copyright (c) 2009 Albert Herranz <albert_herranz@yahoo.es> Some parts of the code in this file are derived from the ipw2200 driver Copyright(c) 2003 - 2004 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/delay.h> #include <linux/init.h> #include <linux/moduleparam.h> #include <linux/if_arp.h> #include <linux/etherdevice.h> #include <linux/firmware.h> #include <linux/wireless.h> #include <linux/workqueue.h> #include <linux/skbuff.h> #include <linux/io.h> #include <linux/dma-mapping.h> #include <asm/unaligned.h> #include "b43.h" #include "main.h" #include "debugfs.h" #include "phy_common.h" #include "phy_g.h" #include "phy_n.h" #include "dma.h" #include "pio.h" #include "sysfs.h" #include "xmit.h" #include "lo.h" #include "pcmcia.h" #include "sdio.h" #include <linux/mmc/sdio_func.h> MODULE_DESCRIPTION("Broadcom B43 wireless driver"); MODULE_AUTHOR("Martin Langer"); MODULE_AUTHOR("Stefano Brivio"); MODULE_AUTHOR("Michael Buesch"); MODULE_AUTHOR("Gábor Stefanik"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(B43_SUPPORTED_FIRMWARE_ID); static int modparam_bad_frames_preempt; module_param_named(bad_frames_preempt, modparam_bad_frames_preempt, int, 0444); MODULE_PARM_DESC(bad_frames_preempt, "enable(1) / disable(0) Bad Frames Preemption"); static char modparam_fwpostfix[16]; module_param_string(fwpostfix, modparam_fwpostfix, 16, 0444); MODULE_PARM_DESC(fwpostfix, "Postfix for the .fw files to load."); static int modparam_hwpctl; module_param_named(hwpctl, modparam_hwpctl, int, 0444); MODULE_PARM_DESC(hwpctl, "Enable hardware-side power control (default off)"); static int modparam_nohwcrypt; module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); static int modparam_hwtkip; module_param_named(hwtkip, modparam_hwtkip, int, 0444); MODULE_PARM_DESC(hwtkip, "Enable hardware tkip."); static int modparam_qos = 1; module_param_named(qos, modparam_qos, int, 0444); MODULE_PARM_DESC(qos, "Enable QOS support (default on)"); static int modparam_btcoex = 1; module_param_named(btcoex, modparam_btcoex, int, 0444); MODULE_PARM_DESC(btcoex, "Enable Bluetooth coexistence (default on)"); int b43_modparam_verbose = B43_VERBOSITY_DEFAULT; module_param_named(verbose, b43_modparam_verbose, int, 0644); MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug"); static const struct ssb_device_id b43_ssb_tbl[] = { SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5), SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 6), 
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 7), SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 9), SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 10), SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 11), SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 13), SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 15), SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 16), SSB_DEVTABLE_END }; MODULE_DEVICE_TABLE(ssb, b43_ssb_tbl); /* Channel and ratetables are shared for all devices. * They can't be const, because ieee80211 puts some precalculated * data in there. This data is the same for all devices, so we don't * get concurrency issues */ #define RATETAB_ENT(_rateid, _flags) \ { \ .bitrate = B43_RATE_TO_BASE100KBPS(_rateid), \ .hw_value = (_rateid), \ .flags = (_flags), \ } /* * NOTE: When changing this, sync with xmit.c's * b43_plcp_get_bitrate_idx_* functions! */ static struct ieee80211_rate __b43_ratetable[] = { RATETAB_ENT(B43_CCK_RATE_1MB, 0), RATETAB_ENT(B43_CCK_RATE_2MB, IEEE80211_RATE_SHORT_PREAMBLE), RATETAB_ENT(B43_CCK_RATE_5MB, IEEE80211_RATE_SHORT_PREAMBLE), RATETAB_ENT(B43_CCK_RATE_11MB, IEEE80211_RATE_SHORT_PREAMBLE), RATETAB_ENT(B43_OFDM_RATE_6MB, 0), RATETAB_ENT(B43_OFDM_RATE_9MB, 0), RATETAB_ENT(B43_OFDM_RATE_12MB, 0), RATETAB_ENT(B43_OFDM_RATE_18MB, 0), RATETAB_ENT(B43_OFDM_RATE_24MB, 0), RATETAB_ENT(B43_OFDM_RATE_36MB, 0), RATETAB_ENT(B43_OFDM_RATE_48MB, 0), RATETAB_ENT(B43_OFDM_RATE_54MB, 0), }; #define b43_a_ratetable (__b43_ratetable + 4) #define b43_a_ratetable_size 8 #define b43_b_ratetable (__b43_ratetable + 0) #define b43_b_ratetable_size 4 #define b43_g_ratetable (__b43_ratetable + 0) #define b43_g_ratetable_size 12 #define CHAN4G(_channel, _freq, _flags) { \ .band = IEEE80211_BAND_2GHZ, \ .center_freq = (_freq), \ .hw_value = (_channel), \ .flags = (_flags), \ .max_antenna_gain = 0, \ .max_power = 30, \ } static struct ieee80211_channel b43_2ghz_chantable[] = { CHAN4G(1, 2412, 0), CHAN4G(2, 2417, 0), CHAN4G(3, 2422, 0), CHAN4G(4, 2427, 0), CHAN4G(5, 2432, 0), CHAN4G(6, 2437, 0), CHAN4G(7, 2442, 0), CHAN4G(8, 2447, 0), CHAN4G(9, 2452, 0), CHAN4G(10, 2457, 0), CHAN4G(11, 2462, 0), CHAN4G(12, 2467, 0), CHAN4G(13, 2472, 0), CHAN4G(14, 2484, 0), }; #undef CHAN4G #define CHAN5G(_channel, _flags) { \ .band = IEEE80211_BAND_5GHZ, \ .center_freq = 5000 + (5 * (_channel)), \ .hw_value = (_channel), \ .flags = (_flags), \ .max_antenna_gain = 0, \ .max_power = 30, \ } static struct ieee80211_channel b43_5ghz_nphy_chantable[] = { CHAN5G(32, 0), CHAN5G(34, 0), CHAN5G(36, 0), CHAN5G(38, 0), CHAN5G(40, 0), CHAN5G(42, 0), CHAN5G(44, 0), CHAN5G(46, 0), CHAN5G(48, 0), CHAN5G(50, 0), CHAN5G(52, 0), CHAN5G(54, 0), CHAN5G(56, 0), CHAN5G(58, 0), CHAN5G(60, 0), CHAN5G(62, 0), CHAN5G(64, 0), CHAN5G(66, 0), CHAN5G(68, 0), CHAN5G(70, 0), CHAN5G(72, 0), CHAN5G(74, 0), CHAN5G(76, 0), CHAN5G(78, 0), CHAN5G(80, 0), CHAN5G(82, 0), CHAN5G(84, 0), CHAN5G(86, 0), CHAN5G(88, 0), CHAN5G(90, 0), CHAN5G(92, 0), CHAN5G(94, 0), CHAN5G(96, 0), CHAN5G(98, 0), CHAN5G(100, 0), CHAN5G(102, 0), CHAN5G(104, 0), CHAN5G(106, 0), CHAN5G(108, 0), CHAN5G(110, 0), CHAN5G(112, 0), CHAN5G(114, 0), CHAN5G(116, 0), CHAN5G(118, 0), CHAN5G(120, 0), CHAN5G(122, 0), CHAN5G(124, 0), CHAN5G(126, 0), CHAN5G(128, 0), CHAN5G(130, 0), CHAN5G(132, 0), CHAN5G(134, 0), CHAN5G(136, 0), CHAN5G(138, 0), CHAN5G(140, 0), CHAN5G(142, 0), CHAN5G(144, 0), CHAN5G(145, 0), CHAN5G(146, 0), CHAN5G(147, 0), CHAN5G(148, 0), CHAN5G(149, 0), CHAN5G(150, 0), CHAN5G(151, 0), CHAN5G(152, 0), CHAN5G(153, 0), CHAN5G(154, 0), CHAN5G(155, 0), CHAN5G(156, 0), 
CHAN5G(157, 0), CHAN5G(158, 0), CHAN5G(159, 0), CHAN5G(160, 0), CHAN5G(161, 0), CHAN5G(162, 0), CHAN5G(163, 0), CHAN5G(164, 0), CHAN5G(165, 0), CHAN5G(166, 0), CHAN5G(168, 0), CHAN5G(170, 0), CHAN5G(172, 0), CHAN5G(174, 0), CHAN5G(176, 0), CHAN5G(178, 0), CHAN5G(180, 0), CHAN5G(182, 0), CHAN5G(184, 0), CHAN5G(186, 0), CHAN5G(188, 0), CHAN5G(190, 0), CHAN5G(192, 0), CHAN5G(194, 0), CHAN5G(196, 0), CHAN5G(198, 0), CHAN5G(200, 0), CHAN5G(202, 0), CHAN5G(204, 0), CHAN5G(206, 0), CHAN5G(208, 0), CHAN5G(210, 0), CHAN5G(212, 0), CHAN5G(214, 0), CHAN5G(216, 0), CHAN5G(218, 0), CHAN5G(220, 0), CHAN5G(222, 0), CHAN5G(224, 0), CHAN5G(226, 0), CHAN5G(228, 0), }; static struct ieee80211_channel b43_5ghz_aphy_chantable[] = { CHAN5G(34, 0), CHAN5G(36, 0), CHAN5G(38, 0), CHAN5G(40, 0), CHAN5G(42, 0), CHAN5G(44, 0), CHAN5G(46, 0), CHAN5G(48, 0), CHAN5G(52, 0), CHAN5G(56, 0), CHAN5G(60, 0), CHAN5G(64, 0), CHAN5G(100, 0), CHAN5G(104, 0), CHAN5G(108, 0), CHAN5G(112, 0), CHAN5G(116, 0), CHAN5G(120, 0), CHAN5G(124, 0), CHAN5G(128, 0), CHAN5G(132, 0), CHAN5G(136, 0), CHAN5G(140, 0), CHAN5G(149, 0), CHAN5G(153, 0), CHAN5G(157, 0), CHAN5G(161, 0), CHAN5G(165, 0), CHAN5G(184, 0), CHAN5G(188, 0), CHAN5G(192, 0), CHAN5G(196, 0), CHAN5G(200, 0), CHAN5G(204, 0), CHAN5G(208, 0), CHAN5G(212, 0), CHAN5G(216, 0), }; #undef CHAN5G static struct ieee80211_supported_band b43_band_5GHz_nphy = { .band = IEEE80211_BAND_5GHZ, .channels = b43_5ghz_nphy_chantable, .n_channels = ARRAY_SIZE(b43_5ghz_nphy_chantable), .bitrates = b43_a_ratetable, .n_bitrates = b43_a_ratetable_size, }; static struct ieee80211_supported_band b43_band_5GHz_aphy = { .band = IEEE80211_BAND_5GHZ, .channels = b43_5ghz_aphy_chantable, .n_channels = ARRAY_SIZE(b43_5ghz_aphy_chantable), .bitrates = b43_a_ratetable, .n_bitrates = b43_a_ratetable_size, }; static struct ieee80211_supported_band b43_band_2GHz = { .band = IEEE80211_BAND_2GHZ, .channels = b43_2ghz_chantable, .n_channels = ARRAY_SIZE(b43_2ghz_chantable), .bitrates = b43_g_ratetable, .n_bitrates = b43_g_ratetable_size, }; static void b43_wireless_core_exit(struct b43_wldev *dev); static int b43_wireless_core_init(struct b43_wldev *dev); static struct b43_wldev * b43_wireless_core_stop(struct b43_wldev *dev); static int b43_wireless_core_start(struct b43_wldev *dev); static int b43_ratelimit(struct b43_wl *wl) { if (!wl || !wl->current_dev) return 1; if (b43_status(wl->current_dev) < B43_STAT_STARTED) return 1; /* We are up and running. * Ratelimit the messages to avoid DoS over the net. */ return net_ratelimit(); } void b43info(struct b43_wl *wl, const char *fmt, ...) { va_list args; if (b43_modparam_verbose < B43_VERBOSITY_INFO) return; if (!b43_ratelimit(wl)) return; va_start(args, fmt); printk(KERN_INFO "b43-%s: ", (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); vprintk(fmt, args); va_end(args); } void b43err(struct b43_wl *wl, const char *fmt, ...) { va_list args; if (b43_modparam_verbose < B43_VERBOSITY_ERROR) return; if (!b43_ratelimit(wl)) return; va_start(args, fmt); printk(KERN_ERR "b43-%s ERROR: ", (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); vprintk(fmt, args); va_end(args); } void b43warn(struct b43_wl *wl, const char *fmt, ...) { va_list args; if (b43_modparam_verbose < B43_VERBOSITY_WARN) return; if (!b43_ratelimit(wl)) return; va_start(args, fmt); printk(KERN_WARNING "b43-%s warning: ", (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); vprintk(fmt, args); va_end(args); } void b43dbg(struct b43_wl *wl, const char *fmt, ...) 
{ va_list args; if (b43_modparam_verbose < B43_VERBOSITY_DEBUG) return; va_start(args, fmt); printk(KERN_DEBUG "b43-%s debug: ", (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); vprintk(fmt, args); va_end(args); } static void b43_ram_write(struct b43_wldev *dev, u16 offset, u32 val) { u32 macctl; B43_WARN_ON(offset % 4 != 0); macctl = b43_read32(dev, B43_MMIO_MACCTL); if (macctl & B43_MACCTL_BE) val = swab32(val); b43_write32(dev, B43_MMIO_RAM_CONTROL, offset); mmiowb(); b43_write32(dev, B43_MMIO_RAM_DATA, val); } static inline void b43_shm_control_word(struct b43_wldev *dev, u16 routing, u16 offset) { u32 control; /* "offset" is the WORD offset. */ control = routing; control <<= 16; control |= offset; b43_write32(dev, B43_MMIO_SHM_CONTROL, control); } u32 b43_shm_read32(struct b43_wldev *dev, u16 routing, u16 offset) { u32 ret; if (routing == B43_SHM_SHARED) { B43_WARN_ON(offset & 0x0001); if (offset & 0x0003) { /* Unaligned access */ b43_shm_control_word(dev, routing, offset >> 2); ret = b43_read16(dev, B43_MMIO_SHM_DATA_UNALIGNED); b43_shm_control_word(dev, routing, (offset >> 2) + 1); ret |= ((u32)b43_read16(dev, B43_MMIO_SHM_DATA)) << 16; goto out; } offset >>= 2; } b43_shm_control_word(dev, routing, offset); ret = b43_read32(dev, B43_MMIO_SHM_DATA); out: return ret; } u16 b43_shm_read16(struct b43_wldev *dev, u16 routing, u16 offset) { u16 ret; if (routing == B43_SHM_SHARED) { B43_WARN_ON(offset & 0x0001); if (offset & 0x0003) { /* Unaligned access */ b43_shm_control_word(dev, routing, offset >> 2); ret = b43_read16(dev, B43_MMIO_SHM_DATA_UNALIGNED); goto out; } offset >>= 2; } b43_shm_control_word(dev, routing, offset); ret = b43_read16(dev, B43_MMIO_SHM_DATA); out: return ret; } void b43_shm_write32(struct b43_wldev *dev, u16 routing, u16 offset, u32 value) { if (routing == B43_SHM_SHARED) { B43_WARN_ON(offset & 0x0001); if (offset & 0x0003) { /* Unaligned access */ b43_shm_control_word(dev, routing, offset >> 2); b43_write16(dev, B43_MMIO_SHM_DATA_UNALIGNED, value & 0xFFFF); b43_shm_control_word(dev, routing, (offset >> 2) + 1); b43_write16(dev, B43_MMIO_SHM_DATA, (value >> 16) & 0xFFFF); return; } offset >>= 2; } b43_shm_control_word(dev, routing, offset); b43_write32(dev, B43_MMIO_SHM_DATA, value); } void b43_shm_write16(struct b43_wldev *dev, u16 routing, u16 offset, u16 value) { if (routing == B43_SHM_SHARED) { B43_WARN_ON(offset & 0x0001); if (offset & 0x0003) { /* Unaligned access */ b43_shm_control_word(dev, routing, offset >> 2); b43_write16(dev, B43_MMIO_SHM_DATA_UNALIGNED, value); return; } offset >>= 2; } b43_shm_control_word(dev, routing, offset); b43_write16(dev, B43_MMIO_SHM_DATA, value); } /* Read HostFlags */ u64 b43_hf_read(struct b43_wldev *dev) { u64 ret; ret = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI); ret <<= 16; ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFMI); ret <<= 16; ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO); return ret; } /* Write HostFlags */ void b43_hf_write(struct b43_wldev *dev, u64 value) { u16 lo, mi, hi; lo = (value & 0x00000000FFFFULL); mi = (value & 0x0000FFFF0000ULL) >> 16; hi = (value & 0xFFFF00000000ULL) >> 32; b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO, lo); b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFMI, mi); b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI, hi); } /* Read the firmware capabilities bitmask (Opensource firmware only) */ static u16 b43_fwcapa_read(struct b43_wldev *dev) { B43_WARN_ON(!dev->fw.opensource); return b43_shm_read16(dev, 
B43_SHM_SHARED, B43_SHM_SH_FWCAPA); } void b43_tsf_read(struct b43_wldev *dev, u64 *tsf) { u32 low, high; B43_WARN_ON(dev->dev->id.revision < 3); /* The hardware guarantees us an atomic read, if we * read the low register first. */ low = b43_read32(dev, B43_MMIO_REV3PLUS_TSF_LOW); high = b43_read32(dev, B43_MMIO_REV3PLUS_TSF_HIGH); *tsf = high; *tsf <<= 32; *tsf |= low; } static void b43_time_lock(struct b43_wldev *dev) { u32 macctl; macctl = b43_read32(dev, B43_MMIO_MACCTL); macctl |= B43_MACCTL_TBTTHOLD; b43_write32(dev, B43_MMIO_MACCTL, macctl); /* Commit the write */ b43_read32(dev, B43_MMIO_MACCTL); } static void b43_time_unlock(struct b43_wldev *dev) { u32 macctl; macctl = b43_read32(dev, B43_MMIO_MACCTL); macctl &= ~B43_MACCTL_TBTTHOLD; b43_write32(dev, B43_MMIO_MACCTL, macctl); /* Commit the write */ b43_read32(dev, B43_MMIO_MACCTL); } static void b43_tsf_write_locked(struct b43_wldev *dev, u64 tsf) { u32 low, high; B43_WARN_ON(dev->dev->id.revision < 3); low = tsf; high = (tsf >> 32); /* The hardware guarantees us an atomic write, if we * write the low register first. */ b43_write32(dev, B43_MMIO_REV3PLUS_TSF_LOW, low); mmiowb(); b43_write32(dev, B43_MMIO_REV3PLUS_TSF_HIGH, high); mmiowb(); } void b43_tsf_write(struct b43_wldev *dev, u64 tsf) { b43_time_lock(dev); b43_tsf_write_locked(dev, tsf); b43_time_unlock(dev); } static void b43_macfilter_set(struct b43_wldev *dev, u16 offset, const u8 *mac) { static const u8 zero_addr[ETH_ALEN] = { 0 }; u16 data; if (!mac) mac = zero_addr; offset |= 0x0020; b43_write16(dev, B43_MMIO_MACFILTER_CONTROL, offset); data = mac[0]; data |= mac[1] << 8; b43_write16(dev, B43_MMIO_MACFILTER_DATA, data); data = mac[2]; data |= mac[3] << 8; b43_write16(dev, B43_MMIO_MACFILTER_DATA, data); data = mac[4]; data |= mac[5] << 8; b43_write16(dev, B43_MMIO_MACFILTER_DATA, data); } static void b43_write_mac_bssid_templates(struct b43_wldev *dev) { const u8 *mac; const u8 *bssid; u8 mac_bssid[ETH_ALEN * 2]; int i; u32 tmp; bssid = dev->wl->bssid; mac = dev->wl->mac_addr; b43_macfilter_set(dev, B43_MACFILTER_BSSID, bssid); memcpy(mac_bssid, mac, ETH_ALEN); memcpy(mac_bssid + ETH_ALEN, bssid, ETH_ALEN); /* Write our MAC address and BSSID to template ram */ for (i = 0; i < ARRAY_SIZE(mac_bssid); i += sizeof(u32)) { tmp = (u32) (mac_bssid[i + 0]); tmp |= (u32) (mac_bssid[i + 1]) << 8; tmp |= (u32) (mac_bssid[i + 2]) << 16; tmp |= (u32) (mac_bssid[i + 3]) << 24; b43_ram_write(dev, 0x20 + i, tmp); } } static void b43_upload_card_macaddress(struct b43_wldev *dev) { b43_write_mac_bssid_templates(dev); b43_macfilter_set(dev, B43_MACFILTER_SELF, dev->wl->mac_addr); } static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time) { /* slot_time is in usec. */ /* This test used to exit for all but a G PHY. */ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) return; b43_write16(dev, B43_MMIO_IFSSLOT, 510 + slot_time); /* Shared memory location 0x0010 is the slot time and should be * set to slot_time; however, this register is initially 0 and changing * the value adversely affects the transmit rate for BCM4311 * devices. 
Until this behavior is understood, delete this step
	 *
	 * b43_shm_write16(dev, B43_SHM_SHARED, 0x0010, slot_time);
	 */
}

static void b43_short_slot_timing_enable(struct b43_wldev *dev)
{
	b43_set_slot_time(dev, 9);
}

static void b43_short_slot_timing_disable(struct b43_wldev *dev)
{
	b43_set_slot_time(dev, 20);
}

/* DummyTransmission function, as documented on
 * http://bcm-v4.sipsolutions.net/802.11/DummyTransmission
 */
void b43_dummy_transmission(struct b43_wldev *dev, bool ofdm, bool pa_on)
{
	struct b43_phy *phy = &dev->phy;
	unsigned int i, max_loop;
	u16 value;
	u32 buffer[5] = {
		0x00000000,
		0x00D40000,
		0x00000000,
		0x01000000,
		0x00000000,
	};

	if (ofdm) {
		max_loop = 0x1E;
		buffer[0] = 0x000201CC;
	} else {
		max_loop = 0xFA;
		buffer[0] = 0x000B846E;
	}

	for (i = 0; i < 5; i++)
		b43_ram_write(dev, i * 4, buffer[i]);

	b43_write16(dev, 0x0568, 0x0000);
	if (dev->dev->id.revision < 11)
		b43_write16(dev, 0x07C0, 0x0000);
	else
		b43_write16(dev, 0x07C0, 0x0100);
	value = (ofdm ? 0x41 : 0x40);
	b43_write16(dev, 0x050C, value);
	if ((phy->type == B43_PHYTYPE_N) || (phy->type == B43_PHYTYPE_LP))
		b43_write16(dev, 0x0514, 0x1A02);
	b43_write16(dev, 0x0508, 0x0000);
	b43_write16(dev, 0x050A, 0x0000);
	b43_write16(dev, 0x054C, 0x0000);
	b43_write16(dev, 0x056A, 0x0014);
	b43_write16(dev, 0x0568, 0x0826);
	b43_write16(dev, 0x0500, 0x0000);
	if (!pa_on && (phy->type == B43_PHYTYPE_N)) {
		//SPEC TODO
	}

	switch (phy->type) {
	case B43_PHYTYPE_N:
		b43_write16(dev, 0x0502, 0x00D0);
		break;
	case B43_PHYTYPE_LP:
		b43_write16(dev, 0x0502, 0x0050);
		break;
	default:
		b43_write16(dev, 0x0502, 0x0030);
	}

	if (phy->radio_ver == 0x2050 && phy->radio_rev <= 0x5)
		b43_radio_write16(dev, 0x0051, 0x0017);
	for (i = 0x00; i < max_loop; i++) {
		value = b43_read16(dev, 0x050E);
		if (value & 0x0080)
			break;
		udelay(10);
	}
	for (i = 0x00; i < 0x0A; i++) {
		value = b43_read16(dev, 0x050E);
		if (value & 0x0400)
			break;
		udelay(10);
	}
	for (i = 0x00; i < 0x19; i++) {
		value = b43_read16(dev, 0x0690);
		if (!(value & 0x0100))
			break;
		udelay(10);
	}
	if (phy->radio_ver == 0x2050 && phy->radio_rev <= 0x5)
		b43_radio_write16(dev, 0x0051, 0x0037);
}

static void key_write(struct b43_wldev *dev,
		      u8 index, u8 algorithm, const u8 *key)
{
	unsigned int i;
	u32 offset;
	u16 value;
	u16 kidx;

	/* Key index/algo block */
	kidx = b43_kidx_to_fw(dev, index);
	value = ((kidx << 4) | algorithm);
	b43_shm_write16(dev, B43_SHM_SHARED,
			B43_SHM_SH_KEYIDXBLOCK + (kidx * 2), value);

	/* Write the key to the Key Table Pointer offset */
	offset = dev->ktp + (index * B43_SEC_KEYSIZE);
	for (i = 0; i < B43_SEC_KEYSIZE; i += 2) {
		value = key[i];
		value |= (u16) (key[i + 1]) << 8;
		b43_shm_write16(dev, B43_SHM_SHARED, offset + i, value);
	}
}

static void keymac_write(struct b43_wldev *dev, u8 index, const u8 *addr)
{
	u32 addrtmp[2] = { 0, 0, };
	u8 pairwise_keys_start = B43_NR_GROUP_KEYS * 2;

	if (b43_new_kidx_api(dev))
		pairwise_keys_start = B43_NR_GROUP_KEYS;

	B43_WARN_ON(index < pairwise_keys_start);
	/* We have four default TX keys and possibly four default RX keys.
	 * Physical mac 0 is mapped to physical key 4 or 8, depending
	 * on the firmware version.
	 * So we must adjust the index here.
	 */
	index -= pairwise_keys_start;
	B43_WARN_ON(index >= B43_NR_PAIRWISE_KEYS);

	if (addr) {
		addrtmp[0] = addr[0];
		addrtmp[0] |= ((u32) (addr[1]) << 8);
		addrtmp[0] |= ((u32) (addr[2]) << 16);
		addrtmp[0] |= ((u32) (addr[3]) << 24);
		addrtmp[1] = addr[4];
		addrtmp[1] |= ((u32) (addr[5]) << 8);
	}

	/* Receive match transmitter address (RCMTA) mechanism */
	b43_shm_write32(dev, B43_SHM_RCMTA,
			(index * 2) + 0, addrtmp[0]);
	b43_shm_write16(dev, B43_SHM_RCMTA,
			(index * 2) + 1, addrtmp[1]);
}

/* The ucode uses the phase1 key together with the TEK to decrypt rx packets.
 * When a packet is received, its iv32 is checked against the stored one:
 * - If it doesn't match, the packet is returned without modification (and
 *   software decryption can be done). That's what happens when iv16 wraps.
 * - If it does match, the rc4 key is computed, and decryption is tried.
 *   Either it succeeds and B43_RX_MAC_DEC is returned,
 *   or it fails and B43_RX_MAC_DEC|B43_RX_MAC_DECERR is returned
 *   and the packet is not usable (it got modified by the ucode).
 * So in order to never get B43_RX_MAC_DECERR, we should provide
 * an iv32 and phase1key that match. Because we drop packets in case of
 * B43_RX_MAC_DECERR, if we have a correct iv32 but a wrong phase1key, all
 * packets will be lost without the higher layer knowing (i.e. no resync is
 * possible until the next wrap).
 *
 * NOTE: this should support 50 keys, like the RCMTA, because
 * (B43_SHM_SH_KEYIDXBLOCK - B43_SHM_SH_TKIPTSCTTAK)/14 = 50
 */
static void rx_tkip_phase1_write(struct b43_wldev *dev, u8 index, u32 iv32,
		u16 *phase1key)
{
	unsigned int i;
	u32 offset;
	u8 pairwise_keys_start = B43_NR_GROUP_KEYS * 2;

	if (!modparam_hwtkip)
		return;

	if (b43_new_kidx_api(dev))
		pairwise_keys_start = B43_NR_GROUP_KEYS;

	B43_WARN_ON(index < pairwise_keys_start);
	/* We have four default TX keys and possibly four default RX keys.
	 * Physical mac 0 is mapped to physical key 4 or 8, depending
	 * on the firmware version.
	 * So we must adjust the index here.
	 */
	index -= pairwise_keys_start;
	B43_WARN_ON(index >= B43_NR_PAIRWISE_KEYS);

	if (b43_debug(dev, B43_DBG_KEYS)) {
		b43dbg(dev->wl, "rx_tkip_phase1_write : idx 0x%x, iv32 0x%x\n",
				index, iv32);
	}
	/* Write the key to the RX tkip shared mem */
	offset = B43_SHM_SH_TKIPTSCTTAK + index * (10 + 4);
	for (i = 0; i < 10; i += 2) {
		b43_shm_write16(dev, B43_SHM_SHARED, offset + i,
				phase1key ? phase1key[i / 2] : 0);
	}
	b43_shm_write16(dev, B43_SHM_SHARED, offset + i, iv32);
	b43_shm_write16(dev, B43_SHM_SHARED, offset + i + 2, iv32 >> 16);
}

static void b43_op_update_tkip_key(struct ieee80211_hw *hw,
			struct ieee80211_key_conf *keyconf, const u8 *addr,
			u32 iv32, u16 *phase1key)
{
	struct b43_wl *wl = hw_to_b43_wl(hw);
	struct b43_wldev *dev;
	int index = keyconf->hw_key_idx;

	if (B43_WARN_ON(!modparam_hwtkip))
		return;

	mutex_lock(&wl->mutex);

	dev = wl->current_dev;
	if (!dev || b43_status(dev) < B43_STAT_INITIALIZED)
		goto out_unlock;

	keymac_write(dev, index, NULL);	/* First zero out mac to avoid race */

	rx_tkip_phase1_write(dev, index, iv32, phase1key);
	keymac_write(dev, index, addr);

out_unlock:
	mutex_unlock(&wl->mutex);
}

static void do_key_write(struct b43_wldev *dev,
			 u8 index, u8 algorithm,
			 const u8 *key, size_t key_len, const u8 *mac_addr)
{
	u8 buf[B43_SEC_KEYSIZE] = { 0, };
	u8 pairwise_keys_start = B43_NR_GROUP_KEYS * 2;

	if (b43_new_kidx_api(dev))
		pairwise_keys_start = B43_NR_GROUP_KEYS;

	B43_WARN_ON(index >= ARRAY_SIZE(dev->key));
	B43_WARN_ON(key_len > B43_SEC_KEYSIZE);

	if (index >= pairwise_keys_start)
		keymac_write(dev, index, NULL);	/* First zero out mac.
*/ if (algorithm == B43_SEC_ALGO_TKIP) { /* * We should provide an initial iv32, phase1key pair. * We could start with iv32=0 and compute the corresponding * phase1key, but this means calling ieee80211_get_tkip_key * with a fake skb (or export other tkip function). * Because we are lazy we hope iv32 won't start with * 0xffffffff and let's b43_op_update_tkip_key provide a * correct pair. */ rx_tkip_phase1_write(dev, index, 0xffffffff, (u16*)buf); } else if (index >= pairwise_keys_start) /* clear it */ rx_tkip_phase1_write(dev, index, 0, NULL); if (key) memcpy(buf, key, key_len); key_write(dev, index, algorithm, buf); if (index >= pairwise_keys_start) keymac_write(dev, index, mac_addr); dev->key[index].algorithm = algorithm; } static int b43_key_write(struct b43_wldev *dev, int index, u8 algorithm, const u8 *key, size_t key_len, const u8 *mac_addr, struct ieee80211_key_conf *keyconf) { int i; int pairwise_keys_start; /* For ALG_TKIP the key is encoded as a 256-bit (32 byte) data block: * - Temporal Encryption Key (128 bits) * - Temporal Authenticator Tx MIC Key (64 bits) * - Temporal Authenticator Rx MIC Key (64 bits) * * Hardware only store TEK */ if (algorithm == B43_SEC_ALGO_TKIP && key_len == 32) key_len = 16; if (key_len > B43_SEC_KEYSIZE) return -EINVAL; for (i = 0; i < ARRAY_SIZE(dev->key); i++) { /* Check that we don't already have this key. */ B43_WARN_ON(dev->key[i].keyconf == keyconf); } if (index < 0) { /* Pairwise key. Get an empty slot for the key. */ if (b43_new_kidx_api(dev)) pairwise_keys_start = B43_NR_GROUP_KEYS; else pairwise_keys_start = B43_NR_GROUP_KEYS * 2; for (i = pairwise_keys_start; i < pairwise_keys_start + B43_NR_PAIRWISE_KEYS; i++) { B43_WARN_ON(i >= ARRAY_SIZE(dev->key)); if (!dev->key[i].keyconf) { /* found empty */ index = i; break; } } if (index < 0) { b43warn(dev->wl, "Out of hardware key memory\n"); return -ENOSPC; } } else B43_WARN_ON(index > 3); do_key_write(dev, index, algorithm, key, key_len, mac_addr); if ((index <= 3) && !b43_new_kidx_api(dev)) { /* Default RX key */ B43_WARN_ON(mac_addr); do_key_write(dev, index + 4, algorithm, key, key_len, NULL); } keyconf->hw_key_idx = index; dev->key[index].keyconf = keyconf; return 0; } static int b43_key_clear(struct b43_wldev *dev, int index) { if (B43_WARN_ON((index < 0) || (index >= ARRAY_SIZE(dev->key)))) return -EINVAL; do_key_write(dev, index, B43_SEC_ALGO_NONE, NULL, B43_SEC_KEYSIZE, NULL); if ((index <= 3) && !b43_new_kidx_api(dev)) { do_key_write(dev, index + 4, B43_SEC_ALGO_NONE, NULL, B43_SEC_KEYSIZE, NULL); } dev->key[index].keyconf = NULL; return 0; } static void b43_clear_keys(struct b43_wldev *dev) { int i, count; if (b43_new_kidx_api(dev)) count = B43_NR_GROUP_KEYS + B43_NR_PAIRWISE_KEYS; else count = B43_NR_GROUP_KEYS * 2 + B43_NR_PAIRWISE_KEYS; for (i = 0; i < count; i++) b43_key_clear(dev, i); } static void b43_dump_keymemory(struct b43_wldev *dev) { unsigned int i, index, count, offset, pairwise_keys_start; u8 mac[ETH_ALEN]; u16 algo; u32 rcmta0; u16 rcmta1; u64 hf; struct b43_key *key; if (!b43_debug(dev, B43_DBG_KEYS)) return; hf = b43_hf_read(dev); b43dbg(dev->wl, "Hardware key memory dump: USEDEFKEYS=%u\n", !!(hf & B43_HF_USEDEFKEYS)); if (b43_new_kidx_api(dev)) { pairwise_keys_start = B43_NR_GROUP_KEYS; count = B43_NR_GROUP_KEYS + B43_NR_PAIRWISE_KEYS; } else { pairwise_keys_start = B43_NR_GROUP_KEYS * 2; count = B43_NR_GROUP_KEYS * 2 + B43_NR_PAIRWISE_KEYS; } for (index = 0; index < count; index++) { key = &(dev->key[index]); printk(KERN_DEBUG "Key slot %02u: %s", index, 
(key->keyconf == NULL) ? " " : "*"); offset = dev->ktp + (index * B43_SEC_KEYSIZE); for (i = 0; i < B43_SEC_KEYSIZE; i += 2) { u16 tmp = b43_shm_read16(dev, B43_SHM_SHARED, offset + i); printk("%02X%02X", (tmp & 0xFF), ((tmp >> 8) & 0xFF)); } algo = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_KEYIDXBLOCK + (index * 2)); printk(" Algo: %04X/%02X", algo, key->algorithm); if (index >= pairwise_keys_start) { if (key->algorithm == B43_SEC_ALGO_TKIP) { printk(" TKIP: "); offset = B43_SHM_SH_TKIPTSCTTAK + (index - 4) * (10 + 4); for (i = 0; i < 14; i += 2) { u16 tmp = b43_shm_read16(dev, B43_SHM_SHARED, offset + i); printk("%02X%02X", (tmp & 0xFF), ((tmp >> 8) & 0xFF)); } } rcmta0 = b43_shm_read32(dev, B43_SHM_RCMTA, ((index - pairwise_keys_start) * 2) + 0); rcmta1 = b43_shm_read16(dev, B43_SHM_RCMTA, ((index - pairwise_keys_start) * 2) + 1); *((__le32 *)(&mac[0])) = cpu_to_le32(rcmta0); *((__le16 *)(&mac[4])) = cpu_to_le16(rcmta1); printk(" MAC: %pM", mac); } else printk(" DEFAULT KEY"); printk("\n"); } } void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags) { u32 macctl; u16 ucstat; bool hwps; bool awake; int i; B43_WARN_ON((ps_flags & B43_PS_ENABLED) && (ps_flags & B43_PS_DISABLED)); B43_WARN_ON((ps_flags & B43_PS_AWAKE) && (ps_flags & B43_PS_ASLEEP)); if (ps_flags & B43_PS_ENABLED) { hwps = 1; } else if (ps_flags & B43_PS_DISABLED) { hwps = 0; } else { //TODO: If powersave is not off and FIXME is not set and we are not in adhoc // and thus is not an AP and we are associated, set bit 25 } if (ps_flags & B43_PS_AWAKE) { awake = 1; } else if (ps_flags & B43_PS_ASLEEP) { awake = 0; } else { //TODO: If the device is awake or this is an AP, or we are scanning, or FIXME, // or we are associated, or FIXME, or the latest PS-Poll packet sent was // successful, set bit26 } /* FIXME: For now we force awake-on and hwps-off */ hwps = 0; awake = 1; macctl = b43_read32(dev, B43_MMIO_MACCTL); if (hwps) macctl |= B43_MACCTL_HWPS; else macctl &= ~B43_MACCTL_HWPS; if (awake) macctl |= B43_MACCTL_AWAKE; else macctl &= ~B43_MACCTL_AWAKE; b43_write32(dev, B43_MMIO_MACCTL, macctl); /* Commit write */ b43_read32(dev, B43_MMIO_MACCTL); if (awake && dev->dev->id.revision >= 5) { /* Wait for the microcode to wake up. */ for (i = 0; i < 100; i++) { ucstat = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_UCODESTAT); if (ucstat != B43_SHM_SH_UCODESTAT_SLEEP) break; udelay(10); } } } void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags) { u32 tmslow; u32 macctl; flags |= B43_TMSLOW_PHYCLKEN; flags |= B43_TMSLOW_PHYRESET; ssb_device_enable(dev->dev, flags); msleep(2); /* Wait for the PLL to turn on. */ /* Now take the PHY out of Reset again */ tmslow = ssb_read32(dev->dev, SSB_TMSLOW); tmslow |= SSB_TMSLOW_FGC; tmslow &= ~B43_TMSLOW_PHYRESET; ssb_write32(dev->dev, SSB_TMSLOW, tmslow); ssb_read32(dev->dev, SSB_TMSLOW); /* flush */ msleep(1); tmslow &= ~SSB_TMSLOW_FGC; ssb_write32(dev->dev, SSB_TMSLOW, tmslow); ssb_read32(dev->dev, SSB_TMSLOW); /* flush */ msleep(1); /* Turn Analog ON, but only if we already know the PHY-type. * This protects against very early setup where we don't know the * PHY-type, yet. wireless_core_reset will be called once again later, * when we know the PHY-type. 
*/ if (dev->phy.ops) dev->phy.ops->switch_analog(dev, 1); macctl = b43_read32(dev, B43_MMIO_MACCTL); macctl &= ~B43_MACCTL_GMODE; if (flags & B43_TMSLOW_GMODE) macctl |= B43_MACCTL_GMODE; macctl |= B43_MACCTL_IHR_ENABLED; b43_write32(dev, B43_MMIO_MACCTL, macctl); } static void handle_irq_transmit_status(struct b43_wldev *dev) { u32 v0, v1; u16 tmp; struct b43_txstatus stat; while (1) { v0 = b43_read32(dev, B43_MMIO_XMITSTAT_0); if (!(v0 & 0x00000001)) break; v1 = b43_read32(dev, B43_MMIO_XMITSTAT_1); stat.cookie = (v0 >> 16); stat.seq = (v1 & 0x0000FFFF); stat.phy_stat = ((v1 & 0x00FF0000) >> 16); tmp = (v0 & 0x0000FFFF); stat.frame_count = ((tmp & 0xF000) >> 12); stat.rts_count = ((tmp & 0x0F00) >> 8); stat.supp_reason = ((tmp & 0x001C) >> 2); stat.pm_indicated = !!(tmp & 0x0080); stat.intermediate = !!(tmp & 0x0040); stat.for_ampdu = !!(tmp & 0x0020); stat.acked = !!(tmp & 0x0002); b43_handle_txstatus(dev, &stat); } } static void drain_txstatus_queue(struct b43_wldev *dev) { u32 dummy; if (dev->dev->id.revision < 5) return; /* Read all entries from the microcode TXstatus FIFO * and throw them away. */ while (1) { dummy = b43_read32(dev, B43_MMIO_XMITSTAT_0); if (!(dummy & 0x00000001)) break; dummy = b43_read32(dev, B43_MMIO_XMITSTAT_1); } } static u32 b43_jssi_read(struct b43_wldev *dev) { u32 val = 0; val = b43_shm_read16(dev, B43_SHM_SHARED, 0x08A); val <<= 16; val |= b43_shm_read16(dev, B43_SHM_SHARED, 0x088); return val; } static void b43_jssi_write(struct b43_wldev *dev, u32 jssi) { b43_shm_write16(dev, B43_SHM_SHARED, 0x088, (jssi & 0x0000FFFF)); b43_shm_write16(dev, B43_SHM_SHARED, 0x08A, (jssi & 0xFFFF0000) >> 16); } static void b43_generate_noise_sample(struct b43_wldev *dev) { b43_jssi_write(dev, 0x7F7F7F7F); b43_write32(dev, B43_MMIO_MACCMD, b43_read32(dev, B43_MMIO_MACCMD) | B43_MACCMD_BGNOISE); } static void b43_calculate_link_quality(struct b43_wldev *dev) { /* Top half of Link Quality calculation. */ if (dev->phy.type != B43_PHYTYPE_G) return; if (dev->noisecalc.calculation_running) return; dev->noisecalc.calculation_running = 1; dev->noisecalc.nr_samples = 0; b43_generate_noise_sample(dev); } static void handle_irq_noise(struct b43_wldev *dev) { struct b43_phy_g *phy = dev->phy.g; u16 tmp; u8 noise[4]; u8 i, j; s32 average; /* Bottom half of Link Quality calculation. */ if (dev->phy.type != B43_PHYTYPE_G) return; /* Possible race condition: It might be possible that the user * changed to a different channel in the meantime since we * started the calculation. We ignore that fact, since it's * not really that much of a problem. The background noise is * an estimation only anyway. Slightly wrong results will get damped * by the averaging of the 8 sample rounds. Additionally the * value is shortlived. So it will be replaced by the next noise * calculation round soon. */ B43_WARN_ON(!dev->noisecalc.calculation_running); *((__le32 *)noise) = cpu_to_le32(b43_jssi_read(dev)); if (noise[0] == 0x7F || noise[1] == 0x7F || noise[2] == 0x7F || noise[3] == 0x7F) goto generate_new; /* Get the noise samples. 
*/ B43_WARN_ON(dev->noisecalc.nr_samples >= 8); i = dev->noisecalc.nr_samples; noise[0] = clamp_val(noise[0], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); noise[1] = clamp_val(noise[1], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); noise[2] = clamp_val(noise[2], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); noise[3] = clamp_val(noise[3], 0, ARRAY_SIZE(phy->nrssi_lt) - 1); dev->noisecalc.samples[i][0] = phy->nrssi_lt[noise[0]]; dev->noisecalc.samples[i][1] = phy->nrssi_lt[noise[1]]; dev->noisecalc.samples[i][2] = phy->nrssi_lt[noise[2]]; dev->noisecalc.samples[i][3] = phy->nrssi_lt[noise[3]]; dev->noisecalc.nr_samples++; if (dev->noisecalc.nr_samples == 8) { /* Calculate the Link Quality by the noise samples. */ average = 0; for (i = 0; i < 8; i++) { for (j = 0; j < 4; j++) average += dev->noisecalc.samples[i][j]; } average /= (8 * 4); average *= 125; average += 64; average /= 128; tmp = b43_shm_read16(dev, B43_SHM_SHARED, 0x40C); tmp = (tmp / 128) & 0x1F; if (tmp >= 8) average += 2; else average -= 25; if (tmp == 8) average -= 72; else average -= 48; dev->stats.link_noise = average; dev->noisecalc.calculation_running = 0; return; } generate_new: b43_generate_noise_sample(dev); } static void handle_irq_tbtt_indication(struct b43_wldev *dev) { if (b43_is_mode(dev->wl, NL80211_IFTYPE_AP)) { ///TODO: PS TBTT } else { if (1 /*FIXME: the last PSpoll frame was sent successfully */ ) b43_power_saving_ctl_bits(dev, 0); } if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC)) dev->dfq_valid = 1; } static void handle_irq_atim_end(struct b43_wldev *dev) { if (dev->dfq_valid) { b43_write32(dev, B43_MMIO_MACCMD, b43_read32(dev, B43_MMIO_MACCMD) | B43_MACCMD_DFQ_VALID); dev->dfq_valid = 0; } } static void handle_irq_pmq(struct b43_wldev *dev) { u32 tmp; //TODO: AP mode. while (1) { tmp = b43_read32(dev, B43_MMIO_PS_STATUS); if (!(tmp & 0x00000008)) break; } /* 16bit write is odd, but correct. */ b43_write16(dev, B43_MMIO_PS_STATUS, 0x0002); } static void b43_write_template_common(struct b43_wldev *dev, const u8 *data, u16 size, u16 ram_offset, u16 shm_size_offset, u8 rate) { u32 i, tmp; struct b43_plcp_hdr4 plcp; plcp.data = 0; b43_generate_plcp_hdr(&plcp, size + FCS_LEN, rate); b43_ram_write(dev, ram_offset, le32_to_cpu(plcp.data)); ram_offset += sizeof(u32); /* The PLCP is 6 bytes long, but we only wrote 4 bytes, yet. * So leave the first two bytes of the next write blank. */ tmp = (u32) (data[0]) << 16; tmp |= (u32) (data[1]) << 24; b43_ram_write(dev, ram_offset, tmp); ram_offset += sizeof(u32); for (i = 2; i < size; i += sizeof(u32)) { tmp = (u32) (data[i + 0]); if (i + 1 < size) tmp |= (u32) (data[i + 1]) << 8; if (i + 2 < size) tmp |= (u32) (data[i + 2]) << 16; if (i + 3 < size) tmp |= (u32) (data[i + 3]) << 24; b43_ram_write(dev, ram_offset + i - 2, tmp); } b43_shm_write16(dev, B43_SHM_SHARED, shm_size_offset, size + sizeof(struct b43_plcp_hdr6)); } /* Check if the use of the antenna that ieee80211 told us to * use is possible. This will fall back to DEFAULT. * "antenna_nr" is the antenna identifier we got from ieee80211. */ u8 b43_ieee80211_antenna_sanitize(struct b43_wldev *dev, u8 antenna_nr) { u8 antenna_mask; if (antenna_nr == 0) { /* Zero means "use default antenna". That's always OK. */ return 0; } /* Get the mask of available antennas. */ if (dev->phy.gmode) antenna_mask = dev->dev->bus->sprom.ant_available_bg; else antenna_mask = dev->dev->bus->sprom.ant_available_a; if (!(antenna_mask & (1 << (antenna_nr - 1)))) { /* This antenna is not available. Fall back to default. 
*/ return 0; } return antenna_nr; } /* Convert a b43 antenna number value to the PHY TX control value. */ static u16 b43_antenna_to_phyctl(int antenna) { switch (antenna) { case B43_ANTENNA0: return B43_TXH_PHY_ANT0; case B43_ANTENNA1: return B43_TXH_PHY_ANT1; case B43_ANTENNA2: return B43_TXH_PHY_ANT2; case B43_ANTENNA3: return B43_TXH_PHY_ANT3; case B43_ANTENNA_AUTO0: case B43_ANTENNA_AUTO1: return B43_TXH_PHY_ANT01AUTO; } B43_WARN_ON(1); return 0; } static void b43_write_beacon_template(struct b43_wldev *dev, u16 ram_offset, u16 shm_size_offset) { unsigned int i, len, variable_len; const struct ieee80211_mgmt *bcn; const u8 *ie; bool tim_found = 0; unsigned int rate; u16 ctl; int antenna; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(dev->wl->current_beacon); bcn = (const struct ieee80211_mgmt *)(dev->wl->current_beacon->data); len = min((size_t) dev->wl->current_beacon->len, 0x200 - sizeof(struct b43_plcp_hdr6)); rate = ieee80211_get_tx_rate(dev->wl->hw, info)->hw_value; b43_write_template_common(dev, (const u8 *)bcn, len, ram_offset, shm_size_offset, rate); /* Write the PHY TX control parameters. */ antenna = B43_ANTENNA_DEFAULT; antenna = b43_antenna_to_phyctl(antenna); ctl = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_BEACPHYCTL); /* We can't send beacons with short preamble. Would get PHY errors. */ ctl &= ~B43_TXH_PHY_SHORTPRMBL; ctl &= ~B43_TXH_PHY_ANT; ctl &= ~B43_TXH_PHY_ENC; ctl |= antenna; if (b43_is_cck_rate(rate)) ctl |= B43_TXH_PHY_ENC_CCK; else ctl |= B43_TXH_PHY_ENC_OFDM; b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_BEACPHYCTL, ctl); /* Find the position of the TIM and the DTIM_period value * and write them to SHM. */ ie = bcn->u.beacon.variable; variable_len = len - offsetof(struct ieee80211_mgmt, u.beacon.variable); for (i = 0; i < variable_len - 2; ) { uint8_t ie_id, ie_len; ie_id = ie[i]; ie_len = ie[i + 1]; if (ie_id == 5) { u16 tim_position; u16 dtim_period; /* This is the TIM Information Element */ /* Check whether the ie_len is in the beacon data range. */ if (variable_len < ie_len + 2 + i) break; /* A valid TIM is at least 4 bytes long. */ if (ie_len < 4) break; tim_found = 1; tim_position = sizeof(struct b43_plcp_hdr6); tim_position += offsetof(struct ieee80211_mgmt, u.beacon.variable); tim_position += i; dtim_period = ie[i + 3]; b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_TIMBPOS, tim_position); b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_DTIMPER, dtim_period); break; } i += ie_len + 2; } if (!tim_found) { /* * If ucode wants to modify TIM do it behind the beacon, this * will happen, for example, when doing mesh networking. */ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_TIMBPOS, len + sizeof(struct b43_plcp_hdr6)); b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_DTIMPER, 0); } b43dbg(dev->wl, "Updated beacon template at 0x%x\n", ram_offset); } static void b43_upload_beacon0(struct b43_wldev *dev) { struct b43_wl *wl = dev->wl; if (wl->beacon0_uploaded) return; b43_write_beacon_template(dev, 0x68, 0x18); wl->beacon0_uploaded = 1; } static void b43_upload_beacon1(struct b43_wldev *dev) { struct b43_wl *wl = dev->wl; if (wl->beacon1_uploaded) return; b43_write_beacon_template(dev, 0x468, 0x1A); wl->beacon1_uploaded = 1; } static void handle_irq_beacon(struct b43_wldev *dev) { struct b43_wl *wl = dev->wl; u32 cmd, beacon0_valid, beacon1_valid; if (!b43_is_mode(wl, NL80211_IFTYPE_AP) && !b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT)) return; /* This is the bottom half of the asynchronous beacon update. */ /* Ignore interrupt in the future. 
	 */
	dev->irq_mask &= ~B43_IRQ_BEACON;

	cmd = b43_read32(dev, B43_MMIO_MACCMD);
	beacon0_valid = (cmd & B43_MACCMD_BEACON0_VALID);
	beacon1_valid = (cmd & B43_MACCMD_BEACON1_VALID);

	/* Schedule interrupt manually, if busy. */
	if (beacon0_valid && beacon1_valid) {
		b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, B43_IRQ_BEACON);
		dev->irq_mask |= B43_IRQ_BEACON;
		return;
	}

	if (unlikely(wl->beacon_templates_virgin)) {
		/* We never uploaded a beacon before.
		 * Upload both templates now, but only mark one valid. */
		wl->beacon_templates_virgin = 0;
		b43_upload_beacon0(dev);
		b43_upload_beacon1(dev);
		cmd = b43_read32(dev, B43_MMIO_MACCMD);
		cmd |= B43_MACCMD_BEACON0_VALID;
		b43_write32(dev, B43_MMIO_MACCMD, cmd);
	} else {
		if (!beacon0_valid) {
			b43_upload_beacon0(dev);
			cmd = b43_read32(dev, B43_MMIO_MACCMD);
			cmd |= B43_MACCMD_BEACON0_VALID;
			b43_write32(dev, B43_MMIO_MACCMD, cmd);
		} else if (!beacon1_valid) {
			b43_upload_beacon1(dev);
			cmd = b43_read32(dev, B43_MMIO_MACCMD);
			cmd |= B43_MACCMD_BEACON1_VALID;
			b43_write32(dev, B43_MMIO_MACCMD, cmd);
		}
	}
}

static void b43_do_beacon_update_trigger_work(struct b43_wldev *dev)
{
	u32 old_irq_mask = dev->irq_mask;

	/* update beacon right away or defer to irq */
	handle_irq_beacon(dev);
	if (old_irq_mask != dev->irq_mask) {
		/* The handler updated the IRQ mask. */
		B43_WARN_ON(!dev->irq_mask);
		if (b43_read32(dev, B43_MMIO_GEN_IRQ_MASK)) {
			b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, dev->irq_mask);
		} else {
			/* Device interrupts are currently disabled. That means
			 * we just ran the hardirq handler and scheduled the
			 * IRQ thread. The thread will write the IRQ mask when
			 * it finishes, so there's nothing to do here. Writing
			 * the mask _here_ would incorrectly re-enable IRQs. */
		}
	}
}

static void b43_beacon_update_trigger_work(struct work_struct *work)
{
	struct b43_wl *wl = container_of(work, struct b43_wl,
					 beacon_update_trigger);
	struct b43_wldev *dev;

	mutex_lock(&wl->mutex);
	dev = wl->current_dev;
	if (likely(dev && (b43_status(dev) >= B43_STAT_INITIALIZED))) {
		if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) {
			/* wl->mutex is enough. */
			b43_do_beacon_update_trigger_work(dev);
			mmiowb();
		} else {
			spin_lock_irq(&wl->hardirq_lock);
			b43_do_beacon_update_trigger_work(dev);
			mmiowb();
			spin_unlock_irq(&wl->hardirq_lock);
		}
	}
	mutex_unlock(&wl->mutex);
}

/* Asynchronously update the packet templates in template RAM.
 * Locking: Requires wl->mutex to be locked. */
static void b43_update_templates(struct b43_wl *wl)
{
	struct sk_buff *beacon;

	/* This is the top half of the asynchronous beacon update.
	 * The bottom half is the beacon IRQ.
	 * Beacon update must be asynchronous to avoid sending an
	 * invalid beacon. This can happen, for example, if the firmware
	 * transmits a beacon while we are updating it. */

	/* We could modify the existing beacon and set the aid bit in
	 * the TIM field, but that would probably require resizing and
	 * moving of data within the beacon template.
	 * Simply request a new beacon and let mac80211 do the hard work.
*/ beacon = ieee80211_beacon_get(wl->hw, wl->vif); if (unlikely(!beacon)) return; if (wl->current_beacon) dev_kfree_skb_any(wl->current_beacon); wl->current_beacon = beacon; wl->beacon0_uploaded = 0; wl->beacon1_uploaded = 0; ieee80211_queue_work(wl->hw, &wl->beacon_update_trigger); } static void b43_set_beacon_int(struct b43_wldev *dev, u16 beacon_int) { b43_time_lock(dev); if (dev->dev->id.revision >= 3) { b43_write32(dev, B43_MMIO_TSF_CFP_REP, (beacon_int << 16)); b43_write32(dev, B43_MMIO_TSF_CFP_START, (beacon_int << 10)); } else { b43_write16(dev, 0x606, (beacon_int >> 6)); b43_write16(dev, 0x610, beacon_int); } b43_time_unlock(dev); b43dbg(dev->wl, "Set beacon interval to %u\n", beacon_int); } static void b43_handle_firmware_panic(struct b43_wldev *dev) { u16 reason; /* Read the register that contains the reason code for the panic. */ reason = b43_shm_read16(dev, B43_SHM_SCRATCH, B43_FWPANIC_REASON_REG); b43err(dev->wl, "Whoopsy, firmware panic! Reason: %u\n", reason); switch (reason) { default: b43dbg(dev->wl, "The panic reason is unknown.\n"); /* fallthrough */ case B43_FWPANIC_DIE: /* Do not restart the controller or firmware. * The device is nonfunctional from now on. * Restarting would result in this panic to trigger again, * so we avoid that recursion. */ break; case B43_FWPANIC_RESTART: b43_controller_restart(dev, "Microcode panic"); break; } } static void handle_irq_ucode_debug(struct b43_wldev *dev) { unsigned int i, cnt; u16 reason, marker_id, marker_line; __le16 *buf; /* The proprietary firmware doesn't have this IRQ. */ if (!dev->fw.opensource) return; /* Read the register that contains the reason code for this IRQ. */ reason = b43_shm_read16(dev, B43_SHM_SCRATCH, B43_DEBUGIRQ_REASON_REG); switch (reason) { case B43_DEBUGIRQ_PANIC: b43_handle_firmware_panic(dev); break; case B43_DEBUGIRQ_DUMP_SHM: if (!B43_DEBUG) break; /* Only with driver debugging enabled. */ buf = kmalloc(4096, GFP_ATOMIC); if (!buf) { b43dbg(dev->wl, "SHM-dump: Failed to allocate memory\n"); goto out; } for (i = 0; i < 4096; i += 2) { u16 tmp = b43_shm_read16(dev, B43_SHM_SHARED, i); buf[i / 2] = cpu_to_le16(tmp); } b43info(dev->wl, "Shared memory dump:\n"); print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 2, buf, 4096, 1); kfree(buf); break; case B43_DEBUGIRQ_DUMP_REGS: if (!B43_DEBUG) break; /* Only with driver debugging enabled. */ b43info(dev->wl, "Microcode register dump:\n"); for (i = 0, cnt = 0; i < 64; i++) { u16 tmp = b43_shm_read16(dev, B43_SHM_SCRATCH, i); if (cnt == 0) printk(KERN_INFO); printk("r%02u: 0x%04X ", i, tmp); cnt++; if (cnt == 6) { printk("\n"); cnt = 0; } } printk("\n"); break; case B43_DEBUGIRQ_MARKER: if (!B43_DEBUG) break; /* Only with driver debugging enabled. */ marker_id = b43_shm_read16(dev, B43_SHM_SCRATCH, B43_MARKER_ID_REG); marker_line = b43_shm_read16(dev, B43_SHM_SCRATCH, B43_MARKER_LINE_REG); b43info(dev->wl, "The firmware just executed the MARKER(%u) " "at line number %u\n", marker_id, marker_line); break; default: b43dbg(dev->wl, "Debug-IRQ triggered for unknown reason: %u\n", reason); } out: /* Acknowledge the debug-IRQ, so the firmware can continue. 
*/ b43_shm_write16(dev, B43_SHM_SCRATCH, B43_DEBUGIRQ_REASON_REG, B43_DEBUGIRQ_ACK); } static void b43_do_interrupt_thread(struct b43_wldev *dev) { u32 reason; u32 dma_reason[ARRAY_SIZE(dev->dma_reason)]; u32 merged_dma_reason = 0; int i; if (unlikely(b43_status(dev) != B43_STAT_STARTED)) return; reason = dev->irq_reason; for (i = 0; i < ARRAY_SIZE(dma_reason); i++) { dma_reason[i] = dev->dma_reason[i]; merged_dma_reason |= dma_reason[i]; } if (unlikely(reason & B43_IRQ_MAC_TXERR)) b43err(dev->wl, "MAC transmission error\n"); if (unlikely(reason & B43_IRQ_PHY_TXERR)) { b43err(dev->wl, "PHY transmission error\n"); rmb(); if (unlikely(atomic_dec_and_test(&dev->phy.txerr_cnt))) { atomic_set(&dev->phy.txerr_cnt, B43_PHY_TX_BADNESS_LIMIT); b43err(dev->wl, "Too many PHY TX errors, " "restarting the controller\n"); b43_controller_restart(dev, "PHY TX errors"); } } if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK | B43_DMAIRQ_NONFATALMASK))) { if (merged_dma_reason & B43_DMAIRQ_FATALMASK) { b43err(dev->wl, "Fatal DMA error: " "0x%08X, 0x%08X, 0x%08X, " "0x%08X, 0x%08X, 0x%08X\n", dma_reason[0], dma_reason[1], dma_reason[2], dma_reason[3], dma_reason[4], dma_reason[5]); b43_controller_restart(dev, "DMA error"); return; } if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) { b43err(dev->wl, "DMA error: " "0x%08X, 0x%08X, 0x%08X, " "0x%08X, 0x%08X, 0x%08X\n", dma_reason[0], dma_reason[1], dma_reason[2], dma_reason[3], dma_reason[4], dma_reason[5]); } } if (unlikely(reason & B43_IRQ_UCODE_DEBUG)) handle_irq_ucode_debug(dev); if (reason & B43_IRQ_TBTT_INDI) handle_irq_tbtt_indication(dev); if (reason & B43_IRQ_ATIM_END) handle_irq_atim_end(dev); if (reason & B43_IRQ_BEACON) handle_irq_beacon(dev); if (reason & B43_IRQ_PMQ) handle_irq_pmq(dev); if (reason & B43_IRQ_TXFIFO_FLUSH_OK) ;/* TODO */ if (reason & B43_IRQ_NOISESAMPLE_OK) handle_irq_noise(dev); /* Check the DMA reason registers for received data. */ if (dma_reason[0] & B43_DMAIRQ_RX_DONE) { if (b43_using_pio_transfers(dev)) b43_pio_rx(dev->pio.rx_queue); else b43_dma_rx(dev->dma.rx_ring); } B43_WARN_ON(dma_reason[1] & B43_DMAIRQ_RX_DONE); B43_WARN_ON(dma_reason[2] & B43_DMAIRQ_RX_DONE); B43_WARN_ON(dma_reason[3] & B43_DMAIRQ_RX_DONE); B43_WARN_ON(dma_reason[4] & B43_DMAIRQ_RX_DONE); B43_WARN_ON(dma_reason[5] & B43_DMAIRQ_RX_DONE); if (reason & B43_IRQ_TX_OK) handle_irq_transmit_status(dev); /* Re-enable interrupts on the device by restoring the current interrupt mask. */ b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, dev->irq_mask); #if B43_DEBUG if (b43_debug(dev, B43_DBG_VERBOSESTATS)) { dev->irq_count++; for (i = 0; i < ARRAY_SIZE(dev->irq_bit_count); i++) { if (reason & (1 << i)) dev->irq_bit_count[i]++; } } #endif } /* Interrupt thread handler. Handles device interrupts in thread context. */ static irqreturn_t b43_interrupt_thread_handler(int irq, void *dev_id) { struct b43_wldev *dev = dev_id; mutex_lock(&dev->wl->mutex); b43_do_interrupt_thread(dev); mmiowb(); mutex_unlock(&dev->wl->mutex); return IRQ_HANDLED; } static irqreturn_t b43_do_interrupt(struct b43_wldev *dev) { u32 reason; /* This code runs under wl->hardirq_lock, but _only_ on non-SDIO busses. * On SDIO, this runs under wl->mutex. 
*/ reason = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); if (reason == 0xffffffff) /* shared IRQ */ return IRQ_NONE; reason &= dev->irq_mask; if (!reason) return IRQ_HANDLED; dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON) & 0x0001DC00; dev->dma_reason[1] = b43_read32(dev, B43_MMIO_DMA1_REASON) & 0x0000DC00; dev->dma_reason[2] = b43_read32(dev, B43_MMIO_DMA2_REASON) & 0x0000DC00; dev->dma_reason[3] = b43_read32(dev, B43_MMIO_DMA3_REASON) & 0x0001DC00; dev->dma_reason[4] = b43_read32(dev, B43_MMIO_DMA4_REASON) & 0x0000DC00; /* Unused ring dev->dma_reason[5] = b43_read32(dev, B43_MMIO_DMA5_REASON) & 0x0000DC00; */ /* ACK the interrupt. */ b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, reason); b43_write32(dev, B43_MMIO_DMA0_REASON, dev->dma_reason[0]); b43_write32(dev, B43_MMIO_DMA1_REASON, dev->dma_reason[1]); b43_write32(dev, B43_MMIO_DMA2_REASON, dev->dma_reason[2]); b43_write32(dev, B43_MMIO_DMA3_REASON, dev->dma_reason[3]); b43_write32(dev, B43_MMIO_DMA4_REASON, dev->dma_reason[4]); /* Unused ring b43_write32(dev, B43_MMIO_DMA5_REASON, dev->dma_reason[5]); */ /* Disable IRQs on the device. The IRQ thread handler will re-enable them. */ b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, 0); /* Save the reason bitmasks for the IRQ thread handler. */ dev->irq_reason = reason; return IRQ_WAKE_THREAD; } /* Interrupt handler top-half. This runs with interrupts disabled. */ static irqreturn_t b43_interrupt_handler(int irq, void *dev_id) { struct b43_wldev *dev = dev_id; irqreturn_t ret; if (unlikely(b43_status(dev) < B43_STAT_STARTED)) return IRQ_NONE; spin_lock(&dev->wl->hardirq_lock); ret = b43_do_interrupt(dev); mmiowb(); spin_unlock(&dev->wl->hardirq_lock); return ret; } /* SDIO interrupt handler. This runs in process context. */ static void b43_sdio_interrupt_handler(struct b43_wldev *dev) { struct b43_wl *wl = dev->wl; irqreturn_t ret; mutex_lock(&wl->mutex); ret = b43_do_interrupt(dev); if (ret == IRQ_WAKE_THREAD) b43_do_interrupt_thread(dev); mutex_unlock(&wl->mutex); } void b43_do_release_fw(struct b43_firmware_file *fw) { release_firmware(fw->data); fw->data = NULL; fw->filename = NULL; } static void b43_release_firmware(struct b43_wldev *dev) { b43_do_release_fw(&dev->fw.ucode); b43_do_release_fw(&dev->fw.pcm); b43_do_release_fw(&dev->fw.initvals); b43_do_release_fw(&dev->fw.initvals_band); } static void b43_print_fw_helptext(struct b43_wl *wl, bool error) { const char text[] = "You must go to " \ "http://wireless.kernel.org/en/users/Drivers/b43#devicefirmware " \ "and download the correct firmware for this driver version. " \ "Please carefully read all instructions on this website.\n"; if (error) b43err(wl, text); else b43warn(wl, text); } int b43_do_request_fw(struct b43_request_fw_context *ctx, const char *name, struct b43_firmware_file *fw) { const struct firmware *blob; struct b43_fw_header *hdr; u32 size; int err; if (!name) { /* Don't fetch anything. Free possibly cached firmware. */ /* FIXME: We should probably keep it anyway, to save some headache * on suspend/resume with multiband devices. */ b43_do_release_fw(fw); return 0; } if (fw->filename) { if ((fw->type == ctx->req_type) && (strcmp(fw->filename, name) == 0)) return 0; /* Already have this fw. */ /* Free the cached firmware first. */ /* FIXME: We should probably do this later after we successfully * got the new fw. This could reduce headache with multiband devices. * We could also redesign this to cache the firmware for all possible * bands all the time. 
*/ b43_do_release_fw(fw); } switch (ctx->req_type) { case B43_FWTYPE_PROPRIETARY: snprintf(ctx->fwname, sizeof(ctx->fwname), "b43%s/%s.fw", modparam_fwpostfix, name); break; case B43_FWTYPE_OPENSOURCE: snprintf(ctx->fwname, sizeof(ctx->fwname), "b43-open%s/%s.fw", modparam_fwpostfix, name); break; default: B43_WARN_ON(1); return -ENOSYS; } err = request_firmware(&blob, ctx->fwname, ctx->dev->dev->dev); if (err == -ENOENT) { snprintf(ctx->errors[ctx->req_type], sizeof(ctx->errors[ctx->req_type]), "Firmware file \"%s\" not found\n", ctx->fwname); return err; } else if (err) { snprintf(ctx->errors[ctx->req_type], sizeof(ctx->errors[ctx->req_type]), "Firmware file \"%s\" request failed (err=%d)\n", ctx->fwname, err); return err; } if (blob->size < sizeof(struct b43_fw_header)) goto err_format; hdr = (struct b43_fw_header *)(blob->data); switch (hdr->type) { case B43_FW_TYPE_UCODE: case B43_FW_TYPE_PCM: size = be32_to_cpu(hdr->size); if (size != blob->size - sizeof(struct b43_fw_header)) goto err_format; /* fallthrough */ case B43_FW_TYPE_IV: if (hdr->ver != 1) goto err_format; break; default: goto err_format; } fw->data = blob; fw->filename = name; fw->type = ctx->req_type; return 0; err_format: snprintf(ctx->errors[ctx->req_type], sizeof(ctx->errors[ctx->req_type]), "Firmware file \"%s\" format error.\n", ctx->fwname); release_firmware(blob); return -EPROTO; } static int b43_try_request_fw(struct b43_request_fw_context *ctx) { struct b43_wldev *dev = ctx->dev; struct b43_firmware *fw = &ctx->dev->fw; const u8 rev = ctx->dev->dev->id.revision; const char *filename; u32 tmshigh; int err; /* Get microcode */ tmshigh = ssb_read32(dev->dev, SSB_TMSHIGH); if ((rev >= 5) && (rev <= 10)) filename = "ucode5"; else if ((rev >= 11) && (rev <= 12)) filename = "ucode11"; else if (rev == 13) filename = "ucode13"; else if (rev == 14) filename = "ucode14"; else if (rev >= 15) filename = "ucode15"; else goto err_no_ucode; err = b43_do_request_fw(ctx, filename, &fw->ucode); if (err) goto err_load; /* Get PCM code */ if ((rev >= 5) && (rev <= 10)) filename = "pcm5"; else if (rev >= 11) filename = NULL; else goto err_no_pcm; fw->pcm_request_failed = 0; err = b43_do_request_fw(ctx, filename, &fw->pcm); if (err == -ENOENT) { /* We did not find a PCM file? Not fatal, but * core rev <= 10 must do without hwcrypto then. 
*/ fw->pcm_request_failed = 1; } else if (err) goto err_load; /* Get initvals */ switch (dev->phy.type) { case B43_PHYTYPE_A: if ((rev >= 5) && (rev <= 10)) { if (tmshigh & B43_TMSHIGH_HAVE_2GHZ_PHY) filename = "a0g1initvals5"; else filename = "a0g0initvals5"; } else goto err_no_initvals; break; case B43_PHYTYPE_G: if ((rev >= 5) && (rev <= 10)) filename = "b0g0initvals5"; else if (rev >= 13) filename = "b0g0initvals13"; else goto err_no_initvals; break; case B43_PHYTYPE_N: if ((rev >= 11) && (rev <= 12)) filename = "n0initvals11"; else goto err_no_initvals; break; case B43_PHYTYPE_LP: if (rev == 13) filename = "lp0initvals13"; else if (rev == 14) filename = "lp0initvals14"; else if (rev >= 15) filename = "lp0initvals15"; else goto err_no_initvals; break; default: goto err_no_initvals; } err = b43_do_request_fw(ctx, filename, &fw->initvals); if (err) goto err_load; /* Get bandswitch initvals */ switch (dev->phy.type) { case B43_PHYTYPE_A: if ((rev >= 5) && (rev <= 10)) { if (tmshigh & B43_TMSHIGH_HAVE_2GHZ_PHY) filename = "a0g1bsinitvals5"; else filename = "a0g0bsinitvals5"; } else if (rev >= 11) filename = NULL; else goto err_no_initvals; break; case B43_PHYTYPE_G: if ((rev >= 5) && (rev <= 10)) filename = "b0g0bsinitvals5"; else if (rev >= 11) filename = NULL; else goto err_no_initvals; break; case B43_PHYTYPE_N: if ((rev >= 11) && (rev <= 12)) filename = "n0bsinitvals11"; else goto err_no_initvals; break; case B43_PHYTYPE_LP: if (rev == 13) filename = "lp0bsinitvals13"; else if (rev == 14) filename = "lp0bsinitvals14"; else if (rev >= 15) filename = "lp0bsinitvals15"; else goto err_no_initvals; break; default: goto err_no_initvals; } err = b43_do_request_fw(ctx, filename, &fw->initvals_band); if (err) goto err_load; return 0; err_no_ucode: err = ctx->fatal_failure = -EOPNOTSUPP; b43err(dev->wl, "The driver does not know which firmware (ucode) " "is required for your device (wl-core rev %u)\n", rev); goto error; err_no_pcm: err = ctx->fatal_failure = -EOPNOTSUPP; b43err(dev->wl, "The driver does not know which firmware (PCM) " "is required for your device (wl-core rev %u)\n", rev); goto error; err_no_initvals: err = ctx->fatal_failure = -EOPNOTSUPP; b43err(dev->wl, "The driver does not know which firmware (initvals) " "is required for your device (wl-core rev %u)\n", rev); goto error; err_load: /* We failed to load this firmware image. The error message * already is in ctx->errors. Return and let our caller decide * what to do. */ goto error; error: b43_release_firmware(dev); return err; } static int b43_request_firmware(struct b43_wldev *dev) { struct b43_request_fw_context *ctx; unsigned int i; int err; const char *errmsg; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->dev = dev; ctx->req_type = B43_FWTYPE_PROPRIETARY; err = b43_try_request_fw(ctx); if (!err) goto out; /* Successfully loaded it. */ err = ctx->fatal_failure; if (err) goto out; ctx->req_type = B43_FWTYPE_OPENSOURCE; err = b43_try_request_fw(ctx); if (!err) goto out; /* Successfully loaded it. */ err = ctx->fatal_failure; if (err) goto out; /* Could not find a usable firmware. Print the errors. 
*/ for (i = 0; i < B43_NR_FWTYPES; i++) { errmsg = ctx->errors[i]; if (strlen(errmsg)) b43err(dev->wl, errmsg); } b43_print_fw_helptext(dev->wl, 1); err = -ENOENT; out: kfree(ctx); return err; } static int b43_upload_microcode(struct b43_wldev *dev) { const size_t hdr_len = sizeof(struct b43_fw_header); const __be32 *data; unsigned int i, len; u16 fwrev, fwpatch, fwdate, fwtime; u32 tmp, macctl; int err = 0; /* Jump the microcode PSM to offset 0 */ macctl = b43_read32(dev, B43_MMIO_MACCTL); B43_WARN_ON(macctl & B43_MACCTL_PSM_RUN); macctl |= B43_MACCTL_PSM_JMP0; b43_write32(dev, B43_MMIO_MACCTL, macctl); /* Zero out all microcode PSM registers and shared memory. */ for (i = 0; i < 64; i++) b43_shm_write16(dev, B43_SHM_SCRATCH, i, 0); for (i = 0; i < 4096; i += 2) b43_shm_write16(dev, B43_SHM_SHARED, i, 0); /* Upload Microcode. */ data = (__be32 *) (dev->fw.ucode.data->data + hdr_len); len = (dev->fw.ucode.data->size - hdr_len) / sizeof(__be32); b43_shm_control_word(dev, B43_SHM_UCODE | B43_SHM_AUTOINC_W, 0x0000); for (i = 0; i < len; i++) { b43_write32(dev, B43_MMIO_SHM_DATA, be32_to_cpu(data[i])); udelay(10); } if (dev->fw.pcm.data) { /* Upload PCM data. */ data = (__be32 *) (dev->fw.pcm.data->data + hdr_len); len = (dev->fw.pcm.data->size - hdr_len) / sizeof(__be32); b43_shm_control_word(dev, B43_SHM_HW, 0x01EA); b43_write32(dev, B43_MMIO_SHM_DATA, 0x00004000); /* No need for autoinc bit in SHM_HW */ b43_shm_control_word(dev, B43_SHM_HW, 0x01EB); for (i = 0; i < len; i++) { b43_write32(dev, B43_MMIO_SHM_DATA, be32_to_cpu(data[i])); udelay(10); } } b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, B43_IRQ_ALL); /* Start the microcode PSM */ macctl = b43_read32(dev, B43_MMIO_MACCTL); macctl &= ~B43_MACCTL_PSM_JMP0; macctl |= B43_MACCTL_PSM_RUN; b43_write32(dev, B43_MMIO_MACCTL, macctl); /* Wait for the microcode to load and respond */ i = 0; while (1) { tmp = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); if (tmp == B43_IRQ_MAC_SUSPENDED) break; i++; if (i >= 20) { b43err(dev->wl, "Microcode not responding\n"); b43_print_fw_helptext(dev->wl, 1); err = -ENODEV; goto error; } msleep(50); } b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); /* dummy read */ /* Get and check the revisions. */ fwrev = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_UCODEREV); fwpatch = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_UCODEPATCH); fwdate = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_UCODEDATE); fwtime = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_UCODETIME); if (fwrev <= 0x128) { b43err(dev->wl, "YOUR FIRMWARE IS TOO OLD. Firmware from " "binary drivers older than version 4.x is unsupported. " "You must upgrade your firmware files.\n"); b43_print_fw_helptext(dev->wl, 1); err = -EOPNOTSUPP; goto error; } dev->fw.rev = fwrev; dev->fw.patch = fwpatch; dev->fw.opensource = (fwdate == 0xFFFF); /* Default to use-all-queues. */ dev->wl->hw->queues = dev->wl->mac80211_initially_registered_queues; dev->qos_enabled = !!modparam_qos; /* Default to firmware/hardware crypto acceleration. */ dev->hwcrypto_enabled = 1; if (dev->fw.opensource) { u16 fwcapa; /* Patchlevel info is encoded in the "time" field. */ dev->fw.patch = fwtime; b43info(dev->wl, "Loading OpenSource firmware version %u.%u\n", dev->fw.rev, dev->fw.patch); fwcapa = b43_fwcapa_read(dev); if (!(fwcapa & B43_FWCAPA_HWCRYPTO) || dev->fw.pcm_request_failed) { b43info(dev->wl, "Hardware crypto acceleration not supported by firmware\n"); /* Disable hardware crypto and fall back to software crypto. 
*/ dev->hwcrypto_enabled = 0; } if (!(fwcapa & B43_FWCAPA_QOS)) { b43info(dev->wl, "QoS not supported by firmware\n"); /* Disable QoS. Tweak hw->queues to 1. It will be restored before * ieee80211_unregister to make sure the networking core can * properly free possible resources. */ dev->wl->hw->queues = 1; dev->qos_enabled = 0; } } else { b43info(dev->wl, "Loading firmware version %u.%u " "(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n", fwrev, fwpatch, (fwdate >> 12) & 0xF, (fwdate >> 8) & 0xF, fwdate & 0xFF, (fwtime >> 11) & 0x1F, (fwtime >> 5) & 0x3F, fwtime & 0x1F); if (dev->fw.pcm_request_failed) { b43warn(dev->wl, "No \"pcm5.fw\" firmware file found. " "Hardware accelerated cryptography is disabled.\n"); b43_print_fw_helptext(dev->wl, 0); } } if (b43_is_old_txhdr_format(dev)) { /* We're over the deadline, but we keep support for old fw * until it turns out to be in major conflict with something new. */ b43warn(dev->wl, "You are using an old firmware image. " "Support for old firmware will be removed soon " "(official deadline was July 2008).\n"); b43_print_fw_helptext(dev->wl, 0); } return 0; error: macctl = b43_read32(dev, B43_MMIO_MACCTL); macctl &= ~B43_MACCTL_PSM_RUN; macctl |= B43_MACCTL_PSM_JMP0; b43_write32(dev, B43_MMIO_MACCTL, macctl); return err; } static int b43_write_initvals(struct b43_wldev *dev, const struct b43_iv *ivals, size_t count, size_t array_size) { const struct b43_iv *iv; u16 offset; size_t i; bool bit32; BUILD_BUG_ON(sizeof(struct b43_iv) != 6); iv = ivals; for (i = 0; i < count; i++) { if (array_size < sizeof(iv->offset_size)) goto err_format; array_size -= sizeof(iv->offset_size); offset = be16_to_cpu(iv->offset_size); bit32 = !!(offset & B43_IV_32BIT); offset &= B43_IV_OFFSET_MASK; if (offset >= 0x1000) goto err_format; if (bit32) { u32 value; if (array_size < sizeof(iv->data.d32)) goto err_format; array_size -= sizeof(iv->data.d32); value = get_unaligned_be32(&iv->data.d32); b43_write32(dev, offset, value); iv = (const struct b43_iv *)((const uint8_t *)iv + sizeof(__be16) + sizeof(__be32)); } else { u16 value; if (array_size < sizeof(iv->data.d16)) goto err_format; array_size -= sizeof(iv->data.d16); value = be16_to_cpu(iv->data.d16); b43_write16(dev, offset, value); iv = (const struct b43_iv *)((const uint8_t *)iv + sizeof(__be16) + sizeof(__be16)); } } if (array_size) goto err_format; return 0; err_format: b43err(dev->wl, "Initial Values Firmware file-format error.\n"); b43_print_fw_helptext(dev->wl, 1); return -EPROTO; } static int b43_upload_initvals(struct b43_wldev *dev) { const size_t hdr_len = sizeof(struct b43_fw_header); const struct b43_fw_header *hdr; struct b43_firmware *fw = &dev->fw; const struct b43_iv *ivals; size_t count; int err; hdr = (const struct b43_fw_header *)(fw->initvals.data->data); ivals = (const struct b43_iv *)(fw->initvals.data->data + hdr_len); count = be32_to_cpu(hdr->size); err = b43_write_initvals(dev, ivals, count, fw->initvals.data->size - hdr_len); if (err) goto out; if (fw->initvals_band.data) { hdr = (const struct b43_fw_header *)(fw->initvals_band.data->data); ivals = (const struct b43_iv *)(fw->initvals_band.data->data + hdr_len); count = be32_to_cpu(hdr->size); err = b43_write_initvals(dev, ivals, count, fw->initvals_band.data->size - hdr_len); if (err) goto out; } out: return err; } /* Initialize the GPIOs * http://bcm-specs.sipsolutions.net/GPIO */ static int b43_gpio_init(struct b43_wldev *dev) { struct ssb_bus *bus = dev->dev->bus; struct ssb_device *gpiodev, *pcidev = NULL; u32 mask, set; b43_write32(dev, 
B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL) & ~B43_MACCTL_GPOUTSMSK); b43_write16(dev, B43_MMIO_GPIO_MASK, b43_read16(dev, B43_MMIO_GPIO_MASK) | 0x000F); mask = 0x0000001F; set = 0x0000000F; if (dev->dev->bus->chip_id == 0x4301) { mask |= 0x0060; set |= 0x0060; } if (0 /* FIXME: conditional unknown */ ) { b43_write16(dev, B43_MMIO_GPIO_MASK, b43_read16(dev, B43_MMIO_GPIO_MASK) | 0x0100); mask |= 0x0180; set |= 0x0180; } if (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL) { b43_write16(dev, B43_MMIO_GPIO_MASK, b43_read16(dev, B43_MMIO_GPIO_MASK) | 0x0200); mask |= 0x0200; set |= 0x0200; } if (dev->dev->id.revision >= 2) mask |= 0x0010; /* FIXME: This is redundant. */ #ifdef CONFIG_SSB_DRIVER_PCICORE pcidev = bus->pcicore.dev; #endif gpiodev = bus->chipco.dev ? : pcidev; if (!gpiodev) return 0; ssb_write32(gpiodev, B43_GPIO_CONTROL, (ssb_read32(gpiodev, B43_GPIO_CONTROL) & mask) | set); return 0; } /* Turn off all GPIO stuff. Call this on module unload, for example. */ static void b43_gpio_cleanup(struct b43_wldev *dev) { struct ssb_bus *bus = dev->dev->bus; struct ssb_device *gpiodev, *pcidev = NULL; #ifdef CONFIG_SSB_DRIVER_PCICORE pcidev = bus->pcicore.dev; #endif gpiodev = bus->chipco.dev ? : pcidev; if (!gpiodev) return; ssb_write32(gpiodev, B43_GPIO_CONTROL, 0); } /* http://bcm-specs.sipsolutions.net/EnableMac */ void b43_mac_enable(struct b43_wldev *dev) { if (b43_debug(dev, B43_DBG_FIRMWARE)) { u16 fwstate; fwstate = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_UCODESTAT); if ((fwstate != B43_SHM_SH_UCODESTAT_SUSP) && (fwstate != B43_SHM_SH_UCODESTAT_SLEEP)) { b43err(dev->wl, "b43_mac_enable(): The firmware " "should be suspended, but current state is %u\n", fwstate); } } dev->mac_suspended--; B43_WARN_ON(dev->mac_suspended < 0); if (dev->mac_suspended == 0) { b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL) | B43_MACCTL_ENABLED); b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, B43_IRQ_MAC_SUSPENDED); /* Commit writes */ b43_read32(dev, B43_MMIO_MACCTL); b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); b43_power_saving_ctl_bits(dev, 0); } } /* http://bcm-specs.sipsolutions.net/SuspendMAC */ void b43_mac_suspend(struct b43_wldev *dev) { int i; u32 tmp; might_sleep(); B43_WARN_ON(dev->mac_suspended < 0); if (dev->mac_suspended == 0) { b43_power_saving_ctl_bits(dev, B43_PS_AWAKE); b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL) & ~B43_MACCTL_ENABLED); /* force pci to flush the write */ b43_read32(dev, B43_MMIO_MACCTL); for (i = 35; i; i--) { tmp = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); if (tmp & B43_IRQ_MAC_SUSPENDED) goto out; udelay(10); } /* Hm, it seems this will take some time. Use msleep(). */ for (i = 40; i; i--) { tmp = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); if (tmp & B43_IRQ_MAC_SUSPENDED) goto out; msleep(1); } b43err(dev->wl, "MAC suspend failed\n"); } out: dev->mac_suspended++; } static void b43_adjust_opmode(struct b43_wldev *dev) { struct b43_wl *wl = dev->wl; u32 ctl; u16 cfp_pretbtt; ctl = b43_read32(dev, B43_MMIO_MACCTL); /* Reset status to STA infrastructure mode. 
	 */
	ctl &= ~B43_MACCTL_AP;
	ctl &= ~B43_MACCTL_KEEP_CTL;
	ctl &= ~B43_MACCTL_KEEP_BADPLCP;
	ctl &= ~B43_MACCTL_KEEP_BAD;
	ctl &= ~B43_MACCTL_PROMISC;
	ctl &= ~B43_MACCTL_BEACPROMISC;
	ctl |= B43_MACCTL_INFRA;

	if (b43_is_mode(wl, NL80211_IFTYPE_AP) ||
	    b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT))
		ctl |= B43_MACCTL_AP;
	else if (b43_is_mode(wl, NL80211_IFTYPE_ADHOC))
		ctl &= ~B43_MACCTL_INFRA;

	if (wl->filter_flags & FIF_CONTROL)
		ctl |= B43_MACCTL_KEEP_CTL;
	if (wl->filter_flags & FIF_FCSFAIL)
		ctl |= B43_MACCTL_KEEP_BAD;
	if (wl->filter_flags & FIF_PLCPFAIL)
		ctl |= B43_MACCTL_KEEP_BADPLCP;
	if (wl->filter_flags & FIF_PROMISC_IN_BSS)
		ctl |= B43_MACCTL_PROMISC;
	if (wl->filter_flags & FIF_BCN_PRBRESP_PROMISC)
		ctl |= B43_MACCTL_BEACPROMISC;

	/* Workaround: On old hardware the HW-MAC-address-filter
	 * doesn't work properly, so always run promisc and filter
	 * it in software. */
	if (dev->dev->id.revision <= 4)
		ctl |= B43_MACCTL_PROMISC;

	b43_write32(dev, B43_MMIO_MACCTL, ctl);

	cfp_pretbtt = 2;
	if ((ctl & B43_MACCTL_INFRA) && !(ctl & B43_MACCTL_AP)) {
		if (dev->dev->bus->chip_id == 0x4306 &&
		    dev->dev->bus->chip_rev == 3)
			cfp_pretbtt = 100;
		else
			cfp_pretbtt = 50;
	}
	b43_write16(dev, 0x612, cfp_pretbtt);

	/* FIXME: We don't currently implement the PMQ mechanism,
	 *        so always disable it. If we want to implement PMQ,
	 *        we need to enable it here (clear DISCPMQ) in AP mode.
	 */
	if (0  /* ctl & B43_MACCTL_AP */) {
		b43_write32(dev, B43_MMIO_MACCTL,
			    b43_read32(dev, B43_MMIO_MACCTL)
			    & ~B43_MACCTL_DISCPMQ);
	} else {
		b43_write32(dev, B43_MMIO_MACCTL,
			    b43_read32(dev, B43_MMIO_MACCTL)
			    | B43_MACCTL_DISCPMQ);
	}
}

static void b43_rate_memory_write(struct b43_wldev *dev, u16 rate, int is_ofdm)
{
	u16 offset;

	if (is_ofdm) {
		offset = 0x480;
		offset += (b43_plcp_get_ratecode_ofdm(rate) & 0x000F) * 2;
	} else {
		offset = 0x4C0;
		offset += (b43_plcp_get_ratecode_cck(rate) & 0x000F) * 2;
	}
	b43_shm_write16(dev, B43_SHM_SHARED, offset + 0x20,
			b43_shm_read16(dev, B43_SHM_SHARED, offset));
}

static void b43_rate_memory_init(struct b43_wldev *dev)
{
	switch (dev->phy.type) {
	case B43_PHYTYPE_A:
	case B43_PHYTYPE_G:
	case B43_PHYTYPE_N:
	case B43_PHYTYPE_LP:
		b43_rate_memory_write(dev, B43_OFDM_RATE_6MB, 1);
		b43_rate_memory_write(dev, B43_OFDM_RATE_12MB, 1);
		b43_rate_memory_write(dev, B43_OFDM_RATE_18MB, 1);
		b43_rate_memory_write(dev, B43_OFDM_RATE_24MB, 1);
		b43_rate_memory_write(dev, B43_OFDM_RATE_36MB, 1);
		b43_rate_memory_write(dev, B43_OFDM_RATE_48MB, 1);
		b43_rate_memory_write(dev, B43_OFDM_RATE_54MB, 1);
		if (dev->phy.type == B43_PHYTYPE_A)
			break;
		/* fallthrough */
	case B43_PHYTYPE_B:
		b43_rate_memory_write(dev, B43_CCK_RATE_1MB, 0);
		b43_rate_memory_write(dev, B43_CCK_RATE_2MB, 0);
		b43_rate_memory_write(dev, B43_CCK_RATE_5MB, 0);
		b43_rate_memory_write(dev, B43_CCK_RATE_11MB, 0);
		break;
	default:
		B43_WARN_ON(1);
	}
}

/* Set the default values for the PHY TX Control Words. */
static void b43_set_phytxctl_defaults(struct b43_wldev *dev)
{
	u16 ctl = 0;

	ctl |= B43_TXH_PHY_ENC_CCK;
	ctl |= B43_TXH_PHY_ANT01AUTO;
	ctl |= B43_TXH_PHY_TXPWR;

	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_BEACPHYCTL, ctl);
	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_ACKCTSPHYCTL, ctl);
	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRPHYCTL, ctl);
}

/* Set the TX-Antenna for management frames sent by firmware.
 */
static void b43_mgmtframe_txantenna(struct b43_wldev *dev, int antenna)
{
	u16 ant;
	u16 tmp;

	ant = b43_antenna_to_phyctl(antenna);

	/* For ACK/CTS */
	tmp = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_ACKCTSPHYCTL);
	tmp = (tmp & ~B43_TXH_PHY_ANT) | ant;
	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_ACKCTSPHYCTL, tmp);
	/* For Probe Responses */
	tmp = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_PRPHYCTL);
	tmp = (tmp & ~B43_TXH_PHY_ANT) | ant;
	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRPHYCTL, tmp);
}

/* This is the opposite of b43_chip_init() */
static void b43_chip_exit(struct b43_wldev *dev)
{
	b43_phy_exit(dev);
	b43_gpio_cleanup(dev);
	/* firmware is released later */
}

/* Initialize the chip
 * http://bcm-specs.sipsolutions.net/ChipInit
 */
static int b43_chip_init(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	int err;
	u32 value32, macctl;
	u16 value16;

	/* Initialize the MAC control */
	macctl = B43_MACCTL_IHR_ENABLED | B43_MACCTL_SHM_ENABLED;
	if (dev->phy.gmode)
		macctl |= B43_MACCTL_GMODE;
	macctl |= B43_MACCTL_INFRA;
	b43_write32(dev, B43_MMIO_MACCTL, macctl);

	err = b43_request_firmware(dev);
	if (err)
		goto out;
	err = b43_upload_microcode(dev);
	if (err)
		goto out;	/* firmware is released later */

	err = b43_gpio_init(dev);
	if (err)
		goto out;	/* firmware is released later */

	err = b43_upload_initvals(dev);
	if (err)
		goto err_gpio_clean;

	/* Turn the Analog on and initialize the PHY. */
	phy->ops->switch_analog(dev, 1);
	err = b43_phy_init(dev);
	if (err)
		goto err_gpio_clean;

	/* Disable Interference Mitigation. */
	if (phy->ops->interf_mitigation)
		phy->ops->interf_mitigation(dev, B43_INTERFMODE_NONE);

	/* Select the antennae */
	if (phy->ops->set_rx_antenna)
		phy->ops->set_rx_antenna(dev, B43_ANTENNA_DEFAULT);
	b43_mgmtframe_txantenna(dev, B43_ANTENNA_DEFAULT);

	if (phy->type == B43_PHYTYPE_B) {
		value16 = b43_read16(dev, 0x005E);
		value16 |= 0x0004;
		b43_write16(dev, 0x005E, value16);
	}
	b43_write32(dev, 0x0100, 0x01000000);
	if (dev->dev->id.revision < 5)
		b43_write32(dev, 0x010C, 0x01000000);

	b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL)
		    & ~B43_MACCTL_INFRA);
	b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL)
		    | B43_MACCTL_INFRA);

	/* Probe Response Timeout value */
	/* FIXME: Default to 0, has to be set by ioctl probably... :-/ */
	b43_shm_write16(dev, B43_SHM_SHARED, 0x0074, 0x0000);

	/* Initially set the wireless operation mode.
*/ b43_adjust_opmode(dev); if (dev->dev->id.revision < 3) { b43_write16(dev, 0x060E, 0x0000); b43_write16(dev, 0x0610, 0x8000); b43_write16(dev, 0x0604, 0x0000); b43_write16(dev, 0x0606, 0x0200); } else { b43_write32(dev, 0x0188, 0x80000000); b43_write32(dev, 0x018C, 0x02000000); } b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, 0x00004000); b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001DC00); b43_write32(dev, B43_MMIO_DMA1_IRQ_MASK, 0x0000DC00); b43_write32(dev, B43_MMIO_DMA2_IRQ_MASK, 0x0000DC00); b43_write32(dev, B43_MMIO_DMA3_IRQ_MASK, 0x0001DC00); b43_write32(dev, B43_MMIO_DMA4_IRQ_MASK, 0x0000DC00); b43_write32(dev, B43_MMIO_DMA5_IRQ_MASK, 0x0000DC00); value32 = ssb_read32(dev->dev, SSB_TMSLOW); value32 |= 0x00100000; ssb_write32(dev->dev, SSB_TMSLOW, value32); b43_write16(dev, B43_MMIO_POWERUP_DELAY, dev->dev->bus->chipco.fast_pwrup_delay); err = 0; b43dbg(dev->wl, "Chip initialized\n"); out: return err; err_gpio_clean: b43_gpio_cleanup(dev); return err; } static void b43_periodic_every60sec(struct b43_wldev *dev) { const struct b43_phy_operations *ops = dev->phy.ops; if (ops->pwork_60sec) ops->pwork_60sec(dev); /* Force check the TX power emission now. */ b43_phy_txpower_check(dev, B43_TXPWR_IGNORE_TIME); } static void b43_periodic_every30sec(struct b43_wldev *dev) { /* Update device statistics. */ b43_calculate_link_quality(dev); } static void b43_periodic_every15sec(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; u16 wdr; if (dev->fw.opensource) { /* Check if the firmware is still alive. * It will reset the watchdog counter to 0 in its idle loop. */ wdr = b43_shm_read16(dev, B43_SHM_SCRATCH, B43_WATCHDOG_REG); if (unlikely(wdr)) { b43err(dev->wl, "Firmware watchdog: The firmware died!\n"); b43_controller_restart(dev, "Firmware watchdog"); return; } else { b43_shm_write16(dev, B43_SHM_SCRATCH, B43_WATCHDOG_REG, 1); } } if (phy->ops->pwork_15sec) phy->ops->pwork_15sec(dev); atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT); wmb(); #if B43_DEBUG if (b43_debug(dev, B43_DBG_VERBOSESTATS)) { unsigned int i; b43dbg(dev->wl, "Stats: %7u IRQs/sec, %7u TX/sec, %7u RX/sec\n", dev->irq_count / 15, dev->tx_count / 15, dev->rx_count / 15); dev->irq_count = 0; dev->tx_count = 0; dev->rx_count = 0; for (i = 0; i < ARRAY_SIZE(dev->irq_bit_count); i++) { if (dev->irq_bit_count[i]) { b43dbg(dev->wl, "Stats: %7u IRQ-%02u/sec (0x%08X)\n", dev->irq_bit_count[i] / 15, i, (1 << i)); dev->irq_bit_count[i] = 0; } } } #endif } static void do_periodic_work(struct b43_wldev *dev) { unsigned int state; state = dev->periodic_state; if (state % 4 == 0) b43_periodic_every60sec(dev); if (state % 2 == 0) b43_periodic_every30sec(dev); b43_periodic_every15sec(dev); } /* Periodic work locking policy: * The whole periodic work handler is protected by * wl->mutex. If another lock is needed somewhere in the * pwork callchain, it's aquired in-place, where it's needed. 
*/ static void b43_periodic_work_handler(struct work_struct *work) { struct b43_wldev *dev = container_of(work, struct b43_wldev, periodic_work.work); struct b43_wl *wl = dev->wl; unsigned long delay; mutex_lock(&wl->mutex); if (unlikely(b43_status(dev) != B43_STAT_STARTED)) goto out; if (b43_debug(dev, B43_DBG_PWORK_STOP)) goto out_requeue; do_periodic_work(dev); dev->periodic_state++; out_requeue: if (b43_debug(dev, B43_DBG_PWORK_FAST)) delay = msecs_to_jiffies(50); else delay = round_jiffies_relative(HZ * 15); ieee80211_queue_delayed_work(wl->hw, &dev->periodic_work, delay); out: mutex_unlock(&wl->mutex); } static void b43_periodic_tasks_setup(struct b43_wldev *dev) { struct delayed_work *work = &dev->periodic_work; dev->periodic_state = 0; INIT_DELAYED_WORK(work, b43_periodic_work_handler); ieee80211_queue_delayed_work(dev->wl->hw, work, 0); } /* Check if communication with the device works correctly. */ static int b43_validate_chipaccess(struct b43_wldev *dev) { u32 v, backup0, backup4; backup0 = b43_shm_read32(dev, B43_SHM_SHARED, 0); backup4 = b43_shm_read32(dev, B43_SHM_SHARED, 4); /* Check for read/write and endianness problems. */ b43_shm_write32(dev, B43_SHM_SHARED, 0, 0x55AAAA55); if (b43_shm_read32(dev, B43_SHM_SHARED, 0) != 0x55AAAA55) goto error; b43_shm_write32(dev, B43_SHM_SHARED, 0, 0xAA5555AA); if (b43_shm_read32(dev, B43_SHM_SHARED, 0) != 0xAA5555AA) goto error; /* Check if unaligned 32bit SHM_SHARED access works properly. * However, don't bail out on failure, because it's noncritical. */ b43_shm_write16(dev, B43_SHM_SHARED, 0, 0x1122); b43_shm_write16(dev, B43_SHM_SHARED, 2, 0x3344); b43_shm_write16(dev, B43_SHM_SHARED, 4, 0x5566); b43_shm_write16(dev, B43_SHM_SHARED, 6, 0x7788); if (b43_shm_read32(dev, B43_SHM_SHARED, 2) != 0x55663344) b43warn(dev->wl, "Unaligned 32bit SHM read access is broken\n"); b43_shm_write32(dev, B43_SHM_SHARED, 2, 0xAABBCCDD); if (b43_shm_read16(dev, B43_SHM_SHARED, 0) != 0x1122 || b43_shm_read16(dev, B43_SHM_SHARED, 2) != 0xCCDD || b43_shm_read16(dev, B43_SHM_SHARED, 4) != 0xAABB || b43_shm_read16(dev, B43_SHM_SHARED, 6) != 0x7788) b43warn(dev->wl, "Unaligned 32bit SHM write access is broken\n"); b43_shm_write32(dev, B43_SHM_SHARED, 0, backup0); b43_shm_write32(dev, B43_SHM_SHARED, 4, backup4); if ((dev->dev->id.revision >= 3) && (dev->dev->id.revision <= 10)) { /* The 32bit register shadows the two 16bit registers * with update sideeffects. Validate this. */ b43_write16(dev, B43_MMIO_TSF_CFP_START, 0xAAAA); b43_write32(dev, B43_MMIO_TSF_CFP_START, 0xCCCCBBBB); if (b43_read16(dev, B43_MMIO_TSF_CFP_START_LOW) != 0xBBBB) goto error; if (b43_read16(dev, B43_MMIO_TSF_CFP_START_HIGH) != 0xCCCC) goto error; } b43_write32(dev, B43_MMIO_TSF_CFP_START, 0); v = b43_read32(dev, B43_MMIO_MACCTL); v |= B43_MACCTL_GMODE; if (v != (B43_MACCTL_GMODE | B43_MACCTL_IHR_ENABLED)) goto error; return 0; error: b43err(dev->wl, "Failed to validate the chipaccess\n"); return -ENODEV; } static void b43_security_init(struct b43_wldev *dev) { dev->ktp = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_KTP); /* KTP is a word address, but we address SHM bytewise. * So multiply by two. */ dev->ktp *= 2; /* Number of RCMTA address slots */ b43_write16(dev, B43_MMIO_RCMTA_COUNT, B43_NR_PAIRWISE_KEYS); /* Clear the key memory. 
*/ b43_clear_keys(dev); } #ifdef CONFIG_B43_HWRNG static int b43_rng_read(struct hwrng *rng, u32 *data) { struct b43_wl *wl = (struct b43_wl *)rng->priv; struct b43_wldev *dev; int count = -ENODEV; mutex_lock(&wl->mutex); dev = wl->current_dev; if (likely(dev && b43_status(dev) >= B43_STAT_INITIALIZED)) { *data = b43_read16(dev, B43_MMIO_RNG); count = sizeof(u16); } mutex_unlock(&wl->mutex); return count; } #endif /* CONFIG_B43_HWRNG */ static void b43_rng_exit(struct b43_wl *wl) { #ifdef CONFIG_B43_HWRNG if (wl->rng_initialized) hwrng_unregister(&wl->rng); #endif /* CONFIG_B43_HWRNG */ } static int b43_rng_init(struct b43_wl *wl) { int err = 0; #ifdef CONFIG_B43_HWRNG snprintf(wl->rng_name, ARRAY_SIZE(wl->rng_name), "%s_%s", KBUILD_MODNAME, wiphy_name(wl->hw->wiphy)); wl->rng.name = wl->rng_name; wl->rng.data_read = b43_rng_read; wl->rng.priv = (unsigned long)wl; wl->rng_initialized = 1; err = hwrng_register(&wl->rng); if (err) { wl->rng_initialized = 0; b43err(wl, "Failed to register the random " "number generator (%d)\n", err); } #endif /* CONFIG_B43_HWRNG */ return err; } static void b43_tx_work(struct work_struct *work) { struct b43_wl *wl = container_of(work, struct b43_wl, tx_work); struct b43_wldev *dev; struct sk_buff *skb; int err = 0; mutex_lock(&wl->mutex); dev = wl->current_dev; if (unlikely(!dev || b43_status(dev) < B43_STAT_STARTED)) { mutex_unlock(&wl->mutex); return; } while (skb_queue_len(&wl->tx_queue)) { skb = skb_dequeue(&wl->tx_queue); if (b43_using_pio_transfers(dev)) err = b43_pio_tx(dev, skb); else err = b43_dma_tx(dev, skb); if (unlikely(err)) dev_kfree_skb(skb); /* Drop it */ } #if B43_DEBUG dev->tx_count++; #endif mutex_unlock(&wl->mutex); } static int b43_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct b43_wl *wl = hw_to_b43_wl(hw); if (unlikely(skb->len < 2 + 2 + 6)) { /* Too short, this can't be a valid frame. */ dev_kfree_skb_any(skb); return NETDEV_TX_OK; } B43_WARN_ON(skb_shinfo(skb)->nr_frags); skb_queue_tail(&wl->tx_queue, skb); ieee80211_queue_work(wl->hw, &wl->tx_work); return NETDEV_TX_OK; } static void b43_qos_params_upload(struct b43_wldev *dev, const struct ieee80211_tx_queue_params *p, u16 shm_offset) { u16 params[B43_NR_QOSPARAMS]; int bslots, tmp; unsigned int i; if (!dev->qos_enabled) return; bslots = b43_read16(dev, B43_MMIO_RNG) & p->cw_min; memset(&params, 0, sizeof(params)); params[B43_QOSPARAM_TXOP] = p->txop * 32; params[B43_QOSPARAM_CWMIN] = p->cw_min; params[B43_QOSPARAM_CWMAX] = p->cw_max; params[B43_QOSPARAM_CWCUR] = p->cw_min; params[B43_QOSPARAM_AIFS] = p->aifs; params[B43_QOSPARAM_BSLOTS] = bslots; params[B43_QOSPARAM_REGGAP] = bslots + p->aifs; for (i = 0; i < ARRAY_SIZE(params); i++) { if (i == B43_QOSPARAM_STATUS) { tmp = b43_shm_read16(dev, B43_SHM_SHARED, shm_offset + (i * 2)); /* Mark the parameters as updated. */ tmp |= 0x100; b43_shm_write16(dev, B43_SHM_SHARED, shm_offset + (i * 2), tmp); } else { b43_shm_write16(dev, B43_SHM_SHARED, shm_offset + (i * 2), params[i]); } } } /* Mapping of mac80211 queue numbers to b43 QoS SHM offsets. */ static const u16 b43_qos_shm_offsets[] = { /* [mac80211-queue-nr] = SHM_OFFSET, */ [0] = B43_QOS_VOICE, [1] = B43_QOS_VIDEO, [2] = B43_QOS_BESTEFFORT, [3] = B43_QOS_BACKGROUND, }; /* Update all QOS parameters in hardware. 
*/ static void b43_qos_upload_all(struct b43_wldev *dev) { struct b43_wl *wl = dev->wl; struct b43_qos_params *params; unsigned int i; if (!dev->qos_enabled) return; BUILD_BUG_ON(ARRAY_SIZE(b43_qos_shm_offsets) != ARRAY_SIZE(wl->qos_params)); b43_mac_suspend(dev); for (i = 0; i < ARRAY_SIZE(wl->qos_params); i++) { params = &(wl->qos_params[i]); b43_qos_params_upload(dev, &(params->p), b43_qos_shm_offsets[i]); } b43_mac_enable(dev); } static void b43_qos_clear(struct b43_wl *wl) { struct b43_qos_params *params; unsigned int i; /* Initialize QoS parameters to sane defaults. */ BUILD_BUG_ON(ARRAY_SIZE(b43_qos_shm_offsets) != ARRAY_SIZE(wl->qos_params)); for (i = 0; i < ARRAY_SIZE(wl->qos_params); i++) { params = &(wl->qos_params[i]); switch (b43_qos_shm_offsets[i]) { case B43_QOS_VOICE: params->p.txop = 0; params->p.aifs = 2; params->p.cw_min = 0x0001; params->p.cw_max = 0x0001; break; case B43_QOS_VIDEO: params->p.txop = 0; params->p.aifs = 2; params->p.cw_min = 0x0001; params->p.cw_max = 0x0001; break; case B43_QOS_BESTEFFORT: params->p.txop = 0; params->p.aifs = 3; params->p.cw_min = 0x0001; params->p.cw_max = 0x03FF; break; case B43_QOS_BACKGROUND: params->p.txop = 0; params->p.aifs = 7; params->p.cw_min = 0x0001; params->p.cw_max = 0x03FF; break; default: B43_WARN_ON(1); } } } /* Initialize the core's QOS capabilities */ static void b43_qos_init(struct b43_wldev *dev) { if (!dev->qos_enabled) { /* Disable QOS support. */ b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_EDCF); b43_write16(dev, B43_MMIO_IFSCTL, b43_read16(dev, B43_MMIO_IFSCTL) & ~B43_MMIO_IFSCTL_USE_EDCF); b43dbg(dev->wl, "QoS disabled\n"); return; } /* Upload the current QOS parameters. */ b43_qos_upload_all(dev); /* Enable QOS support. */ b43_hf_write(dev, b43_hf_read(dev) | B43_HF_EDCF); b43_write16(dev, B43_MMIO_IFSCTL, b43_read16(dev, B43_MMIO_IFSCTL) | B43_MMIO_IFSCTL_USE_EDCF); b43dbg(dev->wl, "QoS enabled\n"); } static int b43_op_conf_tx(struct ieee80211_hw *hw, u16 _queue, const struct ieee80211_tx_queue_params *params) { struct b43_wl *wl = hw_to_b43_wl(hw); struct b43_wldev *dev; unsigned int queue = (unsigned int)_queue; int err = -ENODEV; if (queue >= ARRAY_SIZE(wl->qos_params)) { /* Queue not available or don't support setting * params on this queue. Return success to not * confuse mac80211. 
*/ return 0; } BUILD_BUG_ON(ARRAY_SIZE(b43_qos_shm_offsets) != ARRAY_SIZE(wl->qos_params)); mutex_lock(&wl->mutex); dev = wl->current_dev; if (unlikely(!dev || (b43_status(dev) < B43_STAT_INITIALIZED))) goto out_unlock; memcpy(&(wl->qos_params[queue].p), params, sizeof(*params)); b43_mac_suspend(dev); b43_qos_params_upload(dev, &(wl->qos_params[queue].p), b43_qos_shm_offsets[queue]); b43_mac_enable(dev); err = 0; out_unlock: mutex_unlock(&wl->mutex); return err; } static int b43_op_get_tx_stats(struct ieee80211_hw *hw, struct ieee80211_tx_queue_stats *stats) { struct b43_wl *wl = hw_to_b43_wl(hw); struct b43_wldev *dev; int err = -ENODEV; mutex_lock(&wl->mutex); dev = wl->current_dev; if (dev && b43_status(dev) >= B43_STAT_STARTED) { if (b43_using_pio_transfers(dev)) b43_pio_get_tx_stats(dev, stats); else b43_dma_get_tx_stats(dev, stats); err = 0; } mutex_unlock(&wl->mutex); return err; } static int b43_op_get_stats(struct ieee80211_hw *hw, struct ieee80211_low_level_stats *stats) { struct b43_wl *wl = hw_to_b43_wl(hw); mutex_lock(&wl->mutex); memcpy(stats, &wl->ieee_stats, sizeof(*stats)); mutex_unlock(&wl->mutex); return 0; } static u64 b43_op_get_tsf(struct ieee80211_hw *hw) { struct b43_wl *wl = hw_to_b43_wl(hw); struct b43_wldev *dev; u64 tsf; mutex_lock(&wl->mutex); dev = wl->current_dev; if (dev && (b43_status(dev) >= B43_STAT_INITIALIZED)) b43_tsf_read(dev, &tsf); else tsf = 0; mutex_unlock(&wl->mutex); return tsf; } static void b43_op_set_tsf(struct ieee80211_hw *hw, u64 tsf) { struct b43_wl *wl = hw_to_b43_wl(hw); struct b43_wldev *dev; mutex_lock(&wl->mutex); dev = wl->current_dev; if (dev && (b43_status(dev) >= B43_STAT_INITIALIZED)) b43_tsf_write(dev, tsf); mutex_unlock(&wl->mutex); } static void b43_put_phy_into_reset(struct b43_wldev *dev) { struct ssb_device *sdev = dev->dev; u32 tmslow; tmslow = ssb_read32(sdev, SSB_TMSLOW); tmslow &= ~B43_TMSLOW_GMODE; tmslow |= B43_TMSLOW_PHYRESET; tmslow |= SSB_TMSLOW_FGC; ssb_write32(sdev, SSB_TMSLOW, tmslow); msleep(1); tmslow = ssb_read32(sdev, SSB_TMSLOW); tmslow &= ~SSB_TMSLOW_FGC; tmslow |= B43_TMSLOW_PHYRESET; ssb_write32(sdev, SSB_TMSLOW, tmslow); msleep(1); } static const char *band_to_string(enum ieee80211_band band) { switch (band) { case IEEE80211_BAND_5GHZ: return "5"; case IEEE80211_BAND_2GHZ: return "2.4"; default: break; } B43_WARN_ON(1); return ""; } /* Expects wl->mutex locked */ static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan) { struct b43_wldev *up_dev = NULL; struct b43_wldev *down_dev; struct b43_wldev *d; int err; bool uninitialized_var(gmode); int prev_status; /* Find a device and PHY which supports the band. */ list_for_each_entry(d, &wl->devlist, list) { switch (chan->band) { case IEEE80211_BAND_5GHZ: if (d->phy.supports_5ghz) { up_dev = d; gmode = 0; } break; case IEEE80211_BAND_2GHZ: if (d->phy.supports_2ghz) { up_dev = d; gmode = 1; } break; default: B43_WARN_ON(1); return -EINVAL; } if (up_dev) break; } if (!up_dev) { b43err(wl, "Could not find a device for %s-GHz band operation\n", band_to_string(chan->band)); return -ENODEV; } if ((up_dev == wl->current_dev) && (!!wl->current_dev->phy.gmode == !!gmode)) { /* This device is already running. */ return 0; } b43dbg(wl, "Switching to %s-GHz band\n", band_to_string(chan->band)); down_dev = wl->current_dev; prev_status = b43_status(down_dev); /* Shutdown the currently running core. 
*/ if (prev_status >= B43_STAT_STARTED) down_dev = b43_wireless_core_stop(down_dev); if (prev_status >= B43_STAT_INITIALIZED) b43_wireless_core_exit(down_dev); if (down_dev != up_dev) { /* We switch to a different core, so we put PHY into * RESET on the old core. */ b43_put_phy_into_reset(down_dev); } /* Now start the new core. */ up_dev->phy.gmode = gmode; if (prev_status >= B43_STAT_INITIALIZED) { err = b43_wireless_core_init(up_dev); if (err) { b43err(wl, "Fatal: Could not initialize device for " "selected %s-GHz band\n", band_to_string(chan->band)); goto init_failure; } } if (prev_status >= B43_STAT_STARTED) { err = b43_wireless_core_start(up_dev); if (err) { b43err(wl, "Fatal: Could not start device for " "selected %s-GHz band\n", band_to_string(chan->band)); b43_wireless_core_exit(up_dev); goto init_failure; } } B43_WARN_ON(b43_status(up_dev) != prev_status); wl->current_dev = up_dev; return 0; init_failure: /* Whoops, failed to init the new core. No core is operating now. */ wl->current_dev = NULL; return err; } /* Write the short and long frame retry limit values. */ static void b43_set_retry_limits(struct b43_wldev *dev, unsigned int short_retry, unsigned int long_retry) { /* The retry limit is a 4-bit counter. Enforce this to avoid overflowing * the chip-internal counter. */ short_retry = min(short_retry, (unsigned int)0xF); long_retry = min(long_retry, (unsigned int)0xF); b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_SRLIMIT, short_retry); b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_LRLIMIT, long_retry); } static int b43_op_config(struct ieee80211_hw *hw, u32 changed) { struct b43_wl *wl = hw_to_b43_wl(hw); struct b43_wldev *dev; struct b43_phy *phy; struct ieee80211_conf *conf = &hw->conf; int antenna; int err = 0; mutex_lock(&wl->mutex); /* Switch the band (if necessary). This might change the active core. */ err = b43_switch_band(wl, conf->channel); if (err) goto out_unlock_mutex; dev = wl->current_dev; phy = &dev->phy; b43_mac_suspend(dev); if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) b43_set_retry_limits(dev, conf->short_frame_max_tx_count, conf->long_frame_max_tx_count); changed &= ~IEEE80211_CONF_CHANGE_RETRY_LIMITS; if (!changed) goto out_mac_enable; /* Switch to the requested channel. * The firmware takes care of races with the TX handler. */ if (conf->channel->hw_value != phy->channel) b43_switch_channel(dev, conf->channel->hw_value); dev->wl->radiotap_enabled = !!(conf->flags & IEEE80211_CONF_RADIOTAP); /* Adjust the desired TX power level. */ if (conf->power_level != 0) { if (conf->power_level != phy->desired_txpower) { phy->desired_txpower = conf->power_level; b43_phy_txpower_check(dev, B43_TXPWR_IGNORE_TIME | B43_TXPWR_IGNORE_TSSI); } } /* Antennas for RX and management frame TX. */ antenna = B43_ANTENNA_DEFAULT; b43_mgmtframe_txantenna(dev, antenna); antenna = B43_ANTENNA_DEFAULT; if (phy->ops->set_rx_antenna) phy->ops->set_rx_antenna(dev, antenna); if (wl->radio_enabled != phy->radio_on) { if (wl->radio_enabled) { b43_software_rfkill(dev, false); b43info(dev->wl, "Radio turned on by software\n"); if (!dev->radio_hw_enable) { b43info(dev->wl, "The hardware RF-kill button " "still turns the radio physically off. 
" "Press the button to turn it on.\n"); } } else { b43_software_rfkill(dev, true); b43info(dev->wl, "Radio turned off by software\n"); } } out_mac_enable: b43_mac_enable(dev); out_unlock_mutex: mutex_unlock(&wl->mutex); return err; } static void b43_update_basic_rates(struct b43_wldev *dev, u32 brates) { struct ieee80211_supported_band *sband = dev->wl->hw->wiphy->bands[b43_current_band(dev->wl)]; struct ieee80211_rate *rate; int i; u16 basic, direct, offset, basic_offset, rateptr; for (i = 0; i < sband->n_bitrates; i++) { rate = &sband->bitrates[i]; if (b43_is_cck_rate(rate->hw_value)) { direct = B43_SHM_SH_CCKDIRECT; basic = B43_SHM_SH_CCKBASIC; offset = b43_plcp_get_ratecode_cck(rate->hw_value); offset &= 0xF; } else { direct = B43_SHM_SH_OFDMDIRECT; basic = B43_SHM_SH_OFDMBASIC; offset = b43_plcp_get_ratecode_ofdm(rate->hw_value); offset &= 0xF; } rate = ieee80211_get_response_rate(sband, brates, rate->bitrate); if (b43_is_cck_rate(rate->hw_value)) { basic_offset = b43_plcp_get_ratecode_cck(rate->hw_value); basic_offset &= 0xF; } else { basic_offset = b43_plcp_get_ratecode_ofdm(rate->hw_value); basic_offset &= 0xF; } /* * Get the pointer that we need to point to * from the direct map */ rateptr = b43_shm_read16(dev, B43_SHM_SHARED, direct + 2 * basic_offset); /* and write it to the basic map */ b43_shm_write16(dev, B43_SHM_SHARED, basic + 2 * offset, rateptr); } } static void b43_op_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *conf, u32 changed) { struct b43_wl *wl = hw_to_b43_wl(hw); struct b43_wldev *dev; mutex_lock(&wl->mutex); dev = wl->current_dev; if (!dev || b43_status(dev) < B43_STAT_STARTED) goto out_unlock_mutex; B43_WARN_ON(wl->vif != vif); if (changed & BSS_CHANGED_BSSID) { if (conf->bssid) memcpy(wl->bssid, conf->bssid, ETH_ALEN); else memset(wl->bssid, 0, ETH_ALEN); } if (b43_status(dev) >= B43_STAT_INITIALIZED) { if (changed & BSS_CHANGED_BEACON && (b43_is_mode(wl, NL80211_IFTYPE_AP) || b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT) || b43_is_mode(wl, NL80211_IFTYPE_ADHOC))) b43_update_templates(wl); if (changed & BSS_CHANGED_BSSID) b43_write_mac_bssid_templates(dev); } b43_mac_suspend(dev); /* Update templates for AP/mesh mode. */ if (changed & BSS_CHANGED_BEACON_INT && (b43_is_mode(wl, NL80211_IFTYPE_AP) || b43_is_mode(wl, NL80211_IFTYPE_MESH_POINT) || b43_is_mode(wl, NL80211_IFTYPE_ADHOC))) b43_set_beacon_int(dev, conf->beacon_int); if (changed & BSS_CHANGED_BASIC_RATES) b43_update_basic_rates(dev, conf->basic_rates); if (changed & BSS_CHANGED_ERP_SLOT) { if (conf->use_short_slot) b43_short_slot_timing_enable(dev); else b43_short_slot_timing_disable(dev); } b43_mac_enable(dev); out_unlock_mutex: mutex_unlock(&wl->mutex); } static int b43_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) { struct b43_wl *wl = hw_to_b43_wl(hw); struct b43_wldev *dev; u8 algorithm; u8 index; int err; static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; if (modparam_nohwcrypt) return -ENOSPC; /* User disabled HW-crypto */ mutex_lock(&wl->mutex); dev = wl->current_dev; err = -ENODEV; if (!dev || b43_status(dev) < B43_STAT_INITIALIZED) goto out_unlock; if (dev->fw.pcm_request_failed || !dev->hwcrypto_enabled) { /* We don't have firmware for the crypto engine. * Must use software-crypto. 
*/ err = -EOPNOTSUPP; goto out_unlock; } err = -EINVAL; switch (key->alg) { case ALG_WEP: if (key->keylen == WLAN_KEY_LEN_WEP40) algorithm = B43_SEC_ALGO_WEP40; else algorithm = B43_SEC_ALGO_WEP104; break; case ALG_TKIP: algorithm = B43_SEC_ALGO_TKIP; break; case ALG_CCMP: algorithm = B43_SEC_ALGO_AES; break; default: B43_WARN_ON(1); goto out_unlock; } index = (u8) (key->keyidx); if (index > 3) goto out_unlock; switch (cmd) { case SET_KEY: if (algorithm == B43_SEC_ALGO_TKIP && (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE) || !modparam_hwtkip)) { /* We support only pairwise key */ err = -EOPNOTSUPP; goto out_unlock; } if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) { if (WARN_ON(!sta)) { err = -EOPNOTSUPP; goto out_unlock; } /* Pairwise key with an assigned MAC address. */ err = b43_key_write(dev, -1, algorithm, key->key, key->keylen, sta->addr, key); } else { /* Group key */ err = b43_key_write(dev, index, algorithm, key->key, key->keylen, NULL, key); } if (err) goto out_unlock; if (algorithm == B43_SEC_ALGO_WEP40 || algorithm == B43_SEC_ALGO_WEP104) { b43_hf_write(dev, b43_hf_read(dev) | B43_HF_USEDEFKEYS); } else { b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_USEDEFKEYS); } key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; if (algorithm == B43_SEC_ALGO_TKIP) key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; break; case DISABLE_KEY: { err = b43_key_clear(dev, key->hw_key_idx); if (err) goto out_unlock; break; } default: B43_WARN_ON(1); } out_unlock: if (!err) { b43dbg(wl, "%s hardware based encryption for keyidx: %d, " "mac: %pM\n", cmd == SET_KEY ? "Using" : "Disabling", key->keyidx, sta ? sta->addr : bcast_addr); b43_dump_keymemory(dev); } mutex_unlock(&wl->mutex); return err; } static void b43_op_configure_filter(struct ieee80211_hw *hw, unsigned int changed, unsigned int *fflags, u64 multicast) { struct b43_wl *wl = hw_to_b43_wl(hw); struct b43_wldev *dev; mutex_lock(&wl->mutex); dev = wl->current_dev; if (!dev) { *fflags = 0; goto out_unlock; } *fflags &= FIF_PROMISC_IN_BSS | FIF_ALLMULTI | FIF_FCSFAIL | FIF_PLCPFAIL | FIF_CONTROL | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC; changed &= FIF_PROMISC_IN_BSS | FIF_ALLMULTI | FIF_FCSFAIL | FIF_PLCPFAIL | FIF_CONTROL | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC; wl->filter_flags = *fflags; if (changed && b43_status(dev) >= B43_STAT_INITIALIZED) b43_adjust_opmode(dev); out_unlock: mutex_unlock(&wl->mutex); } /* Locking: wl->mutex * Returns the current dev. This might be different from the passed in dev, * because the core might be gone away while we unlocked the mutex. */ static struct b43_wldev * b43_wireless_core_stop(struct b43_wldev *dev) { struct b43_wl *wl = dev->wl; struct b43_wldev *orig_dev; u32 mask; redo: if (!dev || b43_status(dev) < B43_STAT_STARTED) return dev; /* Cancel work. Unlock to avoid deadlocks. */ mutex_unlock(&wl->mutex); cancel_delayed_work_sync(&dev->periodic_work); cancel_work_sync(&wl->tx_work); mutex_lock(&wl->mutex); dev = wl->current_dev; if (!dev || b43_status(dev) < B43_STAT_STARTED) { /* Whoops, aliens ate up the device while we were unlocked. */ return dev; } /* Disable interrupts on the device. */ b43_set_status(dev, B43_STAT_INITIALIZED); if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) { /* wl->mutex is locked. That is enough. 
*/ b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, 0); b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); /* Flush */ } else { spin_lock_irq(&wl->hardirq_lock); b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, 0); b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); /* Flush */ spin_unlock_irq(&wl->hardirq_lock); } /* Synchronize and free the interrupt handlers. Unlock to avoid deadlocks. */ orig_dev = dev; mutex_unlock(&wl->mutex); if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) { b43_sdio_free_irq(dev); } else { synchronize_irq(dev->dev->irq); free_irq(dev->dev->irq, dev); } mutex_lock(&wl->mutex); dev = wl->current_dev; if (!dev) return dev; if (dev != orig_dev) { if (b43_status(dev) >= B43_STAT_STARTED) goto redo; return dev; } mask = b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); B43_WARN_ON(mask != 0xFFFFFFFF && mask); /* Drain the TX queue */ while (skb_queue_len(&wl->tx_queue)) dev_kfree_skb(skb_dequeue(&wl->tx_queue)); b43_mac_suspend(dev); b43_leds_exit(dev); b43dbg(wl, "Wireless interface stopped\n"); return dev; } /* Locking: wl->mutex */ static int b43_wireless_core_start(struct b43_wldev *dev) { int err; B43_WARN_ON(b43_status(dev) != B43_STAT_INITIALIZED); drain_txstatus_queue(dev); if (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) { err = b43_sdio_request_irq(dev, b43_sdio_interrupt_handler); if (err) { b43err(dev->wl, "Cannot request SDIO IRQ\n"); goto out; } } else { err = request_threaded_irq(dev->dev->irq, b43_interrupt_handler, b43_interrupt_thread_handler, IRQF_SHARED, KBUILD_MODNAME, dev); if (err) { b43err(dev->wl, "Cannot request IRQ-%d\n", dev->dev->irq); goto out; } } /* We are ready to run. */ b43_set_status(dev, B43_STAT_STARTED); /* Start data flow (TX/RX). */ b43_mac_enable(dev); b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, dev->irq_mask); /* Start maintainance work */ b43_periodic_tasks_setup(dev); b43_leds_init(dev); b43dbg(dev->wl, "Wireless interface started\n"); out: return err; } /* Get PHY and RADIO versioning numbers */ static int b43_phy_versioning(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; u32 tmp; u8 analog_type; u8 phy_type; u8 phy_rev; u16 radio_manuf; u16 radio_ver; u16 radio_rev; int unsupported = 0; /* Get PHY versioning */ tmp = b43_read16(dev, B43_MMIO_PHY_VER); analog_type = (tmp & B43_PHYVER_ANALOG) >> B43_PHYVER_ANALOG_SHIFT; phy_type = (tmp & B43_PHYVER_TYPE) >> B43_PHYVER_TYPE_SHIFT; phy_rev = (tmp & B43_PHYVER_VERSION); switch (phy_type) { case B43_PHYTYPE_A: if (phy_rev >= 4) unsupported = 1; break; case B43_PHYTYPE_B: if (phy_rev != 2 && phy_rev != 4 && phy_rev != 6 && phy_rev != 7) unsupported = 1; break; case B43_PHYTYPE_G: if (phy_rev > 9) unsupported = 1; break; #ifdef CONFIG_B43_NPHY case B43_PHYTYPE_N: if (phy_rev > 4) unsupported = 1; break; #endif #ifdef CONFIG_B43_PHY_LP case B43_PHYTYPE_LP: if (phy_rev > 2) unsupported = 1; break; #endif default: unsupported = 1; }; if (unsupported) { b43err(dev->wl, "FOUND UNSUPPORTED PHY " "(Analog %u, Type %u, Revision %u)\n", analog_type, phy_type, phy_rev); return -EOPNOTSUPP; } b43dbg(dev->wl, "Found PHY: Analog %u, Type %u, Revision %u\n", analog_type, phy_type, phy_rev); /* Get RADIO versioning */ if (dev->dev->bus->chip_id == 0x4317) { if (dev->dev->bus->chip_rev == 0) tmp = 0x3205017F; else if (dev->dev->bus->chip_rev == 1) tmp = 0x4205017F; else tmp = 0x5205017F; } else { b43_write16(dev, B43_MMIO_RADIO_CONTROL, B43_RADIOCTL_ID); tmp = b43_read16(dev, B43_MMIO_RADIO_DATA_LOW); b43_write16(dev, B43_MMIO_RADIO_CONTROL, B43_RADIOCTL_ID); tmp |= (u32)b43_read16(dev, B43_MMIO_RADIO_DATA_HIGH) << 16; } radio_manuf = (tmp & 
0x00000FFF); radio_ver = (tmp & 0x0FFFF000) >> 12; radio_rev = (tmp & 0xF0000000) >> 28; if (radio_manuf != 0x17F /* Broadcom */) unsupported = 1; switch (phy_type) { case B43_PHYTYPE_A: if (radio_ver != 0x2060) unsupported = 1; if (radio_rev != 1) unsupported = 1; if (radio_manuf != 0x17F) unsupported = 1; break; case B43_PHYTYPE_B: if ((radio_ver & 0xFFF0) != 0x2050) unsupported = 1; break; case B43_PHYTYPE_G: if (radio_ver != 0x2050) unsupported = 1; break; case B43_PHYTYPE_N: if (radio_ver != 0x2055 && radio_ver != 0x2056) unsupported = 1; break; case B43_PHYTYPE_LP: if (radio_ver != 0x2062 && radio_ver != 0x2063) unsupported = 1; break; default: B43_WARN_ON(1); } if (unsupported) { b43err(dev->wl, "FOUND UNSUPPORTED RADIO " "(Manuf 0x%X, Version 0x%X, Revision %u)\n", radio_manuf, radio_ver, radio_rev); return -EOPNOTSUPP; } b43dbg(dev->wl, "Found Radio: Manuf 0x%X, Version 0x%X, Revision %u\n", radio_manuf, radio_ver, radio_rev); phy->radio_manuf = radio_manuf; phy->radio_ver = radio_ver; phy->radio_rev = radio_rev; phy->analog = analog_type; phy->type = phy_type; phy->rev = phy_rev; return 0; } static void setup_struct_phy_for_init(struct b43_wldev *dev, struct b43_phy *phy) { phy->hardware_power_control = !!modparam_hwpctl; phy->next_txpwr_check_time = jiffies; /* PHY TX errors counter. */ atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT); #if B43_DEBUG phy->phy_locked = 0; phy->radio_locked = 0; #endif } static void setup_struct_wldev_for_init(struct b43_wldev *dev) { dev->dfq_valid = 0; /* Assume the radio is enabled. If it's not enabled, the state will * immediately get fixed on the first periodic work run. */ dev->radio_hw_enable = 1; /* Stats */ memset(&dev->stats, 0, sizeof(dev->stats)); setup_struct_phy_for_init(dev, &dev->phy); /* IRQ related flags */ dev->irq_reason = 0; memset(dev->dma_reason, 0, sizeof(dev->dma_reason)); dev->irq_mask = B43_IRQ_MASKTEMPLATE; if (b43_modparam_verbose < B43_VERBOSITY_DEBUG) dev->irq_mask &= ~B43_IRQ_PHY_TXERR; dev->mac_suspended = 1; /* Noise calculation context */ memset(&dev->noisecalc, 0, sizeof(dev->noisecalc)); } static void b43_bluetooth_coext_enable(struct b43_wldev *dev) { struct ssb_sprom *sprom = &dev->dev->bus->sprom; u64 hf; if (!modparam_btcoex) return; if (!(sprom->boardflags_lo & B43_BFL_BTCOEXIST)) return; if (dev->phy.type != B43_PHYTYPE_B && !dev->phy.gmode) return; hf = b43_hf_read(dev); if (sprom->boardflags_lo & B43_BFL_BTCMOD) hf |= B43_HF_BTCOEXALT; else hf |= B43_HF_BTCOEX; b43_hf_write(dev, hf); } static void b43_bluetooth_coext_disable(struct b43_wldev *dev) { if (!modparam_btcoex) return; //TODO } static void b43_imcfglo_timeouts_workaround(struct b43_wldev *dev) { #ifdef CONFIG_SSB_DRIVER_PCICORE struct ssb_bus *bus = dev->dev->bus; u32 tmp; if (bus->pcicore.dev && bus->pcicore.dev->id.coreid == SSB_DEV_PCI && bus->pcicore.dev->id.revision <= 5) { /* IMCFGLO timeouts workaround. */ tmp = ssb_read32(dev->dev, SSB_IMCFGLO); switch (bus->bustype) { case SSB_BUSTYPE_PCI: case SSB_BUSTYPE_PCMCIA: tmp &= ~SSB_IMCFGLO_REQTO; tmp &= ~SSB_IMCFGLO_SERTO; tmp |= 0x32; break; case SSB_BUSTYPE_SSB: tmp &= ~SSB_IMCFGLO_REQTO; tmp &= ~SSB_IMCFGLO_SERTO; tmp |= 0x53; break; default: break; } ssb_write32(dev->dev, SSB_IMCFGLO, tmp); } #endif /* CONFIG_SSB_DRIVER_PCICORE */ } static void b43_set_synth_pu_delay(struct b43_wldev *dev, bool idle) { u16 pu_delay; /* The time value is in microseconds. 
*/ if (dev->phy.type == B43_PHYTYPE_A) pu_delay = 3700; else pu_delay = 1050; if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC) || idle) pu_delay = 500; if ((dev->phy.radio_ver == 0x2050) && (dev->phy.radio_rev == 8)) pu_delay = max(pu_delay, (u16)2400); b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_SPUWKUP, pu_delay); } /* Set the TSF CFP pre-TargetBeaconTransmissionTime. */ static void b43_set_pretbtt(struct b43_wldev *dev) { u16 pretbtt; /* The time value is in microseconds. */ if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC)) { pretbtt = 2; } else { if (dev->phy.type == B43_PHYTYPE_A) pretbtt = 120; else pretbtt = 250; } b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRETBTT, pretbtt); b43_write16(dev, B43_MMIO_TSF_CFP_PRETBTT, pretbtt); } /* Shutdown a wireless core */ /* Locking: wl->mutex */ static void b43_wireless_core_exit(struct b43_wldev *dev) { u32 macctl; B43_WARN_ON(dev && b43_status(dev) > B43_STAT_INITIALIZED); if (!dev || b43_status(dev) != B43_STAT_INITIALIZED) return; b43_set_status(dev, B43_STAT_UNINIT); /* Stop the microcode PSM. */ macctl = b43_read32(dev, B43_MMIO_MACCTL); macctl &= ~B43_MACCTL_PSM_RUN; macctl |= B43_MACCTL_PSM_JMP0; b43_write32(dev, B43_MMIO_MACCTL, macctl); b43_dma_free(dev); b43_pio_free(dev); b43_chip_exit(dev); dev->phy.ops->switch_analog(dev, 0); if (dev->wl->current_beacon) { dev_kfree_skb_any(dev->wl->current_beacon); dev->wl->current_beacon = NULL; } ssb_device_disable(dev->dev, 0); ssb_bus_may_powerdown(dev->dev->bus); } /* Initialize a wireless core */ static int b43_wireless_core_init(struct b43_wldev *dev) { struct ssb_bus *bus = dev->dev->bus; struct ssb_sprom *sprom = &bus->sprom; struct b43_phy *phy = &dev->phy; int err; u64 hf; u32 tmp; B43_WARN_ON(b43_status(dev) != B43_STAT_UNINIT); err = ssb_bus_powerup(bus, 0); if (err) goto out; if (!ssb_device_is_enabled(dev->dev)) { tmp = phy->gmode ? B43_TMSLOW_GMODE : 0; b43_wireless_core_reset(dev, tmp); } /* Reset all data structures. */ setup_struct_wldev_for_init(dev); phy->ops->prepare_structs(dev); /* Enable IRQ routing to this device. */ ssb_pcicore_dev_irqvecs_enable(&bus->pcicore, dev->dev); b43_imcfglo_timeouts_workaround(dev); b43_bluetooth_coext_disable(dev); if (phy->ops->prepare_hardware) { err = phy->ops->prepare_hardware(dev); if (err) goto err_busdown; } err = b43_chip_init(dev); if (err) goto err_busdown; b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_WLCOREREV, dev->dev->id.revision); hf = b43_hf_read(dev); if (phy->type == B43_PHYTYPE_G) { hf |= B43_HF_SYMW; if (phy->rev == 1) hf |= B43_HF_GDCW; if (sprom->boardflags_lo & B43_BFL_PACTRL) hf |= B43_HF_OFDMPABOOST; } if (phy->radio_ver == 0x2050) { if (phy->radio_rev == 6) hf |= B43_HF_4318TSSI; if (phy->radio_rev < 6) hf |= B43_HF_VCORECALC; } if (sprom->boardflags_lo & B43_BFL_XTAL_NOSLOW) hf |= B43_HF_DSCRQ; /* Disable slowclock requests from ucode. */ #ifdef CONFIG_SSB_DRIVER_PCICORE if ((bus->bustype == SSB_BUSTYPE_PCI) && (bus->pcicore.dev->id.revision <= 10)) hf |= B43_HF_PCISCW; /* PCI slow clock workaround. */ #endif hf &= ~B43_HF_SKCFPUP; b43_hf_write(dev, hf); b43_set_retry_limits(dev, B43_DEFAULT_SHORT_RETRY_LIMIT, B43_DEFAULT_LONG_RETRY_LIMIT); b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_SFFBLIM, 3); b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_LFFBLIM, 2); /* Disable sending probe responses from firmware. * Setting the MaxTime to one usec will always trigger * a timeout, so we never send any probe resp. * A timeout of zero is infinite. 
*/ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRMAXTIME, 1); b43_rate_memory_init(dev); b43_set_phytxctl_defaults(dev); /* Minimum Contention Window */ if (phy->type == B43_PHYTYPE_B) { b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MINCONT, 0x1F); } else { b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MINCONT, 0xF); } /* Maximum Contention Window */ b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MAXCONT, 0x3FF); if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) || (dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) || B43_FORCE_PIO) { dev->__using_pio_transfers = 1; err = b43_pio_init(dev); } else { dev->__using_pio_transfers = 0; err = b43_dma_init(dev); } if (err) goto err_chip_exit; b43_qos_init(dev); b43_set_synth_pu_delay(dev, 1); b43_bluetooth_coext_enable(dev); ssb_bus_powerup(bus, !(sprom->boardflags_lo & B43_BFL_XTAL_NOSLOW)); b43_upload_card_macaddress(dev); b43_security_init(dev); ieee80211_wake_queues(dev->wl->hw); ieee80211_wake_queues(dev->wl->hw); b43_set_status(dev, B43_STAT_INITIALIZED); out: return err; err_chip_exit: b43_chip_exit(dev); err_busdown: ssb_bus_may_powerdown(bus); B43_WARN_ON(b43_status(dev) != B43_STAT_UNINIT); return err; } static int b43_op_add_interface(struct ieee80211_hw *hw, struct ieee80211_if_init_conf *conf) { struct b43_wl *wl = hw_to_b43_wl(hw); struct b43_wldev *dev; int err = -EOPNOTSUPP; /* TODO: allow WDS/AP devices to coexist */ if (conf->type != NL80211_IFTYPE_AP && conf->type != NL80211_IFTYPE_MESH_POINT && conf->type != NL80211_IFTYPE_STATION && conf->type != NL80211_IFTYPE_WDS && conf->type != NL80211_IFTYPE_ADHOC) return -EOPNOTSUPP; mutex_lock(&wl->mutex); if (wl->operating) goto out_mutex_unlock; b43dbg(wl, "Adding Interface type %d\n", conf->type); dev = wl->current_dev; wl->operating = 1; wl->vif = conf->vif; wl->if_type = conf->type; memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN); b43_adjust_opmode(dev); b43_set_pretbtt(dev); b43_set_synth_pu_delay(dev, 0); b43_upload_card_macaddress(dev); err = 0; out_mutex_unlock: mutex_unlock(&wl->mutex); return err; } static void b43_op_remove_interface(struct ieee80211_hw *hw, struct ieee80211_if_init_conf *conf) { struct b43_wl *wl = hw_to_b43_wl(hw); struct b43_wldev *dev = wl->current_dev; b43dbg(wl, "Removing Interface type %d\n", conf->type); mutex_lock(&wl->mutex); B43_WARN_ON(!wl->operating); B43_WARN_ON(wl->vif != conf->vif); wl->vif = NULL; wl->operating = 0; b43_adjust_opmode(dev); memset(wl->mac_addr, 0, ETH_ALEN); b43_upload_card_macaddress(dev); mutex_unlock(&wl->mutex); } static int b43_op_start(struct ieee80211_hw *hw) { struct b43_wl *wl = hw_to_b43_wl(hw); struct b43_wldev *dev = wl->current_dev; int did_init = 0; int err = 0; /* Kill all old instance specific information to make sure * the card won't use it in the short timeframe between start * and mac80211 reconfiguring it. 
*/ memset(wl->bssid, 0, ETH_ALEN); memset(wl->mac_addr, 0, ETH_ALEN); wl->filter_flags = 0; wl->radiotap_enabled = 0; b43_qos_clear(wl); wl->beacon0_uploaded = 0; wl->beacon1_uploaded = 0; wl->beacon_templates_virgin = 1; wl->radio_enabled = 1; mutex_lock(&wl->mutex); if (b43_status(dev) < B43_STAT_INITIALIZED) { err = b43_wireless_core_init(dev); if (err) goto out_mutex_unlock; did_init = 1; } if (b43_status(dev) < B43_STAT_STARTED) { err = b43_wireless_core_start(dev); if (err) { if (did_init) b43_wireless_core_exit(dev); goto out_mutex_unlock; } } /* XXX: only do if device doesn't support rfkill irq */ wiphy_rfkill_start_polling(hw->wiphy); out_mutex_unlock: mutex_unlock(&wl->mutex); return err; } static void b43_op_stop(struct ieee80211_hw *hw) { struct b43_wl *wl = hw_to_b43_wl(hw); struct b43_wldev *dev = wl->current_dev; cancel_work_sync(&(wl->beacon_update_trigger)); mutex_lock(&wl->mutex); if (b43_status(dev) >= B43_STAT_STARTED) { dev = b43_wireless_core_stop(dev); if (!dev) goto out_unlock; } b43_wireless_core_exit(dev); wl->radio_enabled = 0; out_unlock: mutex_unlock(&wl->mutex); cancel_work_sync(&(wl->txpower_adjust_work)); } static int b43_op_beacon_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set) { struct b43_wl *wl = hw_to_b43_wl(hw); /* FIXME: add locking */ b43_update_templates(wl); return 0; } static void b43_op_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum sta_notify_cmd notify_cmd, struct ieee80211_sta *sta) { struct b43_wl *wl = hw_to_b43_wl(hw); B43_WARN_ON(!vif || wl->vif != vif); } static void b43_op_sw_scan_start_notifier(struct ieee80211_hw *hw) { struct b43_wl *wl = hw_to_b43_wl(hw); struct b43_wldev *dev; mutex_lock(&wl->mutex); dev = wl->current_dev; if (dev && (b43_status(dev) >= B43_STAT_INITIALIZED)) { /* Disable CFP update during scan on other channels. */ b43_hf_write(dev, b43_hf_read(dev) | B43_HF_SKCFPUP); } mutex_unlock(&wl->mutex); } static void b43_op_sw_scan_complete_notifier(struct ieee80211_hw *hw) { struct b43_wl *wl = hw_to_b43_wl(hw); struct b43_wldev *dev; mutex_lock(&wl->mutex); dev = wl->current_dev; if (dev && (b43_status(dev) >= B43_STAT_INITIALIZED)) { /* Re-enable CFP update. */ b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_SKCFPUP); } mutex_unlock(&wl->mutex); } static const struct ieee80211_ops b43_hw_ops = { .tx = b43_op_tx, .conf_tx = b43_op_conf_tx, .add_interface = b43_op_add_interface, .remove_interface = b43_op_remove_interface, .config = b43_op_config, .bss_info_changed = b43_op_bss_info_changed, .configure_filter = b43_op_configure_filter, .set_key = b43_op_set_key, .update_tkip_key = b43_op_update_tkip_key, .get_stats = b43_op_get_stats, .get_tx_stats = b43_op_get_tx_stats, .get_tsf = b43_op_get_tsf, .set_tsf = b43_op_set_tsf, .start = b43_op_start, .stop = b43_op_stop, .set_tim = b43_op_beacon_set_tim, .sta_notify = b43_op_sta_notify, .sw_scan_start = b43_op_sw_scan_start_notifier, .sw_scan_complete = b43_op_sw_scan_complete_notifier, .rfkill_poll = b43_rfkill_poll, }; /* Hard-reset the chip. Do not call this directly. * Use b43_controller_restart() */ static void b43_chip_reset(struct work_struct *work) { struct b43_wldev *dev = container_of(work, struct b43_wldev, restart_work); struct b43_wl *wl = dev->wl; int err = 0; int prev_status; mutex_lock(&wl->mutex); prev_status = b43_status(dev); /* Bring the device down... 
*/ if (prev_status >= B43_STAT_STARTED) { dev = b43_wireless_core_stop(dev); if (!dev) { err = -ENODEV; goto out; } } if (prev_status >= B43_STAT_INITIALIZED) b43_wireless_core_exit(dev); /* ...and up again. */ if (prev_status >= B43_STAT_INITIALIZED) { err = b43_wireless_core_init(dev); if (err) goto out; } if (prev_status >= B43_STAT_STARTED) { err = b43_wireless_core_start(dev); if (err) { b43_wireless_core_exit(dev); goto out; } } out: if (err) wl->current_dev = NULL; /* Failed to init the dev. */ mutex_unlock(&wl->mutex); if (err) b43err(wl, "Controller restart FAILED\n"); else b43info(wl, "Controller restarted\n"); } static int b43_setup_bands(struct b43_wldev *dev, bool have_2ghz_phy, bool have_5ghz_phy) { struct ieee80211_hw *hw = dev->wl->hw; if (have_2ghz_phy) hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &b43_band_2GHz; if (dev->phy.type == B43_PHYTYPE_N) { if (have_5ghz_phy) hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &b43_band_5GHz_nphy; } else { if (have_5ghz_phy) hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &b43_band_5GHz_aphy; } dev->phy.supports_2ghz = have_2ghz_phy; dev->phy.supports_5ghz = have_5ghz_phy; return 0; } static void b43_wireless_core_detach(struct b43_wldev *dev) { /* We release firmware that late to not be required to re-request * is all the time when we reinit the core. */ b43_release_firmware(dev); b43_phy_free(dev); } static int b43_wireless_core_attach(struct b43_wldev *dev) { struct b43_wl *wl = dev->wl; struct ssb_bus *bus = dev->dev->bus; struct pci_dev *pdev = bus->host_pci; int err; bool have_2ghz_phy = 0, have_5ghz_phy = 0; u32 tmp; /* Do NOT do any device initialization here. * Do it in wireless_core_init() instead. * This function is for gathering basic information about the HW, only. * Also some structs may be set up here. But most likely you want to have * that in core_init(), too. */ err = ssb_bus_powerup(bus, 0); if (err) { b43err(wl, "Bus powerup failed\n"); goto out; } /* Get the PHY type. */ if (dev->dev->id.revision >= 5) { u32 tmshigh; tmshigh = ssb_read32(dev->dev, SSB_TMSHIGH); have_2ghz_phy = !!(tmshigh & B43_TMSHIGH_HAVE_2GHZ_PHY); have_5ghz_phy = !!(tmshigh & B43_TMSHIGH_HAVE_5GHZ_PHY); } else B43_WARN_ON(1); dev->phy.gmode = have_2ghz_phy; dev->phy.radio_on = 1; tmp = dev->phy.gmode ? B43_TMSLOW_GMODE : 0; b43_wireless_core_reset(dev, tmp); err = b43_phy_versioning(dev); if (err) goto err_powerdown; /* Check if this device supports multiband. */ if (!pdev || (pdev->device != 0x4312 && pdev->device != 0x4319 && pdev->device != 0x4324)) { /* No multiband support. */ have_2ghz_phy = 0; have_5ghz_phy = 0; switch (dev->phy.type) { case B43_PHYTYPE_A: have_5ghz_phy = 1; break; case B43_PHYTYPE_LP: //FIXME not always! #if 0 //FIXME enabling 5GHz causes a NULL pointer dereference have_5ghz_phy = 1; #endif case B43_PHYTYPE_G: case B43_PHYTYPE_N: have_2ghz_phy = 1; break; default: B43_WARN_ON(1); } } if (dev->phy.type == B43_PHYTYPE_A) { /* FIXME */ b43err(wl, "IEEE 802.11a devices are unsupported\n"); err = -EOPNOTSUPP; goto err_powerdown; } if (1 /* disable A-PHY */) { /* FIXME: For now we disable the A-PHY on multi-PHY devices. */ if (dev->phy.type != B43_PHYTYPE_N && dev->phy.type != B43_PHYTYPE_LP) { have_2ghz_phy = 1; have_5ghz_phy = 0; } } err = b43_phy_allocate(dev); if (err) goto err_powerdown; dev->phy.gmode = have_2ghz_phy; tmp = dev->phy.gmode ? 
B43_TMSLOW_GMODE : 0; b43_wireless_core_reset(dev, tmp); err = b43_validate_chipaccess(dev); if (err) goto err_phy_free; err = b43_setup_bands(dev, have_2ghz_phy, have_5ghz_phy); if (err) goto err_phy_free; /* Now set some default "current_dev" */ if (!wl->current_dev) wl->current_dev = dev; INIT_WORK(&dev->restart_work, b43_chip_reset); dev->phy.ops->switch_analog(dev, 0); ssb_device_disable(dev->dev, 0); ssb_bus_may_powerdown(bus); out: return err; err_phy_free: b43_phy_free(dev); err_powerdown: ssb_bus_may_powerdown(bus); return err; } static void b43_one_core_detach(struct ssb_device *dev) { struct b43_wldev *wldev; struct b43_wl *wl; /* Do not cancel ieee80211-workqueue based work here. * See comment in b43_remove(). */ wldev = ssb_get_drvdata(dev); wl = wldev->wl; b43_debugfs_remove_device(wldev); b43_wireless_core_detach(wldev); list_del(&wldev->list); wl->nr_devs--; ssb_set_drvdata(dev, NULL); kfree(wldev); } static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl) { struct b43_wldev *wldev; struct pci_dev *pdev; int err = -ENOMEM; if (!list_empty(&wl->devlist)) { /* We are not the first core on this chip. */ pdev = dev->bus->host_pci; /* Only special chips support more than one wireless * core, although some of the other chips have more than * one wireless core as well. Check for this and * bail out early. */ if (!pdev || ((pdev->device != 0x4321) && (pdev->device != 0x4313) && (pdev->device != 0x431A))) { b43dbg(wl, "Ignoring unconnected 802.11 core\n"); return -ENODEV; } } wldev = kzalloc(sizeof(*wldev), GFP_KERNEL); if (!wldev) goto out; wldev->dev = dev; wldev->wl = wl; b43_set_status(wldev, B43_STAT_UNINIT); wldev->bad_frames_preempt = modparam_bad_frames_preempt; INIT_LIST_HEAD(&wldev->list); err = b43_wireless_core_attach(wldev); if (err) goto err_kfree_wldev; list_add(&wldev->list, &wl->devlist); wl->nr_devs++; ssb_set_drvdata(dev, wldev); b43_debugfs_add_device(wldev); out: return err; err_kfree_wldev: kfree(wldev); return err; } #define IS_PDEV(pdev, _vendor, _device, _subvendor, _subdevice) ( \ (pdev->vendor == PCI_VENDOR_ID_##_vendor) && \ (pdev->device == _device) && \ (pdev->subsystem_vendor == PCI_VENDOR_ID_##_subvendor) && \ (pdev->subsystem_device == _subdevice) ) static void b43_sprom_fixup(struct ssb_bus *bus) { struct pci_dev *pdev; /* boardflags workarounds */ if (bus->boardinfo.vendor == SSB_BOARDVENDOR_DELL && bus->chip_id == 0x4301 && bus->boardinfo.rev == 0x74) bus->sprom.boardflags_lo |= B43_BFL_BTCOEXIST; if (bus->boardinfo.vendor == PCI_VENDOR_ID_APPLE && bus->boardinfo.type == 0x4E && bus->boardinfo.rev > 0x40) bus->sprom.boardflags_lo |= B43_BFL_PACTRL; if (bus->bustype == SSB_BUSTYPE_PCI) { pdev = bus->host_pci; if (IS_PDEV(pdev, BROADCOM, 0x4318, ASUSTEK, 0x100F) || IS_PDEV(pdev, BROADCOM, 0x4320, DELL, 0x0003) || IS_PDEV(pdev, BROADCOM, 0x4320, HP, 0x12f8) || IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0015) || IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0014) || IS_PDEV(pdev, BROADCOM, 0x4320, LINKSYS, 0x0013) || IS_PDEV(pdev, BROADCOM, 0x4320, MOTOROLA, 0x7010)) bus->sprom.boardflags_lo &= ~B43_BFL_BTCOEXIST; } } static void b43_wireless_exit(struct ssb_device *dev, struct b43_wl *wl) { struct ieee80211_hw *hw = wl->hw; ssb_set_devtypedata(dev, NULL); ieee80211_free_hw(hw); } static int b43_wireless_init(struct ssb_device *dev) { struct ssb_sprom *sprom = &dev->bus->sprom; struct ieee80211_hw *hw; struct b43_wl *wl; int err = -ENOMEM; b43_sprom_fixup(dev->bus); hw = ieee80211_alloc_hw(sizeof(*wl), &b43_hw_ops); if (!hw) { 
b43err(NULL, "Could not allocate ieee80211 device\n"); goto out; } wl = hw_to_b43_wl(hw); /* fill hw info */ hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM; hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MESH_POINT) | BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_WDS) | BIT(NL80211_IFTYPE_ADHOC); hw->queues = modparam_qos ? 4 : 1; wl->mac80211_initially_registered_queues = hw->queues; hw->max_rates = 2; SET_IEEE80211_DEV(hw, dev->dev); if (is_valid_ether_addr(sprom->et1mac)) SET_IEEE80211_PERM_ADDR(hw, sprom->et1mac); else SET_IEEE80211_PERM_ADDR(hw, sprom->il0mac); /* Initialize struct b43_wl */ wl->hw = hw; mutex_init(&wl->mutex); spin_lock_init(&wl->hardirq_lock); INIT_LIST_HEAD(&wl->devlist); INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work); INIT_WORK(&wl->txpower_adjust_work, b43_phy_txpower_adjust_work); INIT_WORK(&wl->tx_work, b43_tx_work); skb_queue_head_init(&wl->tx_queue); ssb_set_devtypedata(dev, wl); b43info(wl, "Broadcom %04X WLAN found (core revision %u)\n", dev->bus->chip_id, dev->id.revision); err = 0; out: return err; } static int b43_probe(struct ssb_device *dev, const struct ssb_device_id *id) { struct b43_wl *wl; int err; int first = 0; wl = ssb_get_devtypedata(dev); if (!wl) { /* Probing the first core. Must setup common struct b43_wl */ first = 1; err = b43_wireless_init(dev); if (err) goto out; wl = ssb_get_devtypedata(dev); B43_WARN_ON(!wl); } err = b43_one_core_attach(dev, wl); if (err) goto err_wireless_exit; if (first) { err = ieee80211_register_hw(wl->hw); if (err) goto err_one_core_detach; b43_leds_register(wl->current_dev); b43_rng_init(wl); } out: return err; err_one_core_detach: b43_one_core_detach(dev); err_wireless_exit: if (first) b43_wireless_exit(dev, wl); return err; } static void b43_remove(struct ssb_device *dev) { struct b43_wl *wl = ssb_get_devtypedata(dev); struct b43_wldev *wldev = ssb_get_drvdata(dev); /* We must cancel any work here before unregistering from ieee80211, * as the ieee80211 unreg will destroy the workqueue. */ cancel_work_sync(&wldev->restart_work); B43_WARN_ON(!wl); if (wl->current_dev == wldev) { /* Restore the queues count before unregistering, because firmware detect * might have modified it. Restoring is important, so the networking * stack can properly free resources. */ wl->hw->queues = wl->mac80211_initially_registered_queues; b43_leds_stop(wldev); ieee80211_unregister_hw(wl->hw); } b43_one_core_detach(dev); if (list_empty(&wl->devlist)) { b43_rng_exit(wl); b43_leds_unregister(wl); /* Last core on the chip unregistered. * We can destroy common struct b43_wl. */ b43_wireless_exit(dev, wl); } } /* Perform a hardware reset. This can be called from any context. */ void b43_controller_restart(struct b43_wldev *dev, const char *reason) { /* Must avoid requeueing, if we are in shutdown. 
*/ if (b43_status(dev) < B43_STAT_INITIALIZED) return; b43info(dev->wl, "Controller RESET (%s) ...\n", reason); ieee80211_queue_work(dev->wl->hw, &dev->restart_work); } static struct ssb_driver b43_ssb_driver = { .name = KBUILD_MODNAME, .id_table = b43_ssb_tbl, .probe = b43_probe, .remove = b43_remove, }; static void b43_print_driverinfo(void) { const char *feat_pci = "", *feat_pcmcia = "", *feat_nphy = "", *feat_leds = "", *feat_sdio = ""; #ifdef CONFIG_B43_PCI_AUTOSELECT feat_pci = "P"; #endif #ifdef CONFIG_B43_PCMCIA feat_pcmcia = "M"; #endif #ifdef CONFIG_B43_NPHY feat_nphy = "N"; #endif #ifdef CONFIG_B43_LEDS feat_leds = "L"; #endif #ifdef CONFIG_B43_SDIO feat_sdio = "S"; #endif printk(KERN_INFO "Broadcom 43xx driver loaded " "[ Features: %s%s%s%s%s, Firmware-ID: " B43_SUPPORTED_FIRMWARE_ID " ]\n", feat_pci, feat_pcmcia, feat_nphy, feat_leds, feat_sdio); } static int __init b43_init(void) { int err; b43_debugfs_init(); err = b43_pcmcia_init(); if (err) goto err_dfs_exit; err = b43_sdio_init(); if (err) goto err_pcmcia_exit; err = ssb_driver_register(&b43_ssb_driver); if (err) goto err_sdio_exit; b43_print_driverinfo(); return err; err_sdio_exit: b43_sdio_exit(); err_pcmcia_exit: b43_pcmcia_exit(); err_dfs_exit: b43_debugfs_exit(); return err; } static void __exit b43_exit(void) { ssb_driver_unregister(&b43_ssb_driver); b43_sdio_exit(); b43_pcmcia_exit(); b43_debugfs_exit(); } module_init(b43_init) module_exit(b43_exit)
gpl-2.0
CSE3320/kernel-code
linux-5.8/net/bluetooth/bnep/netdev.c
550
5898
/* BNEP implementation for Linux Bluetooth stack (BlueZ). Copyright (C) 2001-2002 Inventel Systemes Written 2001-2002 by Clément Moreau <clement.moreau@inventel.fr> David Libault <david.libault@inventel.fr> Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License version 2 as published by the Free Software Foundation; THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ #include <linux/etherdevice.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/l2cap.h> #include "bnep.h" #define BNEP_TX_QUEUE_LEN 20 static int bnep_net_open(struct net_device *dev) { netif_start_queue(dev); return 0; } static int bnep_net_close(struct net_device *dev) { netif_stop_queue(dev); return 0; } static void bnep_net_set_mc_list(struct net_device *dev) { #ifdef CONFIG_BT_BNEP_MC_FILTER struct bnep_session *s = netdev_priv(dev); struct sock *sk = s->sock->sk; struct bnep_set_filter_req *r; struct sk_buff *skb; int size; BT_DBG("%s mc_count %d", dev->name, netdev_mc_count(dev)); size = sizeof(*r) + (BNEP_MAX_MULTICAST_FILTERS + 1) * ETH_ALEN * 2; skb = alloc_skb(size, GFP_ATOMIC); if (!skb) { BT_ERR("%s Multicast list allocation failed", dev->name); return; } r = (void *) skb->data; __skb_put(skb, sizeof(*r)); r->type = BNEP_CONTROL; r->ctrl = BNEP_FILTER_MULTI_ADDR_SET; if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { u8 start[ETH_ALEN] = { 0x01 }; /* Request all addresses */ __skb_put_data(skb, start, ETH_ALEN); __skb_put_data(skb, dev->broadcast, ETH_ALEN); r->len = htons(ETH_ALEN * 2); } else { struct netdev_hw_addr *ha; int i, len = skb->len; if (dev->flags & IFF_BROADCAST) { __skb_put_data(skb, dev->broadcast, ETH_ALEN); __skb_put_data(skb, dev->broadcast, ETH_ALEN); } /* FIXME: We should group addresses here. */ i = 0; netdev_for_each_mc_addr(ha, dev) { if (i == BNEP_MAX_MULTICAST_FILTERS) break; __skb_put_data(skb, ha->addr, ETH_ALEN); __skb_put_data(skb, ha->addr, ETH_ALEN); i++; } r->len = htons(skb->len - len); } skb_queue_tail(&sk->sk_write_queue, skb); wake_up_interruptible(sk_sleep(sk)); #endif } static int bnep_net_set_mac_addr(struct net_device *dev, void *arg) { BT_DBG("%s", dev->name); return 0; } static void bnep_net_timeout(struct net_device *dev, unsigned int txqueue) { BT_DBG("net_timeout"); netif_wake_queue(dev); } #ifdef CONFIG_BT_BNEP_MC_FILTER static int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s) { struct ethhdr *eh = (void *) skb->data; if ((eh->h_dest[0] & 1) && !test_bit(bnep_mc_hash(eh->h_dest), (ulong *) &s->mc_filter)) return 1; return 0; } #endif #ifdef CONFIG_BT_BNEP_PROTO_FILTER /* Determine ether protocol. Based on eth_type_trans. 
*/ static u16 bnep_net_eth_proto(struct sk_buff *skb) { struct ethhdr *eh = (void *) skb->data; u16 proto = ntohs(eh->h_proto); if (proto >= ETH_P_802_3_MIN) return proto; if (get_unaligned((__be16 *) skb->data) == htons(0xFFFF)) return ETH_P_802_3; return ETH_P_802_2; } static int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s) { u16 proto = bnep_net_eth_proto(skb); struct bnep_proto_filter *f = s->proto_filter; int i; for (i = 0; i < BNEP_MAX_PROTO_FILTERS && f[i].end; i++) { if (proto >= f[i].start && proto <= f[i].end) return 0; } BT_DBG("BNEP: filtered skb %p, proto 0x%.4x", skb, proto); return 1; } #endif static netdev_tx_t bnep_net_xmit(struct sk_buff *skb, struct net_device *dev) { struct bnep_session *s = netdev_priv(dev); struct sock *sk = s->sock->sk; BT_DBG("skb %p, dev %p", skb, dev); #ifdef CONFIG_BT_BNEP_MC_FILTER if (bnep_net_mc_filter(skb, s)) { kfree_skb(skb); return NETDEV_TX_OK; } #endif #ifdef CONFIG_BT_BNEP_PROTO_FILTER if (bnep_net_proto_filter(skb, s)) { kfree_skb(skb); return NETDEV_TX_OK; } #endif /* * We cannot send L2CAP packets from here as we are potentially in a bh. * So we have to queue them and wake up session thread which is sleeping * on the sk_sleep(sk). */ netif_trans_update(dev); skb_queue_tail(&sk->sk_write_queue, skb); wake_up_interruptible(sk_sleep(sk)); if (skb_queue_len(&sk->sk_write_queue) >= BNEP_TX_QUEUE_LEN) { BT_DBG("tx queue is full"); /* Stop queuing. * Session thread will do netif_wake_queue() */ netif_stop_queue(dev); } return NETDEV_TX_OK; } static const struct net_device_ops bnep_netdev_ops = { .ndo_open = bnep_net_open, .ndo_stop = bnep_net_close, .ndo_start_xmit = bnep_net_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = bnep_net_set_mc_list, .ndo_set_mac_address = bnep_net_set_mac_addr, .ndo_tx_timeout = bnep_net_timeout, }; void bnep_net_setup(struct net_device *dev) { eth_broadcast_addr(dev->broadcast); dev->addr_len = ETH_ALEN; ether_setup(dev); dev->min_mtu = 0; dev->max_mtu = ETH_MAX_MTU; dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &bnep_netdev_ops; dev->watchdog_timeo = HZ * 2; }
gpl-2.0
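The bnep_net_set_mc_list() handler in the record above packs the device's multicast list into a BNEP_FILTER_MULTI_ADDR_SET control message as a sequence of (start, end) address-range pairs, with single addresses encoded as degenerate ranges and a single 01:... to ff:... range used for promiscuous/all-multicast mode. The userspace sketch below rebuilds only that packing logic; the message type/ctrl values and the mc_msg layout are placeholders, not the real bnep.h definitions.

/* Userspace sketch of the BNEP multicast address-range packing used in
 * bnep_net_set_mc_list() above.  Framing values are placeholders; only the
 * range-pair encoding follows the driver logic. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>   /* htons */

#define ETH_ALEN    6
#define MAX_FILTERS 4    /* stands in for BNEP_MAX_MULTICAST_FILTERS */

struct mc_msg {
    uint8_t  type;       /* BNEP control packet type (assumed 0x01) */
    uint8_t  ctrl;       /* filter multi addr set opcode (assumed 0x05) */
    uint16_t len;        /* network order: bytes of range data that follow */
    uint8_t  ranges[(MAX_FILTERS + 2) * ETH_ALEN * 2];
};

static size_t put_range(uint8_t *p, const uint8_t *start, const uint8_t *end)
{
    memcpy(p, start, ETH_ALEN);
    memcpy(p + ETH_ALEN, end, ETH_ALEN);
    return 2 * ETH_ALEN;
}

int main(void)
{
    static const uint8_t bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
    static const uint8_t mc1[ETH_ALEN]   = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
    static const uint8_t mc2[ETH_ALEN]   = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 };
    const uint8_t *mc_list[] = { mc1, mc2 };
    int promisc = 0;

    struct mc_msg msg = { .type = 0x01, .ctrl = 0x05 };
    size_t off = 0;

    if (promisc) {
        /* Request every multicast address with one 01:.. - ff:.. range,
         * matching the IFF_PROMISC/IFF_ALLMULTI branch of the driver. */
        uint8_t start[ETH_ALEN] = { 0x01 };
        off += put_range(msg.ranges + off, start, bcast);
    } else {
        /* Broadcast first, then each address as a degenerate (addr, addr) range. */
        off += put_range(msg.ranges + off, bcast, bcast);
        for (size_t i = 0; i < 2 && i < MAX_FILTERS; i++)
            off += put_range(msg.ranges + off, mc_list[i], mc_list[i]);
    }
    msg.len = htons((uint16_t)off);

    printf("filter payload: %zu bytes, %zu range pair(s)\n",
           off, off / (2 * ETH_ALEN));
    return 0;
}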
zhiweix-dong/linux-yocto-micro-3.19
sound/isa/gus/interwave.c
550
26646
/* * Driver for AMD InterWave soundcard * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * 1999/07/22 Erik Inge Bolso <knan@mo.himolde.no> * * mixer group handlers * */ #include <linux/init.h> #include <linux/err.h> #include <linux/isa.h> #include <linux/delay.h> #include <linux/pnp.h> #include <linux/module.h> #include <asm/dma.h> #include <sound/core.h> #include <sound/gus.h> #include <sound/wss.h> #ifdef SNDRV_STB #include <sound/tea6330t.h> #endif #define SNDRV_LEGACY_FIND_FREE_IRQ #define SNDRV_LEGACY_FIND_FREE_DMA #include <sound/initval.h> MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_LICENSE("GPL"); #ifndef SNDRV_STB MODULE_DESCRIPTION("AMD InterWave"); MODULE_SUPPORTED_DEVICE("{{Gravis,UltraSound Plug & Play}," "{STB,SoundRage32}," "{MED,MED3210}," "{Dynasonix,Dynasonix Pro}," "{Panasonic,PCA761AW}}"); #else MODULE_DESCRIPTION("AMD InterWave STB with TEA6330T"); MODULE_SUPPORTED_DEVICE("{{AMD,InterWave STB with TEA6330T}}"); #endif static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_ISAPNP; /* Enable this card */ #ifdef CONFIG_PNP static bool isapnp[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 1}; #endif static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* 0x210,0x220,0x230,0x240,0x250,0x260 */ #ifdef SNDRV_STB static long port_tc[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* 0x350,0x360,0x370,0x380 */ #endif static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* 2,3,5,9,11,12,15 */ static int dma1[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* 0,1,3,5,6,7 */ static int dma2[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* 0,1,3,5,6,7 */ static int joystick_dac[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 29}; /* 0 to 31, (0.59V-4.52V or 0.389V-2.98V) */ static int midi[SNDRV_CARDS]; static int pcm_channels[SNDRV_CARDS] = {[0 ... 
(SNDRV_CARDS - 1)] = 2}; static int effect[SNDRV_CARDS]; #ifdef SNDRV_STB #define PFX "interwave-stb: " #define INTERWAVE_DRIVER "snd_interwave_stb" #define INTERWAVE_PNP_DRIVER "interwave-stb" #else #define PFX "interwave: " #define INTERWAVE_DRIVER "snd_interwave" #define INTERWAVE_PNP_DRIVER "interwave" #endif module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for InterWave soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for InterWave soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable InterWave soundcard."); #ifdef CONFIG_PNP module_param_array(isapnp, bool, NULL, 0444); MODULE_PARM_DESC(isapnp, "ISA PnP detection for specified soundcard."); #endif module_param_array(port, long, NULL, 0444); MODULE_PARM_DESC(port, "Port # for InterWave driver."); #ifdef SNDRV_STB module_param_array(port_tc, long, NULL, 0444); MODULE_PARM_DESC(port_tc, "Tone control (TEA6330T - i2c bus) port # for InterWave driver."); #endif module_param_array(irq, int, NULL, 0444); MODULE_PARM_DESC(irq, "IRQ # for InterWave driver."); module_param_array(dma1, int, NULL, 0444); MODULE_PARM_DESC(dma1, "DMA1 # for InterWave driver."); module_param_array(dma2, int, NULL, 0444); MODULE_PARM_DESC(dma2, "DMA2 # for InterWave driver."); module_param_array(joystick_dac, int, NULL, 0444); MODULE_PARM_DESC(joystick_dac, "Joystick DAC level 0.59V-4.52V or 0.389V-2.98V for InterWave driver."); module_param_array(midi, int, NULL, 0444); MODULE_PARM_DESC(midi, "MIDI UART enable for InterWave driver."); module_param_array(pcm_channels, int, NULL, 0444); MODULE_PARM_DESC(pcm_channels, "Reserved PCM channels for InterWave driver."); module_param_array(effect, int, NULL, 0444); MODULE_PARM_DESC(effect, "Effects enable for InterWave driver."); struct snd_interwave { int irq; struct snd_card *card; struct snd_gus_card *gus; struct snd_wss *wss; #ifdef SNDRV_STB struct resource *i2c_res; #endif unsigned short gus_status_reg; unsigned short pcm_status_reg; #ifdef CONFIG_PNP struct pnp_dev *dev; #ifdef SNDRV_STB struct pnp_dev *devtc; #endif #endif }; #ifdef CONFIG_PNP static int isa_registered; static int pnp_registered; static struct pnp_card_device_id snd_interwave_pnpids[] = { #ifndef SNDRV_STB /* Gravis UltraSound Plug & Play */ { .id = "GRV0001", .devs = { { .id = "GRV0000" } } }, /* STB SoundRage32 */ { .id = "STB011a", .devs = { { .id = "STB0010" } } }, /* MED3210 */ { .id = "DXP3201", .devs = { { .id = "DXP0010" } } }, /* Dynasonic Pro */ /* This device also have CDC1117:DynaSonix Pro Audio Effects Processor */ { .id = "CDC1111", .devs = { { .id = "CDC1112" } } }, /* Panasonic PCA761AW Audio Card */ { .id = "ADV55ff", .devs = { { .id = "ADV0010" } } }, /* InterWave STB without TEA6330T */ { .id = "ADV550a", .devs = { { .id = "ADV0010" } } }, #else /* InterWave STB with TEA6330T */ { .id = "ADV550a", .devs = { { .id = "ADV0010" }, { .id = "ADV0015" } } }, #endif { .id = "" } }; MODULE_DEVICE_TABLE(pnp_card, snd_interwave_pnpids); #endif /* CONFIG_PNP */ #ifdef SNDRV_STB static void snd_interwave_i2c_setlines(struct snd_i2c_bus *bus, int ctrl, int data) { unsigned long port = bus->private_value; #if 0 printk(KERN_DEBUG "i2c_setlines - 0x%lx <- %i,%i\n", port, ctrl, data); #endif outb((data << 1) | ctrl, port); udelay(10); } static int snd_interwave_i2c_getclockline(struct snd_i2c_bus *bus) { unsigned long port = bus->private_value; unsigned char res; res = inb(port) & 1; #if 0 printk(KERN_DEBUG "i2c_getclockline - 0x%lx -> 
%i\n", port, res); #endif return res; } static int snd_interwave_i2c_getdataline(struct snd_i2c_bus *bus, int ack) { unsigned long port = bus->private_value; unsigned char res; if (ack) udelay(10); res = (inb(port) & 2) >> 1; #if 0 printk(KERN_DEBUG "i2c_getdataline - 0x%lx -> %i\n", port, res); #endif return res; } static struct snd_i2c_bit_ops snd_interwave_i2c_bit_ops = { .setlines = snd_interwave_i2c_setlines, .getclock = snd_interwave_i2c_getclockline, .getdata = snd_interwave_i2c_getdataline, }; static int snd_interwave_detect_stb(struct snd_interwave *iwcard, struct snd_gus_card *gus, int dev, struct snd_i2c_bus **rbus) { unsigned long port; struct snd_i2c_bus *bus; struct snd_card *card = iwcard->card; char name[32]; int err; *rbus = NULL; port = port_tc[dev]; if (port == SNDRV_AUTO_PORT) { port = 0x350; if (gus->gf1.port == 0x250) { port = 0x360; } while (port <= 0x380) { if ((iwcard->i2c_res = request_region(port, 1, "InterWave (I2C bus)")) != NULL) break; port += 0x10; } } else { iwcard->i2c_res = request_region(port, 1, "InterWave (I2C bus)"); } if (iwcard->i2c_res == NULL) { snd_printk(KERN_ERR "interwave: can't grab i2c bus port\n"); return -ENODEV; } sprintf(name, "InterWave-%i", card->number); if ((err = snd_i2c_bus_create(card, name, NULL, &bus)) < 0) return err; bus->private_value = port; bus->hw_ops.bit = &snd_interwave_i2c_bit_ops; if ((err = snd_tea6330t_detect(bus, 0)) < 0) return err; *rbus = bus; return 0; } #endif static int snd_interwave_detect(struct snd_interwave *iwcard, struct snd_gus_card *gus, int dev #ifdef SNDRV_STB , struct snd_i2c_bus **rbus #endif ) { unsigned long flags; unsigned char rev1, rev2; int d; snd_gf1_i_write8(gus, SNDRV_GF1_GB_RESET, 0); /* reset GF1 */ if (((d = snd_gf1_i_look8(gus, SNDRV_GF1_GB_RESET)) & 0x07) != 0) { snd_printdd("[0x%lx] check 1 failed - 0x%x\n", gus->gf1.port, d); return -ENODEV; } udelay(160); snd_gf1_i_write8(gus, SNDRV_GF1_GB_RESET, 1); /* release reset */ udelay(160); if (((d = snd_gf1_i_look8(gus, SNDRV_GF1_GB_RESET)) & 0x07) != 1) { snd_printdd("[0x%lx] check 2 failed - 0x%x\n", gus->gf1.port, d); return -ENODEV; } spin_lock_irqsave(&gus->reg_lock, flags); rev1 = snd_gf1_look8(gus, SNDRV_GF1_GB_VERSION_NUMBER); snd_gf1_write8(gus, SNDRV_GF1_GB_VERSION_NUMBER, ~rev1); rev2 = snd_gf1_look8(gus, SNDRV_GF1_GB_VERSION_NUMBER); snd_gf1_write8(gus, SNDRV_GF1_GB_VERSION_NUMBER, rev1); spin_unlock_irqrestore(&gus->reg_lock, flags); snd_printdd("[0x%lx] InterWave check - rev1=0x%x, rev2=0x%x\n", gus->gf1.port, rev1, rev2); if ((rev1 & 0xf0) == (rev2 & 0xf0) && (rev1 & 0x0f) != (rev2 & 0x0f)) { snd_printdd("[0x%lx] InterWave check - passed\n", gus->gf1.port); gus->interwave = 1; strcpy(gus->card->shortname, "AMD InterWave"); gus->revision = rev1 >> 4; #ifndef SNDRV_STB return 0; /* ok.. We have an InterWave board */ #else return snd_interwave_detect_stb(iwcard, gus, dev, rbus); #endif } snd_printdd("[0x%lx] InterWave check - failed\n", gus->gf1.port); return -ENODEV; } static irqreturn_t snd_interwave_interrupt(int irq, void *dev_id) { struct snd_interwave *iwcard = dev_id; int loop, max = 5; int handled = 0; do { loop = 0; if (inb(iwcard->gus_status_reg)) { handled = 1; snd_gus_interrupt(irq, iwcard->gus); loop++; } if (inb(iwcard->pcm_status_reg) & 0x01) { /* IRQ bit is set? 
*/ handled = 1; snd_wss_interrupt(irq, iwcard->wss); loop++; } } while (loop && --max > 0); return IRQ_RETVAL(handled); } static void snd_interwave_reset(struct snd_gus_card *gus) { snd_gf1_write8(gus, SNDRV_GF1_GB_RESET, 0x00); udelay(160); snd_gf1_write8(gus, SNDRV_GF1_GB_RESET, 0x01); udelay(160); } static void snd_interwave_bank_sizes(struct snd_gus_card *gus, int *sizes) { unsigned int idx; unsigned int local; unsigned char d; for (idx = 0; idx < 4; idx++) { sizes[idx] = 0; d = 0x55; for (local = idx << 22; local < (idx << 22) + 0x400000; local += 0x40000, d++) { snd_gf1_poke(gus, local, d); snd_gf1_poke(gus, local + 1, d + 1); #if 0 printk(KERN_DEBUG "d = 0x%x, local = 0x%x, " "local + 1 = 0x%x, idx << 22 = 0x%x\n", d, snd_gf1_peek(gus, local), snd_gf1_peek(gus, local + 1), snd_gf1_peek(gus, idx << 22)); #endif if (snd_gf1_peek(gus, local) != d || snd_gf1_peek(gus, local + 1) != d + 1 || snd_gf1_peek(gus, idx << 22) != 0x55) break; sizes[idx]++; } } #if 0 printk(KERN_DEBUG "sizes: %i %i %i %i\n", sizes[0], sizes[1], sizes[2], sizes[3]); #endif } struct rom_hdr { /* 000 */ unsigned char iwave[8]; /* 008 */ unsigned char rom_hdr_revision; /* 009 */ unsigned char series_number; /* 010 */ unsigned char series_name[16]; /* 026 */ unsigned char date[10]; /* 036 */ unsigned short vendor_revision_major; /* 038 */ unsigned short vendor_revision_minor; /* 040 */ unsigned int rom_size; /* 044 */ unsigned char copyright[128]; /* 172 */ unsigned char vendor_name[64]; /* 236 */ unsigned char rom_description[128]; /* 364 */ unsigned char pad[147]; /* 511 */ unsigned char csum; }; static void snd_interwave_detect_memory(struct snd_gus_card *gus) { static unsigned int lmc[13] = { 0x00000001, 0x00000101, 0x01010101, 0x00000401, 0x04040401, 0x00040101, 0x04040101, 0x00000004, 0x00000404, 0x04040404, 0x00000010, 0x00001010, 0x10101010 }; int bank_pos, pages; unsigned int i, lmct; int psizes[4]; unsigned char iwave[8]; unsigned char csum; snd_interwave_reset(gus); snd_gf1_write8(gus, SNDRV_GF1_GB_GLOBAL_MODE, snd_gf1_read8(gus, SNDRV_GF1_GB_GLOBAL_MODE) | 0x01); /* enhanced mode */ snd_gf1_write8(gus, SNDRV_GF1_GB_MEMORY_CONTROL, 0x01); /* DRAM I/O cycles selected */ snd_gf1_write16(gus, SNDRV_GF1_GW_MEMORY_CONFIG, (snd_gf1_look16(gus, SNDRV_GF1_GW_MEMORY_CONFIG) & 0xff10) | 0x004c); /* ok.. simple test of memory size */ pages = 0; snd_gf1_poke(gus, 0, 0x55); snd_gf1_poke(gus, 1, 0xaa); #if 1 if (snd_gf1_peek(gus, 0) == 0x55 && snd_gf1_peek(gus, 1) == 0xaa) #else if (0) /* ok.. for testing of 0k RAM */ #endif { snd_interwave_bank_sizes(gus, psizes); lmct = (psizes[3] << 24) | (psizes[2] << 16) | (psizes[1] << 8) | psizes[0]; #if 0 printk(KERN_DEBUG "lmct = 0x%08x\n", lmct); #endif for (i = 0; i < ARRAY_SIZE(lmc); i++) if (lmct == lmc[i]) { #if 0 printk(KERN_DEBUG "found !!! 
%i\n", i); #endif snd_gf1_write16(gus, SNDRV_GF1_GW_MEMORY_CONFIG, (snd_gf1_look16(gus, SNDRV_GF1_GW_MEMORY_CONFIG) & 0xfff0) | i); snd_interwave_bank_sizes(gus, psizes); break; } if (i >= ARRAY_SIZE(lmc) && !gus->gf1.enh_mode) snd_gf1_write16(gus, SNDRV_GF1_GW_MEMORY_CONFIG, (snd_gf1_look16(gus, SNDRV_GF1_GW_MEMORY_CONFIG) & 0xfff0) | 2); for (i = 0; i < 4; i++) { gus->gf1.mem_alloc.banks_8[i].address = gus->gf1.mem_alloc.banks_16[i].address = i << 22; gus->gf1.mem_alloc.banks_8[i].size = gus->gf1.mem_alloc.banks_16[i].size = psizes[i] << 18; pages += psizes[i]; } } pages <<= 18; gus->gf1.memory = pages; snd_gf1_write8(gus, SNDRV_GF1_GB_MEMORY_CONTROL, 0x03); /* select ROM */ snd_gf1_write16(gus, SNDRV_GF1_GW_MEMORY_CONFIG, (snd_gf1_look16(gus, SNDRV_GF1_GW_MEMORY_CONFIG) & 0xff1f) | (4 << 5)); gus->gf1.rom_banks = 0; gus->gf1.rom_memory = 0; for (bank_pos = 0; bank_pos < 16L * 1024L * 1024L; bank_pos += 4L * 1024L * 1024L) { for (i = 0; i < 8; ++i) iwave[i] = snd_gf1_peek(gus, bank_pos + i); if (strncmp(iwave, "INTRWAVE", 8)) continue; /* first check */ csum = 0; for (i = 0; i < sizeof(struct rom_hdr); i++) csum += snd_gf1_peek(gus, bank_pos + i); if (csum != 0) continue; /* not valid rom */ gus->gf1.rom_banks++; gus->gf1.rom_present |= 1 << (bank_pos >> 22); gus->gf1.rom_memory = snd_gf1_peek(gus, bank_pos + 40) | (snd_gf1_peek(gus, bank_pos + 41) << 8) | (snd_gf1_peek(gus, bank_pos + 42) << 16) | (snd_gf1_peek(gus, bank_pos + 43) << 24); } #if 0 if (gus->gf1.rom_memory > 0) { if (gus->gf1.rom_banks == 1 && gus->gf1.rom_present == 8) gus->card->type = SNDRV_CARD_TYPE_IW_DYNASONIC; } #endif snd_gf1_write8(gus, SNDRV_GF1_GB_MEMORY_CONTROL, 0x00); /* select RAM */ if (!gus->gf1.enh_mode) snd_interwave_reset(gus); } static void snd_interwave_init(int dev, struct snd_gus_card *gus) { unsigned long flags; /* ok.. 
some InterWave specific initialization */ spin_lock_irqsave(&gus->reg_lock, flags); snd_gf1_write8(gus, SNDRV_GF1_GB_SOUND_BLASTER_CONTROL, 0x00); snd_gf1_write8(gus, SNDRV_GF1_GB_COMPATIBILITY, 0x1f); snd_gf1_write8(gus, SNDRV_GF1_GB_DECODE_CONTROL, 0x49); snd_gf1_write8(gus, SNDRV_GF1_GB_VERSION_NUMBER, 0x11); snd_gf1_write8(gus, SNDRV_GF1_GB_MPU401_CONTROL_A, 0x00); snd_gf1_write8(gus, SNDRV_GF1_GB_MPU401_CONTROL_B, 0x30); snd_gf1_write8(gus, SNDRV_GF1_GB_EMULATION_IRQ, 0x00); spin_unlock_irqrestore(&gus->reg_lock, flags); gus->equal_irq = 1; gus->codec_flag = 1; gus->interwave = 1; gus->max_flag = 1; gus->joystick_dac = joystick_dac[dev]; } static struct snd_kcontrol_new snd_interwave_controls[] = { WSS_DOUBLE("Master Playback Switch", 0, CS4231_LINE_LEFT_OUTPUT, CS4231_LINE_RIGHT_OUTPUT, 7, 7, 1, 1), WSS_DOUBLE("Master Playback Volume", 0, CS4231_LINE_LEFT_OUTPUT, CS4231_LINE_RIGHT_OUTPUT, 0, 0, 31, 1), WSS_DOUBLE("Mic Playback Switch", 0, CS4231_LEFT_MIC_INPUT, CS4231_RIGHT_MIC_INPUT, 7, 7, 1, 1), WSS_DOUBLE("Mic Playback Volume", 0, CS4231_LEFT_MIC_INPUT, CS4231_RIGHT_MIC_INPUT, 0, 0, 31, 1) }; static int snd_interwave_mixer(struct snd_wss *chip) { struct snd_card *card = chip->card; struct snd_ctl_elem_id id1, id2; unsigned int idx; int err; memset(&id1, 0, sizeof(id1)); memset(&id2, 0, sizeof(id2)); id1.iface = id2.iface = SNDRV_CTL_ELEM_IFACE_MIXER; #if 0 /* remove mono microphone controls */ strcpy(id1.name, "Mic Playback Switch"); if ((err = snd_ctl_remove_id(card, &id1)) < 0) return err; strcpy(id1.name, "Mic Playback Volume"); if ((err = snd_ctl_remove_id(card, &id1)) < 0) return err; #endif /* add new master and mic controls */ for (idx = 0; idx < ARRAY_SIZE(snd_interwave_controls); idx++) if ((err = snd_ctl_add(card, snd_ctl_new1(&snd_interwave_controls[idx], chip))) < 0) return err; snd_wss_out(chip, CS4231_LINE_LEFT_OUTPUT, 0x9f); snd_wss_out(chip, CS4231_LINE_RIGHT_OUTPUT, 0x9f); snd_wss_out(chip, CS4231_LEFT_MIC_INPUT, 0x9f); snd_wss_out(chip, CS4231_RIGHT_MIC_INPUT, 0x9f); /* reassign AUXA to SYNTHESIZER */ strcpy(id1.name, "Aux Playback Switch"); strcpy(id2.name, "Synth Playback Switch"); if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0) return err; strcpy(id1.name, "Aux Playback Volume"); strcpy(id2.name, "Synth Playback Volume"); if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0) return err; /* reassign AUXB to CD */ strcpy(id1.name, "Aux Playback Switch"); id1.index = 1; strcpy(id2.name, "CD Playback Switch"); if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0) return err; strcpy(id1.name, "Aux Playback Volume"); strcpy(id2.name, "CD Playback Volume"); if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0) return err; return 0; } #ifdef CONFIG_PNP static int snd_interwave_pnp(int dev, struct snd_interwave *iwcard, struct pnp_card_link *card, const struct pnp_card_device_id *id) { struct pnp_dev *pdev; int err; iwcard->dev = pnp_request_card_device(card, id->devs[0].id, NULL); if (iwcard->dev == NULL) return -EBUSY; #ifdef SNDRV_STB iwcard->devtc = pnp_request_card_device(card, id->devs[1].id, NULL); if (iwcard->devtc == NULL) return -EBUSY; #endif /* Synth & Codec initialization */ pdev = iwcard->dev; err = pnp_activate_dev(pdev); if (err < 0) { snd_printk(KERN_ERR "InterWave PnP configure failure (out of resources?)\n"); return err; } if (pnp_port_start(pdev, 0) + 0x100 != pnp_port_start(pdev, 1) || pnp_port_start(pdev, 0) + 0x10c != pnp_port_start(pdev, 2)) { snd_printk(KERN_ERR "PnP configure failure (wrong ports)\n"); return -ENOENT; } port[dev] = 
pnp_port_start(pdev, 0); dma1[dev] = pnp_dma(pdev, 0); if (dma2[dev] >= 0) dma2[dev] = pnp_dma(pdev, 1); irq[dev] = pnp_irq(pdev, 0); snd_printdd("isapnp IW: sb port=0x%llx, gf1 port=0x%llx, codec port=0x%llx\n", (unsigned long long)pnp_port_start(pdev, 0), (unsigned long long)pnp_port_start(pdev, 1), (unsigned long long)pnp_port_start(pdev, 2)); snd_printdd("isapnp IW: dma1=%i, dma2=%i, irq=%i\n", dma1[dev], dma2[dev], irq[dev]); #ifdef SNDRV_STB /* Tone Control initialization */ pdev = iwcard->devtc; err = pnp_activate_dev(pdev); if (err < 0) { snd_printk(KERN_ERR "InterWave ToneControl PnP configure failure (out of resources?)\n"); return err; } port_tc[dev] = pnp_port_start(pdev, 0); snd_printdd("isapnp IW: tone control port=0x%lx\n", port_tc[dev]); #endif return 0; } #endif /* CONFIG_PNP */ static void snd_interwave_free(struct snd_card *card) { struct snd_interwave *iwcard = card->private_data; if (iwcard == NULL) return; #ifdef SNDRV_STB release_and_free_resource(iwcard->i2c_res); #endif if (iwcard->irq >= 0) free_irq(iwcard->irq, (void *)iwcard); } static int snd_interwave_card_new(struct device *pdev, int dev, struct snd_card **cardp) { struct snd_card *card; struct snd_interwave *iwcard; int err; err = snd_card_new(pdev, index[dev], id[dev], THIS_MODULE, sizeof(struct snd_interwave), &card); if (err < 0) return err; iwcard = card->private_data; iwcard->card = card; iwcard->irq = -1; card->private_free = snd_interwave_free; *cardp = card; return 0; } static int snd_interwave_probe(struct snd_card *card, int dev) { int xirq, xdma1, xdma2; struct snd_interwave *iwcard = card->private_data; struct snd_wss *wss; struct snd_gus_card *gus; #ifdef SNDRV_STB struct snd_i2c_bus *i2c_bus; #endif struct snd_pcm *pcm; char *str; int err; xirq = irq[dev]; xdma1 = dma1[dev]; xdma2 = dma2[dev]; if ((err = snd_gus_create(card, port[dev], -xirq, xdma1, xdma2, 0, 32, pcm_channels[dev], effect[dev], &gus)) < 0) return err; if ((err = snd_interwave_detect(iwcard, gus, dev #ifdef SNDRV_STB , &i2c_bus #endif )) < 0) return err; iwcard->gus_status_reg = gus->gf1.reg_irqstat; iwcard->pcm_status_reg = gus->gf1.port + 0x10c + 2; snd_interwave_init(dev, gus); snd_interwave_detect_memory(gus); if ((err = snd_gus_initialize(gus)) < 0) return err; if (request_irq(xirq, snd_interwave_interrupt, 0, "InterWave", iwcard)) { snd_printk(KERN_ERR PFX "unable to grab IRQ %d\n", xirq); return -EBUSY; } iwcard->irq = xirq; err = snd_wss_create(card, gus->gf1.port + 0x10c, -1, xirq, xdma2 < 0 ? 
xdma1 : xdma2, xdma1, WSS_HW_INTERWAVE, WSS_HWSHARE_IRQ | WSS_HWSHARE_DMA1 | WSS_HWSHARE_DMA2, &wss); if (err < 0) return err; err = snd_wss_pcm(wss, 0, &pcm); if (err < 0) return err; sprintf(pcm->name + strlen(pcm->name), " rev %c", gus->revision + 'A'); strcat(pcm->name, " (codec)"); err = snd_wss_timer(wss, 2, NULL); if (err < 0) return err; err = snd_wss_mixer(wss); if (err < 0) return err; if (pcm_channels[dev] > 0) { err = snd_gf1_pcm_new(gus, 1, 1, NULL); if (err < 0) return err; } err = snd_interwave_mixer(wss); if (err < 0) return err; #ifdef SNDRV_STB { struct snd_ctl_elem_id id1, id2; memset(&id1, 0, sizeof(id1)); memset(&id2, 0, sizeof(id2)); id1.iface = id2.iface = SNDRV_CTL_ELEM_IFACE_MIXER; strcpy(id1.name, "Master Playback Switch"); strcpy(id2.name, id1.name); id2.index = 1; if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0) return err; strcpy(id1.name, "Master Playback Volume"); strcpy(id2.name, id1.name); if ((err = snd_ctl_rename_id(card, &id1, &id2)) < 0) return err; if ((err = snd_tea6330t_update_mixer(card, i2c_bus, 0, 1)) < 0) return err; } #endif gus->uart_enable = midi[dev]; if ((err = snd_gf1_rawmidi_new(gus, 0, NULL)) < 0) return err; #ifndef SNDRV_STB str = "AMD InterWave"; if (gus->gf1.rom_banks == 1 && gus->gf1.rom_present == 8) str = "Dynasonic 3-D"; #else str = "InterWave STB"; #endif strcpy(card->driver, str); strcpy(card->shortname, str); sprintf(card->longname, "%s at 0x%lx, irq %i, dma %d", str, gus->gf1.port, xirq, xdma1); if (xdma2 >= 0) sprintf(card->longname + strlen(card->longname), "&%d", xdma2); err = snd_card_register(card); if (err < 0) return err; iwcard->wss = wss; iwcard->gus = gus; return 0; } static int snd_interwave_isa_probe1(int dev, struct device *devptr) { struct snd_card *card; int err; err = snd_interwave_card_new(devptr, dev, &card); if (err < 0) return err; if ((err = snd_interwave_probe(card, dev)) < 0) { snd_card_free(card); return err; } dev_set_drvdata(devptr, card); return 0; } static int snd_interwave_isa_match(struct device *pdev, unsigned int dev) { if (!enable[dev]) return 0; #ifdef CONFIG_PNP if (isapnp[dev]) return 0; #endif return 1; } static int snd_interwave_isa_probe(struct device *pdev, unsigned int dev) { int err; static int possible_irqs[] = {5, 11, 12, 9, 7, 15, 3, -1}; static int possible_dmas[] = {0, 1, 3, 5, 6, 7, -1}; if (irq[dev] == SNDRV_AUTO_IRQ) { if ((irq[dev] = snd_legacy_find_free_irq(possible_irqs)) < 0) { snd_printk(KERN_ERR PFX "unable to find a free IRQ\n"); return -EBUSY; } } if (dma1[dev] == SNDRV_AUTO_DMA) { if ((dma1[dev] = snd_legacy_find_free_dma(possible_dmas)) < 0) { snd_printk(KERN_ERR PFX "unable to find a free DMA1\n"); return -EBUSY; } } if (dma2[dev] == SNDRV_AUTO_DMA) { if ((dma2[dev] = snd_legacy_find_free_dma(possible_dmas)) < 0) { snd_printk(KERN_ERR PFX "unable to find a free DMA2\n"); return -EBUSY; } } if (port[dev] != SNDRV_AUTO_PORT) return snd_interwave_isa_probe1(dev, pdev); else { static long possible_ports[] = {0x210, 0x220, 0x230, 0x240, 0x250, 0x260}; int i; for (i = 0; i < ARRAY_SIZE(possible_ports); i++) { port[dev] = possible_ports[i]; err = snd_interwave_isa_probe1(dev, pdev); if (! 
err) return 0; } return err; } } static int snd_interwave_isa_remove(struct device *devptr, unsigned int dev) { snd_card_free(dev_get_drvdata(devptr)); return 0; } static struct isa_driver snd_interwave_driver = { .match = snd_interwave_isa_match, .probe = snd_interwave_isa_probe, .remove = snd_interwave_isa_remove, /* FIXME: suspend,resume */ .driver = { .name = INTERWAVE_DRIVER }, }; #ifdef CONFIG_PNP static int snd_interwave_pnp_detect(struct pnp_card_link *pcard, const struct pnp_card_device_id *pid) { static int dev; struct snd_card *card; int res; for ( ; dev < SNDRV_CARDS; dev++) { if (enable[dev] && isapnp[dev]) break; } if (dev >= SNDRV_CARDS) return -ENODEV; res = snd_interwave_card_new(&pcard->card->dev, dev, &card); if (res < 0) return res; if ((res = snd_interwave_pnp(dev, card->private_data, pcard, pid)) < 0) { snd_card_free(card); return res; } if ((res = snd_interwave_probe(card, dev)) < 0) { snd_card_free(card); return res; } pnp_set_card_drvdata(pcard, card); dev++; return 0; } static void snd_interwave_pnp_remove(struct pnp_card_link *pcard) { snd_card_free(pnp_get_card_drvdata(pcard)); pnp_set_card_drvdata(pcard, NULL); } static struct pnp_card_driver interwave_pnpc_driver = { .flags = PNP_DRIVER_RES_DISABLE, .name = INTERWAVE_PNP_DRIVER, .id_table = snd_interwave_pnpids, .probe = snd_interwave_pnp_detect, .remove = snd_interwave_pnp_remove, /* FIXME: suspend,resume */ }; #endif /* CONFIG_PNP */ static int __init alsa_card_interwave_init(void) { int err; err = isa_register_driver(&snd_interwave_driver, SNDRV_CARDS); #ifdef CONFIG_PNP if (!err) isa_registered = 1; err = pnp_register_card_driver(&interwave_pnpc_driver); if (!err) pnp_registered = 1; if (isa_registered) err = 0; #endif return err; } static void __exit alsa_card_interwave_exit(void) { #ifdef CONFIG_PNP if (pnp_registered) pnp_unregister_card_driver(&interwave_pnpc_driver); if (isa_registered) #endif isa_unregister_driver(&snd_interwave_driver); } module_init(alsa_card_interwave_init) module_exit(alsa_card_interwave_exit)
gpl-2.0
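snd_interwave_bank_sizes() in the record above sizes each of the four 4 MiB GUS banks by poking a test pattern every 256 KiB and counting how many steps survive before either the readback fails or address aliasing overwrites the value kept at the bank base (the `idx << 22` check). A minimal userspace simulation of that probe, under the assumption that installed DRAM simply wraps modulo its size and that an empty bank reads back as open bus:

/* Userspace simulation of the InterWave bank-size probe above.
 * Each "bank" is modelled as memory that aliases modulo its installed size;
 * an empty bank reads back 0xff. */
#include <stdio.h>

#define BANK_SHIFT 22                    /* 4 MiB per bank */
#define BANK_SIZE  (1UL << BANK_SHIFT)
#define STEP       0x40000UL             /* probe every 256 KiB */

static unsigned char bank_mem[4][BANK_SIZE];
static unsigned long installed[4] = { 1UL << 21, 0, 0, 0 };  /* 2 MiB in bank 0 */

static void poke(unsigned long addr, unsigned char v)
{
    unsigned bank = addr >> BANK_SHIFT;
    if (installed[bank])
        bank_mem[bank][(addr & (BANK_SIZE - 1)) % installed[bank]] = v;
}

static unsigned char peek(unsigned long addr)
{
    unsigned bank = addr >> BANK_SHIFT;
    if (!installed[bank])
        return 0xff;                     /* open bus: nothing answers */
    return bank_mem[bank][(addr & (BANK_SIZE - 1)) % installed[bank]];
}

/* Mirrors snd_interwave_bank_sizes(): count surviving 256 KiB pages per bank. */
static void bank_sizes(int sizes[4])
{
    for (int idx = 0; idx < 4; idx++) {
        unsigned long base = (unsigned long)idx << BANK_SHIFT;
        unsigned char d = 0x55;

        sizes[idx] = 0;
        for (unsigned long local = base; local < base + BANK_SIZE;
             local += STEP, d++) {
            poke(local, d);
            poke(local + 1, d + 1);
            if (peek(local) != d ||
                peek(local + 1) != (unsigned char)(d + 1) ||
                peek(base) != 0x55)
                break;
            sizes[idx]++;
        }
    }
}

int main(void)
{
    int sizes[4], total;

    bank_sizes(sizes);
    total = sizes[0] + sizes[1] + sizes[2] + sizes[3];
    printf("bank pages (256 KiB units): %d %d %d %d -> %d KiB detected\n",
           sizes[0], sizes[1], sizes[2], sizes[3], total << 8);
    return 0;
}

With 2 MiB installed in bank 0, the probe stops after eight 256 KiB pages when the ninth write wraps onto the bank base, which is exactly the aliasing case the kernel check guards against.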
prakhya/linux_sai
arch/x86/crypto/fpu.c
806
4483
/* * FPU: Wrapper for blkcipher touching fpu * * Copyright (c) Intel Corp. * Author: Huang Ying <ying.huang@intel.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/algapi.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/crypto.h> #include <asm/fpu/api.h> struct crypto_fpu_ctx { struct crypto_blkcipher *child; }; static int crypto_fpu_setkey(struct crypto_tfm *parent, const u8 *key, unsigned int keylen) { struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(parent); struct crypto_blkcipher *child = ctx->child; int err; crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_blkcipher_set_flags(child, crypto_tfm_get_flags(parent) & CRYPTO_TFM_REQ_MASK); err = crypto_blkcipher_setkey(child, key, keylen); crypto_tfm_set_flags(parent, crypto_blkcipher_get_flags(child) & CRYPTO_TFM_RES_MASK); return err; } static int crypto_fpu_encrypt(struct blkcipher_desc *desc_in, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { int err; struct crypto_fpu_ctx *ctx = crypto_blkcipher_ctx(desc_in->tfm); struct crypto_blkcipher *child = ctx->child; struct blkcipher_desc desc = { .tfm = child, .info = desc_in->info, .flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, }; kernel_fpu_begin(); err = crypto_blkcipher_crt(desc.tfm)->encrypt(&desc, dst, src, nbytes); kernel_fpu_end(); return err; } static int crypto_fpu_decrypt(struct blkcipher_desc *desc_in, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { int err; struct crypto_fpu_ctx *ctx = crypto_blkcipher_ctx(desc_in->tfm); struct crypto_blkcipher *child = ctx->child; struct blkcipher_desc desc = { .tfm = child, .info = desc_in->info, .flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, }; kernel_fpu_begin(); err = crypto_blkcipher_crt(desc.tfm)->decrypt(&desc, dst, src, nbytes); kernel_fpu_end(); return err; } static int crypto_fpu_init_tfm(struct crypto_tfm *tfm) { struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); struct crypto_spawn *spawn = crypto_instance_ctx(inst); struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(tfm); struct crypto_blkcipher *cipher; cipher = crypto_spawn_blkcipher(spawn); if (IS_ERR(cipher)) return PTR_ERR(cipher); ctx->child = cipher; return 0; } static void crypto_fpu_exit_tfm(struct crypto_tfm *tfm) { struct crypto_fpu_ctx *ctx = crypto_tfm_ctx(tfm); crypto_free_blkcipher(ctx->child); } static struct crypto_instance *crypto_fpu_alloc(struct rtattr **tb) { struct crypto_instance *inst; struct crypto_alg *alg; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); if (err) return ERR_PTR(err); alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER, CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) return ERR_CAST(alg); inst = crypto_alloc_instance("fpu", alg); if (IS_ERR(inst)) goto out_put_alg; inst->alg.cra_flags = alg->cra_flags; inst->alg.cra_priority = alg->cra_priority; inst->alg.cra_blocksize = alg->cra_blocksize; inst->alg.cra_alignmask = alg->cra_alignmask; inst->alg.cra_type = alg->cra_type; inst->alg.cra_blkcipher.ivsize = alg->cra_blkcipher.ivsize; inst->alg.cra_blkcipher.min_keysize = alg->cra_blkcipher.min_keysize; inst->alg.cra_blkcipher.max_keysize = alg->cra_blkcipher.max_keysize; inst->alg.cra_ctxsize = sizeof(struct crypto_fpu_ctx); 
inst->alg.cra_init = crypto_fpu_init_tfm; inst->alg.cra_exit = crypto_fpu_exit_tfm; inst->alg.cra_blkcipher.setkey = crypto_fpu_setkey; inst->alg.cra_blkcipher.encrypt = crypto_fpu_encrypt; inst->alg.cra_blkcipher.decrypt = crypto_fpu_decrypt; out_put_alg: crypto_mod_put(alg); return inst; } static void crypto_fpu_free(struct crypto_instance *inst) { crypto_drop_spawn(crypto_instance_ctx(inst)); kfree(inst); } static struct crypto_template crypto_fpu_tmpl = { .name = "fpu", .alloc = crypto_fpu_alloc, .free = crypto_fpu_free, .module = THIS_MODULE, }; int __init crypto_fpu_init(void) { return crypto_register_template(&crypto_fpu_tmpl); } void crypto_fpu_exit(void) { crypto_unregister_template(&crypto_fpu_tmpl); } MODULE_ALIAS_CRYPTO("fpu");
gpl-2.0
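The fpu.c template above adds no cryptography of its own: it only brackets the child blkcipher's encrypt/decrypt between kernel_fpu_begin() and kernel_fpu_end(), and strips CRYPTO_TFM_REQ_MAY_SLEEP because code holding the kernel FPU must not sleep. A hedged userspace sketch of the same wrap-a-child-operation pattern; fpu_begin()/fpu_end() are only stand-ins for the real kernel_fpu_* calls:

/* Userspace sketch of the "wrap a child cipher between FPU begin/end"
 * pattern used by crypto_fpu_encrypt()/crypto_fpu_decrypt() above. */
#include <stdio.h>
#include <stddef.h>

typedef int (*cipher_op)(void *ctx, unsigned char *dst,
                         const unsigned char *src, size_t len);

struct fpu_wrapped {
    void     *child_ctx;      /* the wrapped ("child") cipher instance */
    cipher_op child_encrypt;
};

static void fpu_begin(void) { puts("fpu_begin: FPU state saved, no sleeping now"); }
static void fpu_end(void)   { puts("fpu_end:   FPU state restored"); }

/* The wrapper only brackets the child call; errors pass straight through. */
static int fpu_encrypt(struct fpu_wrapped *w, unsigned char *dst,
                       const unsigned char *src, size_t len)
{
    int err;

    fpu_begin();
    err = w->child_encrypt(w->child_ctx, dst, src, len);
    fpu_end();
    return err;
}

/* Toy child cipher: XOR with one byte, standing in for an SSE/AES-NI
 * implementation that actually needs the FPU. */
static int xor_encrypt(void *ctx, unsigned char *dst,
                       const unsigned char *src, size_t len)
{
    unsigned char key = *(unsigned char *)ctx;
    for (size_t i = 0; i < len; i++)
        dst[i] = src[i] ^ key;
    return 0;
}

int main(void)
{
    unsigned char key = 0xa5, out[5];
    struct fpu_wrapped w = { .child_ctx = &key, .child_encrypt = xor_encrypt };

    fpu_encrypt(&w, out, (const unsigned char *)"hello", 5);
    printf("first output byte: 0x%02x\n", out[0]);
    return 0;
}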
shakalaca/ASUS_ZenFone_ZE550ML_ZE551ML
linux/kernel/sound/soc/spear/spdif_in.c
2086
7375
/* * ALSA SoC SPDIF In Audio Layer for spear processors * * Copyright (C) 2012 ST Microelectronics * Vipin Kumar <vipin.kumar@st.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/module.h> #include <linux/platform_device.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/spear_dma.h> #include <sound/spear_spdif.h> #include "spdif_in_regs.h" struct spdif_in_params { u32 format; }; struct spdif_in_dev { struct clk *clk; struct spear_dma_data dma_params; struct spdif_in_params saved_params; void *io_base; struct device *dev; void (*reset_perip)(void); int irq; }; static void spdif_in_configure(struct spdif_in_dev *host) { u32 ctrl = SPDIF_IN_PRTYEN | SPDIF_IN_STATEN | SPDIF_IN_USREN | SPDIF_IN_VALEN | SPDIF_IN_BLKEN; ctrl |= SPDIF_MODE_16BIT | SPDIF_FIFO_THRES_16; writel(ctrl, host->io_base + SPDIF_IN_CTRL); writel(0xF, host->io_base + SPDIF_IN_IRQ_MASK); } static int spdif_in_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai) { struct spdif_in_dev *host = snd_soc_dai_get_drvdata(cpu_dai); if (substream->stream != SNDRV_PCM_STREAM_CAPTURE) return -EINVAL; snd_soc_dai_set_dma_data(cpu_dai, substream, (void *)&host->dma_params); return 0; } static void spdif_in_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct spdif_in_dev *host = snd_soc_dai_get_drvdata(dai); if (substream->stream != SNDRV_PCM_STREAM_CAPTURE) return; writel(0x0, host->io_base + SPDIF_IN_IRQ_MASK); snd_soc_dai_set_dma_data(dai, substream, NULL); } static void spdif_in_format(struct spdif_in_dev *host, u32 format) { u32 ctrl = readl(host->io_base + SPDIF_IN_CTRL); switch (format) { case SNDRV_PCM_FORMAT_S16_LE: ctrl |= SPDIF_XTRACT_16BIT; break; case SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE: ctrl &= ~SPDIF_XTRACT_16BIT; break; } writel(ctrl, host->io_base + SPDIF_IN_CTRL); } static int spdif_in_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct spdif_in_dev *host = snd_soc_dai_get_drvdata(dai); u32 format; if (substream->stream != SNDRV_PCM_STREAM_CAPTURE) return -EINVAL; format = params_format(params); host->saved_params.format = format; return 0; } static int spdif_in_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct spdif_in_dev *host = snd_soc_dai_get_drvdata(dai); u32 ctrl; int ret = 0; if (substream->stream != SNDRV_PCM_STREAM_CAPTURE) return -EINVAL; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: clk_enable(host->clk); spdif_in_configure(host); spdif_in_format(host, host->saved_params.format); ctrl = readl(host->io_base + SPDIF_IN_CTRL); ctrl |= SPDIF_IN_SAMPLE | SPDIF_IN_ENB; writel(ctrl, host->io_base + SPDIF_IN_CTRL); writel(0xF, host->io_base + SPDIF_IN_IRQ_MASK); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: ctrl = readl(host->io_base + SPDIF_IN_CTRL); ctrl &= ~(SPDIF_IN_SAMPLE | SPDIF_IN_ENB); writel(ctrl, host->io_base + SPDIF_IN_CTRL); writel(0x0, host->io_base + SPDIF_IN_IRQ_MASK); if (host->reset_perip) host->reset_perip(); clk_disable(host->clk); break; default: ret = 
-EINVAL; break; } return ret; } static struct snd_soc_dai_ops spdif_in_dai_ops = { .startup = spdif_in_startup, .shutdown = spdif_in_shutdown, .trigger = spdif_in_trigger, .hw_params = spdif_in_hw_params, }; struct snd_soc_dai_driver spdif_in_dai = { .capture = { .channels_min = 2, .channels_max = 2, .rates = (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000 | \ SNDRV_PCM_RATE_192000), .formats = SNDRV_PCM_FMTBIT_S16_LE | \ SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE, }, .ops = &spdif_in_dai_ops, }; static const struct snd_soc_component_driver spdif_in_component = { .name = "spdif-in", }; static irqreturn_t spdif_in_irq(int irq, void *arg) { struct spdif_in_dev *host = (struct spdif_in_dev *)arg; u32 irq_status = readl(host->io_base + SPDIF_IN_IRQ); if (!irq_status) return IRQ_NONE; if (irq_status & SPDIF_IRQ_FIFOWRITE) dev_err(host->dev, "spdif in: fifo write error"); if (irq_status & SPDIF_IRQ_EMPTYFIFOREAD) dev_err(host->dev, "spdif in: empty fifo read error"); if (irq_status & SPDIF_IRQ_FIFOFULL) dev_err(host->dev, "spdif in: fifo full error"); if (irq_status & SPDIF_IRQ_OUTOFRANGE) dev_err(host->dev, "spdif in: out of range error"); writel(0, host->io_base + SPDIF_IN_IRQ); return IRQ_HANDLED; } static int spdif_in_probe(struct platform_device *pdev) { struct spdif_in_dev *host; struct spear_spdif_platform_data *pdata; struct resource *res, *res_fifo; int ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -EINVAL; res_fifo = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!res_fifo) return -EINVAL; if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res), pdev->name)) { dev_warn(&pdev->dev, "Failed to get memory resourse\n"); return -ENOENT; } host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL); if (!host) { dev_warn(&pdev->dev, "kzalloc fail\n"); return -ENOMEM; } host->io_base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!host->io_base) { dev_warn(&pdev->dev, "ioremap failed\n"); return -ENOMEM; } host->irq = platform_get_irq(pdev, 0); if (host->irq < 0) return -EINVAL; host->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(host->clk)) return PTR_ERR(host->clk); pdata = dev_get_platdata(&pdev->dev); if (!pdata) return -EINVAL; host->dma_params.data = pdata->dma_params; host->dma_params.addr = res_fifo->start; host->dma_params.max_burst = 16; host->dma_params.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; host->dma_params.filter = pdata->filter; host->reset_perip = pdata->reset_perip; host->dev = &pdev->dev; dev_set_drvdata(&pdev->dev, host); ret = devm_request_irq(&pdev->dev, host->irq, spdif_in_irq, 0, "spdif-in", host); if (ret) { clk_put(host->clk); dev_warn(&pdev->dev, "request_irq failed\n"); return ret; } ret = snd_soc_register_component(&pdev->dev, &spdif_in_component, &spdif_in_dai, 1); if (ret != 0) { clk_put(host->clk); return ret; } return 0; } static int spdif_in_remove(struct platform_device *pdev) { struct spdif_in_dev *host = dev_get_drvdata(&pdev->dev); snd_soc_unregister_component(&pdev->dev); dev_set_drvdata(&pdev->dev, NULL); clk_put(host->clk); return 0; } static struct platform_driver spdif_in_driver = { .probe = spdif_in_probe, .remove = spdif_in_remove, .driver = { .name = "spdif-in", .owner = THIS_MODULE, }, }; module_platform_driver(spdif_in_driver); MODULE_AUTHOR("Vipin Kumar <vipin.kumar@st.com>"); MODULE_DESCRIPTION("SPEAr SPDIF IN SoC Interface"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:spdif_in");
gpl-2.0
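spdif_in_trigger() in the record above starts and stops capture with read-modify-write sequences on SPDIF_IN_CTRL, setting or clearing the SAMPLE/ENB bits and opening or closing the IRQ mask around them. A small userspace sketch of that sequencing against a fake register block; the bit positions and offsets are invented for illustration and are not the real spdif_in_regs.h values:

/* Userspace sketch of the trigger start/stop sequencing in spdif_in_trigger()
 * above, run against a fake MMIO block.  Bit values and offsets are assumed;
 * only the read-modify-write flow is the point. */
#include <stdint.h>
#include <stdio.h>

#define SPDIF_IN_CTRL      0        /* word offsets into the fake register file */
#define SPDIF_IN_IRQ_MASK  1

#define SPDIF_IN_SAMPLE (1u << 4)   /* assumed bit positions */
#define SPDIF_IN_ENB    (1u << 5)

static uint32_t regs[2];

static uint32_t readl_f(int off)              { return regs[off]; }
static void     writel_f(uint32_t v, int off) { regs[off] = v; }

static void trigger_start(void)
{
    uint32_t ctrl = readl_f(SPDIF_IN_CTRL);

    ctrl |= SPDIF_IN_SAMPLE | SPDIF_IN_ENB;   /* enable sampling + block */
    writel_f(ctrl, SPDIF_IN_CTRL);
    writel_f(0xF, SPDIF_IN_IRQ_MASK);         /* unmask error interrupts */
}

static void trigger_stop(void)
{
    uint32_t ctrl = readl_f(SPDIF_IN_CTRL);

    ctrl &= ~(SPDIF_IN_SAMPLE | SPDIF_IN_ENB);
    writel_f(ctrl, SPDIF_IN_CTRL);
    writel_f(0x0, SPDIF_IN_IRQ_MASK);         /* mask everything again */
}

int main(void)
{
    trigger_start();
    printf("after start: ctrl=0x%08x irq_mask=0x%x\n",
           regs[SPDIF_IN_CTRL], regs[SPDIF_IN_IRQ_MASK]);
    trigger_stop();
    printf("after stop:  ctrl=0x%08x irq_mask=0x%x\n",
           regs[SPDIF_IN_CTRL], regs[SPDIF_IN_IRQ_MASK]);
    return 0;
}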
xhteam/kernel_imx
arch/x86/mm/init_64.c
2086
24554
/* * linux/arch/x86_64/mm/init.c * * Copyright (C) 1995 Linus Torvalds * Copyright (C) 2000 Pavel Machek <pavel@ucw.cz> * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de> */ #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/smp.h> #include <linux/init.h> #include <linux/initrd.h> #include <linux/pagemap.h> #include <linux/bootmem.h> #include <linux/memblock.h> #include <linux/proc_fs.h> #include <linux/pci.h> #include <linux/pfn.h> #include <linux/poison.h> #include <linux/dma-mapping.h> #include <linux/module.h> #include <linux/memory.h> #include <linux/memory_hotplug.h> #include <linux/nmi.h> #include <linux/gfp.h> #include <asm/processor.h> #include <asm/bios_ebda.h> #include <asm/system.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> #include <asm/dma.h> #include <asm/fixmap.h> #include <asm/e820.h> #include <asm/apic.h> #include <asm/tlb.h> #include <asm/mmu_context.h> #include <asm/proto.h> #include <asm/smp.h> #include <asm/sections.h> #include <asm/kdebug.h> #include <asm/numa.h> #include <asm/cacheflush.h> #include <asm/init.h> #include <asm/uv/uv.h> #include <asm/setup.h> static int __init parse_direct_gbpages_off(char *arg) { direct_gbpages = 0; return 0; } early_param("nogbpages", parse_direct_gbpages_off); static int __init parse_direct_gbpages_on(char *arg) { direct_gbpages = 1; return 0; } early_param("gbpages", parse_direct_gbpages_on); /* * NOTE: pagetable_init alloc all the fixmap pagetables contiguous on the * physical space so we can cache the place of the first one and move * around without checking the pgd every time. */ pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP; EXPORT_SYMBOL_GPL(__supported_pte_mask); int force_personality32; /* * noexec32=on|off * Control non executable heap for 32bit processes. * To control the stack too use noexec=off * * on PROT_READ does not imply PROT_EXEC for 32-bit processes (default) * off PROT_READ implies PROT_EXEC */ static int __init nonx32_setup(char *str) { if (!strcmp(str, "on")) force_personality32 &= ~READ_IMPLIES_EXEC; else if (!strcmp(str, "off")) force_personality32 |= READ_IMPLIES_EXEC; return 1; } __setup("noexec32=", nonx32_setup); /* * When memory was added/removed make sure all the processes MM have * suitable PGD entries in the local PGD level page. */ void sync_global_pgds(unsigned long start, unsigned long end) { unsigned long address; for (address = start; address <= end; address += PGDIR_SIZE) { const pgd_t *pgd_ref = pgd_offset_k(address); struct page *page; if (pgd_none(*pgd_ref)) continue; spin_lock(&pgd_lock); list_for_each_entry(page, &pgd_list, lru) { pgd_t *pgd; spinlock_t *pgt_lock; pgd = (pgd_t *)page_address(page) + pgd_index(address); /* the pgt_lock only for Xen */ pgt_lock = &pgd_page_get_mm(page)->page_table_lock; spin_lock(pgt_lock); if (pgd_none(*pgd)) set_pgd(pgd, *pgd_ref); else BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); spin_unlock(pgt_lock); } spin_unlock(&pgd_lock); } } /* * NOTE: This function is marked __ref because it calls __init function * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0. 
*/ static __ref void *spp_getpage(void) { void *ptr; if (after_bootmem) ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK); else ptr = alloc_bootmem_pages(PAGE_SIZE); if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) { panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem ? "after bootmem" : ""); } pr_debug("spp_getpage %p\n", ptr); return ptr; } static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr) { if (pgd_none(*pgd)) { pud_t *pud = (pud_t *)spp_getpage(); pgd_populate(&init_mm, pgd, pud); if (pud != pud_offset(pgd, 0)) printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n", pud, pud_offset(pgd, 0)); } return pud_offset(pgd, vaddr); } static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr) { if (pud_none(*pud)) { pmd_t *pmd = (pmd_t *) spp_getpage(); pud_populate(&init_mm, pud, pmd); if (pmd != pmd_offset(pud, 0)) printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud, 0)); } return pmd_offset(pud, vaddr); } static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr) { if (pmd_none(*pmd)) { pte_t *pte = (pte_t *) spp_getpage(); pmd_populate_kernel(&init_mm, pmd, pte); if (pte != pte_offset_kernel(pmd, 0)) printk(KERN_ERR "PAGETABLE BUG #02!\n"); } return pte_offset_kernel(pmd, vaddr); } void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte) { pud_t *pud; pmd_t *pmd; pte_t *pte; pud = pud_page + pud_index(vaddr); pmd = fill_pmd(pud, vaddr); pte = fill_pte(pmd, vaddr); set_pte(pte, new_pte); /* * It's enough to flush this one mapping. * (PGE mappings get flushed as well) */ __flush_tlb_one(vaddr); } void set_pte_vaddr(unsigned long vaddr, pte_t pteval) { pgd_t *pgd; pud_t *pud_page; pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval)); pgd = pgd_offset_k(vaddr); if (pgd_none(*pgd)) { printk(KERN_ERR "PGD FIXMAP MISSING, it should be setup in head.S!\n"); return; } pud_page = (pud_t*)pgd_page_vaddr(*pgd); set_pte_vaddr_pud(pud_page, vaddr, pteval); } pmd_t * __init populate_extra_pmd(unsigned long vaddr) { pgd_t *pgd; pud_t *pud; pgd = pgd_offset_k(vaddr); pud = fill_pud(pgd, vaddr); return fill_pmd(pud, vaddr); } pte_t * __init populate_extra_pte(unsigned long vaddr) { pmd_t *pmd; pmd = populate_extra_pmd(vaddr); return fill_pte(pmd, vaddr); } /* * Create large page table mappings for a range of physical addresses. */ static void __init __init_extra_mapping(unsigned long phys, unsigned long size, pgprot_t prot) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK)); for (; size; phys += PMD_SIZE, size -= PMD_SIZE) { pgd = pgd_offset_k((unsigned long)__va(phys)); if (pgd_none(*pgd)) { pud = (pud_t *) spp_getpage(); set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE | _PAGE_USER)); } pud = pud_offset(pgd, (unsigned long)__va(phys)); if (pud_none(*pud)) { pmd = (pmd_t *) spp_getpage(); set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER)); } pmd = pmd_offset(pud, phys); BUG_ON(!pmd_none(*pmd)); set_pmd(pmd, __pmd(phys | pgprot_val(prot))); } } void __init init_extra_mapping_wb(unsigned long phys, unsigned long size) { __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE); } void __init init_extra_mapping_uc(unsigned long phys, unsigned long size) { __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE); } /* * The head.S code sets up the kernel high mapping: * * from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text) * * phys_addr holds the negative offset to the kernel, which is added * to the compile time generated pmds. 
This results in invalid pmds up * to the point where we hit the physaddr 0 mapping. * * We limit the mappings to the region from _text to _brk_end. _brk_end * is rounded up to the 2MB boundary. This catches the invalid pmds as * well, as they are located before _text: */ void __init cleanup_highmap(void) { unsigned long vaddr = __START_KERNEL_map; unsigned long vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT); unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1; pmd_t *pmd = level2_kernel_pgt; for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) { if (pmd_none(*pmd)) continue; if (vaddr < (unsigned long) _text || vaddr > end) set_pmd(pmd, __pmd(0)); } } static __ref void *alloc_low_page(unsigned long *phys) { unsigned long pfn = pgt_buf_end++; void *adr; if (after_bootmem) { adr = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK); *phys = __pa(adr); return adr; } if (pfn >= pgt_buf_top) panic("alloc_low_page: ran out of memory"); adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE); clear_page(adr); *phys = pfn * PAGE_SIZE; return adr; } static __ref void *map_low_page(void *virt) { void *adr; unsigned long phys, left; if (after_bootmem) return virt; phys = __pa(virt); left = phys & (PAGE_SIZE - 1); adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE); adr = (void *)(((unsigned long)adr) | left); return adr; } static __ref void unmap_low_page(void *adr) { if (after_bootmem) return; early_iounmap((void *)((unsigned long)adr & PAGE_MASK), PAGE_SIZE); } static unsigned long __meminit phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end, pgprot_t prot) { unsigned pages = 0; unsigned long last_map_addr = end; int i; pte_t *pte = pte_page + pte_index(addr); for(i = pte_index(addr); i < PTRS_PER_PTE; i++, addr += PAGE_SIZE, pte++) { if (addr >= end) { if (!after_bootmem) { for(; i < PTRS_PER_PTE; i++, pte++) set_pte(pte, __pte(0)); } break; } /* * We will re-use the existing mapping. * Xen for example has some special requirements, like mapping * pagetable pages as RO. So assume someone who pre-setup * these mappings are more intelligent. 
*/ if (pte_val(*pte)) { pages++; continue; } if (0) printk(" pte=%p addr=%lx pte=%016lx\n", pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte); pages++; set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot)); last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE; } update_page_count(PG_LEVEL_4K, pages); return last_map_addr; } static unsigned long __meminit phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end, unsigned long page_size_mask, pgprot_t prot) { unsigned long pages = 0; unsigned long last_map_addr = end; int i = pmd_index(address); for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) { unsigned long pte_phys; pmd_t *pmd = pmd_page + pmd_index(address); pte_t *pte; pgprot_t new_prot = prot; if (address >= end) { if (!after_bootmem) { for (; i < PTRS_PER_PMD; i++, pmd++) set_pmd(pmd, __pmd(0)); } break; } if (pmd_val(*pmd)) { if (!pmd_large(*pmd)) { spin_lock(&init_mm.page_table_lock); pte = map_low_page((pte_t *)pmd_page_vaddr(*pmd)); last_map_addr = phys_pte_init(pte, address, end, prot); unmap_low_page(pte); spin_unlock(&init_mm.page_table_lock); continue; } /* * If we are ok with PG_LEVEL_2M mapping, then we will * use the existing mapping, * * Otherwise, we will split the large page mapping but * use the same existing protection bits except for * large page, so that we don't violate Intel's TLB * Application note (317080) which says, while changing * the page sizes, new and old translations should * not differ with respect to page frame and * attributes. */ if (page_size_mask & (1 << PG_LEVEL_2M)) { pages++; continue; } new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd)); } if (page_size_mask & (1<<PG_LEVEL_2M)) { pages++; spin_lock(&init_mm.page_table_lock); set_pte((pte_t *)pmd, pfn_pte(address >> PAGE_SHIFT, __pgprot(pgprot_val(prot) | _PAGE_PSE))); spin_unlock(&init_mm.page_table_lock); last_map_addr = (address & PMD_MASK) + PMD_SIZE; continue; } pte = alloc_low_page(&pte_phys); last_map_addr = phys_pte_init(pte, address, end, new_prot); unmap_low_page(pte); spin_lock(&init_mm.page_table_lock); pmd_populate_kernel(&init_mm, pmd, __va(pte_phys)); spin_unlock(&init_mm.page_table_lock); } update_page_count(PG_LEVEL_2M, pages); return last_map_addr; } static unsigned long __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end, unsigned long page_size_mask) { unsigned long pages = 0; unsigned long last_map_addr = end; int i = pud_index(addr); for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE) { unsigned long pmd_phys; pud_t *pud = pud_page + pud_index(addr); pmd_t *pmd; pgprot_t prot = PAGE_KERNEL; if (addr >= end) break; if (!after_bootmem && !e820_any_mapped(addr, addr+PUD_SIZE, 0)) { set_pud(pud, __pud(0)); continue; } if (pud_val(*pud)) { if (!pud_large(*pud)) { pmd = map_low_page(pmd_offset(pud, 0)); last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask, prot); unmap_low_page(pmd); __flush_tlb_all(); continue; } /* * If we are ok with PG_LEVEL_1G mapping, then we will * use the existing mapping. * * Otherwise, we will split the gbpage mapping but use * the same existing protection bits except for large * page, so that we don't violate Intel's TLB * Application note (317080) which says, while changing * the page sizes, new and old translations should * not differ with respect to page frame and * attributes. 
*/ if (page_size_mask & (1 << PG_LEVEL_1G)) { pages++; continue; } prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud)); } if (page_size_mask & (1<<PG_LEVEL_1G)) { pages++; spin_lock(&init_mm.page_table_lock); set_pte((pte_t *)pud, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE)); spin_unlock(&init_mm.page_table_lock); last_map_addr = (addr & PUD_MASK) + PUD_SIZE; continue; } pmd = alloc_low_page(&pmd_phys); last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask, prot); unmap_low_page(pmd); spin_lock(&init_mm.page_table_lock); pud_populate(&init_mm, pud, __va(pmd_phys)); spin_unlock(&init_mm.page_table_lock); } __flush_tlb_all(); update_page_count(PG_LEVEL_1G, pages); return last_map_addr; } unsigned long __meminit kernel_physical_mapping_init(unsigned long start, unsigned long end, unsigned long page_size_mask) { bool pgd_changed = false; unsigned long next, last_map_addr = end; unsigned long addr; start = (unsigned long)__va(start); end = (unsigned long)__va(end); addr = start; for (; start < end; start = next) { pgd_t *pgd = pgd_offset_k(start); unsigned long pud_phys; pud_t *pud; next = (start + PGDIR_SIZE) & PGDIR_MASK; if (next > end) next = end; if (pgd_val(*pgd)) { pud = map_low_page((pud_t *)pgd_page_vaddr(*pgd)); last_map_addr = phys_pud_init(pud, __pa(start), __pa(end), page_size_mask); unmap_low_page(pud); continue; } pud = alloc_low_page(&pud_phys); last_map_addr = phys_pud_init(pud, __pa(start), __pa(next), page_size_mask); unmap_low_page(pud); spin_lock(&init_mm.page_table_lock); pgd_populate(&init_mm, pgd, __va(pud_phys)); spin_unlock(&init_mm.page_table_lock); pgd_changed = true; } if (pgd_changed) sync_global_pgds(addr, end); __flush_tlb_all(); return last_map_addr; } #ifndef CONFIG_NUMA void __init initmem_init(void) { memblock_x86_register_active_regions(0, 0, max_pfn); } #endif void __init paging_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES]; memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); #ifdef CONFIG_ZONE_DMA max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN; #endif max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN; max_zone_pfns[ZONE_NORMAL] = max_pfn; sparse_memory_present_with_active_regions(MAX_NUMNODES); sparse_init(); /* * clear the default setting with node 0 * note: don't use nodes_clear here, that is really clearing when * numa support is not compiled in, and later node_set_state * will not set it back. */ node_clear_state(0, N_NORMAL_MEMORY); free_area_init_nodes(max_zone_pfns); } /* * Memory hotplug specific functions */ #ifdef CONFIG_MEMORY_HOTPLUG /* * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need * updating. */ static void update_end_of_memory_vars(u64 start, u64 size) { unsigned long end_pfn = PFN_UP(start + size); if (end_pfn > max_pfn) { max_pfn = end_pfn; max_low_pfn = end_pfn; high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1; } } /* * Memory is added always to NORMAL zone. This means you will never get * additional DMA/DMA32 memory. 
*/ int arch_add_memory(int nid, u64 start, u64 size) { struct pglist_data *pgdat = NODE_DATA(nid); struct zone *zone = pgdat->node_zones + ZONE_NORMAL; unsigned long last_mapped_pfn, start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; int ret; last_mapped_pfn = init_memory_mapping(start, start + size); if (last_mapped_pfn > max_pfn_mapped) max_pfn_mapped = last_mapped_pfn; ret = __add_pages(nid, zone, start_pfn, nr_pages); WARN_ON_ONCE(ret); /* update max_pfn, max_low_pfn and high_memory */ update_end_of_memory_vars(start, size); return ret; } EXPORT_SYMBOL_GPL(arch_add_memory); #endif /* CONFIG_MEMORY_HOTPLUG */ static struct kcore_list kcore_vsyscall; void __init mem_init(void) { long codesize, reservedpages, datasize, initsize; unsigned long absent_pages; pci_iommu_alloc(); /* clear_bss() already clear the empty_zero_page */ reservedpages = 0; /* this will put all low memory onto the freelists */ #ifdef CONFIG_NUMA totalram_pages = numa_free_all_bootmem(); #else totalram_pages = free_all_bootmem(); #endif absent_pages = absent_pages_in_range(0, max_pfn); reservedpages = max_pfn - totalram_pages - absent_pages; after_bootmem = 1; codesize = (unsigned long) &_etext - (unsigned long) &_text; datasize = (unsigned long) &_edata - (unsigned long) &_etext; initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; /* Register memory areas for /proc/kcore */ kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START, VSYSCALL_END - VSYSCALL_START, KCORE_OTHER); printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, " "%ldk absent, %ldk reserved, %ldk data, %ldk init)\n", nr_free_pages() << (PAGE_SHIFT-10), max_pfn << (PAGE_SHIFT-10), codesize >> 10, absent_pages << (PAGE_SHIFT-10), reservedpages << (PAGE_SHIFT-10), datasize >> 10, initsize >> 10); } #ifdef CONFIG_DEBUG_RODATA const int rodata_test_data = 0xC3; EXPORT_SYMBOL_GPL(rodata_test_data); int kernel_set_to_readonly; void set_kernel_text_rw(void) { unsigned long start = PFN_ALIGN(_text); unsigned long end = PFN_ALIGN(__stop___ex_table); if (!kernel_set_to_readonly) return; pr_debug("Set kernel text: %lx - %lx for read write\n", start, end); /* * Make the kernel identity mapping for text RW. Kernel text * mapping will always be RO. Refer to the comment in * static_protections() in pageattr.c */ set_memory_rw(start, (end - start) >> PAGE_SHIFT); } void set_kernel_text_ro(void) { unsigned long start = PFN_ALIGN(_text); unsigned long end = PFN_ALIGN(__stop___ex_table); if (!kernel_set_to_readonly) return; pr_debug("Set kernel text: %lx - %lx for read only\n", start, end); /* * Set the kernel identity mapping for text RO. */ set_memory_ro(start, (end - start) >> PAGE_SHIFT); } void mark_rodata_ro(void) { unsigned long start = PFN_ALIGN(_text); unsigned long rodata_start = ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK; unsigned long end = (unsigned long) &__end_rodata_hpage_align; unsigned long text_end = PAGE_ALIGN((unsigned long) &__stop___ex_table); unsigned long rodata_end = PAGE_ALIGN((unsigned long) &__end_rodata); unsigned long data_start = (unsigned long) &_sdata; printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", (end - start) >> 10); set_memory_ro(start, (end - start) >> PAGE_SHIFT); kernel_set_to_readonly = 1; /* * The rodata section (but not the kernel text!) should also be * not-executable. 
*/ set_memory_nx(rodata_start, (end - rodata_start) >> PAGE_SHIFT); rodata_test(); #ifdef CONFIG_CPA_DEBUG printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end); set_memory_rw(start, (end-start) >> PAGE_SHIFT); printk(KERN_INFO "Testing CPA: again\n"); set_memory_ro(start, (end-start) >> PAGE_SHIFT); #endif free_init_pages("unused kernel memory", (unsigned long) page_address(virt_to_page(text_end)), (unsigned long) page_address(virt_to_page(rodata_start))); free_init_pages("unused kernel memory", (unsigned long) page_address(virt_to_page(rodata_end)), (unsigned long) page_address(virt_to_page(data_start))); } #endif int kern_addr_valid(unsigned long addr) { unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT; pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; if (above != 0 && above != -1UL) return 0; pgd = pgd_offset_k(addr); if (pgd_none(*pgd)) return 0; pud = pud_offset(pgd, addr); if (pud_none(*pud)) return 0; pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) return 0; if (pmd_large(*pmd)) return pfn_valid(pmd_pfn(*pmd)); pte = pte_offset_kernel(pmd, addr); if (pte_none(*pte)) return 0; return pfn_valid(pte_pfn(*pte)); } /* * A pseudo VMA to allow ptrace access for the vsyscall page. This only * covers the 64bit vsyscall page now. 32bit has a real VMA now and does * not need special handling anymore: */ static struct vm_area_struct gate_vma = { .vm_start = VSYSCALL_START, .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE), .vm_page_prot = PAGE_READONLY_EXEC, .vm_flags = VM_READ | VM_EXEC }; struct vm_area_struct *get_gate_vma(struct mm_struct *mm) { #ifdef CONFIG_IA32_EMULATION if (!mm || mm->context.ia32_compat) return NULL; #endif return &gate_vma; } int in_gate_area(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma = get_gate_vma(mm); if (!vma) return 0; return (addr >= vma->vm_start) && (addr < vma->vm_end); } /* * Use this when you have no reliable mm, typically from interrupt * context. It is less reliable than using a task's mm and may give * false positives. */ int in_gate_area_no_mm(unsigned long addr) { return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END); } const char *arch_vma_name(struct vm_area_struct *vma) { if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) return "[vdso]"; if (vma == &gate_vma) return "[vsyscall]"; return NULL; } #ifdef CONFIG_X86_UV unsigned long memory_block_size_bytes(void) { if (is_uv_system()) { printk(KERN_INFO "UV: memory block size 2GB\n"); return 2UL * 1024 * 1024 * 1024; } return MIN_MEMORY_BLOCK_SIZE; } #endif #ifdef CONFIG_SPARSEMEM_VMEMMAP /* * Initialise the sparsemem vmemmap using huge-pages at the PMD level. 
*/ static long __meminitdata addr_start, addr_end; static void __meminitdata *p_start, *p_end; static int __meminitdata node_start; int __meminit vmemmap_populate(struct page *start_page, unsigned long size, int node) { unsigned long addr = (unsigned long)start_page; unsigned long end = (unsigned long)(start_page + size); unsigned long next; pgd_t *pgd; pud_t *pud; pmd_t *pmd; for (; addr < end; addr = next) { void *p = NULL; pgd = vmemmap_pgd_populate(addr, node); if (!pgd) return -ENOMEM; pud = vmemmap_pud_populate(pgd, addr, node); if (!pud) return -ENOMEM; if (!cpu_has_pse) { next = (addr + PAGE_SIZE) & PAGE_MASK; pmd = vmemmap_pmd_populate(pud, addr, node); if (!pmd) return -ENOMEM; p = vmemmap_pte_populate(pmd, addr, node); if (!p) return -ENOMEM; addr_end = addr + PAGE_SIZE; p_end = p + PAGE_SIZE; } else { next = pmd_addr_end(addr, end); pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) { pte_t entry; p = vmemmap_alloc_block_buf(PMD_SIZE, node); if (!p) return -ENOMEM; entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL_LARGE); set_pmd(pmd, __pmd(pte_val(entry))); /* check to see if we have contiguous blocks */ if (p_end != p || node_start != node) { if (p_start) printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n", addr_start, addr_end-1, p_start, p_end-1, node_start); addr_start = addr; node_start = node; p_start = p; } addr_end = addr + PMD_SIZE; p_end = p + PMD_SIZE; } else vmemmap_verify((pte_t *)pmd, node, addr, next); } } sync_global_pgds((unsigned long)start_page, end); return 0; } void __meminit vmemmap_populate_print_last(void) { if (p_start) { printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n", addr_start, addr_end-1, p_start, p_end-1, node_start); p_start = NULL; p_end = NULL; node_start = 0; } } #endif
gpl-2.0
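As background for the kern_addr_valid() routine in the init_64.c content above: before walking the page tables it rejects non-canonical addresses by arithmetically shifting the address right by __VIRTUAL_MASK_SHIFT and requiring the result to be 0 or -1, i.e. bits 63..47 must be a sign extension of bit 47. The stand-alone user-space sketch below reproduces just that check; the 47-bit shift and the sample addresses are illustrative assumptions, not quotes from the kernel file.

#include <stdio.h>
#include <stdint.h>

/* Assumed __VIRTUAL_MASK_SHIFT for 48-bit virtual addressing. */
#define VIRTUAL_MASK_SHIFT 47

/* Canonical iff bits 63..47 are all copies of bit 47. */
static int is_canonical(uint64_t addr)
{
	int64_t above = (int64_t)addr >> VIRTUAL_MASK_SHIFT; /* arithmetic shift */

	return above == 0 || above == -1;
}

int main(void)
{
	uint64_t samples[] = {
		0x00007fffffffe000ULL, /* top of user space: canonical     */
		0xffff880000000000ULL, /* kernel direct map: canonical     */
		0x0000800000000000ULL, /* inside the hole:   non-canonical */
	};
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%#018llx -> %s\n", (unsigned long long)samples[i],
		       is_canonical(samples[i]) ? "canonical" : "non-canonical");
	return 0;
}

Only after this check does kern_addr_valid() descend pgd -> pud -> pmd -> pte, treating a large PMD as terminal, exactly as the walk in the file shows.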
jeboo/kernel_KK_i337_ATT_NB1
arch/x86/kernel/ptrace.c
2342
38676
/* By Ross Biro 1/23/92 */ /* * Pentium III FXSR, SSE support * Gareth Hughes <gareth@valinux.com>, May 2000 */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/ptrace.h> #include <linux/regset.h> #include <linux/tracehook.h> #include <linux/user.h> #include <linux/elf.h> #include <linux/security.h> #include <linux/audit.h> #include <linux/seccomp.h> #include <linux/signal.h> #include <linux/perf_event.h> #include <linux/hw_breakpoint.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/i387.h> #include <asm/fpu-internal.h> #include <asm/debugreg.h> #include <asm/ldt.h> #include <asm/desc.h> #include <asm/prctl.h> #include <asm/proto.h> #include <asm/hw_breakpoint.h> #include <asm/traps.h> #include "tls.h" #define CREATE_TRACE_POINTS #include <trace/events/syscalls.h> enum x86_regset { REGSET_GENERAL, REGSET_FP, REGSET_XFP, REGSET_IOPERM64 = REGSET_XFP, REGSET_XSTATE, REGSET_TLS, REGSET_IOPERM32, }; struct pt_regs_offset { const char *name; int offset; }; #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} #define REG_OFFSET_END {.name = NULL, .offset = 0} static const struct pt_regs_offset regoffset_table[] = { #ifdef CONFIG_X86_64 REG_OFFSET_NAME(r15), REG_OFFSET_NAME(r14), REG_OFFSET_NAME(r13), REG_OFFSET_NAME(r12), REG_OFFSET_NAME(r11), REG_OFFSET_NAME(r10), REG_OFFSET_NAME(r9), REG_OFFSET_NAME(r8), #endif REG_OFFSET_NAME(bx), REG_OFFSET_NAME(cx), REG_OFFSET_NAME(dx), REG_OFFSET_NAME(si), REG_OFFSET_NAME(di), REG_OFFSET_NAME(bp), REG_OFFSET_NAME(ax), #ifdef CONFIG_X86_32 REG_OFFSET_NAME(ds), REG_OFFSET_NAME(es), REG_OFFSET_NAME(fs), REG_OFFSET_NAME(gs), #endif REG_OFFSET_NAME(orig_ax), REG_OFFSET_NAME(ip), REG_OFFSET_NAME(cs), REG_OFFSET_NAME(flags), REG_OFFSET_NAME(sp), REG_OFFSET_NAME(ss), REG_OFFSET_END, }; /** * regs_query_register_offset() - query register offset from its name * @name: the name of a register * * regs_query_register_offset() returns the offset of a register in struct * pt_regs from its name. If the name is invalid, this returns -EINVAL; */ int regs_query_register_offset(const char *name) { const struct pt_regs_offset *roff; for (roff = regoffset_table; roff->name != NULL; roff++) if (!strcmp(roff->name, name)) return roff->offset; return -EINVAL; } /** * regs_query_register_name() - query register name from its offset * @offset: the offset of a register in struct pt_regs. * * regs_query_register_name() returns the name of a register from its * offset in struct pt_regs. If the @offset is invalid, this returns NULL; */ const char *regs_query_register_name(unsigned int offset) { const struct pt_regs_offset *roff; for (roff = regoffset_table; roff->name != NULL; roff++) if (roff->offset == offset) return roff->name; return NULL; } static const int arg_offs_table[] = { #ifdef CONFIG_X86_32 [0] = offsetof(struct pt_regs, ax), [1] = offsetof(struct pt_regs, dx), [2] = offsetof(struct pt_regs, cx) #else /* CONFIG_X86_64 */ [0] = offsetof(struct pt_regs, di), [1] = offsetof(struct pt_regs, si), [2] = offsetof(struct pt_regs, dx), [3] = offsetof(struct pt_regs, cx), [4] = offsetof(struct pt_regs, r8), [5] = offsetof(struct pt_regs, r9) #endif }; /* * does not yet catch signals sent when the child dies. * in exit.c or in signal.c. */ /* * Determines which flags the user has access to [1 = access, 0 = no access]. 
*/ #define FLAG_MASK_32 ((unsigned long) \ (X86_EFLAGS_CF | X86_EFLAGS_PF | \ X86_EFLAGS_AF | X86_EFLAGS_ZF | \ X86_EFLAGS_SF | X86_EFLAGS_TF | \ X86_EFLAGS_DF | X86_EFLAGS_OF | \ X86_EFLAGS_RF | X86_EFLAGS_AC)) /* * Determines whether a value may be installed in a segment register. */ static inline bool invalid_selector(u16 value) { return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL); } #ifdef CONFIG_X86_32 #define FLAG_MASK FLAG_MASK_32 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno) { BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0); return &regs->bx + (regno >> 2); } static u16 get_segment_reg(struct task_struct *task, unsigned long offset) { /* * Returning the value truncates it to 16 bits. */ unsigned int retval; if (offset != offsetof(struct user_regs_struct, gs)) retval = *pt_regs_access(task_pt_regs(task), offset); else { if (task == current) retval = get_user_gs(task_pt_regs(task)); else retval = task_user_gs(task); } return retval; } static int set_segment_reg(struct task_struct *task, unsigned long offset, u16 value) { /* * The value argument was already truncated to 16 bits. */ if (invalid_selector(value)) return -EIO; /* * For %cs and %ss we cannot permit a null selector. * We can permit a bogus selector as long as it has USER_RPL. * Null selectors are fine for other segment registers, but * we will never get back to user mode with invalid %cs or %ss * and will take the trap in iret instead. Much code relies * on user_mode() to distinguish a user trap frame (which can * safely use invalid selectors) from a kernel trap frame. */ switch (offset) { case offsetof(struct user_regs_struct, cs): case offsetof(struct user_regs_struct, ss): if (unlikely(value == 0)) return -EIO; default: *pt_regs_access(task_pt_regs(task), offset) = value; break; case offsetof(struct user_regs_struct, gs): if (task == current) set_user_gs(task_pt_regs(task), value); else task_user_gs(task) = value; } return 0; } #else /* CONFIG_X86_64 */ #define FLAG_MASK (FLAG_MASK_32 | X86_EFLAGS_NT) static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset) { BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0); return &regs->r15 + (offset / sizeof(regs->r15)); } static u16 get_segment_reg(struct task_struct *task, unsigned long offset) { /* * Returning the value truncates it to 16 bits. */ unsigned int seg; switch (offset) { case offsetof(struct user_regs_struct, fs): if (task == current) { /* Older gas can't assemble movq %?s,%r?? */ asm("movl %%fs,%0" : "=r" (seg)); return seg; } return task->thread.fsindex; case offsetof(struct user_regs_struct, gs): if (task == current) { asm("movl %%gs,%0" : "=r" (seg)); return seg; } return task->thread.gsindex; case offsetof(struct user_regs_struct, ds): if (task == current) { asm("movl %%ds,%0" : "=r" (seg)); return seg; } return task->thread.ds; case offsetof(struct user_regs_struct, es): if (task == current) { asm("movl %%es,%0" : "=r" (seg)); return seg; } return task->thread.es; case offsetof(struct user_regs_struct, cs): case offsetof(struct user_regs_struct, ss): break; } return *pt_regs_access(task_pt_regs(task), offset); } static int set_segment_reg(struct task_struct *task, unsigned long offset, u16 value) { /* * The value argument was already truncated to 16 bits. */ if (invalid_selector(value)) return -EIO; switch (offset) { case offsetof(struct user_regs_struct,fs): /* * If this is setting fs as for normal 64-bit use but * setting fs_base has implicitly changed it, leave it. 
*/ if ((value == FS_TLS_SEL && task->thread.fsindex == 0 && task->thread.fs != 0) || (value == 0 && task->thread.fsindex == FS_TLS_SEL && task->thread.fs == 0)) break; task->thread.fsindex = value; if (task == current) loadsegment(fs, task->thread.fsindex); break; case offsetof(struct user_regs_struct,gs): /* * If this is setting gs as for normal 64-bit use but * setting gs_base has implicitly changed it, leave it. */ if ((value == GS_TLS_SEL && task->thread.gsindex == 0 && task->thread.gs != 0) || (value == 0 && task->thread.gsindex == GS_TLS_SEL && task->thread.gs == 0)) break; task->thread.gsindex = value; if (task == current) load_gs_index(task->thread.gsindex); break; case offsetof(struct user_regs_struct,ds): task->thread.ds = value; if (task == current) loadsegment(ds, task->thread.ds); break; case offsetof(struct user_regs_struct,es): task->thread.es = value; if (task == current) loadsegment(es, task->thread.es); break; /* * Can't actually change these in 64-bit mode. */ case offsetof(struct user_regs_struct,cs): if (unlikely(value == 0)) return -EIO; #ifdef CONFIG_IA32_EMULATION if (test_tsk_thread_flag(task, TIF_IA32)) task_pt_regs(task)->cs = value; #endif break; case offsetof(struct user_regs_struct,ss): if (unlikely(value == 0)) return -EIO; #ifdef CONFIG_IA32_EMULATION if (test_tsk_thread_flag(task, TIF_IA32)) task_pt_regs(task)->ss = value; #endif break; } return 0; } #endif /* CONFIG_X86_32 */ static unsigned long get_flags(struct task_struct *task) { unsigned long retval = task_pt_regs(task)->flags; /* * If the debugger set TF, hide it from the readout. */ if (test_tsk_thread_flag(task, TIF_FORCED_TF)) retval &= ~X86_EFLAGS_TF; return retval; } static int set_flags(struct task_struct *task, unsigned long value) { struct pt_regs *regs = task_pt_regs(task); /* * If the user value contains TF, mark that * it was not "us" (the debugger) that set it. * If not, make sure it stays set if we had. */ if (value & X86_EFLAGS_TF) clear_tsk_thread_flag(task, TIF_FORCED_TF); else if (test_tsk_thread_flag(task, TIF_FORCED_TF)) value |= X86_EFLAGS_TF; regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK); return 0; } static int putreg(struct task_struct *child, unsigned long offset, unsigned long value) { switch (offset) { case offsetof(struct user_regs_struct, cs): case offsetof(struct user_regs_struct, ds): case offsetof(struct user_regs_struct, es): case offsetof(struct user_regs_struct, fs): case offsetof(struct user_regs_struct, gs): case offsetof(struct user_regs_struct, ss): return set_segment_reg(child, offset, value); case offsetof(struct user_regs_struct, flags): return set_flags(child, value); #ifdef CONFIG_X86_64 case offsetof(struct user_regs_struct,fs_base): if (value >= TASK_SIZE_OF(child)) return -EIO; /* * When changing the segment base, use do_arch_prctl * to set either thread.fs or thread.fsindex and the * corresponding GDT slot. */ if (child->thread.fs != value) return do_arch_prctl(child, ARCH_SET_FS, value); return 0; case offsetof(struct user_regs_struct,gs_base): /* * Exactly the same here as the %fs handling above. 
*/ if (value >= TASK_SIZE_OF(child)) return -EIO; if (child->thread.gs != value) return do_arch_prctl(child, ARCH_SET_GS, value); return 0; #endif } *pt_regs_access(task_pt_regs(child), offset) = value; return 0; } static unsigned long getreg(struct task_struct *task, unsigned long offset) { switch (offset) { case offsetof(struct user_regs_struct, cs): case offsetof(struct user_regs_struct, ds): case offsetof(struct user_regs_struct, es): case offsetof(struct user_regs_struct, fs): case offsetof(struct user_regs_struct, gs): case offsetof(struct user_regs_struct, ss): return get_segment_reg(task, offset); case offsetof(struct user_regs_struct, flags): return get_flags(task); #ifdef CONFIG_X86_64 case offsetof(struct user_regs_struct, fs_base): { /* * do_arch_prctl may have used a GDT slot instead of * the MSR. To userland, it appears the same either * way, except the %fs segment selector might not be 0. */ unsigned int seg = task->thread.fsindex; if (task->thread.fs != 0) return task->thread.fs; if (task == current) asm("movl %%fs,%0" : "=r" (seg)); if (seg != FS_TLS_SEL) return 0; return get_desc_base(&task->thread.tls_array[FS_TLS]); } case offsetof(struct user_regs_struct, gs_base): { /* * Exactly the same here as the %fs handling above. */ unsigned int seg = task->thread.gsindex; if (task->thread.gs != 0) return task->thread.gs; if (task == current) asm("movl %%gs,%0" : "=r" (seg)); if (seg != GS_TLS_SEL) return 0; return get_desc_base(&task->thread.tls_array[GS_TLS]); } #endif } return *pt_regs_access(task_pt_regs(task), offset); } static int genregs_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { if (kbuf) { unsigned long *k = kbuf; while (count >= sizeof(*k)) { *k++ = getreg(target, pos); count -= sizeof(*k); pos += sizeof(*k); } } else { unsigned long __user *u = ubuf; while (count >= sizeof(*u)) { if (__put_user(getreg(target, pos), u++)) return -EFAULT; count -= sizeof(*u); pos += sizeof(*u); } } return 0; } static int genregs_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { int ret = 0; if (kbuf) { const unsigned long *k = kbuf; while (count >= sizeof(*k) && !ret) { ret = putreg(target, pos, *k++); count -= sizeof(*k); pos += sizeof(*k); } } else { const unsigned long __user *u = ubuf; while (count >= sizeof(*u) && !ret) { unsigned long word; ret = __get_user(word, u++); if (ret) break; ret = putreg(target, pos, word); count -= sizeof(*u); pos += sizeof(*u); } } return ret; } static void ptrace_triggered(struct perf_event *bp, struct perf_sample_data *data, struct pt_regs *regs) { int i; struct thread_struct *thread = &(current->thread); /* * Store in the virtual DR6 register the fact that the breakpoint * was hit so the thread's debugger will see it. */ for (i = 0; i < HBP_NUM; i++) { if (thread->ptrace_bps[i] == bp) break; } thread->debugreg6 |= (DR_TRAP0 << i); } /* * Walk through every ptrace breakpoints for this thread and * build the dr7 value on top of their attributes. 
* */ static unsigned long ptrace_get_dr7(struct perf_event *bp[]) { int i; int dr7 = 0; struct arch_hw_breakpoint *info; for (i = 0; i < HBP_NUM; i++) { if (bp[i] && !bp[i]->attr.disabled) { info = counter_arch_bp(bp[i]); dr7 |= encode_dr7(i, info->len, info->type); } } return dr7; } static int ptrace_modify_breakpoint(struct perf_event *bp, int len, int type, struct task_struct *tsk, int disabled) { int err; int gen_len, gen_type; struct perf_event_attr attr; /* * We should have at least an inactive breakpoint at this * slot. It means the user is writing dr7 without having * written the address register first */ if (!bp) return -EINVAL; err = arch_bp_generic_fields(len, type, &gen_len, &gen_type); if (err) return err; attr = bp->attr; attr.bp_len = gen_len; attr.bp_type = gen_type; attr.disabled = disabled; return modify_user_hw_breakpoint(bp, &attr); } /* * Handle ptrace writes to debug register 7. */ static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data) { struct thread_struct *thread = &(tsk->thread); unsigned long old_dr7; int i, orig_ret = 0, rc = 0; int enabled, second_pass = 0; unsigned len, type; struct perf_event *bp; if (ptrace_get_breakpoints(tsk) < 0) return -ESRCH; data &= ~DR_CONTROL_RESERVED; old_dr7 = ptrace_get_dr7(thread->ptrace_bps); restore: /* * Loop through all the hardware breakpoints, making the * appropriate changes to each. */ for (i = 0; i < HBP_NUM; i++) { enabled = decode_dr7(data, i, &len, &type); bp = thread->ptrace_bps[i]; if (!enabled) { if (bp) { /* * Don't unregister the breakpoints right-away, * unless all register_user_hw_breakpoint() * requests have succeeded. This prevents * any window of opportunity for debug * register grabbing by other users. */ if (!second_pass) continue; rc = ptrace_modify_breakpoint(bp, len, type, tsk, 1); if (rc) break; } continue; } rc = ptrace_modify_breakpoint(bp, len, type, tsk, 0); if (rc) break; } /* * Make a second pass to free the remaining unused breakpoints * or to restore the original breakpoints if an error occurred. */ if (!second_pass) { second_pass = 1; if (rc < 0) { orig_ret = rc; data = old_dr7; } goto restore; } ptrace_put_breakpoints(tsk); return ((orig_ret < 0) ? orig_ret : rc); } /* * Handle PTRACE_PEEKUSR calls for the debug register area. */ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n) { struct thread_struct *thread = &(tsk->thread); unsigned long val = 0; if (n < HBP_NUM) { struct perf_event *bp; if (ptrace_get_breakpoints(tsk) < 0) return -ESRCH; bp = thread->ptrace_bps[n]; if (!bp) val = 0; else val = bp->hw.info.address; ptrace_put_breakpoints(tsk); } else if (n == 6) { val = thread->debugreg6; } else if (n == 7) { val = thread->ptrace_dr7; } return val; } static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr, unsigned long addr) { struct perf_event *bp; struct thread_struct *t = &tsk->thread; struct perf_event_attr attr; int err = 0; if (ptrace_get_breakpoints(tsk) < 0) return -ESRCH; if (!t->ptrace_bps[nr]) { ptrace_breakpoint_init(&attr); /* * Put stub len and type to register (reserve) an inactive but * correct bp */ attr.bp_addr = addr; attr.bp_len = HW_BREAKPOINT_LEN_1; attr.bp_type = HW_BREAKPOINT_W; attr.disabled = 1; bp = register_user_hw_breakpoint(&attr, ptrace_triggered, NULL, tsk); /* * CHECKME: the previous code returned -EIO if the addr wasn't * a valid task virtual addr. The new one will return -EINVAL in * this case. 
* -EINVAL may be what we want for in-kernel breakpoints users, * but -EIO looks better for ptrace, since we refuse a register * writing for the user. And anyway this is the previous * behaviour. */ if (IS_ERR(bp)) { err = PTR_ERR(bp); goto put; } t->ptrace_bps[nr] = bp; } else { bp = t->ptrace_bps[nr]; attr = bp->attr; attr.bp_addr = addr; err = modify_user_hw_breakpoint(bp, &attr); } put: ptrace_put_breakpoints(tsk); return err; } /* * Handle PTRACE_POKEUSR calls for the debug register area. */ static int ptrace_set_debugreg(struct task_struct *tsk, int n, unsigned long val) { struct thread_struct *thread = &(tsk->thread); int rc = 0; /* There are no DR4 or DR5 registers */ if (n == 4 || n == 5) return -EIO; if (n == 6) { thread->debugreg6 = val; goto ret_path; } if (n < HBP_NUM) { rc = ptrace_set_breakpoint_addr(tsk, n, val); if (rc) return rc; } /* All that's left is DR7 */ if (n == 7) { rc = ptrace_write_dr7(tsk, val); if (!rc) thread->ptrace_dr7 = val; } ret_path: return rc; } /* * These access the current or another (stopped) task's io permission * bitmap for debugging or core dump. */ static int ioperm_active(struct task_struct *target, const struct user_regset *regset) { return target->thread.io_bitmap_max / regset->size; } static int ioperm_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { if (!target->thread.io_bitmap_ptr) return -ENXIO; return user_regset_copyout(&pos, &count, &kbuf, &ubuf, target->thread.io_bitmap_ptr, 0, IO_BITMAP_BYTES); } /* * Called by kernel/ptrace.c when detaching.. * * Make sure the single step bit is not set. */ void ptrace_disable(struct task_struct *child) { user_disable_single_step(child); #ifdef TIF_SYSCALL_EMU clear_tsk_thread_flag(child, TIF_SYSCALL_EMU); #endif } #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION static const struct user_regset_view user_x86_32_view; /* Initialized below. */ #endif long arch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data) { int ret; unsigned long __user *datap = (unsigned long __user *)data; switch (request) { /* read the word at location addr in the USER area. */ case PTRACE_PEEKUSR: { unsigned long tmp; ret = -EIO; if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user)) break; tmp = 0; /* Default return condition */ if (addr < sizeof(struct user_regs_struct)) tmp = getreg(child, addr); else if (addr >= offsetof(struct user, u_debugreg[0]) && addr <= offsetof(struct user, u_debugreg[7])) { addr -= offsetof(struct user, u_debugreg[0]); tmp = ptrace_get_debugreg(child, addr / sizeof(data)); } ret = put_user(tmp, datap); break; } case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ ret = -EIO; if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user)) break; if (addr < sizeof(struct user_regs_struct)) ret = putreg(child, addr, data); else if (addr >= offsetof(struct user, u_debugreg[0]) && addr <= offsetof(struct user, u_debugreg[7])) { addr -= offsetof(struct user, u_debugreg[0]); ret = ptrace_set_debugreg(child, addr / sizeof(data), data); } break; case PTRACE_GETREGS: /* Get all gp regs from the child. */ return copy_regset_to_user(child, task_user_regset_view(current), REGSET_GENERAL, 0, sizeof(struct user_regs_struct), datap); case PTRACE_SETREGS: /* Set all gp regs in the child. 
*/ return copy_regset_from_user(child, task_user_regset_view(current), REGSET_GENERAL, 0, sizeof(struct user_regs_struct), datap); case PTRACE_GETFPREGS: /* Get the child FPU state. */ return copy_regset_to_user(child, task_user_regset_view(current), REGSET_FP, 0, sizeof(struct user_i387_struct), datap); case PTRACE_SETFPREGS: /* Set the child FPU state. */ return copy_regset_from_user(child, task_user_regset_view(current), REGSET_FP, 0, sizeof(struct user_i387_struct), datap); #ifdef CONFIG_X86_32 case PTRACE_GETFPXREGS: /* Get the child extended FPU state. */ return copy_regset_to_user(child, &user_x86_32_view, REGSET_XFP, 0, sizeof(struct user_fxsr_struct), datap) ? -EIO : 0; case PTRACE_SETFPXREGS: /* Set the child extended FPU state. */ return copy_regset_from_user(child, &user_x86_32_view, REGSET_XFP, 0, sizeof(struct user_fxsr_struct), datap) ? -EIO : 0; #endif #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION case PTRACE_GET_THREAD_AREA: if ((int) addr < 0) return -EIO; ret = do_get_thread_area(child, addr, (struct user_desc __user *)data); break; case PTRACE_SET_THREAD_AREA: if ((int) addr < 0) return -EIO; ret = do_set_thread_area(child, addr, (struct user_desc __user *)data, 0); break; #endif #ifdef CONFIG_X86_64 /* normal 64bit interface to access TLS data. Works just like arch_prctl, except that the arguments are reversed. */ case PTRACE_ARCH_PRCTL: ret = do_arch_prctl(child, data, addr); break; #endif default: ret = ptrace_request(child, request, addr, data); break; } return ret; } #ifdef CONFIG_IA32_EMULATION #include <linux/compat.h> #include <linux/syscalls.h> #include <asm/ia32.h> #include <asm/user32.h> #define R32(l,q) \ case offsetof(struct user32, regs.l): \ regs->q = value; break #define SEG32(rs) \ case offsetof(struct user32, regs.rs): \ return set_segment_reg(child, \ offsetof(struct user_regs_struct, rs), \ value); \ break static int putreg32(struct task_struct *child, unsigned regno, u32 value) { struct pt_regs *regs = task_pt_regs(child); switch (regno) { SEG32(cs); SEG32(ds); SEG32(es); SEG32(fs); SEG32(gs); SEG32(ss); R32(ebx, bx); R32(ecx, cx); R32(edx, dx); R32(edi, di); R32(esi, si); R32(ebp, bp); R32(eax, ax); R32(eip, ip); R32(esp, sp); case offsetof(struct user32, regs.orig_eax): /* * A 32-bit debugger setting orig_eax means to restore * the state of the task restarting a 32-bit syscall. * Make sure we interpret the -ERESTART* codes correctly * in case the task is not actually still sitting at the * exit from a 32-bit syscall with TS_COMPAT still set. */ regs->orig_ax = value; if (syscall_get_nr(child, regs) >= 0) task_thread_info(child)->status |= TS_COMPAT; break; case offsetof(struct user32, regs.eflags): return set_flags(child, value); case offsetof(struct user32, u_debugreg[0]) ... 
offsetof(struct user32, u_debugreg[7]): regno -= offsetof(struct user32, u_debugreg[0]); return ptrace_set_debugreg(child, regno / 4, value); default: if (regno > sizeof(struct user32) || (regno & 3)) return -EIO; /* * Other dummy fields in the virtual user structure * are ignored */ break; } return 0; } #undef R32 #undef SEG32 #define R32(l,q) \ case offsetof(struct user32, regs.l): \ *val = regs->q; break #define SEG32(rs) \ case offsetof(struct user32, regs.rs): \ *val = get_segment_reg(child, \ offsetof(struct user_regs_struct, rs)); \ break static int getreg32(struct task_struct *child, unsigned regno, u32 *val) { struct pt_regs *regs = task_pt_regs(child); switch (regno) { SEG32(ds); SEG32(es); SEG32(fs); SEG32(gs); R32(cs, cs); R32(ss, ss); R32(ebx, bx); R32(ecx, cx); R32(edx, dx); R32(edi, di); R32(esi, si); R32(ebp, bp); R32(eax, ax); R32(orig_eax, orig_ax); R32(eip, ip); R32(esp, sp); case offsetof(struct user32, regs.eflags): *val = get_flags(child); break; case offsetof(struct user32, u_debugreg[0]) ... offsetof(struct user32, u_debugreg[7]): regno -= offsetof(struct user32, u_debugreg[0]); *val = ptrace_get_debugreg(child, regno / 4); break; default: if (regno > sizeof(struct user32) || (regno & 3)) return -EIO; /* * Other dummy fields in the virtual user structure * are ignored */ *val = 0; break; } return 0; } #undef R32 #undef SEG32 static int genregs32_get(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, void *kbuf, void __user *ubuf) { if (kbuf) { compat_ulong_t *k = kbuf; while (count >= sizeof(*k)) { getreg32(target, pos, k++); count -= sizeof(*k); pos += sizeof(*k); } } else { compat_ulong_t __user *u = ubuf; while (count >= sizeof(*u)) { compat_ulong_t word; getreg32(target, pos, &word); if (__put_user(word, u++)) return -EFAULT; count -= sizeof(*u); pos += sizeof(*u); } } return 0; } static int genregs32_set(struct task_struct *target, const struct user_regset *regset, unsigned int pos, unsigned int count, const void *kbuf, const void __user *ubuf) { int ret = 0; if (kbuf) { const compat_ulong_t *k = kbuf; while (count >= sizeof(*k) && !ret) { ret = putreg32(target, pos, *k++); count -= sizeof(*k); pos += sizeof(*k); } } else { const compat_ulong_t __user *u = ubuf; while (count >= sizeof(*u) && !ret) { compat_ulong_t word; ret = __get_user(word, u++); if (ret) break; ret = putreg32(target, pos, word); count -= sizeof(*u); pos += sizeof(*u); } } return ret; } #ifdef CONFIG_X86_X32_ABI static long x32_arch_ptrace(struct task_struct *child, compat_long_t request, compat_ulong_t caddr, compat_ulong_t cdata) { unsigned long addr = caddr; unsigned long data = cdata; void __user *datap = compat_ptr(data); int ret; switch (request) { /* Read 32bits at location addr in the USER area. Only allow to return the lower 32bits of segment and debug registers. */ case PTRACE_PEEKUSR: { u32 tmp; ret = -EIO; if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) || addr < offsetof(struct user_regs_struct, cs)) break; tmp = 0; /* Default return condition */ if (addr < sizeof(struct user_regs_struct)) tmp = getreg(child, addr); else if (addr >= offsetof(struct user, u_debugreg[0]) && addr <= offsetof(struct user, u_debugreg[7])) { addr -= offsetof(struct user, u_debugreg[0]); tmp = ptrace_get_debugreg(child, addr / sizeof(data)); } ret = put_user(tmp, (__u32 __user *)datap); break; } /* Write the word at location addr in the USER area. Only allow to update segment and debug registers with the upper 32bits zero-extended. 
*/ case PTRACE_POKEUSR: ret = -EIO; if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) || addr < offsetof(struct user_regs_struct, cs)) break; if (addr < sizeof(struct user_regs_struct)) ret = putreg(child, addr, data); else if (addr >= offsetof(struct user, u_debugreg[0]) && addr <= offsetof(struct user, u_debugreg[7])) { addr -= offsetof(struct user, u_debugreg[0]); ret = ptrace_set_debugreg(child, addr / sizeof(data), data); } break; case PTRACE_GETREGS: /* Get all gp regs from the child. */ return copy_regset_to_user(child, task_user_regset_view(current), REGSET_GENERAL, 0, sizeof(struct user_regs_struct), datap); case PTRACE_SETREGS: /* Set all gp regs in the child. */ return copy_regset_from_user(child, task_user_regset_view(current), REGSET_GENERAL, 0, sizeof(struct user_regs_struct), datap); case PTRACE_GETFPREGS: /* Get the child FPU state. */ return copy_regset_to_user(child, task_user_regset_view(current), REGSET_FP, 0, sizeof(struct user_i387_struct), datap); case PTRACE_SETFPREGS: /* Set the child FPU state. */ return copy_regset_from_user(child, task_user_regset_view(current), REGSET_FP, 0, sizeof(struct user_i387_struct), datap); /* normal 64bit interface to access TLS data. Works just like arch_prctl, except that the arguments are reversed. */ case PTRACE_ARCH_PRCTL: return do_arch_prctl(child, data, addr); default: return compat_ptrace_request(child, request, addr, data); } return ret; } #endif long compat_arch_ptrace(struct task_struct *child, compat_long_t request, compat_ulong_t caddr, compat_ulong_t cdata) { unsigned long addr = caddr; unsigned long data = cdata; void __user *datap = compat_ptr(data); int ret; __u32 val; #ifdef CONFIG_X86_X32_ABI if (!is_ia32_task()) return x32_arch_ptrace(child, request, caddr, cdata); #endif switch (request) { case PTRACE_PEEKUSR: ret = getreg32(child, addr, &val); if (ret == 0) ret = put_user(val, (__u32 __user *)datap); break; case PTRACE_POKEUSR: ret = putreg32(child, addr, data); break; case PTRACE_GETREGS: /* Get all gp regs from the child. */ return copy_regset_to_user(child, &user_x86_32_view, REGSET_GENERAL, 0, sizeof(struct user_regs_struct32), datap); case PTRACE_SETREGS: /* Set all gp regs in the child. */ return copy_regset_from_user(child, &user_x86_32_view, REGSET_GENERAL, 0, sizeof(struct user_regs_struct32), datap); case PTRACE_GETFPREGS: /* Get the child FPU state. */ return copy_regset_to_user(child, &user_x86_32_view, REGSET_FP, 0, sizeof(struct user_i387_ia32_struct), datap); case PTRACE_SETFPREGS: /* Set the child FPU state. */ return copy_regset_from_user( child, &user_x86_32_view, REGSET_FP, 0, sizeof(struct user_i387_ia32_struct), datap); case PTRACE_GETFPXREGS: /* Get the child extended FPU state. */ return copy_regset_to_user(child, &user_x86_32_view, REGSET_XFP, 0, sizeof(struct user32_fxsr_struct), datap); case PTRACE_SETFPXREGS: /* Set the child extended FPU state. 
*/ return copy_regset_from_user(child, &user_x86_32_view, REGSET_XFP, 0, sizeof(struct user32_fxsr_struct), datap); case PTRACE_GET_THREAD_AREA: case PTRACE_SET_THREAD_AREA: return arch_ptrace(child, request, addr, data); default: return compat_ptrace_request(child, request, addr, data); } return ret; } #endif /* CONFIG_IA32_EMULATION */ #ifdef CONFIG_X86_64 static struct user_regset x86_64_regsets[] __read_mostly = { [REGSET_GENERAL] = { .core_note_type = NT_PRSTATUS, .n = sizeof(struct user_regs_struct) / sizeof(long), .size = sizeof(long), .align = sizeof(long), .get = genregs_get, .set = genregs_set }, [REGSET_FP] = { .core_note_type = NT_PRFPREG, .n = sizeof(struct user_i387_struct) / sizeof(long), .size = sizeof(long), .align = sizeof(long), .active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set }, [REGSET_XSTATE] = { .core_note_type = NT_X86_XSTATE, .size = sizeof(u64), .align = sizeof(u64), .active = xstateregs_active, .get = xstateregs_get, .set = xstateregs_set }, [REGSET_IOPERM64] = { .core_note_type = NT_386_IOPERM, .n = IO_BITMAP_LONGS, .size = sizeof(long), .align = sizeof(long), .active = ioperm_active, .get = ioperm_get }, }; static const struct user_regset_view user_x86_64_view = { .name = "x86_64", .e_machine = EM_X86_64, .regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets) }; #else /* CONFIG_X86_32 */ #define user_regs_struct32 user_regs_struct #define genregs32_get genregs_get #define genregs32_set genregs_set #define user_i387_ia32_struct user_i387_struct #define user32_fxsr_struct user_fxsr_struct #endif /* CONFIG_X86_64 */ #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION static struct user_regset x86_32_regsets[] __read_mostly = { [REGSET_GENERAL] = { .core_note_type = NT_PRSTATUS, .n = sizeof(struct user_regs_struct32) / sizeof(u32), .size = sizeof(u32), .align = sizeof(u32), .get = genregs32_get, .set = genregs32_set }, [REGSET_FP] = { .core_note_type = NT_PRFPREG, .n = sizeof(struct user_i387_ia32_struct) / sizeof(u32), .size = sizeof(u32), .align = sizeof(u32), .active = fpregs_active, .get = fpregs_get, .set = fpregs_set }, [REGSET_XFP] = { .core_note_type = NT_PRXFPREG, .n = sizeof(struct user32_fxsr_struct) / sizeof(u32), .size = sizeof(u32), .align = sizeof(u32), .active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set }, [REGSET_XSTATE] = { .core_note_type = NT_X86_XSTATE, .size = sizeof(u64), .align = sizeof(u64), .active = xstateregs_active, .get = xstateregs_get, .set = xstateregs_set }, [REGSET_TLS] = { .core_note_type = NT_386_TLS, .n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN, .size = sizeof(struct user_desc), .align = sizeof(struct user_desc), .active = regset_tls_active, .get = regset_tls_get, .set = regset_tls_set }, [REGSET_IOPERM32] = { .core_note_type = NT_386_IOPERM, .n = IO_BITMAP_BYTES / sizeof(u32), .size = sizeof(u32), .align = sizeof(u32), .active = ioperm_active, .get = ioperm_get }, }; static const struct user_regset_view user_x86_32_view = { .name = "i386", .e_machine = EM_386, .regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets) }; #endif /* * This represents bytes 464..511 in the memory layout exported through * the REGSET_XSTATE interface. 
*/ u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS]; void update_regset_xstate_info(unsigned int size, u64 xstate_mask) { #ifdef CONFIG_X86_64 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64); #endif #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION x86_32_regsets[REGSET_XSTATE].n = size / sizeof(u64); #endif xstate_fx_sw_bytes[USER_XSTATE_XCR0_WORD] = xstate_mask; } const struct user_regset_view *task_user_regset_view(struct task_struct *task) { #ifdef CONFIG_IA32_EMULATION if (test_tsk_thread_flag(task, TIF_IA32)) #endif #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION return &user_x86_32_view; #endif #ifdef CONFIG_X86_64 return &user_x86_64_view; #endif } static void fill_sigtrap_info(struct task_struct *tsk, struct pt_regs *regs, int error_code, int si_code, struct siginfo *info) { tsk->thread.trap_nr = X86_TRAP_DB; tsk->thread.error_code = error_code; memset(info, 0, sizeof(*info)); info->si_signo = SIGTRAP; info->si_code = si_code; info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL; } void user_single_step_siginfo(struct task_struct *tsk, struct pt_regs *regs, struct siginfo *info) { fill_sigtrap_info(tsk, regs, 0, TRAP_BRKPT, info); } void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code, int si_code) { struct siginfo info; fill_sigtrap_info(tsk, regs, error_code, si_code, &info); /* Send us the fake SIGTRAP */ force_sig_info(SIGTRAP, &info, tsk); } #ifdef CONFIG_X86_32 # define IS_IA32 1 #elif defined CONFIG_IA32_EMULATION # define IS_IA32 is_compat_task() #else # define IS_IA32 0 #endif /* * We must return the syscall number to actually look up in the table. * This can be -1L to skip running any syscall at all. */ long syscall_trace_enter(struct pt_regs *regs) { long ret = 0; /* * If we stepped into a sysenter/syscall insn, it trapped in * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP. * If user-mode had set TF itself, then it's still clear from * do_debug() and we need to set it again to restore the user * state. If we entered on the slow path, TF was already set. */ if (test_thread_flag(TIF_SINGLESTEP)) regs->flags |= X86_EFLAGS_TF; /* do the secure computing check first */ secure_computing(regs->orig_ax); if (unlikely(test_thread_flag(TIF_SYSCALL_EMU))) ret = -1L; if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) && tracehook_report_syscall_entry(regs)) ret = -1L; if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) trace_sys_enter(regs, regs->orig_ax); if (IS_IA32) audit_syscall_entry(AUDIT_ARCH_I386, regs->orig_ax, regs->bx, regs->cx, regs->dx, regs->si); #ifdef CONFIG_X86_64 else audit_syscall_entry(AUDIT_ARCH_X86_64, regs->orig_ax, regs->di, regs->si, regs->dx, regs->r10); #endif return ret ?: regs->orig_ax; } void syscall_trace_leave(struct pt_regs *regs) { bool step; audit_syscall_exit(regs); if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) trace_sys_exit(regs, regs->ax); /* * If TIF_SYSCALL_EMU is set, we only get here because of * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP). * We already reported this syscall instruction in * syscall_trace_enter(). */ step = unlikely(test_thread_flag(TIF_SINGLESTEP)) && !test_thread_flag(TIF_SYSCALL_EMU); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall_exit(regs, step); }
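regs_query_register_offset() in the ptrace.c content above maps a register name to its byte offset in struct pt_regs by scanning a table built with offsetof() and the stringifying REG_OFFSET_NAME() macro. A self-contained sketch of the same idiom follows; the reduced demo_regs structure and helper names are made up for illustration and are not part of the kernel file.

#include <stdio.h>
#include <stddef.h>
#include <string.h>

/* A reduced stand-in for struct pt_regs, just to show the idiom. */
struct demo_regs {
	unsigned long bx, cx, dx, si, di, bp, ax, ip, sp;
};

struct reg_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) { .name = #r, .offset = offsetof(struct demo_regs, r) }
#define REG_OFFSET_END     { .name = NULL, .offset = 0 }

static const struct reg_offset table[] = {
	REG_OFFSET_NAME(bx), REG_OFFSET_NAME(cx), REG_OFFSET_NAME(dx),
	REG_OFFSET_NAME(si), REG_OFFSET_NAME(di), REG_OFFSET_NAME(bp),
	REG_OFFSET_NAME(ax), REG_OFFSET_NAME(ip), REG_OFFSET_NAME(sp),
	REG_OFFSET_END,
};

static int query_offset(const char *name)
{
	const struct reg_offset *r;

	for (r = table; r->name != NULL; r++)
		if (!strcmp(r->name, name))
			return r->offset;
	return -1;	/* the kernel version returns -EINVAL */
}

int main(void)
{
	printf("offset of ip    = %d\n", query_offset("ip"));
	printf("offset of xyzzy = %d\n", query_offset("xyzzy"));
	return 0;
}

The sentinel entry with a NULL name terminates the scan, which is why the kernel table ends in REG_OFFSET_END.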
gpl-2.0
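ptrace_get_dr7() and ptrace_write_dr7() above translate between per-slot breakpoint attributes and the DR7 control word: architecturally, DR7 keeps two enable bits per slot in its low byte and a 4-bit read/write-type plus length nibble per slot starting at bit 16. The stand-alone sketch below packs and checks those fields; the constants mirror the architectural layout and are not the kernel's encode_dr7()/decode_dr7() helpers.

#include <stdio.h>
#include <stdint.h>

/* Architectural DR7 layout: two enable bits per slot in bits 0-7,
 * and a 4-bit {len,type} nibble per slot starting at bit 16. */
#define DR7_ENABLE_SIZE   2
#define DR7_CONTROL_SHIFT 16
#define DR7_CONTROL_SIZE  4
#define DR7_LOCAL_ENABLE  0x1

static uint32_t dr7_encode(int slot, unsigned int len, unsigned int type)
{
	uint32_t bits = ((len << 2) | type) & 0xf;

	return (bits << (DR7_CONTROL_SHIFT + slot * DR7_CONTROL_SIZE)) |
	       (DR7_LOCAL_ENABLE << (slot * DR7_ENABLE_SIZE));
}

static int dr7_enabled(uint32_t dr7, int slot)
{
	return (dr7 >> (slot * DR7_ENABLE_SIZE)) & 0x3;
}

int main(void)
{
	/* Slot 1, 4-byte (len encoding 0x3) write breakpoint (type 0x1). */
	uint32_t dr7 = dr7_encode(1, 0x3, 0x1);

	printf("dr7 = %#010x, slot1 enable bits = %d\n", dr7, dr7_enabled(dr7, 1));
	return 0;
}

For this example the result is 0x00d00004: local-enable bit 2 set, type 01 in bits 20-21, length 11 in bits 22-23.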
shesselba/linux-berlin
sound/soc/fsl/phycore-ac97.c
2342
3392
/* * phycore-ac97.c -- SoC audio for imx_phycore in AC97 mode * * Copyright 2009 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/device.h> #include <linux/i2c.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <asm/mach-types.h> #include "imx-audmux.h" static struct snd_soc_card imx_phycore; static struct snd_soc_ops imx_phycore_hifi_ops = { }; static struct snd_soc_dai_link imx_phycore_dai_ac97[] = { { .name = "HiFi", .stream_name = "HiFi", .codec_dai_name = "wm9712-hifi", .codec_name = "wm9712-codec", .cpu_dai_name = "imx-ssi.0", .platform_name = "imx-ssi.0", .ops = &imx_phycore_hifi_ops, }, }; static struct snd_soc_card imx_phycore = { .name = "PhyCORE-ac97-audio", .owner = THIS_MODULE, .dai_link = imx_phycore_dai_ac97, .num_links = ARRAY_SIZE(imx_phycore_dai_ac97), }; static struct platform_device *imx_phycore_snd_ac97_device; static struct platform_device *imx_phycore_snd_device; static int __init imx_phycore_init(void) { int ret; if (machine_is_pca100()) { imx_audmux_v1_configure_port(MX27_AUDMUX_HPCR1_SSI0, IMX_AUDMUX_V1_PCR_SYN | /* 4wire mode */ IMX_AUDMUX_V1_PCR_TFCSEL(3) | IMX_AUDMUX_V1_PCR_TCLKDIR | /* clock is output */ IMX_AUDMUX_V1_PCR_RXDSEL(3)); imx_audmux_v1_configure_port(3, IMX_AUDMUX_V1_PCR_SYN | /* 4wire mode */ IMX_AUDMUX_V1_PCR_TFCSEL(0) | IMX_AUDMUX_V1_PCR_TFSDIR | IMX_AUDMUX_V1_PCR_RXDSEL(0)); } else if (machine_is_pcm043()) { imx_audmux_v2_configure_port(3, IMX_AUDMUX_V2_PTCR_SYN | /* 4wire mode */ IMX_AUDMUX_V2_PTCR_TFSEL(0) | IMX_AUDMUX_V2_PTCR_TFSDIR, IMX_AUDMUX_V2_PDCR_RXDSEL(0)); imx_audmux_v2_configure_port(0, IMX_AUDMUX_V2_PTCR_SYN | /* 4wire mode */ IMX_AUDMUX_V2_PTCR_TCSEL(3) | IMX_AUDMUX_V2_PTCR_TCLKDIR, /* clock is output */ IMX_AUDMUX_V2_PDCR_RXDSEL(3)); } else { /* return happy. We might run on a totally different machine */ return 0; } imx_phycore_snd_ac97_device = platform_device_alloc("soc-audio", -1); if (!imx_phycore_snd_ac97_device) return -ENOMEM; platform_set_drvdata(imx_phycore_snd_ac97_device, &imx_phycore); ret = platform_device_add(imx_phycore_snd_ac97_device); if (ret) goto fail1; imx_phycore_snd_device = platform_device_alloc("wm9712-codec", -1); if (!imx_phycore_snd_device) { ret = -ENOMEM; goto fail2; } ret = platform_device_add(imx_phycore_snd_device); if (ret) { printk(KERN_ERR "ASoC: Platform device allocation failed\n"); goto fail3; } return 0; fail3: platform_device_put(imx_phycore_snd_device); fail2: platform_device_del(imx_phycore_snd_ac97_device); fail1: platform_device_put(imx_phycore_snd_ac97_device); return ret; } static void __exit imx_phycore_exit(void) { platform_device_unregister(imx_phycore_snd_device); platform_device_unregister(imx_phycore_snd_ac97_device); } late_initcall(imx_phycore_init); module_exit(imx_phycore_exit); MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>"); MODULE_DESCRIPTION("PhyCORE ALSA SoC driver"); MODULE_LICENSE("GPL");
gpl-2.0
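The init path in phycore-ac97.c above pairs platform_device_alloc() with platform_device_add() and unwinds carefully on failure: a device that was only allocated is released with platform_device_put(), while one that was successfully added must be deleted (or unregistered) instead. A minimal module-shaped sketch of that ownership rule is shown below; the "demo-device" name is hypothetical and the code illustrates the idiom rather than being part of the driver.

#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_device *demo_pdev;

static int __init demo_init(void)
{
	int ret;

	demo_pdev = platform_device_alloc("demo-device", -1); /* hypothetical name */
	if (!demo_pdev)
		return -ENOMEM;

	ret = platform_device_add(demo_pdev);
	if (ret) {
		/* Not added yet: dropping the reference frees the allocation. */
		platform_device_put(demo_pdev);
		return ret;
	}
	return 0;
}

static void __exit demo_exit(void)
{
	/* Added successfully: unregister performs the del + put pair. */
	platform_device_unregister(demo_pdev);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");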
mdeejay/kernel_huawei_omap4
drivers/staging/iio/resolver/ad2s120x.c
2342
7432
/* * ad2s120x.c simple support for the ADI Resolver to Digital Converters: AD2S1200/1205 * * Copyright (c) 2010-2010 Analog Devices Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/types.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/delay.h> #include <linux/gpio.h> #include "../iio.h" #include "../sysfs.h" #define DRV_NAME "ad2s120x" /* input pin sample and rdvel is controlled by driver */ #define AD2S120X_PN 2 /* input clock on serial interface */ #define AD2S120X_HZ 8192000 /* clock period in nano second */ #define AD2S120X_TSCLK (1000000000/AD2S120X_HZ) struct ad2s120x_state { struct mutex lock; struct iio_dev *idev; struct spi_device *sdev; unsigned short sample; unsigned short rdvel; u8 rx[2]; u8 tx[2]; }; static ssize_t ad2s120x_show_pos_vel(struct device *dev, struct device_attribute *attr, char *buf) { struct spi_message msg; struct spi_transfer xfer; int ret = 0; ssize_t len = 0; u16 pos; s16 vel; u8 status; struct iio_dev *idev = dev_get_drvdata(dev); struct ad2s120x_state *st = idev->dev_data; xfer.len = 1; xfer.tx_buf = st->tx; xfer.rx_buf = st->rx; mutex_lock(&st->lock); gpio_set_value(st->sample, 0); /* delay (6 * AD2S120X_TSCLK + 20) nano seconds */ udelay(1); gpio_set_value(st->sample, 1); spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); ret = spi_sync(st->sdev, &msg); if (ret) goto error_ret; status = st->rx[1]; pos = (((u16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4); len = sprintf(buf, "%d %c%c%c%c ", pos, (status & 0x8) ? 'P' : 'V', (status & 0x4) ? 'd' : '_', (status & 0x2) ? 'l' : '_', (status & 0x1) ? '1' : '0'); /* delay 18 ns */ /* ndelay(18); */ gpio_set_value(st->rdvel, 0); /* ndelay(5);*/ spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); ret = spi_sync(st->sdev, &msg); if (ret) goto error_ret; status = st->rx[1]; vel = (st->rx[0] & 0x80) ? 0xf000 : 0; vel |= (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4); len += sprintf(buf + len, "%d %c%c%c%c\n", vel, (status & 0x8) ? 'P' : 'V', (status & 0x4) ? 'd' : '_', (status & 0x2) ? 'l' : '_', (status & 0x1) ? '1' : '0'); error_ret: gpio_set_value(st->rdvel, 1); /* delay (2 * AD2S120X_TSCLK + 20) ns for sample pulse */ udelay(1); mutex_unlock(&st->lock); return ret ? ret : len; } static ssize_t ad2s120x_show_pos(struct device *dev, struct device_attribute *attr, char *buf) { struct spi_message msg; struct spi_transfer xfer; int ret = 0; ssize_t len = 0; u16 pos; u8 status; struct iio_dev *idev = dev_get_drvdata(dev); struct ad2s120x_state *st = idev->dev_data; xfer.len = 1; xfer.tx_buf = st->tx; xfer.rx_buf = st->rx; mutex_lock(&st->lock); gpio_set_value(st->sample, 0); /* delay (6 * AD2S120X_TSCLK + 20) nano seconds */ udelay(1); gpio_set_value(st->sample, 1); gpio_set_value(st->rdvel, 1); spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); ret = spi_sync(st->sdev, &msg); if (ret) goto error_ret; status = st->rx[1]; pos = (((u16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4); len = sprintf(buf, "%d %c%c%c%c ", pos, (status & 0x8) ? 'P' : 'V', (status & 0x4) ? 'd' : '_', (status & 0x2) ? 'l' : '_', (status & 0x1) ? '1' : '0'); error_ret: /* delay (2 * AD2S120X_TSCLK + 20) ns for sample pulse */ udelay(1); mutex_unlock(&st->lock); return ret ? 
ret : len; } static ssize_t ad2s120x_show_vel(struct device *dev, struct device_attribute *attr, char *buf) { struct spi_message msg; struct spi_transfer xfer; int ret = 0; ssize_t len = 0; s16 vel; u8 status; struct iio_dev *idev = dev_get_drvdata(dev); struct ad2s120x_state *st = idev->dev_data; xfer.len = 1; xfer.tx_buf = st->tx; xfer.rx_buf = st->rx; mutex_lock(&st->lock); gpio_set_value(st->sample, 0); /* delay (6 * AD2S120X_TSCLK + 20) nano seconds */ udelay(1); gpio_set_value(st->sample, 1); gpio_set_value(st->rdvel, 0); /* ndelay(5);*/ spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); ret = spi_sync(st->sdev, &msg); if (ret) goto error_ret; status = st->rx[1]; vel = (st->rx[0] & 0x80) ? 0xf000 : 0; vel |= (((s16)(st->rx[0])) << 4) | ((st->rx[1] & 0xF0) >> 4); len += sprintf(buf + len, "%d %c%c%c%c\n", vel, (status & 0x8) ? 'P' : 'V', (status & 0x4) ? 'd' : '_', (status & 0x2) ? 'l' : '_', (status & 0x1) ? '1' : '0'); error_ret: gpio_set_value(st->rdvel, 1); /* delay (2 * AD2S120X_TSCLK + 20) ns for sample pulse */ udelay(1); mutex_unlock(&st->lock); return ret ? ret : len; } static IIO_CONST_ATTR(description, "12-Bit R/D Converter with Reference Oscillator"); static IIO_DEVICE_ATTR(pos_vel, S_IRUGO, ad2s120x_show_pos_vel, NULL, 0); static IIO_DEVICE_ATTR(pos, S_IRUGO, ad2s120x_show_pos, NULL, 0); static IIO_DEVICE_ATTR(vel, S_IRUGO, ad2s120x_show_vel, NULL, 0); static struct attribute *ad2s120x_attributes[] = { &iio_const_attr_description.dev_attr.attr, &iio_dev_attr_pos_vel.dev_attr.attr, &iio_dev_attr_pos.dev_attr.attr, &iio_dev_attr_vel.dev_attr.attr, NULL, }; static const struct attribute_group ad2s120x_attribute_group = { .attrs = ad2s120x_attributes, }; static const struct iio_info ad2s120x_info = { .attrs = &ad2s120x_attribute_group, .driver_module = THIS_MODULE, }; static int __devinit ad2s120x_probe(struct spi_device *spi) { struct ad2s120x_state *st; int pn, ret = 0; unsigned short *pins = spi->dev.platform_data; for (pn = 0; pn < AD2S120X_PN; pn++) { if (gpio_request(pins[pn], DRV_NAME)) { pr_err("%s: request gpio pin %d failed\n", DRV_NAME, pins[pn]); goto error_ret; } gpio_direction_output(pins[pn], 1); } st = kzalloc(sizeof(*st), GFP_KERNEL); if (st == NULL) { ret = -ENOMEM; goto error_ret; } spi_set_drvdata(spi, st); mutex_init(&st->lock); st->sdev = spi; st->sample = pins[0]; st->rdvel = pins[1]; st->idev = iio_allocate_device(0); if (st->idev == NULL) { ret = -ENOMEM; goto error_free_st; } st->idev->dev.parent = &spi->dev; st->idev->info = &ad2s120x_info; st->idev->dev_data = (void *)(st); st->idev->modes = INDIO_DIRECT_MODE; ret = iio_device_register(st->idev); if (ret) goto error_free_dev; spi->max_speed_hz = AD2S120X_HZ; spi->mode = SPI_MODE_3; spi_setup(spi); return 0; error_free_dev: iio_free_device(st->idev); error_free_st: kfree(st); error_ret: for (--pn; pn >= 0; pn--) gpio_free(pins[pn]); return ret; } static int __devexit ad2s120x_remove(struct spi_device *spi) { struct ad2s120x_state *st = spi_get_drvdata(spi); iio_device_unregister(st->idev); kfree(st); return 0; } static struct spi_driver ad2s120x_driver = { .driver = { .name = DRV_NAME, .owner = THIS_MODULE, }, .probe = ad2s120x_probe, .remove = __devexit_p(ad2s120x_remove), }; static __init int ad2s120x_spi_init(void) { return spi_register_driver(&ad2s120x_driver); } module_init(ad2s120x_spi_init); static __exit void ad2s120x_spi_exit(void) { spi_unregister_driver(&ad2s120x_driver); } module_exit(ad2s120x_spi_exit); MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>"); 
MODULE_DESCRIPTION("Analog Devices AD2S1200/1205 Resolver to Digital SPI driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
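Each SPI transfer in the ad2s120x driver above returns two bytes whose top 12 bits carry the sample (position or velocity) and whose low nibble carries status flags; velocity is additionally sign-extended from bit 11, which is what the rx[0]/rx[1] shifting in the file does. The stand-alone sketch below reproduces that unpacking with made-up sample frames; it mirrors the bit layout rather than quoting the driver.

#include <stdio.h>
#include <stdint.h>

/* hi = first SPI byte (sample bits 11..4); lo = second byte:
 * top nibble = sample bits 3..0, low nibble = status flags. */
static uint16_t decode_position(uint8_t hi, uint8_t lo)
{
	return ((uint16_t)hi << 4) | ((lo & 0xF0) >> 4);
}

static int16_t decode_velocity(uint8_t hi, uint8_t lo)
{
	uint16_t raw = ((uint16_t)hi << 4) | ((lo & 0xF0) >> 4);

	if (hi & 0x80)		/* bit 11 set: replicate into bits 15..12 */
		raw |= 0xF000;
	return (int16_t)raw;
}

int main(void)
{
	printf("pos  = %d\n", decode_position(0xAB, 0xC5));  /* 0xABC -> 2748  */
	printf("vel  = %d\n", decode_velocity(0x80, 0x05));  /* 0x800 -> -2048 */
	printf("vel2 = %d\n", decode_velocity(0x7F, 0xF5));  /* 0x7FF -> 2047  */
	return 0;
}

The driver then renders the low status nibble as four flag characters after the numeric value, as the sprintf calls in the file show.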
Muyiafan/android_kernel_oneplus_msm8994
sound/soc/davinci/davinci-i2s.c
2598
23796
/* * ALSA SoC I2S (McBSP) Audio Layer for TI DAVINCI processor * * Author: Vladimir Barinov, <vbarinov@embeddedalley.com> * Copyright: (C) 2007 MontaVista Software, Inc., <source@mvista.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/platform_data/davinci_asp.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/initval.h> #include <sound/soc.h> #include "davinci-pcm.h" #include "davinci-i2s.h" /* * NOTE: terminology here is confusing. * * - This driver supports the "Audio Serial Port" (ASP), * found on dm6446, dm355, and other DaVinci chips. * * - But it labels it a "Multi-channel Buffered Serial Port" * (McBSP) as on older chips like the dm642 ... which was * backward-compatible, possibly explaining that confusion. * * - OMAP chips have a controller called McBSP, which is * incompatible with the DaVinci flavor of McBSP. * * - Newer DaVinci chips have a controller called McASP, * incompatible with ASP and with either McBSP. * * In short: this uses ASP to implement I2S, not McBSP. * And it won't be the only DaVinci implemention of I2S. */ #define DAVINCI_MCBSP_DRR_REG 0x00 #define DAVINCI_MCBSP_DXR_REG 0x04 #define DAVINCI_MCBSP_SPCR_REG 0x08 #define DAVINCI_MCBSP_RCR_REG 0x0c #define DAVINCI_MCBSP_XCR_REG 0x10 #define DAVINCI_MCBSP_SRGR_REG 0x14 #define DAVINCI_MCBSP_PCR_REG 0x24 #define DAVINCI_MCBSP_SPCR_RRST (1 << 0) #define DAVINCI_MCBSP_SPCR_RINTM(v) ((v) << 4) #define DAVINCI_MCBSP_SPCR_XRST (1 << 16) #define DAVINCI_MCBSP_SPCR_XINTM(v) ((v) << 20) #define DAVINCI_MCBSP_SPCR_GRST (1 << 22) #define DAVINCI_MCBSP_SPCR_FRST (1 << 23) #define DAVINCI_MCBSP_SPCR_FREE (1 << 25) #define DAVINCI_MCBSP_RCR_RWDLEN1(v) ((v) << 5) #define DAVINCI_MCBSP_RCR_RFRLEN1(v) ((v) << 8) #define DAVINCI_MCBSP_RCR_RDATDLY(v) ((v) << 16) #define DAVINCI_MCBSP_RCR_RFIG (1 << 18) #define DAVINCI_MCBSP_RCR_RWDLEN2(v) ((v) << 21) #define DAVINCI_MCBSP_RCR_RFRLEN2(v) ((v) << 24) #define DAVINCI_MCBSP_RCR_RPHASE BIT(31) #define DAVINCI_MCBSP_XCR_XWDLEN1(v) ((v) << 5) #define DAVINCI_MCBSP_XCR_XFRLEN1(v) ((v) << 8) #define DAVINCI_MCBSP_XCR_XDATDLY(v) ((v) << 16) #define DAVINCI_MCBSP_XCR_XFIG (1 << 18) #define DAVINCI_MCBSP_XCR_XWDLEN2(v) ((v) << 21) #define DAVINCI_MCBSP_XCR_XFRLEN2(v) ((v) << 24) #define DAVINCI_MCBSP_XCR_XPHASE BIT(31) #define DAVINCI_MCBSP_SRGR_FWID(v) ((v) << 8) #define DAVINCI_MCBSP_SRGR_FPER(v) ((v) << 16) #define DAVINCI_MCBSP_SRGR_FSGM (1 << 28) #define DAVINCI_MCBSP_SRGR_CLKSM BIT(29) #define DAVINCI_MCBSP_PCR_CLKRP (1 << 0) #define DAVINCI_MCBSP_PCR_CLKXP (1 << 1) #define DAVINCI_MCBSP_PCR_FSRP (1 << 2) #define DAVINCI_MCBSP_PCR_FSXP (1 << 3) #define DAVINCI_MCBSP_PCR_SCLKME (1 << 7) #define DAVINCI_MCBSP_PCR_CLKRM (1 << 8) #define DAVINCI_MCBSP_PCR_CLKXM (1 << 9) #define DAVINCI_MCBSP_PCR_FSRM (1 << 10) #define DAVINCI_MCBSP_PCR_FSXM (1 << 11) enum { DAVINCI_MCBSP_WORD_8 = 0, DAVINCI_MCBSP_WORD_12, DAVINCI_MCBSP_WORD_16, DAVINCI_MCBSP_WORD_20, DAVINCI_MCBSP_WORD_24, DAVINCI_MCBSP_WORD_32, }; static const unsigned char data_type[SNDRV_PCM_FORMAT_S32_LE + 1] = { [SNDRV_PCM_FORMAT_S8] = 1, [SNDRV_PCM_FORMAT_S16_LE] = 2, [SNDRV_PCM_FORMAT_S32_LE] = 4, }; static const unsigned char asp_word_length[SNDRV_PCM_FORMAT_S32_LE 
+ 1] = { [SNDRV_PCM_FORMAT_S8] = DAVINCI_MCBSP_WORD_8, [SNDRV_PCM_FORMAT_S16_LE] = DAVINCI_MCBSP_WORD_16, [SNDRV_PCM_FORMAT_S32_LE] = DAVINCI_MCBSP_WORD_32, }; static const unsigned char double_fmt[SNDRV_PCM_FORMAT_S32_LE + 1] = { [SNDRV_PCM_FORMAT_S8] = SNDRV_PCM_FORMAT_S16_LE, [SNDRV_PCM_FORMAT_S16_LE] = SNDRV_PCM_FORMAT_S32_LE, }; struct davinci_mcbsp_dev { struct device *dev; struct davinci_pcm_dma_params dma_params[2]; void __iomem *base; #define MOD_DSP_A 0 #define MOD_DSP_B 1 int mode; u32 pcr; struct clk *clk; /* * Combining both channels into 1 element will at least double the * amount of time between servicing the dma channel, increase * effiency, and reduce the chance of overrun/underrun. But, * it will result in the left & right channels being swapped. * * If relabeling the left and right channels is not possible, * you may want to let the codec know to swap them back. * * It may allow x10 the amount of time to service dma requests, * if the codec is master and is using an unnecessarily fast bit clock * (ie. tlvaic23b), independent of the sample rate. So, having an * entire frame at once means it can be serviced at the sample rate * instead of the bit clock rate. * * In the now unlikely case that an underrun still * occurs, both the left and right samples will be repeated * so that no pops are heard, and the left and right channels * won't end up being swapped because of the underrun. */ unsigned enable_channel_combine:1; unsigned int fmt; int clk_div; int clk_input_pin; bool i2s_accurate_sck; }; static inline void davinci_mcbsp_write_reg(struct davinci_mcbsp_dev *dev, int reg, u32 val) { __raw_writel(val, dev->base + reg); } static inline u32 davinci_mcbsp_read_reg(struct davinci_mcbsp_dev *dev, int reg) { return __raw_readl(dev->base + reg); } static void toggle_clock(struct davinci_mcbsp_dev *dev, int playback) { u32 m = playback ? DAVINCI_MCBSP_PCR_CLKXP : DAVINCI_MCBSP_PCR_CLKRP; /* The clock needs to toggle to complete reset. * So, fake it by toggling the clk polarity. */ davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_PCR_REG, dev->pcr ^ m); davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_PCR_REG, dev->pcr); } static void davinci_mcbsp_start(struct davinci_mcbsp_dev *dev, struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_platform *platform = rtd->platform; int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK); u32 spcr; u32 mask = playback ? 
DAVINCI_MCBSP_SPCR_XRST : DAVINCI_MCBSP_SPCR_RRST; spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG); if (spcr & mask) { /* start off disabled */ davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr & ~mask); toggle_clock(dev, playback); } if (dev->pcr & (DAVINCI_MCBSP_PCR_FSXM | DAVINCI_MCBSP_PCR_FSRM | DAVINCI_MCBSP_PCR_CLKXM | DAVINCI_MCBSP_PCR_CLKRM)) { /* Start the sample generator */ spcr |= DAVINCI_MCBSP_SPCR_GRST; davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr); } if (playback) { /* Stop the DMA to avoid data loss */ /* while the transmitter is out of reset to handle XSYNCERR */ if (platform->driver->ops->trigger) { int ret = platform->driver->ops->trigger(substream, SNDRV_PCM_TRIGGER_STOP); if (ret < 0) printk(KERN_DEBUG "Playback DMA stop failed\n"); } /* Enable the transmitter */ spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG); spcr |= DAVINCI_MCBSP_SPCR_XRST; davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr); /* wait for any unexpected frame sync error to occur */ udelay(100); /* Disable the transmitter to clear any outstanding XSYNCERR */ spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG); spcr &= ~DAVINCI_MCBSP_SPCR_XRST; davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr); toggle_clock(dev, playback); /* Restart the DMA */ if (platform->driver->ops->trigger) { int ret = platform->driver->ops->trigger(substream, SNDRV_PCM_TRIGGER_START); if (ret < 0) printk(KERN_DEBUG "Playback DMA start failed\n"); } } /* Enable transmitter or receiver */ spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG); spcr |= mask; if (dev->pcr & (DAVINCI_MCBSP_PCR_FSXM | DAVINCI_MCBSP_PCR_FSRM)) { /* Start frame sync */ spcr |= DAVINCI_MCBSP_SPCR_FRST; } davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr); } static void davinci_mcbsp_stop(struct davinci_mcbsp_dev *dev, int playback) { u32 spcr; /* Reset transmitter/receiver and sample rate/frame sync generators */ spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG); spcr &= ~(DAVINCI_MCBSP_SPCR_GRST | DAVINCI_MCBSP_SPCR_FRST); spcr &= playback ? ~DAVINCI_MCBSP_SPCR_XRST : ~DAVINCI_MCBSP_SPCR_RRST; davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr); toggle_clock(dev, playback); } #define DEFAULT_BITPERSAMPLE 16 static int davinci_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) { struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(cpu_dai); unsigned int pcr; unsigned int srgr; bool inv_fs = false; /* Attention srgr is updated by hw_params! */ srgr = DAVINCI_MCBSP_SRGR_FSGM | DAVINCI_MCBSP_SRGR_FPER(DEFAULT_BITPERSAMPLE * 2 - 1) | DAVINCI_MCBSP_SRGR_FWID(DEFAULT_BITPERSAMPLE - 1); dev->fmt = fmt; /* set master/slave audio interface */ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBS_CFS: /* cpu is master */ pcr = DAVINCI_MCBSP_PCR_FSXM | DAVINCI_MCBSP_PCR_FSRM | DAVINCI_MCBSP_PCR_CLKXM | DAVINCI_MCBSP_PCR_CLKRM; break; case SND_SOC_DAIFMT_CBM_CFS: pcr = DAVINCI_MCBSP_PCR_FSRM | DAVINCI_MCBSP_PCR_FSXM; /* * Selection of the clock input pin that is the * input for the Sample Rate Generator. * McBSP FSR and FSX are driven by the Sample Rate * Generator. 
*/ switch (dev->clk_input_pin) { case MCBSP_CLKS: pcr |= DAVINCI_MCBSP_PCR_CLKXM | DAVINCI_MCBSP_PCR_CLKRM; break; case MCBSP_CLKR: pcr |= DAVINCI_MCBSP_PCR_SCLKME; break; default: dev_err(dev->dev, "bad clk_input_pin\n"); return -EINVAL; } break; case SND_SOC_DAIFMT_CBM_CFM: /* codec is master */ pcr = 0; break; default: printk(KERN_ERR "%s:bad master\n", __func__); return -EINVAL; } /* interface format */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: /* Davinci doesn't support TRUE I2S, but some codecs will have * the left and right channels contiguous. This allows * dsp_a mode to be used with an inverted normal frame clk. * If your codec is master and does not have contiguous * channels, then you will have sound on only one channel. * Try using a different mode, or codec as slave. * * The TLV320AIC33 is an example of a codec where this works. * It has a variable bit clock frequency allowing it to have * valid data on every bit clock. * * The TLV320AIC23 is an example of a codec where this does not * work. It has a fixed bit clock frequency with progressively * more empty bit clock slots between channels as the sample * rate is lowered. */ inv_fs = true; case SND_SOC_DAIFMT_DSP_A: dev->mode = MOD_DSP_A; break; case SND_SOC_DAIFMT_DSP_B: dev->mode = MOD_DSP_B; break; default: printk(KERN_ERR "%s:bad format\n", __func__); return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: /* CLKRP Receive clock polarity, * 1 - sampled on rising edge of CLKR * valid on rising edge * CLKXP Transmit clock polarity, * 1 - clocked on falling edge of CLKX * valid on rising edge * FSRP Receive frame sync pol, 0 - active high * FSXP Transmit frame sync pol, 0 - active high */ pcr |= (DAVINCI_MCBSP_PCR_CLKXP | DAVINCI_MCBSP_PCR_CLKRP); break; case SND_SOC_DAIFMT_IB_IF: /* CLKRP Receive clock polarity, * 0 - sampled on falling edge of CLKR * valid on falling edge * CLKXP Transmit clock polarity, * 0 - clocked on rising edge of CLKX * valid on falling edge * FSRP Receive frame sync pol, 1 - active low * FSXP Transmit frame sync pol, 1 - active low */ pcr |= (DAVINCI_MCBSP_PCR_FSXP | DAVINCI_MCBSP_PCR_FSRP); break; case SND_SOC_DAIFMT_NB_IF: /* CLKRP Receive clock polarity, * 1 - sampled on rising edge of CLKR * valid on rising edge * CLKXP Transmit clock polarity, * 1 - clocked on falling edge of CLKX * valid on rising edge * FSRP Receive frame sync pol, 1 - active low * FSXP Transmit frame sync pol, 1 - active low */ pcr |= (DAVINCI_MCBSP_PCR_CLKXP | DAVINCI_MCBSP_PCR_CLKRP | DAVINCI_MCBSP_PCR_FSXP | DAVINCI_MCBSP_PCR_FSRP); break; case SND_SOC_DAIFMT_IB_NF: /* CLKRP Receive clock polarity, * 0 - sampled on falling edge of CLKR * valid on falling edge * CLKXP Transmit clock polarity, * 0 - clocked on rising edge of CLKX * valid on falling edge * FSRP Receive frame sync pol, 0 - active high * FSXP Transmit frame sync pol, 0 - active high */ break; default: return -EINVAL; } if (inv_fs == true) pcr ^= (DAVINCI_MCBSP_PCR_FSXP | DAVINCI_MCBSP_PCR_FSRP); davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SRGR_REG, srgr); dev->pcr = pcr; davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_PCR_REG, pcr); return 0; } static int davinci_i2s_dai_set_clkdiv(struct snd_soc_dai *cpu_dai, int div_id, int div) { struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(cpu_dai); if (div_id != DAVINCI_MCBSP_CLKGDV) return -ENODEV; dev->clk_div = div; return 0; } static int davinci_i2s_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { 
struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai); struct davinci_pcm_dma_params *dma_params = &dev->dma_params[substream->stream]; struct snd_interval *i = NULL; int mcbsp_word_length, master; unsigned int rcr, xcr, srgr, clk_div, freq, framesize; u32 spcr; snd_pcm_format_t fmt; unsigned element_cnt = 1; /* general line settings */ spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG); if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) { spcr |= DAVINCI_MCBSP_SPCR_RINTM(3) | DAVINCI_MCBSP_SPCR_FREE; davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr); } else { spcr |= DAVINCI_MCBSP_SPCR_XINTM(3) | DAVINCI_MCBSP_SPCR_FREE; davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SPCR_REG, spcr); } master = dev->fmt & SND_SOC_DAIFMT_MASTER_MASK; fmt = params_format(params); mcbsp_word_length = asp_word_length[fmt]; switch (master) { case SND_SOC_DAIFMT_CBS_CFS: freq = clk_get_rate(dev->clk); srgr = DAVINCI_MCBSP_SRGR_FSGM | DAVINCI_MCBSP_SRGR_CLKSM; srgr |= DAVINCI_MCBSP_SRGR_FWID(mcbsp_word_length * 8 - 1); if (dev->i2s_accurate_sck) { clk_div = 256; do { framesize = (freq / (--clk_div)) / params->rate_num * params->rate_den; } while (((framesize < 33) || (framesize > 4095)) && (clk_div)); clk_div--; srgr |= DAVINCI_MCBSP_SRGR_FPER(framesize - 1); } else { /* symmetric waveforms */ clk_div = freq / (mcbsp_word_length * 16) / params->rate_num * params->rate_den; srgr |= DAVINCI_MCBSP_SRGR_FPER(mcbsp_word_length * 16 - 1); } clk_div &= 0xFF; srgr |= clk_div; break; case SND_SOC_DAIFMT_CBM_CFS: srgr = DAVINCI_MCBSP_SRGR_FSGM; clk_div = dev->clk_div - 1; srgr |= DAVINCI_MCBSP_SRGR_FWID(mcbsp_word_length * 8 - 1); srgr |= DAVINCI_MCBSP_SRGR_FPER(mcbsp_word_length * 16 - 1); clk_div &= 0xFF; srgr |= clk_div; break; case SND_SOC_DAIFMT_CBM_CFM: /* Clock and frame sync given from external sources */ i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_SAMPLE_BITS); srgr = DAVINCI_MCBSP_SRGR_FSGM; srgr |= DAVINCI_MCBSP_SRGR_FWID(snd_interval_value(i) - 1); pr_debug("%s - %d FWID set: re-read srgr = %X\n", __func__, __LINE__, snd_interval_value(i) - 1); i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_FRAME_BITS); srgr |= DAVINCI_MCBSP_SRGR_FPER(snd_interval_value(i) - 1); break; default: return -EINVAL; } davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_SRGR_REG, srgr); rcr = DAVINCI_MCBSP_RCR_RFIG; xcr = DAVINCI_MCBSP_XCR_XFIG; if (dev->mode == MOD_DSP_B) { rcr |= DAVINCI_MCBSP_RCR_RDATDLY(0); xcr |= DAVINCI_MCBSP_XCR_XDATDLY(0); } else { rcr |= DAVINCI_MCBSP_RCR_RDATDLY(1); xcr |= DAVINCI_MCBSP_XCR_XDATDLY(1); } /* Determine xfer data type */ fmt = params_format(params); if ((fmt > SNDRV_PCM_FORMAT_S32_LE) || !data_type[fmt]) { printk(KERN_WARNING "davinci-i2s: unsupported PCM format\n"); return -EINVAL; } if (params_channels(params) == 2) { element_cnt = 2; if (double_fmt[fmt] && dev->enable_channel_combine) { element_cnt = 1; fmt = double_fmt[fmt]; } switch (master) { case SND_SOC_DAIFMT_CBS_CFS: case SND_SOC_DAIFMT_CBS_CFM: rcr |= DAVINCI_MCBSP_RCR_RFRLEN2(0); xcr |= DAVINCI_MCBSP_XCR_XFRLEN2(0); rcr |= DAVINCI_MCBSP_RCR_RPHASE; xcr |= DAVINCI_MCBSP_XCR_XPHASE; break; case SND_SOC_DAIFMT_CBM_CFM: case SND_SOC_DAIFMT_CBM_CFS: rcr |= DAVINCI_MCBSP_RCR_RFRLEN2(element_cnt - 1); xcr |= DAVINCI_MCBSP_XCR_XFRLEN2(element_cnt - 1); break; default: return -EINVAL; } } dma_params->acnt = dma_params->data_type = data_type[fmt]; dma_params->fifo_level = 0; mcbsp_word_length = asp_word_length[fmt]; switch (master) { case SND_SOC_DAIFMT_CBS_CFS: case SND_SOC_DAIFMT_CBS_CFM: rcr |= 
DAVINCI_MCBSP_RCR_RFRLEN1(0); xcr |= DAVINCI_MCBSP_XCR_XFRLEN1(0); break; case SND_SOC_DAIFMT_CBM_CFM: case SND_SOC_DAIFMT_CBM_CFS: rcr |= DAVINCI_MCBSP_RCR_RFRLEN1(element_cnt - 1); xcr |= DAVINCI_MCBSP_XCR_XFRLEN1(element_cnt - 1); break; default: return -EINVAL; } rcr |= DAVINCI_MCBSP_RCR_RWDLEN1(mcbsp_word_length) | DAVINCI_MCBSP_RCR_RWDLEN2(mcbsp_word_length); xcr |= DAVINCI_MCBSP_XCR_XWDLEN1(mcbsp_word_length) | DAVINCI_MCBSP_XCR_XWDLEN2(mcbsp_word_length); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_XCR_REG, xcr); else davinci_mcbsp_write_reg(dev, DAVINCI_MCBSP_RCR_REG, rcr); pr_debug("%s - %d srgr=%X\n", __func__, __LINE__, srgr); pr_debug("%s - %d xcr=%X\n", __func__, __LINE__, xcr); pr_debug("%s - %d rcr=%X\n", __func__, __LINE__, rcr); return 0; } static int davinci_i2s_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai); int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK); davinci_mcbsp_stop(dev, playback); return 0; } static int davinci_i2s_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai); int ret = 0; int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: davinci_mcbsp_start(dev, substream); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: davinci_mcbsp_stop(dev, playback); break; default: ret = -EINVAL; } return ret; } static int davinci_i2s_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai); snd_soc_dai_set_dma_data(dai, substream, dev->dma_params); return 0; } static void davinci_i2s_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai); int playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK); davinci_mcbsp_stop(dev, playback); } #define DAVINCI_I2S_RATES SNDRV_PCM_RATE_8000_96000 static const struct snd_soc_dai_ops davinci_i2s_dai_ops = { .startup = davinci_i2s_startup, .shutdown = davinci_i2s_shutdown, .prepare = davinci_i2s_prepare, .trigger = davinci_i2s_trigger, .hw_params = davinci_i2s_hw_params, .set_fmt = davinci_i2s_set_dai_fmt, .set_clkdiv = davinci_i2s_dai_set_clkdiv, }; static struct snd_soc_dai_driver davinci_i2s_dai = { .playback = { .channels_min = 2, .channels_max = 2, .rates = DAVINCI_I2S_RATES, .formats = SNDRV_PCM_FMTBIT_S16_LE,}, .capture = { .channels_min = 2, .channels_max = 2, .rates = DAVINCI_I2S_RATES, .formats = SNDRV_PCM_FMTBIT_S16_LE,}, .ops = &davinci_i2s_dai_ops, }; static const struct snd_soc_component_driver davinci_i2s_component = { .name = "davinci-i2s", }; static int davinci_i2s_probe(struct platform_device *pdev) { struct snd_platform_data *pdata = pdev->dev.platform_data; struct davinci_mcbsp_dev *dev; struct resource *mem, *ioarea, *res; enum dma_event_q asp_chan_q = EVENTQ_0; enum dma_event_q ram_chan_q = EVENTQ_1; int ret; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(&pdev->dev, "no mem resource?\n"); return -ENODEV; } ioarea = devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem), pdev->name); if (!ioarea) { dev_err(&pdev->dev, "McBSP region already claimed\n"); return -EBUSY; } dev = devm_kzalloc(&pdev->dev, 
sizeof(struct davinci_mcbsp_dev), GFP_KERNEL); if (!dev) return -ENOMEM; if (pdata) { dev->enable_channel_combine = pdata->enable_channel_combine; dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].sram_size = pdata->sram_size_playback; dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].sram_size = pdata->sram_size_capture; dev->clk_input_pin = pdata->clk_input_pin; dev->i2s_accurate_sck = pdata->i2s_accurate_sck; asp_chan_q = pdata->asp_chan_q; ram_chan_q = pdata->ram_chan_q; } dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].asp_chan_q = asp_chan_q; dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].ram_chan_q = ram_chan_q; dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].asp_chan_q = asp_chan_q; dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].ram_chan_q = ram_chan_q; dev->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(dev->clk)) return -ENODEV; clk_enable(dev->clk); dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); if (!dev->base) { dev_err(&pdev->dev, "ioremap failed\n"); ret = -ENOMEM; goto err_release_clk; } dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].dma_addr = (dma_addr_t)(mem->start + DAVINCI_MCBSP_DXR_REG); dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].dma_addr = (dma_addr_t)(mem->start + DAVINCI_MCBSP_DRR_REG); /* first TX, then RX */ res = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (!res) { dev_err(&pdev->dev, "no DMA resource\n"); ret = -ENXIO; goto err_release_clk; } dev->dma_params[SNDRV_PCM_STREAM_PLAYBACK].channel = res->start; res = platform_get_resource(pdev, IORESOURCE_DMA, 1); if (!res) { dev_err(&pdev->dev, "no DMA resource\n"); ret = -ENXIO; goto err_release_clk; } dev->dma_params[SNDRV_PCM_STREAM_CAPTURE].channel = res->start; dev->dev = &pdev->dev; dev_set_drvdata(&pdev->dev, dev); ret = snd_soc_register_component(&pdev->dev, &davinci_i2s_component, &davinci_i2s_dai, 1); if (ret != 0) goto err_release_clk; ret = davinci_soc_platform_register(&pdev->dev); if (ret) { dev_err(&pdev->dev, "register PCM failed: %d\n", ret); goto err_unregister_component; } return 0; err_unregister_component: snd_soc_unregister_component(&pdev->dev); err_release_clk: clk_disable(dev->clk); clk_put(dev->clk); return ret; } static int davinci_i2s_remove(struct platform_device *pdev) { struct davinci_mcbsp_dev *dev = dev_get_drvdata(&pdev->dev); snd_soc_unregister_component(&pdev->dev); davinci_soc_platform_unregister(&pdev->dev); clk_disable(dev->clk); clk_put(dev->clk); dev->clk = NULL; return 0; } static struct platform_driver davinci_mcbsp_driver = { .probe = davinci_i2s_probe, .remove = davinci_i2s_remove, .driver = { .name = "davinci-mcbsp", .owner = THIS_MODULE, }, }; module_platform_driver(davinci_mcbsp_driver); MODULE_AUTHOR("Vladimir Barinov"); MODULE_DESCRIPTION("TI DAVINCI I2S (McBSP) SoC Interface"); MODULE_LICENSE("GPL");
gpl-2.0
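For reference, a minimal standalone sketch (not kernel code) of the clock-divider search used in the i2s_accurate_sck branch of davinci_i2s_hw_params() above: starting from a divider of 256, it steps the divider down until the resulting frame length fits the 33..4095 range that the frame-period field can hold. The master-clock and sample-rate values are made-up examples, and an integer sample rate is assumed (the driver works with a rate_num/rate_den fraction and then programs divider minus one into SRGR).

#include <stdio.h>

/* Search for a bit-clock divider that yields 33..4095 bit clocks per frame. */
static unsigned int find_divider(unsigned long freq, unsigned int rate,
				 unsigned int *framesize)
{
	unsigned int clk_div = 256;

	do {
		*framesize = (freq / --clk_div) / rate;
	} while ((*framesize < 33 || *framesize > 4095) && clk_div > 1);

	return clk_div;
}

int main(void)
{
	unsigned int framesize;
	unsigned int div = find_divider(24576000UL, 48000, &framesize);

	printf("divider=%u, bit clocks per frame=%u\n", div, framesize);
	return 0;
}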
embeddedarm/linux-2.6.34-ts471x
arch/x86/pci/bus_numa.c
4134
1985
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/range.h>

#include "bus_numa.h"

int pci_root_num;
struct pci_root_info pci_root_info[PCI_ROOT_NR];

void x86_pci_root_bus_res_quirks(struct pci_bus *b)
{
	int i;
	int j;
	struct pci_root_info *info;

	/* don't go for it if _CRS is used already */
	if (b->resource[0] != &ioport_resource ||
	    b->resource[1] != &iomem_resource)
		return;

	if (!pci_root_num)
		return;

	for (i = 0; i < pci_root_num; i++) {
		if (pci_root_info[i].bus_min == b->number)
			break;
	}

	if (i == pci_root_num)
		return;

	printk(KERN_DEBUG "PCI: peer root bus %02x res updated from pci conf\n",
			b->number);

	pci_bus_remove_resources(b);
	info = &pci_root_info[i];
	for (j = 0; j < info->res_num; j++) {
		struct resource *res;
		struct resource *root;

		res = &info->res[j];
		pci_bus_add_resource(b, res, 0);
		if (res->flags & IORESOURCE_IO)
			root = &ioport_resource;
		else
			root = &iomem_resource;
		insert_resource(root, res);
	}
}

void __devinit update_res(struct pci_root_info *info, resource_size_t start,
			  resource_size_t end, unsigned long flags, int merge)
{
	int i;
	struct resource *res;

	if (start > end)
		return;

	if (start == MAX_RESOURCE)
		return;

	if (!merge)
		goto addit;

	/* try to merge it with old one */
	for (i = 0; i < info->res_num; i++) {
		resource_size_t final_start, final_end;
		resource_size_t common_start, common_end;

		res = &info->res[i];
		if (res->flags != flags)
			continue;

		common_start = max(res->start, start);
		common_end = min(res->end, end);
		if (common_start > common_end + 1)
			continue;

		final_start = min(res->start, start);
		final_end = max(res->end, end);

		res->start = final_start;
		res->end = final_end;
		return;
	}

addit:
	/* need to add that */
	if (info->res_num >= RES_NUM)
		return;

	res = &info->res[info->res_num];
	res->name = info->name;
	res->flags = flags;
	res->start = start;
	res->end = end;
	res->child = NULL;
	info->res_num++;
}
gpl-2.0
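The range-merging test in update_res() above (merge two same-flag resources when they overlap or are directly adjacent) can be illustrated with this small standalone sketch; the struct, function name and example addresses are invented for illustration and are not part of the kernel sources.

#include <stdio.h>

struct range { unsigned long start, end; };

/* Merge b into a if they overlap or are directly adjacent; returns 1 on merge. */
static int try_merge(struct range *a, const struct range *b)
{
	unsigned long common_start = a->start > b->start ? a->start : b->start;
	unsigned long common_end   = a->end   < b->end   ? a->end   : b->end;

	/* Same test as update_res(): a gap of one or more addresses means no merge. */
	if (common_start > common_end + 1)
		return 0;

	a->start = a->start < b->start ? a->start : b->start;
	a->end   = a->end   > b->end   ? a->end   : b->end;
	return 1;
}

int main(void)
{
	struct range a = { 0x1000, 0x1fff };
	struct range b = { 0x2000, 0x2fff };	/* directly adjacent to a */

	if (try_merge(&a, &b))
		printf("merged: [%#lx-%#lx]\n", a.start, a.end);
	return 0;
}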
b-man/msm-3.4
arch/powerpc/kvm/book3s_64_mmu.c
6950
12192
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * Copyright SUSE Linux Products GmbH 2009 * * Authors: Alexander Graf <agraf@suse.de> */ #include <linux/types.h> #include <linux/string.h> #include <linux/kvm.h> #include <linux/kvm_host.h> #include <linux/highmem.h> #include <asm/tlbflush.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> /* #define DEBUG_MMU */ #ifdef DEBUG_MMU #define dprintk(X...) printk(KERN_INFO X) #else #define dprintk(X...) do { } while(0) #endif static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu) { kvmppc_set_msr(vcpu, MSR_SF); } static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe( struct kvm_vcpu *vcpu, gva_t eaddr) { int i; u64 esid = GET_ESID(eaddr); u64 esid_1t = GET_ESID_1T(eaddr); for (i = 0; i < vcpu->arch.slb_nr; i++) { u64 cmp_esid = esid; if (!vcpu->arch.slb[i].valid) continue; if (vcpu->arch.slb[i].tb) cmp_esid = esid_1t; if (vcpu->arch.slb[i].esid == cmp_esid) return &vcpu->arch.slb[i]; } dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n", eaddr, esid, esid_1t); for (i = 0; i < vcpu->arch.slb_nr; i++) { if (vcpu->arch.slb[i].vsid) dprintk(" %d: %c%c%c %llx %llx\n", i, vcpu->arch.slb[i].valid ? 'v' : ' ', vcpu->arch.slb[i].large ? 'l' : ' ', vcpu->arch.slb[i].tb ? 't' : ' ', vcpu->arch.slb[i].esid, vcpu->arch.slb[i].vsid); } return NULL; } static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, bool data) { struct kvmppc_slb *slb; slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr); if (!slb) return 0; if (slb->tb) return (((u64)eaddr >> 12) & 0xfffffff) | (((u64)slb->vsid) << 28); return (((u64)eaddr >> 12) & 0xffff) | (((u64)slb->vsid) << 16); } static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe) { return slbe->large ? 
24 : 12; } static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr) { int p = kvmppc_mmu_book3s_64_get_pagesize(slbe); return ((eaddr & 0xfffffff) >> p); } static hva_t kvmppc_mmu_book3s_64_get_pteg( struct kvmppc_vcpu_book3s *vcpu_book3s, struct kvmppc_slb *slbe, gva_t eaddr, bool second) { u64 hash, pteg, htabsize; u32 page; hva_t r; page = kvmppc_mmu_book3s_64_get_page(slbe, eaddr); htabsize = ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1); hash = slbe->vsid ^ page; if (second) hash = ~hash; hash &= ((1ULL << 39ULL) - 1ULL); hash &= htabsize; hash <<= 7ULL; pteg = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL; pteg |= hash; dprintk("MMU: page=0x%x sdr1=0x%llx pteg=0x%llx vsid=0x%llx\n", page, vcpu_book3s->sdr1, pteg, slbe->vsid); /* When running a PAPR guest, SDR1 contains a HVA address instead of a GPA */ if (vcpu_book3s->vcpu.arch.papr_enabled) r = pteg; else r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT); if (kvm_is_error_hva(r)) return r; return r | (pteg & ~PAGE_MASK); } static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr) { int p = kvmppc_mmu_book3s_64_get_pagesize(slbe); u64 avpn; avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr); avpn |= slbe->vsid << (28 - p); if (p < 24) avpn >>= ((80 - p) - 56) - 8; else avpn <<= 8; return avpn; } static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *gpte, bool data) { struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); struct kvmppc_slb *slbe; hva_t ptegp; u64 pteg[16]; u64 avpn = 0; int i; u8 key = 0; bool found = false; bool perm_err = false; int second = 0; ulong mp_ea = vcpu->arch.magic_page_ea; /* Magic page override */ if (unlikely(mp_ea) && unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) && !(vcpu->arch.shared->msr & MSR_PR)) { gpte->eaddr = eaddr; gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data); gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff); gpte->raddr &= KVM_PAM; gpte->may_execute = true; gpte->may_read = true; gpte->may_write = true; return 0; } slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr); if (!slbe) goto no_seg_found; do_second: ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second); if (kvm_is_error_hva(ptegp)) goto no_page_found; avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr); if(copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) { printk(KERN_ERR "KVM can't copy data from 0x%lx!\n", ptegp); goto no_page_found; } if ((vcpu->arch.shared->msr & MSR_PR) && slbe->Kp) key = 4; else if (!(vcpu->arch.shared->msr & MSR_PR) && slbe->Ks) key = 4; for (i=0; i<16; i+=2) { u64 v = pteg[i]; u64 r = pteg[i+1]; /* Valid check */ if (!(v & HPTE_V_VALID)) continue; /* Hash check */ if ((v & HPTE_V_SECONDARY) != second) continue; /* AVPN compare */ if (HPTE_V_AVPN_VAL(avpn) == HPTE_V_AVPN_VAL(v)) { u8 pp = (r & HPTE_R_PP) | key; int eaddr_mask = 0xFFF; gpte->eaddr = eaddr; gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data); if (slbe->large) eaddr_mask = 0xFFFFFF; gpte->raddr = (r & HPTE_R_RPN) | (eaddr & eaddr_mask); gpte->may_execute = ((r & HPTE_R_N) ? 
false : true); gpte->may_read = false; gpte->may_write = false; switch (pp) { case 0: case 1: case 2: case 6: gpte->may_write = true; /* fall through */ case 3: case 5: case 7: gpte->may_read = true; break; } if (!gpte->may_read) { perm_err = true; continue; } dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx " "-> 0x%lx\n", eaddr, avpn, gpte->vpage, gpte->raddr); found = true; break; } } /* Update PTE R and C bits, so the guest's swapper knows we used the * page */ if (found) { u32 oldr = pteg[i+1]; if (gpte->may_read) { /* Set the accessed flag */ pteg[i+1] |= HPTE_R_R; } if (gpte->may_write) { /* Set the dirty flag */ pteg[i+1] |= HPTE_R_C; } else { dprintk("KVM: Mapping read-only page!\n"); } /* Write back into the PTEG */ if (pteg[i+1] != oldr) copy_to_user((void __user *)ptegp, pteg, sizeof(pteg)); return 0; } else { dprintk("KVM MMU: No PTE found (ea=0x%lx sdr1=0x%llx " "ptegp=0x%lx)\n", eaddr, to_book3s(vcpu)->sdr1, ptegp); for (i = 0; i < 16; i += 2) dprintk(" %02d: 0x%llx - 0x%llx (0x%llx)\n", i, pteg[i], pteg[i+1], avpn); if (!second) { second = HPTE_V_SECONDARY; goto do_second; } } no_page_found: if (perm_err) return -EPERM; return -ENOENT; no_seg_found: dprintk("KVM MMU: Trigger segment fault\n"); return -EINVAL; } static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb) { struct kvmppc_vcpu_book3s *vcpu_book3s; u64 esid, esid_1t; int slb_nr; struct kvmppc_slb *slbe; dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb); vcpu_book3s = to_book3s(vcpu); esid = GET_ESID(rb); esid_1t = GET_ESID_1T(rb); slb_nr = rb & 0xfff; if (slb_nr > vcpu->arch.slb_nr) return; slbe = &vcpu->arch.slb[slb_nr]; slbe->large = (rs & SLB_VSID_L) ? 1 : 0; slbe->tb = (rs & SLB_VSID_B_1T) ? 1 : 0; slbe->esid = slbe->tb ? esid_1t : esid; slbe->vsid = rs >> 12; slbe->valid = (rb & SLB_ESID_V) ? 1 : 0; slbe->Ks = (rs & SLB_VSID_KS) ? 1 : 0; slbe->Kp = (rs & SLB_VSID_KP) ? 1 : 0; slbe->nx = (rs & SLB_VSID_N) ? 1 : 0; slbe->class = (rs & SLB_VSID_C) ? 1 : 0; slbe->orige = rb & (ESID_MASK | SLB_ESID_V); slbe->origv = rs; /* Map the new segment */ kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT); } static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr) { struct kvmppc_slb *slbe; if (slb_nr > vcpu->arch.slb_nr) return 0; slbe = &vcpu->arch.slb[slb_nr]; return slbe->orige; } static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr) { struct kvmppc_slb *slbe; if (slb_nr > vcpu->arch.slb_nr) return 0; slbe = &vcpu->arch.slb[slb_nr]; return slbe->origv; } static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea) { struct kvmppc_slb *slbe; dprintk("KVM MMU: slbie(0x%llx)\n", ea); slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea); if (!slbe) return; dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid); slbe->valid = false; kvmppc_mmu_map_segment(vcpu, ea); } static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu) { int i; dprintk("KVM MMU: slbia()\n"); for (i = 1; i < vcpu->arch.slb_nr; i++) vcpu->arch.slb[i].valid = false; if (vcpu->arch.shared->msr & MSR_IR) { kvmppc_mmu_flush_segments(vcpu); kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); } } static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum, ulong value) { u64 rb = 0, rs = 0; /* * According to Book3 2.01 mtsrin is implemented as: * * The SLB entry specified by (RB)32:35 is loaded from register * RS, as follows. 
* * SLBE Bit Source SLB Field * * 0:31 0x0000_0000 ESID-0:31 * 32:35 (RB)32:35 ESID-32:35 * 36 0b1 V * 37:61 0x00_0000|| 0b0 VSID-0:24 * 62:88 (RS)37:63 VSID-25:51 * 89:91 (RS)33:35 Ks Kp N * 92 (RS)36 L ((RS)36 must be 0b0) * 93 0b0 C */ dprintk("KVM MMU: mtsrin(0x%x, 0x%lx)\n", srnum, value); /* ESID = srnum */ rb |= (srnum & 0xf) << 28; /* Set the valid bit */ rb |= 1 << 27; /* Index = ESID */ rb |= srnum; /* VSID = VSID */ rs |= (value & 0xfffffff) << 12; /* flags = flags */ rs |= ((value >> 28) & 0x7) << 9; kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb); } static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va, bool large) { u64 mask = 0xFFFFFFFFFULL; dprintk("KVM MMU: tlbie(0x%lx)\n", va); if (large) mask = 0xFFFFFF000ULL; kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask); } static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid) { ulong ea = esid << SID_SHIFT; struct kvmppc_slb *slb; u64 gvsid = esid; ulong mp_ea = vcpu->arch.magic_page_ea; if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea); if (slb) gvsid = slb->vsid; } switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { case 0: *vsid = VSID_REAL | esid; break; case MSR_IR: *vsid = VSID_REAL_IR | gvsid; break; case MSR_DR: *vsid = VSID_REAL_DR | gvsid; break; case MSR_DR|MSR_IR: if (!slb) goto no_slb; *vsid = gvsid; break; default: BUG(); break; } if (vcpu->arch.shared->msr & MSR_PR) *vsid |= VSID_PR; return 0; no_slb: /* Catch magic page case */ if (unlikely(mp_ea) && unlikely(esid == (mp_ea >> SID_SHIFT)) && !(vcpu->arch.shared->msr & MSR_PR)) { *vsid = VSID_REAL | esid; return 0; } return -EINVAL; } static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu) { return (to_book3s(vcpu)->hid[5] & 0x80); } void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu) { struct kvmppc_mmu *mmu = &vcpu->arch.mmu; mmu->mfsrin = NULL; mmu->mtsrin = kvmppc_mmu_book3s_64_mtsrin; mmu->slbmte = kvmppc_mmu_book3s_64_slbmte; mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee; mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev; mmu->slbie = kvmppc_mmu_book3s_64_slbie; mmu->slbia = kvmppc_mmu_book3s_64_slbia; mmu->xlate = kvmppc_mmu_book3s_64_xlate; mmu->reset_msr = kvmppc_mmu_book3s_64_reset_msr; mmu->tlbie = kvmppc_mmu_book3s_64_tlbie; mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid; mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp; mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32; vcpu->arch.hflags |= BOOK3S_HFLAG_SLB; }
gpl-2.0
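As an illustration of the PTEG address computation in kvmppc_mmu_book3s_64_get_pteg() above (primary hash is vsid ^ page, the secondary hash is its complement, both masked to the configured HTAB size, with each PTE group 128 bytes long), here is a standalone sketch; the SDR1, VSID and page values are made up for the example.

#include <stdio.h>
#include <stdint.h>

static uint64_t pteg_addr(uint64_t sdr1, uint64_t vsid, uint32_t page, int second)
{
	uint64_t htabsize = (1ULL << ((sdr1 & 0x1f) + 11)) - 1;
	uint64_t hash = vsid ^ page;

	if (second)
		hash = ~hash;
	hash &= (1ULL << 39) - 1;	/* 39-bit hash value */
	hash &= htabsize;		/* limit to the configured HTAB size */
	hash <<= 7;			/* each PTEG is 128 bytes */

	return (sdr1 & 0xfffffffffffc0000ULL) | hash;
}

int main(void)
{
	uint64_t sdr1 = 0x0000000001000005ULL;	/* example: HTAB at 16MB, size field 5 */
	uint64_t primary   = pteg_addr(sdr1, 0x12345, 0x67, 0);
	uint64_t secondary = pteg_addr(sdr1, 0x12345, 0x67, 1);

	printf("primary PTEG at %#llx, secondary at %#llx\n",
	       (unsigned long long)primary, (unsigned long long)secondary);
	return 0;
}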
CandyKat/kernel_lge_hammerhead
drivers/net/ethernet/8390/lib8390.c
7462
35078
/* 8390.c: A general NS8390 ethernet driver core for linux. */ /* Written 1992-94 by Donald Becker. Copyright 1993 United States Government as represented by the Director, National Security Agency. This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. The author may be reached as becker@scyld.com, or C/O Scyld Computing Corporation 410 Severn Ave., Suite 210 Annapolis MD 21403 This is the chip-specific code for many 8390-based ethernet adaptors. This is not a complete driver, it must be combined with board-specific code such as ne.c, wd.c, 3c503.c, etc. Seeing how at least eight drivers use this code, (not counting the PCMCIA ones either) it is easy to break some card by what seems like a simple innocent change. Please contact me or Donald if you think you have found something that needs changing. -- PG Changelog: Paul Gortmaker : remove set_bit lock, other cleanups. Paul Gortmaker : add ei_get_8390_hdr() so we can pass skb's to ei_block_input() for eth_io_copy_and_sum(). Paul Gortmaker : exchange static int ei_pingpong for a #define, also add better Tx error handling. Paul Gortmaker : rewrite Rx overrun handling as per NS specs. Alexey Kuznetsov : use the 8390's six bit hash multicast filter. Paul Gortmaker : tweak ANK's above multicast changes a bit. Paul Gortmaker : update packet statistics for v2.1.x Alan Cox : support arbitrary stupid port mappings on the 68K Macintosh. Support >16bit I/O spaces Paul Gortmaker : add kmod support for auto-loading of the 8390 module by all drivers that require it. Alan Cox : Spinlocking work, added 'BUG_83C690' Paul Gortmaker : Separate out Tx timeout code from Tx path. Paul Gortmaker : Remove old unused single Tx buffer code. Hayato Fujiwara : Add m32r support. Paul Gortmaker : use skb_padto() instead of stack scratch area Sources: The National Semiconductor LAN Databook, and the 3Com 3c503 databook. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/fs.h> #include <linux/types.h> #include <linux/string.h> #include <linux/bitops.h> #include <linux/uaccess.h> #include <linux/io.h> #include <asm/irq.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/fcntl.h> #include <linux/in.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/crc32.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #define NS8390_CORE #include "8390.h" #define BUG_83C690 /* These are the operational function interfaces to board-specific routines. void reset_8390(struct net_device *dev) Resets the board associated with DEV, including a hardware reset of the 8390. This is only called when there is a transmit timeout, and it is always followed by 8390_init(). void block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page) Write the COUNT bytes of BUF to the packet buffer at START_PAGE. The "page" value uses the 8390's 256-byte pages. void get_8390_hdr(struct net_device *dev, struct e8390_hdr *hdr, int ring_page) Read the 4 byte, page aligned 8390 header. *If* there is a subsequent read, it will be of the rest of the packet. void block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) Read COUNT bytes from the packet buffer into the skb data area. Start reading from RING_OFFSET, the address as the 8390 sees it. This will always follow the read of the 8390 header. 
*/ #define ei_reset_8390 (ei_local->reset_8390) #define ei_block_output (ei_local->block_output) #define ei_block_input (ei_local->block_input) #define ei_get_8390_hdr (ei_local->get_8390_hdr) /* use 0 for production, 1 for verification, >2 for debug */ #ifndef ei_debug int ei_debug = 1; #endif /* Index to functions. */ static void ei_tx_intr(struct net_device *dev); static void ei_tx_err(struct net_device *dev); static void ei_receive(struct net_device *dev); static void ei_rx_overrun(struct net_device *dev); /* Routines generic to NS8390-based boards. */ static void NS8390_trigger_send(struct net_device *dev, unsigned int length, int start_page); static void do_set_multicast_list(struct net_device *dev); static void __NS8390_init(struct net_device *dev, int startp); /* * SMP and the 8390 setup. * * The 8390 isn't exactly designed to be multithreaded on RX/TX. There is * a page register that controls bank and packet buffer access. We guard * this with ei_local->page_lock. Nobody should assume or set the page other * than zero when the lock is not held. Lock holders must restore page 0 * before unlocking. Even pure readers must take the lock to protect in * page 0. * * To make life difficult the chip can also be very slow. We therefore can't * just use spinlocks. For the longer lockups we disable the irq the device * sits on and hold the lock. We must hold the lock because there is a dual * processor case other than interrupts (get stats/set multicast list in * parallel with each other and transmit). * * Note: in theory we can just disable the irq on the card _but_ there is * a latency on SMP irq delivery. So we can easily go "disable irq" "sync irqs" * enter lock, take the queued irq. So we waddle instead of flying. * * Finally by special arrangement for the purpose of being generally * annoying the transmit function is called bh atomic. That places * restrictions on the user context callers as disable_irq won't save * them. * * Additional explanation of problems with locking by Alan Cox: * * "The author (me) didn't use spin_lock_irqsave because the slowness of the * card means that approach caused horrible problems like losing serial data * at 38400 baud on some chips. Remember many 8390 nics on PCI were ISA * chips with FPGA front ends. * * Ok the logic behind the 8390 is very simple: * * Things to know * - IRQ delivery is asynchronous to the PCI bus * - Blocking the local CPU IRQ via spin locks was too slow * - The chip has register windows needing locking work * * So the path was once (I say once as people appear to have changed it * in the mean time and it now looks rather bogus if the changes to use * disable_irq_nosync_irqsave are disabling the local IRQ) * * * Take the page lock * Mask the IRQ on chip * Disable the IRQ (but not mask locally- someone seems to have * broken this with the lock validator stuff) * [This must be _nosync as the page lock may otherwise * deadlock us] * Drop the page lock and turn IRQs back on * * At this point an existing IRQ may still be running but we can't * get a new one * * Take the lock (so we know the IRQ has terminated) but don't mask * the IRQs on the processor * Set irqlock [for debug] * * Transmit (slow as ****) * * re-enable the IRQ * * * We have to use disable_irq because otherwise you will get delayed * interrupts on the APIC bus deadlocking the transmit path. * * Quite hairy but the chip simply wasn't designed for SMP and you can't * even ACK an interrupt without risking corrupting other parallel * activities on the chip." 
[lkml, 25 Jul 2007] */ /** * ei_open - Open/initialize the board. * @dev: network device to initialize * * This routine goes all-out, setting everything * up anew at each open, even though many of these registers should only * need to be set once at boot. */ static int __ei_open(struct net_device *dev) { unsigned long flags; struct ei_device *ei_local = netdev_priv(dev); if (dev->watchdog_timeo <= 0) dev->watchdog_timeo = TX_TIMEOUT; /* * Grab the page lock so we own the register set, then call * the init function. */ spin_lock_irqsave(&ei_local->page_lock, flags); __NS8390_init(dev, 1); /* Set the flag before we drop the lock, That way the IRQ arrives after its set and we get no silly warnings */ netif_start_queue(dev); spin_unlock_irqrestore(&ei_local->page_lock, flags); ei_local->irqlock = 0; return 0; } /** * ei_close - shut down network device * @dev: network device to close * * Opposite of ei_open(). Only used when "ifconfig <devname> down" is done. */ static int __ei_close(struct net_device *dev) { struct ei_device *ei_local = netdev_priv(dev); unsigned long flags; /* * Hold the page lock during close */ spin_lock_irqsave(&ei_local->page_lock, flags); __NS8390_init(dev, 0); spin_unlock_irqrestore(&ei_local->page_lock, flags); netif_stop_queue(dev); return 0; } /** * ei_tx_timeout - handle transmit time out condition * @dev: network device which has apparently fallen asleep * * Called by kernel when device never acknowledges a transmit has * completed (or failed) - i.e. never posted a Tx related interrupt. */ static void __ei_tx_timeout(struct net_device *dev) { unsigned long e8390_base = dev->base_addr; struct ei_device *ei_local = netdev_priv(dev); int txsr, isr, tickssofar = jiffies - dev_trans_start(dev); unsigned long flags; dev->stats.tx_errors++; spin_lock_irqsave(&ei_local->page_lock, flags); txsr = ei_inb(e8390_base+EN0_TSR); isr = ei_inb(e8390_base+EN0_ISR); spin_unlock_irqrestore(&ei_local->page_lock, flags); netdev_dbg(dev, "Tx timed out, %s TSR=%#2x, ISR=%#2x, t=%d\n", (txsr & ENTSR_ABT) ? "excess collisions." : (isr) ? "lost interrupt?" : "cable problem?", txsr, isr, tickssofar); if (!isr && !dev->stats.tx_packets) { /* The 8390 probably hasn't gotten on the cable yet. */ ei_local->interface_num ^= 1; /* Try a different xcvr. */ } /* Ugly but a reset can be slow, yet must be protected */ disable_irq_nosync_lockdep(dev->irq); spin_lock(&ei_local->page_lock); /* Try to restart the card. Perhaps the user has fixed something. */ ei_reset_8390(dev); __NS8390_init(dev, 1); spin_unlock(&ei_local->page_lock); enable_irq_lockdep(dev->irq); netif_wake_queue(dev); } /** * ei_start_xmit - begin packet transmission * @skb: packet to be sent * @dev: network device to which packet is sent * * Sends a packet to an 8390 network device. */ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned long e8390_base = dev->base_addr; struct ei_device *ei_local = netdev_priv(dev); int send_length = skb->len, output_page; unsigned long flags; char buf[ETH_ZLEN]; char *data = skb->data; if (skb->len < ETH_ZLEN) { memset(buf, 0, ETH_ZLEN); /* more efficient than doing just the needed bits */ memcpy(buf, data, skb->len); send_length = ETH_ZLEN; data = buf; } /* Mask interrupts from the ethercard. SMP: We have to grab the lock here otherwise the IRQ handler on another CPU can flip window and race the IRQ mask set. 
We end up trashing the mcast filter not disabling irqs if we don't lock */ spin_lock_irqsave(&ei_local->page_lock, flags); ei_outb_p(0x00, e8390_base + EN0_IMR); spin_unlock_irqrestore(&ei_local->page_lock, flags); /* * Slow phase with lock held. */ disable_irq_nosync_lockdep_irqsave(dev->irq, &flags); spin_lock(&ei_local->page_lock); ei_local->irqlock = 1; /* * We have two Tx slots available for use. Find the first free * slot, and then perform some sanity checks. With two Tx bufs, * you get very close to transmitting back-to-back packets. With * only one Tx buf, the transmitter sits idle while you reload the * card, leaving a substantial gap between each transmitted packet. */ if (ei_local->tx1 == 0) { output_page = ei_local->tx_start_page; ei_local->tx1 = send_length; if (ei_debug && ei_local->tx2 > 0) netdev_dbg(dev, "idle transmitter tx2=%d, lasttx=%d, txing=%d\n", ei_local->tx2, ei_local->lasttx, ei_local->txing); } else if (ei_local->tx2 == 0) { output_page = ei_local->tx_start_page + TX_PAGES/2; ei_local->tx2 = send_length; if (ei_debug && ei_local->tx1 > 0) netdev_dbg(dev, "idle transmitter, tx1=%d, lasttx=%d, txing=%d\n", ei_local->tx1, ei_local->lasttx, ei_local->txing); } else { /* We should never get here. */ if (ei_debug) netdev_dbg(dev, "No Tx buffers free! tx1=%d tx2=%d last=%d\n", ei_local->tx1, ei_local->tx2, ei_local->lasttx); ei_local->irqlock = 0; netif_stop_queue(dev); ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR); spin_unlock(&ei_local->page_lock); enable_irq_lockdep_irqrestore(dev->irq, &flags); dev->stats.tx_errors++; return NETDEV_TX_BUSY; } /* * Okay, now upload the packet and trigger a send if the transmitter * isn't already sending. If it is busy, the interrupt handler will * trigger the send later, upon receiving a Tx done interrupt. */ ei_block_output(dev, send_length, data, output_page); if (!ei_local->txing) { ei_local->txing = 1; NS8390_trigger_send(dev, send_length, output_page); if (output_page == ei_local->tx_start_page) { ei_local->tx1 = -1; ei_local->lasttx = -1; } else { ei_local->tx2 = -1; ei_local->lasttx = -2; } } else ei_local->txqueue++; if (ei_local->tx1 && ei_local->tx2) netif_stop_queue(dev); else netif_start_queue(dev); /* Turn 8390 interrupts back on. */ ei_local->irqlock = 0; ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR); spin_unlock(&ei_local->page_lock); enable_irq_lockdep_irqrestore(dev->irq, &flags); skb_tx_timestamp(skb); dev_kfree_skb(skb); dev->stats.tx_bytes += send_length; return NETDEV_TX_OK; } /** * ei_interrupt - handle the interrupts from an 8390 * @irq: interrupt number * @dev_id: a pointer to the net_device * * Handle the ether interface interrupts. We pull packets from * the 8390 via the card specific functions and fire them at the networking * stack. We also handle transmit completions and wake the transmit path if * necessary. We also update the counters and do other housekeeping as * needed. */ static irqreturn_t __ei_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; unsigned long e8390_base = dev->base_addr; int interrupts, nr_serviced = 0; struct ei_device *ei_local = netdev_priv(dev); /* * Protect the irq test too. */ spin_lock(&ei_local->page_lock); if (ei_local->irqlock) { /* * This might just be an interrupt for a PCI device sharing * this line */ netdev_err(dev, "Interrupted while interrupts are masked! isr=%#2x imr=%#2x\n", ei_inb_p(e8390_base + EN0_ISR), ei_inb_p(e8390_base + EN0_IMR)); spin_unlock(&ei_local->page_lock); return IRQ_NONE; } /* Change to page 0 and read the intr status reg. 
*/ ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD); if (ei_debug > 3) netdev_dbg(dev, "interrupt(isr=%#2.2x)\n", ei_inb_p(e8390_base + EN0_ISR)); /* !!Assumption!! -- we stay in page 0. Don't break this. */ while ((interrupts = ei_inb_p(e8390_base + EN0_ISR)) != 0 && ++nr_serviced < MAX_SERVICE) { if (!netif_running(dev)) { netdev_warn(dev, "interrupt from stopped card\n"); /* rmk - acknowledge the interrupts */ ei_outb_p(interrupts, e8390_base + EN0_ISR); interrupts = 0; break; } if (interrupts & ENISR_OVER) ei_rx_overrun(dev); else if (interrupts & (ENISR_RX+ENISR_RX_ERR)) { /* Got a good (?) packet. */ ei_receive(dev); } /* Push the next to-transmit packet through. */ if (interrupts & ENISR_TX) ei_tx_intr(dev); else if (interrupts & ENISR_TX_ERR) ei_tx_err(dev); if (interrupts & ENISR_COUNTERS) { dev->stats.rx_frame_errors += ei_inb_p(e8390_base + EN0_COUNTER0); dev->stats.rx_crc_errors += ei_inb_p(e8390_base + EN0_COUNTER1); dev->stats.rx_missed_errors += ei_inb_p(e8390_base + EN0_COUNTER2); ei_outb_p(ENISR_COUNTERS, e8390_base + EN0_ISR); /* Ack intr. */ } /* Ignore any RDC interrupts that make it back to here. */ if (interrupts & ENISR_RDC) ei_outb_p(ENISR_RDC, e8390_base + EN0_ISR); ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD); } if (interrupts && ei_debug) { ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base + E8390_CMD); if (nr_serviced >= MAX_SERVICE) { /* 0xFF is valid for a card removal */ if (interrupts != 0xFF) netdev_warn(dev, "Too much work at interrupt, status %#2.2x\n", interrupts); ei_outb_p(ENISR_ALL, e8390_base + EN0_ISR); /* Ack. most intrs. */ } else { netdev_warn(dev, "unknown interrupt %#2x\n", interrupts); ei_outb_p(0xff, e8390_base + EN0_ISR); /* Ack. all intrs. */ } } spin_unlock(&ei_local->page_lock); return IRQ_RETVAL(nr_serviced > 0); } #ifdef CONFIG_NET_POLL_CONTROLLER static void __ei_poll(struct net_device *dev) { disable_irq(dev->irq); __ei_interrupt(dev->irq, dev); enable_irq(dev->irq); } #endif /** * ei_tx_err - handle transmitter error * @dev: network device which threw the exception * * A transmitter error has happened. Most likely excess collisions (which * is a fairly normal condition). If the error is one where the Tx will * have been aborted, we try and send another one right away, instead of * letting the failed packet sit and collect dust in the Tx buffer. This * is a much better solution as it avoids kernel based Tx timeouts, and * an unnecessary card reset. * * Called with lock held. */ static void ei_tx_err(struct net_device *dev) { unsigned long e8390_base = dev->base_addr; /* ei_local is used on some platforms via the EI_SHIFT macro */ struct ei_device *ei_local __maybe_unused = netdev_priv(dev); unsigned char txsr = ei_inb_p(e8390_base+EN0_TSR); unsigned char tx_was_aborted = txsr & (ENTSR_ABT+ENTSR_FU); #ifdef VERBOSE_ERROR_DUMP netdev_dbg(dev, "transmitter error (%#2x):", txsr); if (txsr & ENTSR_ABT) pr_cont(" excess-collisions "); if (txsr & ENTSR_ND) pr_cont(" non-deferral "); if (txsr & ENTSR_CRS) pr_cont(" lost-carrier "); if (txsr & ENTSR_FU) pr_cont(" FIFO-underrun "); if (txsr & ENTSR_CDH) pr_cont(" lost-heartbeat "); pr_cont("\n"); #endif ei_outb_p(ENISR_TX_ERR, e8390_base + EN0_ISR); /* Ack intr. 
*/ if (tx_was_aborted) ei_tx_intr(dev); else { dev->stats.tx_errors++; if (txsr & ENTSR_CRS) dev->stats.tx_carrier_errors++; if (txsr & ENTSR_CDH) dev->stats.tx_heartbeat_errors++; if (txsr & ENTSR_OWC) dev->stats.tx_window_errors++; } } /** * ei_tx_intr - transmit interrupt handler * @dev: network device for which tx intr is handled * * We have finished a transmit: check for errors and then trigger the next * packet to be sent. Called with lock held. */ static void ei_tx_intr(struct net_device *dev) { unsigned long e8390_base = dev->base_addr; struct ei_device *ei_local = netdev_priv(dev); int status = ei_inb(e8390_base + EN0_TSR); ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */ /* * There are two Tx buffers, see which one finished, and trigger * the send of another one if it exists. */ ei_local->txqueue--; if (ei_local->tx1 < 0) { if (ei_local->lasttx != 1 && ei_local->lasttx != -1) pr_err("%s: bogus last_tx_buffer %d, tx1=%d\n", ei_local->name, ei_local->lasttx, ei_local->tx1); ei_local->tx1 = 0; if (ei_local->tx2 > 0) { ei_local->txing = 1; NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6); dev->trans_start = jiffies; ei_local->tx2 = -1, ei_local->lasttx = 2; } else ei_local->lasttx = 20, ei_local->txing = 0; } else if (ei_local->tx2 < 0) { if (ei_local->lasttx != 2 && ei_local->lasttx != -2) pr_err("%s: bogus last_tx_buffer %d, tx2=%d\n", ei_local->name, ei_local->lasttx, ei_local->tx2); ei_local->tx2 = 0; if (ei_local->tx1 > 0) { ei_local->txing = 1; NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page); dev->trans_start = jiffies; ei_local->tx1 = -1; ei_local->lasttx = 1; } else ei_local->lasttx = 10, ei_local->txing = 0; } /* else netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n", ei_local->lasttx); */ /* Minimize Tx latency: update the statistics after we restart TXing. */ if (status & ENTSR_COL) dev->stats.collisions++; if (status & ENTSR_PTX) dev->stats.tx_packets++; else { dev->stats.tx_errors++; if (status & ENTSR_ABT) { dev->stats.tx_aborted_errors++; dev->stats.collisions += 16; } if (status & ENTSR_CRS) dev->stats.tx_carrier_errors++; if (status & ENTSR_FU) dev->stats.tx_fifo_errors++; if (status & ENTSR_CDH) dev->stats.tx_heartbeat_errors++; if (status & ENTSR_OWC) dev->stats.tx_window_errors++; } netif_wake_queue(dev); } /** * ei_receive - receive some packets * @dev: network device with which receive will be run * * We have a good packet(s), get it/them out of the buffers. * Called with lock held. */ static void ei_receive(struct net_device *dev) { unsigned long e8390_base = dev->base_addr; struct ei_device *ei_local = netdev_priv(dev); unsigned char rxing_page, this_frame, next_frame; unsigned short current_offset; int rx_pkt_count = 0; struct e8390_pkt_hdr rx_frame; int num_rx_pages = ei_local->stop_page-ei_local->rx_start_page; while (++rx_pkt_count < 10) { int pkt_len, pkt_stat; /* Get the rx page (incoming packet pointer). */ ei_outb_p(E8390_NODMA+E8390_PAGE1, e8390_base + E8390_CMD); rxing_page = ei_inb_p(e8390_base + EN1_CURPAG); ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base + E8390_CMD); /* Remove one frame from the ring. Boundary is always a page behind. */ this_frame = ei_inb_p(e8390_base + EN0_BOUNDARY) + 1; if (this_frame >= ei_local->stop_page) this_frame = ei_local->rx_start_page; /* Someday we'll omit the previous, iff we never get this message. (There is at least one clone claimed to have a problem.) Keep quiet if it looks like a card removal. 
One problem here is that some clones crash in roughly the same way. */ if (ei_debug > 0 && this_frame != ei_local->current_page && (this_frame != 0x0 || rxing_page != 0xFF)) netdev_err(dev, "mismatched read page pointers %2x vs %2x\n", this_frame, ei_local->current_page); if (this_frame == rxing_page) /* Read all the frames? */ break; /* Done for now */ current_offset = this_frame << 8; ei_get_8390_hdr(dev, &rx_frame, this_frame); pkt_len = rx_frame.count - sizeof(struct e8390_pkt_hdr); pkt_stat = rx_frame.status; next_frame = this_frame + 1 + ((pkt_len+4)>>8); /* Check for bogosity warned by 3c503 book: the status byte is never written. This happened a lot during testing! This code should be cleaned up someday. */ if (rx_frame.next != next_frame && rx_frame.next != next_frame + 1 && rx_frame.next != next_frame - num_rx_pages && rx_frame.next != next_frame + 1 - num_rx_pages) { ei_local->current_page = rxing_page; ei_outb(ei_local->current_page-1, e8390_base+EN0_BOUNDARY); dev->stats.rx_errors++; continue; } if (pkt_len < 60 || pkt_len > 1518) { if (ei_debug) netdev_dbg(dev, "bogus packet size: %d, status=%#2x nxpg=%#2x\n", rx_frame.count, rx_frame.status, rx_frame.next); dev->stats.rx_errors++; dev->stats.rx_length_errors++; } else if ((pkt_stat & 0x0F) == ENRSR_RXOK) { struct sk_buff *skb; skb = netdev_alloc_skb(dev, pkt_len + 2); if (skb == NULL) { if (ei_debug > 1) netdev_dbg(dev, "Couldn't allocate a sk_buff of size %d\n", pkt_len); dev->stats.rx_dropped++; break; } else { skb_reserve(skb, 2); /* IP headers on 16 byte boundaries */ skb_put(skb, pkt_len); /* Make room */ ei_block_input(dev, pkt_len, skb, current_offset + sizeof(rx_frame)); skb->protocol = eth_type_trans(skb, dev); if (!skb_defer_rx_timestamp(skb)) netif_rx(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += pkt_len; if (pkt_stat & ENRSR_PHY) dev->stats.multicast++; } } else { if (ei_debug) netdev_dbg(dev, "bogus packet: status=%#2x nxpg=%#2x size=%d\n", rx_frame.status, rx_frame.next, rx_frame.count); dev->stats.rx_errors++; /* NB: The NIC counts CRC, frame and missed errors. */ if (pkt_stat & ENRSR_FO) dev->stats.rx_fifo_errors++; } next_frame = rx_frame.next; /* This _should_ never happen: it's here for avoiding bad clones. */ if (next_frame >= ei_local->stop_page) { netdev_notice(dev, "next frame inconsistency, %#2x\n", next_frame); next_frame = ei_local->rx_start_page; } ei_local->current_page = next_frame; ei_outb_p(next_frame-1, e8390_base+EN0_BOUNDARY); } /* We used to also ack ENISR_OVER here, but that would sometimes mask a real overrun, leaving the 8390 in a stopped state with rec'vr off. */ ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR); } /** * ei_rx_overrun - handle receiver overrun * @dev: network device which threw exception * * We have a receiver overrun: we have to kick the 8390 to get it started * again. Problem is that you have to kick it exactly as NS prescribes in * the updated datasheets, or "the NIC may act in an unpredictable manner." * This includes causing "the NIC to defer indefinitely when it is stopped * on a busy network." Ugh. * Called with lock held. Don't call this with the interrupts off or your * computer will hate you - it takes 10ms or so. */ static void ei_rx_overrun(struct net_device *dev) { unsigned long e8390_base = dev->base_addr; unsigned char was_txing, must_resend = 0; /* ei_local is used on some platforms via the EI_SHIFT macro */ struct ei_device *ei_local __maybe_unused = netdev_priv(dev); /* * Record whether a Tx was in progress and then issue the * stop command. 
*/ was_txing = ei_inb_p(e8390_base+E8390_CMD) & E8390_TRANS; ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); if (ei_debug > 1) netdev_dbg(dev, "Receiver overrun\n"); dev->stats.rx_over_errors++; /* * Wait a full Tx time (1.2ms) + some guard time, NS says 1.6ms total. * Early datasheets said to poll the reset bit, but now they say that * it "is not a reliable indicator and subsequently should be ignored." * We wait at least 10ms. */ mdelay(10); /* * Reset RBCR[01] back to zero as per magic incantation. */ ei_outb_p(0x00, e8390_base+EN0_RCNTLO); ei_outb_p(0x00, e8390_base+EN0_RCNTHI); /* * See if any Tx was interrupted or not. According to NS, this * step is vital, and skipping it will cause no end of havoc. */ if (was_txing) { unsigned char tx_completed = ei_inb_p(e8390_base+EN0_ISR) & (ENISR_TX+ENISR_TX_ERR); if (!tx_completed) must_resend = 1; } /* * Have to enter loopback mode and then restart the NIC before * you are allowed to slurp packets up off the ring. */ ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START, e8390_base + E8390_CMD); /* * Clear the Rx ring of all the debris, and ack the interrupt. */ ei_receive(dev); ei_outb_p(ENISR_OVER, e8390_base+EN0_ISR); /* * Leave loopback mode, and resend any packet that got stopped. */ ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); if (must_resend) ei_outb_p(E8390_NODMA + E8390_PAGE0 + E8390_START + E8390_TRANS, e8390_base + E8390_CMD); } /* * Collect the stats. This is called unlocked and from several contexts. */ static struct net_device_stats *__ei_get_stats(struct net_device *dev) { unsigned long ioaddr = dev->base_addr; struct ei_device *ei_local = netdev_priv(dev); unsigned long flags; /* If the card is stopped, just return the present stats. */ if (!netif_running(dev)) return &dev->stats; spin_lock_irqsave(&ei_local->page_lock, flags); /* Read the counter registers, assuming we are in page 0. */ dev->stats.rx_frame_errors += ei_inb_p(ioaddr + EN0_COUNTER0); dev->stats.rx_crc_errors += ei_inb_p(ioaddr + EN0_COUNTER1); dev->stats.rx_missed_errors += ei_inb_p(ioaddr + EN0_COUNTER2); spin_unlock_irqrestore(&ei_local->page_lock, flags); return &dev->stats; } /* * Form the 64 bit 8390 multicast table from the linked list of addresses * associated with this dev structure. */ static inline void make_mc_bits(u8 *bits, struct net_device *dev) { struct netdev_hw_addr *ha; netdev_for_each_mc_addr(ha, dev) { u32 crc = ether_crc(ETH_ALEN, ha->addr); /* * The 8390 uses the 6 most significant bits of the * CRC to index the multicast table. */ bits[crc>>29] |= (1<<((crc>>26)&7)); } } /** * do_set_multicast_list - set/clear multicast filter * @dev: net device for which multicast filter is adjusted * * Set or clear the multicast filter for this adaptor. May be called * from a BH in 2.1.x. Must be called with lock held. */ static void do_set_multicast_list(struct net_device *dev) { unsigned long e8390_base = dev->base_addr; int i; struct ei_device *ei_local = netdev_priv(dev); if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) { memset(ei_local->mcfilter, 0, 8); if (!netdev_mc_empty(dev)) make_mc_bits(ei_local->mcfilter, dev); } else memset(ei_local->mcfilter, 0xFF, 8); /* mcast set to accept-all */ /* * DP8390 manuals don't specify any magic sequence for altering * the multicast regs on an already running card. To be safe, we * ensure multicast mode is off prior to loading up the new hash * table. 
If this proves to be not enough, we can always resort * to stopping the NIC, loading the table and then restarting. * * Bug Alert! The MC regs on the SMC 83C690 (SMC Elite and SMC * Elite16) appear to be write-only. The NS 8390 data sheet lists * them as r/w so this is a bug. The SMC 83C790 (SMC Ultra and * Ultra32 EISA) appears to have this bug fixed. */ if (netif_running(dev)) ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); ei_outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD); for (i = 0; i < 8; i++) { ei_outb_p(ei_local->mcfilter[i], e8390_base + EN1_MULT_SHIFT(i)); #ifndef BUG_83C690 if (ei_inb_p(e8390_base + EN1_MULT_SHIFT(i)) != ei_local->mcfilter[i]) netdev_err(dev, "Multicast filter read/write mismap %d\n", i); #endif } ei_outb_p(E8390_NODMA + E8390_PAGE0, e8390_base + E8390_CMD); if (dev->flags&IFF_PROMISC) ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR); else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR); else ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); } /* * Called without lock held. This is invoked from user context and may * be parallel to just about everything else. Its also fairly quick and * not called too often. Must protect against both bh and irq users */ static void __ei_set_multicast_list(struct net_device *dev) { unsigned long flags; struct ei_device *ei_local = netdev_priv(dev); spin_lock_irqsave(&ei_local->page_lock, flags); do_set_multicast_list(dev); spin_unlock_irqrestore(&ei_local->page_lock, flags); } /** * ethdev_setup - init rest of 8390 device struct * @dev: network device structure to init * * Initialize the rest of the 8390 device structure. Do NOT __init * this, as it is used by 8390 based modular drivers too. */ static void ethdev_setup(struct net_device *dev) { struct ei_device *ei_local = netdev_priv(dev); if (ei_debug > 1) printk(version); ether_setup(dev); spin_lock_init(&ei_local->page_lock); } /** * alloc_ei_netdev - alloc_etherdev counterpart for 8390 * @size: extra bytes to allocate * * Allocate 8390-specific net_device. */ static struct net_device *____alloc_ei_netdev(int size) { return alloc_netdev(sizeof(struct ei_device) + size, "eth%d", ethdev_setup); } /* This page of functions should be 8390 generic */ /* Follow National Semi's recommendations for initializing the "NIC". */ /** * NS8390_init - initialize 8390 hardware * @dev: network device to initialize * @startp: boolean. non-zero value to initiate chip processing * * Must be called with lock held. */ static void __NS8390_init(struct net_device *dev, int startp) { unsigned long e8390_base = dev->base_addr; struct ei_device *ei_local = netdev_priv(dev); int i; int endcfg = ei_local->word16 ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0)) : 0x48; if (sizeof(struct e8390_pkt_hdr) != 4) panic("8390.c: header struct mispacked\n"); /* Follow National Semi's recommendations for initing the DP83902. */ ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */ ei_outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */ /* Clear the remote byte count registers. */ ei_outb_p(0x00, e8390_base + EN0_RCNTLO); ei_outb_p(0x00, e8390_base + EN0_RCNTHI); /* Set to monitor and loopback mode -- this is vital!. */ ei_outb_p(E8390_RXOFF, e8390_base + EN0_RXCR); /* 0x20 */ ei_outb_p(E8390_TXOFF, e8390_base + EN0_TXCR); /* 0x02 */ /* Set the transmit page and receive ring. 
*/ ei_outb_p(ei_local->tx_start_page, e8390_base + EN0_TPSR); ei_local->tx1 = ei_local->tx2 = 0; ei_outb_p(ei_local->rx_start_page, e8390_base + EN0_STARTPG); ei_outb_p(ei_local->stop_page-1, e8390_base + EN0_BOUNDARY); /* 3c503 says 0x3f,NS0x26*/ ei_local->current_page = ei_local->rx_start_page; /* assert boundary+1 */ ei_outb_p(ei_local->stop_page, e8390_base + EN0_STOPPG); /* Clear the pending interrupts and mask. */ ei_outb_p(0xFF, e8390_base + EN0_ISR); ei_outb_p(0x00, e8390_base + EN0_IMR); /* Copy the station address into the DS8390 registers. */ ei_outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP, e8390_base+E8390_CMD); /* 0x61 */ for (i = 0; i < 6; i++) { ei_outb_p(dev->dev_addr[i], e8390_base + EN1_PHYS_SHIFT(i)); if (ei_debug > 1 && ei_inb_p(e8390_base + EN1_PHYS_SHIFT(i)) != dev->dev_addr[i]) netdev_err(dev, "Hw. address read/write mismap %d\n", i); } ei_outb_p(ei_local->rx_start_page, e8390_base + EN1_CURPAG); ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); ei_local->tx1 = ei_local->tx2 = 0; ei_local->txing = 0; if (startp) { ei_outb_p(0xff, e8390_base + EN0_ISR); ei_outb_p(ENISR_ALL, e8390_base + EN0_IMR); ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD); ei_outb_p(E8390_TXCONFIG, e8390_base + EN0_TXCR); /* xmit on. */ /* 3c503 TechMan says rxconfig only after the NIC is started. */ ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); /* rx on, */ do_set_multicast_list(dev); /* (re)load the mcast table */ } } /* Trigger a transmit start, assuming the length is valid. Always called with the page lock held */ static void NS8390_trigger_send(struct net_device *dev, unsigned int length, int start_page) { unsigned long e8390_base = dev->base_addr; struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev); ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD); if (ei_inb_p(e8390_base + E8390_CMD) & E8390_TRANS) { netdev_warn(dev, "trigger_send() called with the transmitter busy\n"); return; } ei_outb_p(length & 0xff, e8390_base + EN0_TCNTLO); ei_outb_p(length >> 8, e8390_base + EN0_TCNTHI); ei_outb_p(start_page, e8390_base + EN0_TPSR); ei_outb_p(E8390_NODMA+E8390_TRANS+E8390_START, e8390_base+E8390_CMD); }
gpl-2.0
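A minimal userspace sketch of the multicast hashing used by make_mc_bits() in the lib8390 content above: the driver takes the Ethernet CRC of each multicast MAC address and uses its six most significant bits to select one bit of the 64-bit EN1_MULT filter. This is only an illustration; the CRC value below is made up, whereas the driver obtains it from ether_crc(ETH_ALEN, addr).

#include <stdint.h>
#include <stdio.h>

static void set_mc_bit(uint8_t filter[8], uint32_t crc)
{
	unsigned int byte = crc >> 29;		/* top 3 bits: byte index 0..7 */
	unsigned int bit  = (crc >> 26) & 7;	/* next 3 bits: bit index 0..7 */

	/* same operation as bits[crc>>29] |= (1<<((crc>>26)&7)) in the driver */
	filter[byte] |= 1u << bit;
}

int main(void)
{
	uint8_t filter[8] = { 0 };
	uint32_t example_crc = 0xd3a9c4b1;	/* hypothetical ether_crc() result */

	set_mc_bit(filter, example_crc);

	for (int i = 0; i < 8; i++)
		printf("mcfilter[%d] = 0x%02x\n", i, filter[i]);
	return 0;
}

Bits 31..29 of the CRC pick the filter byte and bits 28..26 pick the bit inside it, which is also why the accept-all state programmed for promiscuous/allmulti mode is simply every filter byte set to 0xFF.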
skullface1/android_kernel_samsung_i9105
drivers/video/backlight/smart_dimming_ld9042.c
39
17998
/* linux/drivers/video/samsung/smartdimming.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com * Samsung Smart Dimming for OCTA * * Minwoo Kim, <minwoo7945.kim@samsung.com> * */ #include "smart_dimming_ld9042.h" #include "ld9042_volt_tbl.h" /*#define MTP_REVERSE */ #define V255_MTP_OFFSET 4 #define V255_GAMMA_OFFSET 5 #define VALUE_DIM_1000 1000 const u8 v1_offset_table[17] = { 75, 69, 63, 57, 51, 46, 41, 36, 31, 27, 23, 19, 15, 12, 9, 6, 3, }; const u8 v19_offset_table[23] = { 101, 94, 87, 80, 74, 68, 62, 56, 51, 46, 41, 36, 32, 28, 24, 20, 17, 14, 11, 8, 6, 4, 2, }; const u8 range_table_count[IV_TABLE_MAX] = { 1, 18, 24, 44, 84, 84, 1 }; const u32 table_radio[IV_TABLE_MAX] = { 0, 404, 303, 745, 390, 390, 0 }; const u32 dv_value[IV_MAX] = { 0, 19, 43, 87, 171, 255 }; const char color_name[3] = {'R', 'G', 'B'}; const u8 *offset_table[IV_TABLE_MAX] = { NULL, v1_offset_table, v19_offset_table, NULL, NULL, NULL, NULL }; const unsigned char gamma_300cd_a2[] = { 0x0C, 0xA9, 0xAF, 0xA9, 0xBC, 0x00, 0xAA, 0x0C, 0xAB, 0xAE, 0xA6, 0xBB, 0x00, 0xC8, 0x0C, 0xB5, 0xB1, 0xA7, 0xBC, 0x00, 0xCC, }; const unsigned char *gamma_300cd_list[GAMMA_300CD_MAX] = { gamma_300cd_a2, }; const unsigned char gamma_id_list[GAMMA_300CD_MAX] = { 0xa2 }; u32 calc_v1_volt(s16 gamma, int rgb_index, u32 adjust_volt[CI_MAX][AD_IVMAX]) { u32 ret = 0; ret = volt_table_v1[gamma] >> 10; return ret; } u32 calc_v19_volt(s16 gamma, int rgb_index, u32 adjust_volt[CI_MAX][AD_IVMAX]) { /* for CV : 65, DV :320 */ int ret = 0; u32 v1, v43; u32 ratio = 0; v1 = adjust_volt[rgb_index][AD_IV1]; v43 = adjust_volt[rgb_index][AD_IV43]; ratio = volt_table_cv_65_dv_320[gamma]; ret = (v1 << 10) - ((v1-v43)*ratio); ret = ret >> 10; return ret; } u32 calc_v43_volt(s16 gamma, int rgb_index, u32 adjust_volt[CI_MAX][AD_IVMAX]) { /* for CV : 65, DV :320 */ int ret = 0; u32 v1, v87; u32 ratio = 0; v1 = adjust_volt[rgb_index][AD_IV1]; v87 = adjust_volt[rgb_index][AD_IV87]; ratio = volt_table_cv_65_dv_320[gamma]; ret = (v1 << 10) - ((v1-v87)*ratio); ret = ret >> 10; return ret; } u32 calc_v87_volt(s16 gamma, int rgb_index, u32 adjust_volt[CI_MAX][AD_IVMAX]) { /* for CV : 65, DV :320 */ int ret = 0; u32 v1, v171; u32 ratio = 0; v1 = adjust_volt[rgb_index][AD_IV1]; v171 = adjust_volt[rgb_index][AD_IV171]; ratio = volt_table_cv_65_dv_320[gamma]; ret = (v1 << 10) - ((v1-v171)*ratio); ret = ret >> 10; return ret; } u32 calc_v171_volt(s16 gamma, int rgb_index, u32 adjust_volt[CI_MAX][AD_IVMAX]) { /* for CV : 65, DV :320 */ int ret = 0; u32 v1, v255; u32 ratio = 0; v1 = adjust_volt[rgb_index][AD_IV1]; v255 = adjust_volt[rgb_index][AD_IV255]; ratio = volt_table_cv_65_dv_320[gamma]; ret = (v1 << 10) - ((v1-v255)*ratio); ret = ret >> 10; return ret; } u32 calc_v255_volt(s16 gamma, int rgb_index, u32 adjust_volt[CI_MAX][AD_IVMAX]) { u32 ret = 0; ret = volt_table_v255[gamma] >> 10; return ret; } u8 calc_voltage_table(struct str_smart_dim *smart, const u8 *mtp) { int c, i, j; #if defined(MTP_REVERSE) int offset1 = 0; #endif int offset = 0; s16 t1, t2; s16 adjust_mtp[CI_MAX][IV_MAX]; /* u32 adjust_volt[CI_MAX][AD_IVMAX] = {0, }; */ u8 range_index; u8 table_index = 0; u32 v1, v2; u32 ratio; u32(*calc_volt[IV_MAX])(s16 gamma, int rgb_index, u32 adjust_volt[CI_MAX][AD_IVMAX]) = { calc_v1_volt, calc_v19_volt, calc_v43_volt, calc_v87_volt, calc_v171_volt, calc_v255_volt, }; u8 calc_seq[6] = {IV_1, IV_171, IV_87, IV_43, IV_19}; u8 ad_seq[6] = {AD_IV1, AD_IV171, AD_IV87, AD_IV43, AD_IV19}; memset(adjust_mtp, 0, sizeof(adjust_mtp)); for (c = CI_RED; 
c < CI_MAX; c++) { offset = ((c + 1) * V255_MTP_OFFSET) + (c * 2); /* printk("1 offset : %d\n", offset); */ if (mtp[offset] & 0x01) t1 = mtp[offset + 1] * -1; else t1 = mtp[offset + 1]; /* printk("2 t1 : %d\n", t1); */ offset = ((c+1) * V255_GAMMA_OFFSET) + (c * 2); t2 = smart->default_gamma[offset] << 8 | smart->default_gamma[offset+1]; /* printk("3 t2 : %d\n", t2); */ t2 += t1; /* printk("4 t2+t1 : %d\n", t2); */ smart->mtp[c][IV_255] = t1; adjust_mtp[c][IV_255] = t2; smart->adjust_volt[c][AD_IV255] = calc_volt[IV_255](t2, c, smart->adjust_volt); /* for V0 All RGB Voltage Value is Reference Voltage */ smart->adjust_volt[c][AD_IV0] = 4320; } for (c = CI_RED; c < CI_MAX; c++) { for (i = IV_1; i < IV_255; i++) { if (calc_seq[i] == IV_1) t1 = 0; else { offset = (c * 6) + (calc_seq[i]-1); /* printk("21 offset : %d\n", offset); */ if (mtp[offset] & 0x80) t1 = (mtp[offset] & 0x7f) * (-1); else t1 = (mtp[offset] & 0x7f); } /* printk("22 t1 : %d\n", t1); */ offset = (c * 7) + (calc_seq[i]); /* printk("23 offset : %d\n", offset); */ t2 = smart->default_gamma[offset]; /* printk("23 t2 : %d\n", t2); */ t2 += t1; /* printk("24 t2+t1 : %d\n", t2); */ smart->mtp[c][calc_seq[i]] = t1; adjust_mtp[c][calc_seq[i]] = t2; smart->adjust_volt[c][ad_seq[i]] = calc_volt[calc_seq[i]](t2, c, smart->adjust_volt); } } for (i = 0; i < AD_IVMAX; i++) { for (c = CI_RED; c < CI_MAX; c++) smart->ve[table_index].v[c] = smart->adjust_volt[c][i]; range_index = 0; for (j = table_index + 1; j < table_index + range_table_count[i]; j++) { for (c = CI_RED; c < CI_MAX; c++) { if (smart->t_info[i].offset_table != NULL) ratio = smart->t_info[i].offset_table[range_index] * smart->t_info[i].rv; else ratio = (range_table_count[i]-(range_index+1)) * smart->t_info[i].rv; v1 = smart->adjust_volt[c][i+1] << 15; v2 = (smart->adjust_volt[c][i] - smart->adjust_volt[c][i+1])*ratio; smart->ve[j].v[c] = ((v1+v2) >> 15); } range_index++; } table_index = j; } #if 0 printk(KERN_INFO "++++++++++++++++++++++++++++++ MTP VALUE ++++++++++++++++++++++++++++++\n"); for (i = IV_1; i < IV_MAX; i++) { printk("V Level : %d - ", i); for (c = CI_RED; c < CI_MAX; c++) printk(" %c : 0x%08x(%04d)", color_name[c], smart->mtp[c][i], smart->mtp[c][i]); printk("\n"); } printk(KERN_INFO "\n\n++++++++++++++++++++++++++++++ ADJUST VALUE ++++++++++++++++++++++++++++++\n"); for (i = IV_1; i < IV_MAX; i++) { printk("V Level : %d - ", i); for (c = CI_RED; c < CI_MAX; c++) printk(" %c : 0x%08x(%04d)", color_name[c], adjust_mtp[c][i], adjust_mtp[c][i]); printk("\n"); } printk(KERN_INFO "\n\n++++++++++++++++++++++++++++++ ADJUST VOLTAGE ++++++++++++++++++++++++++++++\n"); for (i = AD_IV0; i < AD_IVMAX; i++) { printk("V Level : %d - ", i); for (c = CI_RED; c < CI_MAX; c++) printk(" %c : %04dV", color_name[c], smart->adjust_volt[c][i]); printk("\n"); } printk(KERN_INFO "\n\n++++++++++++++++++++++++++++++++++++++ VOLTAGE TABLE ++++++++++++++++++++++++++++++++++++++\n"); for (i = 0; i < 256; i++) { printk("Gray Level : %03d - ", i); for (c = CI_RED; c < CI_MAX; c++) printk(" %c : %04dV", color_name[c], smart->ve[i].v[c]); printk("\n"); } #endif return 0; } int init_table_info_22(struct str_smart_dim *smart) { int i; int offset = 0; for (i = 0; i < IV_TABLE_MAX; i++) { smart->t_info[i].count = (u8)range_table_count[i]; smart->t_info[i].offset_table = offset_table[i]; smart->t_info[i].rv = table_radio[i]; offset += range_table_count[i]; } smart->flooktbl = flookup_table; smart->g300_gra_tbl = gamma_300_gra_table; smart->g22_tbl = gamma_22_table; #if 0 // jehyun temp for (i = 0; i < 
GAMMA_300CD_MAX; i++) { if (smart->panelid[0] == gamma_id_list[i]) break; } #else smart->panelid[0] = gamma_id_list[0]; #endif #if 0 // jehyun temp if (i >= GAMMA_300CD_MAX) { printk(KERN_ERR "[SMART DIMMING-WARNING] %s Can't found default gamma table\n", __func__); smart->default_gamma = gamma_300cd_list[GAMMA_300CD_MAX-1]; } else smart->default_gamma = gamma_300cd_list[i]; #else smart->default_gamma = gamma_300cd_list[0]; #endif #if 0 for (i = 0; i < 24; i++) printk(KERN_INFO "Index : %d : %x\n", i, smart->default_gamma[i]); #endif return 0; } int init_table_info_19(struct str_smart_dim *smart) { int i; int offset = 0; for (i = 0; i < IV_TABLE_MAX; i++) { smart->t_info[i].count = (u8)range_table_count[i]; smart->t_info[i].offset_table = offset_table[i]; smart->t_info[i].rv = table_radio[i]; offset += range_table_count[i]; } smart->flooktbl = flookup_table; smart->g300_gra_tbl = gamma_300_gra_table; smart->g19_tbl = gamma_19_table; #if 0 // jehyun temp for (i = 0; i < GAMMA_300CD_MAX; i++) { if (smart->panelid[0] == gamma_id_list[i]) break; } #else smart->panelid[0] = gamma_id_list[0]; #endif #if 0 // jehyun temp if (i >= GAMMA_300CD_MAX) { printk(KERN_ERR "[SMART DIMMING-WARNING] %s Can't found default gamma table\n", __func__); smart->default_gamma = gamma_300cd_list[GAMMA_300CD_MAX-1]; } else smart->default_gamma = gamma_300cd_list[i]; #else smart->default_gamma = gamma_300cd_list[0]; #endif #if 0 for (i = 0; i < 24; i++) printk(KERN_INFO "Index : %d : %x\n", i, smart->default_gamma[i]); #endif return 0; } u32 lookup_vtbl_idx(struct str_smart_dim *smart, u32 gamma) { u32 lookup_index; u16 table_count, table_index; u32 gap, i; u32 minimum = smart->g300_gra_tbl[255]; u32 candidate = 0; u32 offset = 0; /* printk("Input Gamma Value : %d\n", gamma); */ lookup_index = (gamma/1000) + 1; if (lookup_index > MAX_GRADATION) { printk(KERN_ERR "ERROR Wrong input value LOOKUP INDEX : %d\n", lookup_index); lookup_index = MAX_GRADATION - 1; } /* printk("lookup index : %d\n",lookup_index); */ if (smart->flooktbl[lookup_index].count) { if (smart->flooktbl[lookup_index-1].count) { table_index = smart->flooktbl[lookup_index-1].entry; table_count = smart->flooktbl[lookup_index].count + smart->flooktbl[lookup_index-1].count; } else { table_index = smart->flooktbl[lookup_index].entry; table_count = smart->flooktbl[lookup_index].count; } } else { offset += 1; while (!(smart->flooktbl[lookup_index+offset].count || smart->flooktbl[lookup_index-offset].count)) offset++; if (smart->flooktbl[lookup_index-offset].count) table_index = smart->flooktbl[lookup_index-offset].entry; else table_index = smart->flooktbl[lookup_index+offset].entry; table_count = smart->flooktbl[lookup_index+offset].count + smart->flooktbl[lookup_index-offset].count; } for (i = 0; i < table_count; i++) { if (gamma > smart->g300_gra_tbl[table_index]) gap = gamma - smart->g300_gra_tbl[table_index]; else gap = smart->g300_gra_tbl[table_index] - gamma; /* printk("gap : %d\n", gap); */ if (gap == 0) { candidate = table_index; break; } if (gap < minimum) { minimum = gap; candidate = table_index; } table_index++; } return candidate; } u32 calc_v1_reg(int ci, u32 dv[CI_MAX][IV_MAX]) { u32 ret; u32 v1; v1 = dv[ci][IV_1]; ret = (595 * 1000) - (139 * v1); ret = ret/1000; /* printk("%s v1 value : %d, ret : %d\n", __func__, v1, ret); */ return ret; } u32 calc_v19_reg(int ci, u32 dv[CI_MAX][IV_MAX]) { u32 t1, t2; u32 v1, v19, v43; u32 ret; v1 = dv[ci][IV_1]; v19 = dv[ci][IV_19]; v43 = dv[ci][IV_43]; t1 = (v1 - v19) << 10; t2 = (v1 - v43) ? 
(v1 - v43) : (v1) ? v1 : 1; ret = (320 * (t1/t2)) - (65 << 10); ret >>= 10; /* printk("%s v1 : %d,v19 : %d, v43 : %d ret : %d\n", __func__, v1, v19, v43, ret); */ return ret; } u32 calc_v43_reg(int ci, u32 dv[CI_MAX][IV_MAX]) { u32 t1, t2; u32 v1, v43, v87; u32 ret; v1 = dv[ci][IV_1]; v43 = dv[ci][IV_43]; v87 = dv[ci][IV_87]; t1 = (v1 - v43) << 10; t2 = (v1 - v87) ? (v1 - v87) : (v1) ? v1 : 1; ret = (320 * (t1/t2)) - (65 << 10); ret >>= 10; /* printk("%s v43 : %d, ret : %d\n", __func__, v43, ret); */ return ret; } u32 calc_v87_reg(int ci, u32 dv[CI_MAX][IV_MAX]) { u32 t1, t2; u32 v1, v87, v171; u32 ret; v1 = dv[ci][IV_1]; v87 = dv[ci][IV_87]; v171 = dv[ci][IV_171]; t1 = (v1 - v87) << 10; t2 = (v1 - v171) ? (v1 - v171) : (v1) ? v1 : 1; ret = (320 * (t1/t2)) - (65 << 10); ret >>= 10; /* printk("%s v87 : %d, ret : %d\n", __func__, v87, ret); */ return ret; } u32 calc_v171_reg(int ci, u32 dv[CI_MAX][IV_MAX]) { u32 t1, t2; u32 v1, v171, v255; u32 ret; v1 = dv[ci][IV_1]; v171 = dv[ci][IV_171]; v255 = dv[ci][IV_255]; t1 = (v1 - v171) << 10; t2 = (v1 - v255) ? (v1 - v255) : (v1) ? v1 : 1; ret = (320 * (t1/t2)) - (65 << 10); ret >>= 10; /* printk("%s v171 : %d, ret : %d\n", __func__, v171, ret); */ return ret; } u32 calc_v255_reg(int ci, u32 dv[CI_MAX][IV_MAX]) { u32 ret; u32 v255; v255 = dv[ci][IV_255]; ret = (480 * 1000) - (139 * v255); ret = ret / 1000; /* printk("%s v255 : %d, ret : %d\n",__func__, v255, ret); */ return ret; } u32 calc_gamma_table_22(struct str_smart_dim *smart, u32 gv, u8 result[]) { u32 i, c; u32 temp; u32 lidx_22; u32 dv[CI_MAX][IV_MAX]; s16 gamma_22[CI_MAX][IV_MAX]; u16 offset; u32(*calc_reg[IV_MAX])(int ci, u32 dv[CI_MAX][IV_MAX]) = { calc_v1_reg, calc_v19_reg, calc_v43_reg, calc_v87_reg, calc_v171_reg, calc_v255_reg, }; /* printk("%s was call gv : %d\n", __func__, gv);*/ memset(gamma_22, 0, sizeof(gamma_22)); #if 0 for (c = CI_RED; c < CI_MAX; c++) dv[c][0] = smart->adjust_volt[c][AD_IV1]; #endif for (c = CI_RED; c < CI_MAX; c++) dv[c][IV_1] = smart->ve[AD_IV1].v[c]; for (i = IV_19; i < IV_MAX; i++) { temp = (smart->g22_tbl[dv_value[i]] * gv)/1000; /* printk("temp : %d, g22 val : %d, gv : %d\n",temp,smart->g22_tbl[dv_value[i]], gv); */ lidx_22 = lookup_vtbl_idx(smart, temp); /* printk("look index : %d\n", lidx_22); */ for (c = CI_RED; c < CI_MAX; c++) dv[c][i] = smart->ve[lidx_22].v[c]; } for (i = IV_1; i < IV_MAX; i++) { for (c = CI_RED; c < CI_MAX; c++) gamma_22[c][i] = (u16)calc_reg[i](c, dv) - smart->mtp[c][i]; } for (c = CI_RED; c < CI_MAX; c++) { offset = ((c+1) * V255_GAMMA_OFFSET) + (c * 2); result[offset+1] = gamma_22[c][IV_255] & 0xff; result[offset] = (u8)((gamma_22[c][IV_255] >> 8) & 0xff); /* printk("%s array index is result[%d+1] V255 print gamma : %d\n", __func__, offset, result[offset+1]); */ } for (c = CI_RED; c < CI_MAX; c++) { for (i = IV_1; i < IV_255; i++) { if (i < IV_255) { offset = (c*7)+i; result[offset] = gamma_22[c][i]; /* printk("%s array index is result[%d] V1~v171 print gamma : %d\n", __func__, offset, result[offset]); */ } /* else if (i == IV_255) { offset = ((c + 1) * V255_GAMMA_OFFSET) + (c * 2); result[offset]=0; printk("%s array index is result[%d] V255 print gamma : %d\n", __func__, offset, result[offset]); } */ } } #if 0 printk(KERN_INFO "\n\n++++++++++++++++++++++++++++++ FOUND VOLTAGE ++++++++++++++++++++++++++++++\n"); for (i = IV_1; i < IV_MAX; i++) { printk("V Level : %d - ", i); for (c = CI_RED; c < CI_MAX; c++) printk("%c : %04dV", color_name[c], dv[c][i]); printk("\n"); } printk(KERN_INFO "\n\n++++++++++++++++++++++++++++++ 
FOUND REG ++++++++++++++++++++++++++++++\n"); for (i = IV_1; i < IV_MAX; i++) { printk("V Level : %d - ", i); for (c = CI_RED; c < CI_MAX; c++) printk("2.2Gamma %c : %3d, 0x%2x", color_name[c], gamma_22[c][i], gamma_22[c][i]); printk("\n"); } #endif return 0; } u32 calc_gamma_table_19(struct str_smart_dim *smart, u32 gv, u8 result[]) { u32 i, c; u32 temp; u32 lidx_19; u32 dv[CI_MAX][IV_MAX]; s16 gamma_19[CI_MAX][IV_MAX]; u16 offset; u32(*calc_reg[IV_MAX])(int ci, u32 dv[CI_MAX][IV_MAX]) = { calc_v1_reg, calc_v19_reg, calc_v43_reg, calc_v87_reg, calc_v171_reg, calc_v255_reg, }; /* printk("%s was call gv : %d\n", __func__, gv);*/ memset(gamma_19, 0, sizeof(gamma_19)); #if 0 for (c = CI_RED; c < CI_MAX; c++) dv[c][0] = smart->adjust_volt[c][AD_IV1]; #endif for (c = CI_RED; c < CI_MAX; c++) dv[c][IV_1] = smart->ve[AD_IV1].v[c]; for (i = IV_19; i < IV_MAX; i++) { temp = (smart->g19_tbl[dv_value[i]] * gv)/1000; /* printk("temp : %d, g19 val : %d, gv : %d\n",temp,smart->g19_tbl[dv_value[i]], gv); */ lidx_19 = lookup_vtbl_idx(smart, temp); /* printk("look index : %d\n",lidx_19); */ for (c = CI_RED; c < CI_MAX; c++) dv[c][i] = smart->ve[lidx_19].v[c]; } /* for IV1 does not calculate value */ /* just use default gamma value (IV1) */ #if 0 for (c = CI_RED; c < CI_MAX; c++) gamma[c][IV_1] = smart->default_gamma[c]; #endif for (i = IV_1; i < IV_MAX; i++) { for (c = CI_RED; c < CI_MAX; c++) gamma_19[c][i] = (u16)calc_reg[i](c, dv) - smart->mtp[c][i]; } for (c = CI_RED; c < CI_MAX; c++) { offset = ((c+1) * V255_GAMMA_OFFSET) + (c * 2); result[offset+1] = gamma_19[c][IV_255] & 0xff; result[offset] = (u8)((gamma_19[c][IV_255] >> 8) & 0xff); /* printk("%s array index is result[%d+1] V255 print gamma : %d\n", __func__, offset, result[offset+1]); */ } for (c = CI_RED; c < CI_MAX; c++) { for (i = IV_1; i < IV_255; i++) { if (i < IV_255) { offset = (c*7)+i; result[offset] = gamma_19[c][i]; /* printk("%s array index is result[%d] V1~v171 print gamma : %d\n", __func__, offset, result[offset]); */ } /* else if (i == IV_255) { offset = ((c + 1) * V255_GAMMA_OFFSET) + (c * 2); result[offset]=0; printk("%s array index is result[%d] V255 print gamma : %d\n", __func__, offset, result[offset]); } */ } } #if 0 printk(KERN_INFO "\n\n++++++++++++++++++++++++++++++ FOUND VOLTAGE ++++++++++++++++++++++++++++++\n"); for (i = IV_1; i < IV_MAX; i++) { printk("V Level : %d - ", i); for (c = CI_RED; c < CI_MAX; c++) printk("%c : %04dV", color_name[c], dv[c][i]); printk("\n"); } printk(KERN_INFO "\n\n++++++++++++++++++++++++++++++ FOUND REG ++++++++++++++++++++++++++++++\n"); for (i = IV_1; i < IV_MAX; i++) { printk("V Level : %d - ", i); for (c = CI_RED; c < CI_MAX; c++) printk("1.9Gamma %c : %3d, 0x%2x", color_name[c], gamma_19[c][i], gamma_19[c][i]); printk("\n"); } #endif return 0; }
gpl-2.0
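A small sketch, under the assumption that the calc_v19/v43/v87/v171_volt helpers in the smart-dimming content above all share one pattern: a Q10 fixed-point linear interpolation between two anchor voltages, with the ratio pre-scaled by 1024 and looked up in volt_table_cv_65_dv_320[]. The helper name and the sample numbers below are mine, not the driver's.

#include <stdint.h>
#include <stdio.h>

static uint32_t interp_q10(uint32_t v_high, uint32_t v_low, uint32_t ratio_q10)
{
	/* result = v_high - (v_high - v_low) * ratio, carried in Q10 fixed point */
	uint32_t acc = (v_high << 10) - (v_high - v_low) * ratio_q10;

	return acc >> 10;
}

int main(void)
{
	/* hypothetical anchor voltages (same units as adjust_volt[]) and ratio */
	uint32_t v1 = 4000, v43 = 3200, ratio = 512;	/* 512/1024 = 0.5 */

	printf("interpolated = %u\n", interp_q10(v1, v43, ratio));	/* prints 3600 */
	return 0;
}

Keeping the intermediate sum in Q10 avoids a division per anchor point while preserving fractional resolution until the final right shift.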
gazoo74/linux
drivers/soundwire/bus_type.c
39
4373
// SPDX-License-Identifier: GPL-2.0 // Copyright(c) 2015-17 Intel Corporation. #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/pm_domain.h> #include <linux/soundwire/sdw.h> #include <linux/soundwire/sdw_type.h> /** * sdw_get_device_id - find the matching SoundWire device id * @slave: SoundWire Slave Device * @drv: SoundWire Slave Driver * * The match is done by comparing the mfg_id and part_id from the * struct sdw_device_id. */ static const struct sdw_device_id * sdw_get_device_id(struct sdw_slave *slave, struct sdw_driver *drv) { const struct sdw_device_id *id = drv->id_table; while (id && id->mfg_id) { if (slave->id.mfg_id == id->mfg_id && slave->id.part_id == id->part_id) return id; id++; } return NULL; } static int sdw_bus_match(struct device *dev, struct device_driver *ddrv) { struct sdw_slave *slave = dev_to_sdw_dev(dev); struct sdw_driver *drv = drv_to_sdw_driver(ddrv); return !!sdw_get_device_id(slave, drv); } int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size) { /* modalias is sdw:m<mfg_id>p<part_id> */ return snprintf(buf, size, "sdw:m%04Xp%04X\n", slave->id.mfg_id, slave->id.part_id); } static int sdw_uevent(struct device *dev, struct kobj_uevent_env *env) { struct sdw_slave *slave = dev_to_sdw_dev(dev); char modalias[32]; sdw_slave_modalias(slave, modalias, sizeof(modalias)); if (add_uevent_var(env, "MODALIAS=%s", modalias)) return -ENOMEM; return 0; } struct bus_type sdw_bus_type = { .name = "soundwire", .match = sdw_bus_match, .uevent = sdw_uevent, }; EXPORT_SYMBOL_GPL(sdw_bus_type); static int sdw_drv_probe(struct device *dev) { struct sdw_slave *slave = dev_to_sdw_dev(dev); struct sdw_driver *drv = drv_to_sdw_driver(dev->driver); const struct sdw_device_id *id; int ret; id = sdw_get_device_id(slave, drv); if (!id) return -ENODEV; slave->ops = drv->ops; /* * attach to power domain but don't turn on (last arg) */ ret = dev_pm_domain_attach(dev, false); if (ret) return ret; ret = drv->probe(slave, id); if (ret) { dev_err(dev, "Probe of %s failed: %d\n", drv->name, ret); dev_pm_domain_detach(dev, false); return ret; } /* device is probed so let's read the properties now */ if (slave->ops && slave->ops->read_prop) slave->ops->read_prop(slave); /* * Check for valid clk_stop_timeout, use DisCo worst case value of * 300ms * * TODO: check the timeouts and driver removal case */ if (slave->prop.clk_stop_timeout == 0) slave->prop.clk_stop_timeout = 300; slave->bus->clk_stop_timeout = max_t(u32, slave->bus->clk_stop_timeout, slave->prop.clk_stop_timeout); return 0; } static int sdw_drv_remove(struct device *dev) { struct sdw_slave *slave = dev_to_sdw_dev(dev); struct sdw_driver *drv = drv_to_sdw_driver(dev->driver); int ret = 0; if (drv->remove) ret = drv->remove(slave); dev_pm_domain_detach(dev, false); return ret; } static void sdw_drv_shutdown(struct device *dev) { struct sdw_slave *slave = dev_to_sdw_dev(dev); struct sdw_driver *drv = drv_to_sdw_driver(dev->driver); if (drv->shutdown) drv->shutdown(slave); } /** * __sdw_register_driver() - register a SoundWire Slave driver * @drv: driver to register * @owner: owning module/driver * * Return: zero on success, else a negative error code. 
*/ int __sdw_register_driver(struct sdw_driver *drv, struct module *owner) { drv->driver.bus = &sdw_bus_type; if (!drv->probe) { pr_err("driver %s didn't provide SDW probe routine\n", drv->name); return -EINVAL; } drv->driver.owner = owner; drv->driver.probe = sdw_drv_probe; if (drv->remove) drv->driver.remove = sdw_drv_remove; if (drv->shutdown) drv->driver.shutdown = sdw_drv_shutdown; return driver_register(&drv->driver); } EXPORT_SYMBOL_GPL(__sdw_register_driver); /** * sdw_unregister_driver() - unregisters the SoundWire Slave driver * @drv: driver to unregister */ void sdw_unregister_driver(struct sdw_driver *drv) { driver_unregister(&drv->driver); } EXPORT_SYMBOL_GPL(sdw_unregister_driver); static int __init sdw_bus_init(void) { return bus_register(&sdw_bus_type); } static void __exit sdw_bus_exit(void) { bus_unregister(&sdw_bus_type); } postcore_initcall(sdw_bus_init); module_exit(sdw_bus_exit); MODULE_DESCRIPTION("SoundWire bus"); MODULE_LICENSE("GPL v2");
gpl-2.0
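A hedged userspace illustration of the ID plumbing in the SoundWire bus content above: sdw_bus_match() succeeds when a driver's sdw_device_id table contains the slave's mfg_id/part_id pair, and sdw_uevent() advertises the same pair to userspace as sdw:m<mfg_id>p<part_id> so module autoloading can key off it. The ID values below are arbitrary examples, not real vendor assignments.

#include <stdint.h>
#include <stdio.h>

struct sdw_id { uint16_t mfg_id; uint16_t part_id; };

static int id_matches(const struct sdw_id *slave, const struct sdw_id *table, size_t n)
{
	/* mirrors the mfg_id/part_id comparison done by sdw_get_device_id() */
	for (size_t i = 0; i < n; i++)
		if (table[i].mfg_id == slave->mfg_id &&
		    table[i].part_id == slave->part_id)
			return 1;
	return 0;
}

int main(void)
{
	struct sdw_id slave = { .mfg_id = 0x025d, .part_id = 0x0700 };	/* example IDs */
	struct sdw_id table[] = { { 0x025d, 0x0700 }, { 0x025d, 0x0711 } };
	char modalias[32];

	/* same layout as sdw_slave_modalias(): sdw:m<mfg_id>p<part_id> */
	snprintf(modalias, sizeof(modalias), "sdw:m%04Xp%04X",
		 slave.mfg_id, slave.part_id);

	printf("MODALIAS=%s, driver match: %d\n", modalias,
	       id_matches(&slave, table, sizeof(table) / sizeof(table[0])));
	return 0;
}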
BPI-SINOVOIP/BPI-Mainline-kernel
linux-5.4/drivers/media/platform/rcar-vin/rcar-csi2.c
39
30403
// SPDX-License-Identifier: GPL-2.0 /* * Driver for Renesas R-Car MIPI CSI-2 Receiver * * Copyright (C) 2018 Renesas Electronics Corp. */ #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_graph.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #include <linux/sys_soc.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-fwnode.h> #include <media/v4l2-mc.h> #include <media/v4l2-subdev.h> struct rcar_csi2; /* Register offsets and bits */ /* Control Timing Select */ #define TREF_REG 0x00 #define TREF_TREF BIT(0) /* Software Reset */ #define SRST_REG 0x04 #define SRST_SRST BIT(0) /* PHY Operation Control */ #define PHYCNT_REG 0x08 #define PHYCNT_SHUTDOWNZ BIT(17) #define PHYCNT_RSTZ BIT(16) #define PHYCNT_ENABLECLK BIT(4) #define PHYCNT_ENABLE_3 BIT(3) #define PHYCNT_ENABLE_2 BIT(2) #define PHYCNT_ENABLE_1 BIT(1) #define PHYCNT_ENABLE_0 BIT(0) /* Checksum Control */ #define CHKSUM_REG 0x0c #define CHKSUM_ECC_EN BIT(1) #define CHKSUM_CRC_EN BIT(0) /* * Channel Data Type Select * VCDT[0-15]: Channel 1 VCDT[16-31]: Channel 2 * VCDT2[0-15]: Channel 3 VCDT2[16-31]: Channel 4 */ #define VCDT_REG 0x10 #define VCDT2_REG 0x14 #define VCDT_VCDTN_EN BIT(15) #define VCDT_SEL_VC(n) (((n) & 0x3) << 8) #define VCDT_SEL_DTN_ON BIT(6) #define VCDT_SEL_DT(n) (((n) & 0x3f) << 0) /* Frame Data Type Select */ #define FRDT_REG 0x18 /* Field Detection Control */ #define FLD_REG 0x1c #define FLD_FLD_NUM(n) (((n) & 0xff) << 16) #define FLD_DET_SEL(n) (((n) & 0x3) << 4) #define FLD_FLD_EN4 BIT(3) #define FLD_FLD_EN3 BIT(2) #define FLD_FLD_EN2 BIT(1) #define FLD_FLD_EN BIT(0) /* Automatic Standby Control */ #define ASTBY_REG 0x20 /* Long Data Type Setting 0 */ #define LNGDT0_REG 0x28 /* Long Data Type Setting 1 */ #define LNGDT1_REG 0x2c /* Interrupt Enable */ #define INTEN_REG 0x30 #define INTEN_INT_AFIFO_OF BIT(27) #define INTEN_INT_ERRSOTHS BIT(4) #define INTEN_INT_ERRSOTSYNCHS BIT(3) /* Interrupt Source Mask */ #define INTCLOSE_REG 0x34 /* Interrupt Status Monitor */ #define INTSTATE_REG 0x38 #define INTSTATE_INT_ULPS_START BIT(7) #define INTSTATE_INT_ULPS_END BIT(6) /* Interrupt Error Status Monitor */ #define INTERRSTATE_REG 0x3c /* Short Packet Data */ #define SHPDAT_REG 0x40 /* Short Packet Count */ #define SHPCNT_REG 0x44 /* LINK Operation Control */ #define LINKCNT_REG 0x48 #define LINKCNT_MONITOR_EN BIT(31) #define LINKCNT_REG_MONI_PACT_EN BIT(25) #define LINKCNT_ICLK_NONSTOP BIT(24) /* Lane Swap */ #define LSWAP_REG 0x4c #define LSWAP_L3SEL(n) (((n) & 0x3) << 6) #define LSWAP_L2SEL(n) (((n) & 0x3) << 4) #define LSWAP_L1SEL(n) (((n) & 0x3) << 2) #define LSWAP_L0SEL(n) (((n) & 0x3) << 0) /* PHY Test Interface Write Register */ #define PHTW_REG 0x50 #define PHTW_DWEN BIT(24) #define PHTW_TESTDIN_DATA(n) (((n & 0xff)) << 16) #define PHTW_CWEN BIT(8) #define PHTW_TESTDIN_CODE(n) ((n & 0xff)) struct phtw_value { u16 data; u16 code; }; struct rcsi2_mbps_reg { u16 mbps; u16 reg; }; static const struct rcsi2_mbps_reg phtw_mbps_h3_v3h_m3n[] = { { .mbps = 80, .reg = 0x86 }, { .mbps = 90, .reg = 0x86 }, { .mbps = 100, .reg = 0x87 }, { .mbps = 110, .reg = 0x87 }, { .mbps = 120, .reg = 0x88 }, { .mbps = 130, .reg = 0x88 }, { .mbps = 140, .reg = 0x89 }, { .mbps = 150, .reg = 0x89 }, { .mbps = 160, .reg = 0x8a }, { .mbps = 170, .reg = 0x8a }, { .mbps = 180, .reg = 0x8b }, { .mbps = 190, .reg = 0x8b }, { .mbps = 205, .reg = 0x8c }, 
{ .mbps = 220, .reg = 0x8d }, { .mbps = 235, .reg = 0x8e }, { .mbps = 250, .reg = 0x8e }, { /* sentinel */ }, }; static const struct rcsi2_mbps_reg phtw_mbps_v3m_e3[] = { { .mbps = 80, .reg = 0x00 }, { .mbps = 90, .reg = 0x20 }, { .mbps = 100, .reg = 0x40 }, { .mbps = 110, .reg = 0x02 }, { .mbps = 130, .reg = 0x22 }, { .mbps = 140, .reg = 0x42 }, { .mbps = 150, .reg = 0x04 }, { .mbps = 170, .reg = 0x24 }, { .mbps = 180, .reg = 0x44 }, { .mbps = 200, .reg = 0x06 }, { .mbps = 220, .reg = 0x26 }, { .mbps = 240, .reg = 0x46 }, { .mbps = 250, .reg = 0x08 }, { .mbps = 270, .reg = 0x28 }, { .mbps = 300, .reg = 0x0a }, { .mbps = 330, .reg = 0x2a }, { .mbps = 360, .reg = 0x4a }, { .mbps = 400, .reg = 0x0c }, { .mbps = 450, .reg = 0x2c }, { .mbps = 500, .reg = 0x0e }, { .mbps = 550, .reg = 0x2e }, { .mbps = 600, .reg = 0x10 }, { .mbps = 650, .reg = 0x30 }, { .mbps = 700, .reg = 0x12 }, { .mbps = 750, .reg = 0x32 }, { .mbps = 800, .reg = 0x52 }, { .mbps = 850, .reg = 0x72 }, { .mbps = 900, .reg = 0x14 }, { .mbps = 950, .reg = 0x34 }, { .mbps = 1000, .reg = 0x54 }, { .mbps = 1050, .reg = 0x74 }, { .mbps = 1125, .reg = 0x16 }, { /* sentinel */ }, }; /* PHY Test Interface Clear */ #define PHTC_REG 0x58 #define PHTC_TESTCLR BIT(0) /* PHY Frequency Control */ #define PHYPLL_REG 0x68 #define PHYPLL_HSFREQRANGE(n) ((n) << 16) static const struct rcsi2_mbps_reg hsfreqrange_h3_v3h_m3n[] = { { .mbps = 80, .reg = 0x00 }, { .mbps = 90, .reg = 0x10 }, { .mbps = 100, .reg = 0x20 }, { .mbps = 110, .reg = 0x30 }, { .mbps = 120, .reg = 0x01 }, { .mbps = 130, .reg = 0x11 }, { .mbps = 140, .reg = 0x21 }, { .mbps = 150, .reg = 0x31 }, { .mbps = 160, .reg = 0x02 }, { .mbps = 170, .reg = 0x12 }, { .mbps = 180, .reg = 0x22 }, { .mbps = 190, .reg = 0x32 }, { .mbps = 205, .reg = 0x03 }, { .mbps = 220, .reg = 0x13 }, { .mbps = 235, .reg = 0x23 }, { .mbps = 250, .reg = 0x33 }, { .mbps = 275, .reg = 0x04 }, { .mbps = 300, .reg = 0x14 }, { .mbps = 325, .reg = 0x25 }, { .mbps = 350, .reg = 0x35 }, { .mbps = 400, .reg = 0x05 }, { .mbps = 450, .reg = 0x16 }, { .mbps = 500, .reg = 0x26 }, { .mbps = 550, .reg = 0x37 }, { .mbps = 600, .reg = 0x07 }, { .mbps = 650, .reg = 0x18 }, { .mbps = 700, .reg = 0x28 }, { .mbps = 750, .reg = 0x39 }, { .mbps = 800, .reg = 0x09 }, { .mbps = 850, .reg = 0x19 }, { .mbps = 900, .reg = 0x29 }, { .mbps = 950, .reg = 0x3a }, { .mbps = 1000, .reg = 0x0a }, { .mbps = 1050, .reg = 0x1a }, { .mbps = 1100, .reg = 0x2a }, { .mbps = 1150, .reg = 0x3b }, { .mbps = 1200, .reg = 0x0b }, { .mbps = 1250, .reg = 0x1b }, { .mbps = 1300, .reg = 0x2b }, { .mbps = 1350, .reg = 0x3c }, { .mbps = 1400, .reg = 0x0c }, { .mbps = 1450, .reg = 0x1c }, { .mbps = 1500, .reg = 0x2c }, { /* sentinel */ }, }; static const struct rcsi2_mbps_reg hsfreqrange_m3w_h3es1[] = { { .mbps = 80, .reg = 0x00 }, { .mbps = 90, .reg = 0x10 }, { .mbps = 100, .reg = 0x20 }, { .mbps = 110, .reg = 0x30 }, { .mbps = 120, .reg = 0x01 }, { .mbps = 130, .reg = 0x11 }, { .mbps = 140, .reg = 0x21 }, { .mbps = 150, .reg = 0x31 }, { .mbps = 160, .reg = 0x02 }, { .mbps = 170, .reg = 0x12 }, { .mbps = 180, .reg = 0x22 }, { .mbps = 190, .reg = 0x32 }, { .mbps = 205, .reg = 0x03 }, { .mbps = 220, .reg = 0x13 }, { .mbps = 235, .reg = 0x23 }, { .mbps = 250, .reg = 0x33 }, { .mbps = 275, .reg = 0x04 }, { .mbps = 300, .reg = 0x14 }, { .mbps = 325, .reg = 0x05 }, { .mbps = 350, .reg = 0x15 }, { .mbps = 400, .reg = 0x25 }, { .mbps = 450, .reg = 0x06 }, { .mbps = 500, .reg = 0x16 }, { .mbps = 550, .reg = 0x07 }, { .mbps = 600, .reg = 0x17 }, { .mbps = 650, .reg = 0x08 
}, { .mbps = 700, .reg = 0x18 }, { .mbps = 750, .reg = 0x09 }, { .mbps = 800, .reg = 0x19 }, { .mbps = 850, .reg = 0x29 }, { .mbps = 900, .reg = 0x39 }, { .mbps = 950, .reg = 0x0a }, { .mbps = 1000, .reg = 0x1a }, { .mbps = 1050, .reg = 0x2a }, { .mbps = 1100, .reg = 0x3a }, { .mbps = 1150, .reg = 0x0b }, { .mbps = 1200, .reg = 0x1b }, { .mbps = 1250, .reg = 0x2b }, { .mbps = 1300, .reg = 0x3b }, { .mbps = 1350, .reg = 0x0c }, { .mbps = 1400, .reg = 0x1c }, { .mbps = 1450, .reg = 0x2c }, { .mbps = 1500, .reg = 0x3c }, { /* sentinel */ }, }; /* PHY ESC Error Monitor */ #define PHEERM_REG 0x74 /* PHY Clock Lane Monitor */ #define PHCLM_REG 0x78 #define PHCLM_STOPSTATECKL BIT(0) /* PHY Data Lane Monitor */ #define PHDLM_REG 0x7c /* CSI0CLK Frequency Configuration Preset Register */ #define CSI0CLKFCPR_REG 0x260 #define CSI0CLKFREQRANGE(n) ((n & 0x3f) << 16) struct rcar_csi2_format { u32 code; unsigned int datatype; unsigned int bpp; }; static const struct rcar_csi2_format rcar_csi2_formats[] = { { .code = MEDIA_BUS_FMT_RGB888_1X24, .datatype = 0x24, .bpp = 24 }, { .code = MEDIA_BUS_FMT_UYVY8_1X16, .datatype = 0x1e, .bpp = 16 }, { .code = MEDIA_BUS_FMT_YUYV8_1X16, .datatype = 0x1e, .bpp = 16 }, { .code = MEDIA_BUS_FMT_UYVY8_2X8, .datatype = 0x1e, .bpp = 16 }, { .code = MEDIA_BUS_FMT_YUYV10_2X10, .datatype = 0x1e, .bpp = 20 }, }; static const struct rcar_csi2_format *rcsi2_code_to_fmt(unsigned int code) { unsigned int i; for (i = 0; i < ARRAY_SIZE(rcar_csi2_formats); i++) if (rcar_csi2_formats[i].code == code) return &rcar_csi2_formats[i]; return NULL; } enum rcar_csi2_pads { RCAR_CSI2_SINK, RCAR_CSI2_SOURCE_VC0, RCAR_CSI2_SOURCE_VC1, RCAR_CSI2_SOURCE_VC2, RCAR_CSI2_SOURCE_VC3, NR_OF_RCAR_CSI2_PAD, }; struct rcar_csi2_info { int (*init_phtw)(struct rcar_csi2 *priv, unsigned int mbps); int (*confirm_start)(struct rcar_csi2 *priv); const struct rcsi2_mbps_reg *hsfreqrange; unsigned int csi0clkfreqrange; unsigned int num_channels; bool clear_ulps; }; struct rcar_csi2 { struct device *dev; void __iomem *base; const struct rcar_csi2_info *info; struct reset_control *rstc; struct v4l2_subdev subdev; struct media_pad pads[NR_OF_RCAR_CSI2_PAD]; struct v4l2_async_notifier notifier; struct v4l2_async_subdev asd; struct v4l2_subdev *remote; struct v4l2_mbus_framefmt mf; struct mutex lock; int stream_count; unsigned short lanes; unsigned char lane_swap[4]; }; static inline struct rcar_csi2 *sd_to_csi2(struct v4l2_subdev *sd) { return container_of(sd, struct rcar_csi2, subdev); } static inline struct rcar_csi2 *notifier_to_csi2(struct v4l2_async_notifier *n) { return container_of(n, struct rcar_csi2, notifier); } static u32 rcsi2_read(struct rcar_csi2 *priv, unsigned int reg) { return ioread32(priv->base + reg); } static void rcsi2_write(struct rcar_csi2 *priv, unsigned int reg, u32 data) { iowrite32(data, priv->base + reg); } static void rcsi2_enter_standby(struct rcar_csi2 *priv) { rcsi2_write(priv, PHYCNT_REG, 0); rcsi2_write(priv, PHTC_REG, PHTC_TESTCLR); reset_control_assert(priv->rstc); usleep_range(100, 150); pm_runtime_put(priv->dev); } static void rcsi2_exit_standby(struct rcar_csi2 *priv) { pm_runtime_get_sync(priv->dev); reset_control_deassert(priv->rstc); } static int rcsi2_wait_phy_start(struct rcar_csi2 *priv) { unsigned int timeout; /* Wait for the clock and data lanes to enter LP-11 state. 
*/ for (timeout = 0; timeout <= 20; timeout++) { const u32 lane_mask = (1 << priv->lanes) - 1; if ((rcsi2_read(priv, PHCLM_REG) & PHCLM_STOPSTATECKL) && (rcsi2_read(priv, PHDLM_REG) & lane_mask) == lane_mask) return 0; usleep_range(1000, 2000); } dev_err(priv->dev, "Timeout waiting for LP-11 state\n"); return -ETIMEDOUT; } static int rcsi2_set_phypll(struct rcar_csi2 *priv, unsigned int mbps) { const struct rcsi2_mbps_reg *hsfreq; for (hsfreq = priv->info->hsfreqrange; hsfreq->mbps != 0; hsfreq++) if (hsfreq->mbps >= mbps) break; if (!hsfreq->mbps) { dev_err(priv->dev, "Unsupported PHY speed (%u Mbps)", mbps); return -ERANGE; } rcsi2_write(priv, PHYPLL_REG, PHYPLL_HSFREQRANGE(hsfreq->reg)); return 0; } static int rcsi2_calc_mbps(struct rcar_csi2 *priv, unsigned int bpp) { struct v4l2_subdev *source; struct v4l2_ctrl *ctrl; u64 mbps; if (!priv->remote) return -ENODEV; source = priv->remote; /* Read the pixel rate control from remote. */ ctrl = v4l2_ctrl_find(source->ctrl_handler, V4L2_CID_PIXEL_RATE); if (!ctrl) { dev_err(priv->dev, "no pixel rate control in subdev %s\n", source->name); return -EINVAL; } /* * Calculate the phypll in mbps. * link_freq = (pixel_rate * bits_per_sample) / (2 * nr_of_lanes) * bps = link_freq * 2 */ mbps = v4l2_ctrl_g_ctrl_int64(ctrl) * bpp; do_div(mbps, priv->lanes * 1000000); return mbps; } static int rcsi2_start_receiver(struct rcar_csi2 *priv) { const struct rcar_csi2_format *format; u32 phycnt, vcdt = 0, vcdt2 = 0, fld = 0; unsigned int i; int mbps, ret; dev_dbg(priv->dev, "Input size (%ux%u%c)\n", priv->mf.width, priv->mf.height, priv->mf.field == V4L2_FIELD_NONE ? 'p' : 'i'); /* Code is validated in set_fmt. */ format = rcsi2_code_to_fmt(priv->mf.code); /* * Enable all supported CSI-2 channels with virtual channel and * data type matching. * * NOTE: It's not possible to get individual datatype for each * source virtual channel. Once this is possible in V4L2 * it should be used here. */ for (i = 0; i < priv->info->num_channels; i++) { u32 vcdt_part; vcdt_part = VCDT_SEL_VC(i) | VCDT_VCDTN_EN | VCDT_SEL_DTN_ON | VCDT_SEL_DT(format->datatype); /* Store in correct reg and offset. */ if (i < 2) vcdt |= vcdt_part << ((i % 2) * 16); else vcdt2 |= vcdt_part << ((i % 2) * 16); } if (priv->mf.field == V4L2_FIELD_ALTERNATE) { fld = FLD_DET_SEL(1) | FLD_FLD_EN4 | FLD_FLD_EN3 | FLD_FLD_EN2 | FLD_FLD_EN; if (priv->mf.height == 240) fld |= FLD_FLD_NUM(0); else fld |= FLD_FLD_NUM(1); } phycnt = PHYCNT_ENABLECLK; phycnt |= (1 << priv->lanes) - 1; mbps = rcsi2_calc_mbps(priv, format->bpp); if (mbps < 0) return mbps; /* Enable interrupts. */ rcsi2_write(priv, INTEN_REG, INTEN_INT_AFIFO_OF | INTEN_INT_ERRSOTHS | INTEN_INT_ERRSOTSYNCHS); /* Init */ rcsi2_write(priv, TREF_REG, TREF_TREF); rcsi2_write(priv, PHTC_REG, 0); /* Configure */ rcsi2_write(priv, VCDT_REG, vcdt); if (vcdt2) rcsi2_write(priv, VCDT2_REG, vcdt2); /* Lanes are zero indexed. 
*/ rcsi2_write(priv, LSWAP_REG, LSWAP_L0SEL(priv->lane_swap[0] - 1) | LSWAP_L1SEL(priv->lane_swap[1] - 1) | LSWAP_L2SEL(priv->lane_swap[2] - 1) | LSWAP_L3SEL(priv->lane_swap[3] - 1)); /* Start */ if (priv->info->init_phtw) { ret = priv->info->init_phtw(priv, mbps); if (ret) return ret; } if (priv->info->hsfreqrange) { ret = rcsi2_set_phypll(priv, mbps); if (ret) return ret; } if (priv->info->csi0clkfreqrange) rcsi2_write(priv, CSI0CLKFCPR_REG, CSI0CLKFREQRANGE(priv->info->csi0clkfreqrange)); rcsi2_write(priv, PHYCNT_REG, phycnt); rcsi2_write(priv, LINKCNT_REG, LINKCNT_MONITOR_EN | LINKCNT_REG_MONI_PACT_EN | LINKCNT_ICLK_NONSTOP); rcsi2_write(priv, FLD_REG, fld); rcsi2_write(priv, PHYCNT_REG, phycnt | PHYCNT_SHUTDOWNZ); rcsi2_write(priv, PHYCNT_REG, phycnt | PHYCNT_SHUTDOWNZ | PHYCNT_RSTZ); ret = rcsi2_wait_phy_start(priv); if (ret) return ret; /* Confirm start */ if (priv->info->confirm_start) { ret = priv->info->confirm_start(priv); if (ret) return ret; } /* Clear Ultra Low Power interrupt. */ if (priv->info->clear_ulps) rcsi2_write(priv, INTSTATE_REG, INTSTATE_INT_ULPS_START | INTSTATE_INT_ULPS_END); return 0; } static int rcsi2_start(struct rcar_csi2 *priv) { int ret; rcsi2_exit_standby(priv); ret = rcsi2_start_receiver(priv); if (ret) { rcsi2_enter_standby(priv); return ret; } ret = v4l2_subdev_call(priv->remote, video, s_stream, 1); if (ret) { rcsi2_enter_standby(priv); return ret; } return 0; } static void rcsi2_stop(struct rcar_csi2 *priv) { rcsi2_enter_standby(priv); v4l2_subdev_call(priv->remote, video, s_stream, 0); } static int rcsi2_s_stream(struct v4l2_subdev *sd, int enable) { struct rcar_csi2 *priv = sd_to_csi2(sd); int ret = 0; mutex_lock(&priv->lock); if (!priv->remote) { ret = -ENODEV; goto out; } if (enable && priv->stream_count == 0) { ret = rcsi2_start(priv); if (ret) goto out; } else if (!enable && priv->stream_count == 1) { rcsi2_stop(priv); } priv->stream_count += enable ? 
1 : -1; out: mutex_unlock(&priv->lock); return ret; } static int rcsi2_set_pad_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_format *format) { struct rcar_csi2 *priv = sd_to_csi2(sd); struct v4l2_mbus_framefmt *framefmt; if (!rcsi2_code_to_fmt(format->format.code)) format->format.code = rcar_csi2_formats[0].code; if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) { priv->mf = format->format; } else { framefmt = v4l2_subdev_get_try_format(sd, cfg, 0); *framefmt = format->format; } return 0; } static int rcsi2_get_pad_format(struct v4l2_subdev *sd, struct v4l2_subdev_pad_config *cfg, struct v4l2_subdev_format *format) { struct rcar_csi2 *priv = sd_to_csi2(sd); if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) format->format = priv->mf; else format->format = *v4l2_subdev_get_try_format(sd, cfg, 0); return 0; } static const struct v4l2_subdev_video_ops rcar_csi2_video_ops = { .s_stream = rcsi2_s_stream, }; static const struct v4l2_subdev_pad_ops rcar_csi2_pad_ops = { .set_fmt = rcsi2_set_pad_format, .get_fmt = rcsi2_get_pad_format, }; static const struct v4l2_subdev_ops rcar_csi2_subdev_ops = { .video = &rcar_csi2_video_ops, .pad = &rcar_csi2_pad_ops, }; static irqreturn_t rcsi2_irq(int irq, void *data) { struct rcar_csi2 *priv = data; u32 status, err_status; status = rcsi2_read(priv, INTSTATE_REG); err_status = rcsi2_read(priv, INTERRSTATE_REG); if (!status) return IRQ_HANDLED; rcsi2_write(priv, INTSTATE_REG, status); if (!err_status) return IRQ_HANDLED; rcsi2_write(priv, INTERRSTATE_REG, err_status); dev_info(priv->dev, "Transfer error, restarting CSI-2 receiver\n"); return IRQ_WAKE_THREAD; } static irqreturn_t rcsi2_irq_thread(int irq, void *data) { struct rcar_csi2 *priv = data; mutex_lock(&priv->lock); rcsi2_stop(priv); usleep_range(1000, 2000); if (rcsi2_start(priv)) dev_warn(priv->dev, "Failed to restart CSI-2 receiver\n"); mutex_unlock(&priv->lock); return IRQ_HANDLED; } /* ----------------------------------------------------------------------------- * Async handling and registration of subdevices and links. */ static int rcsi2_notify_bound(struct v4l2_async_notifier *notifier, struct v4l2_subdev *subdev, struct v4l2_async_subdev *asd) { struct rcar_csi2 *priv = notifier_to_csi2(notifier); int pad; pad = media_entity_get_fwnode_pad(&subdev->entity, asd->match.fwnode, MEDIA_PAD_FL_SOURCE); if (pad < 0) { dev_err(priv->dev, "Failed to find pad for %s\n", subdev->name); return pad; } priv->remote = subdev; dev_dbg(priv->dev, "Bound %s pad: %d\n", subdev->name, pad); return media_create_pad_link(&subdev->entity, pad, &priv->subdev.entity, 0, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE); } static void rcsi2_notify_unbind(struct v4l2_async_notifier *notifier, struct v4l2_subdev *subdev, struct v4l2_async_subdev *asd) { struct rcar_csi2 *priv = notifier_to_csi2(notifier); priv->remote = NULL; dev_dbg(priv->dev, "Unbind %s\n", subdev->name); } static const struct v4l2_async_notifier_operations rcar_csi2_notify_ops = { .bound = rcsi2_notify_bound, .unbind = rcsi2_notify_unbind, }; static int rcsi2_parse_v4l2(struct rcar_csi2 *priv, struct v4l2_fwnode_endpoint *vep) { unsigned int i; /* Only port 0 endpoint 0 is valid. 
*/ if (vep->base.port || vep->base.id) return -ENOTCONN; if (vep->bus_type != V4L2_MBUS_CSI2_DPHY) { dev_err(priv->dev, "Unsupported bus: %u\n", vep->bus_type); return -EINVAL; } priv->lanes = vep->bus.mipi_csi2.num_data_lanes; if (priv->lanes != 1 && priv->lanes != 2 && priv->lanes != 4) { dev_err(priv->dev, "Unsupported number of data-lanes: %u\n", priv->lanes); return -EINVAL; } for (i = 0; i < ARRAY_SIZE(priv->lane_swap); i++) { priv->lane_swap[i] = i < priv->lanes ? vep->bus.mipi_csi2.data_lanes[i] : i; /* Check for valid lane number. */ if (priv->lane_swap[i] < 1 || priv->lane_swap[i] > 4) { dev_err(priv->dev, "data-lanes must be in 1-4 range\n"); return -EINVAL; } } return 0; } static int rcsi2_parse_dt(struct rcar_csi2 *priv) { struct device_node *ep; struct v4l2_fwnode_endpoint v4l2_ep = { .bus_type = 0 }; int ret; ep = of_graph_get_endpoint_by_regs(priv->dev->of_node, 0, 0); if (!ep) { dev_err(priv->dev, "Not connected to subdevice\n"); return -EINVAL; } ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &v4l2_ep); if (ret) { dev_err(priv->dev, "Could not parse v4l2 endpoint\n"); of_node_put(ep); return -EINVAL; } ret = rcsi2_parse_v4l2(priv, &v4l2_ep); if (ret) { of_node_put(ep); return ret; } priv->asd.match.fwnode = fwnode_graph_get_remote_endpoint(of_fwnode_handle(ep)); priv->asd.match_type = V4L2_ASYNC_MATCH_FWNODE; of_node_put(ep); v4l2_async_notifier_init(&priv->notifier); ret = v4l2_async_notifier_add_subdev(&priv->notifier, &priv->asd); if (ret) { fwnode_handle_put(priv->asd.match.fwnode); return ret; } priv->notifier.ops = &rcar_csi2_notify_ops; dev_dbg(priv->dev, "Found '%pOF'\n", to_of_node(priv->asd.match.fwnode)); ret = v4l2_async_subdev_notifier_register(&priv->subdev, &priv->notifier); if (ret) v4l2_async_notifier_cleanup(&priv->notifier); return ret; } /* ----------------------------------------------------------------------------- * PHTW initialization sequences. * * NOTE: Magic values are from the datasheet and lack documentation. */ static int rcsi2_phtw_write(struct rcar_csi2 *priv, u16 data, u16 code) { unsigned int timeout; rcsi2_write(priv, PHTW_REG, PHTW_DWEN | PHTW_TESTDIN_DATA(data) | PHTW_CWEN | PHTW_TESTDIN_CODE(code)); /* Wait for DWEN and CWEN to be cleared by hardware. 
*/ for (timeout = 0; timeout <= 20; timeout++) { if (!(rcsi2_read(priv, PHTW_REG) & (PHTW_DWEN | PHTW_CWEN))) return 0; usleep_range(1000, 2000); } dev_err(priv->dev, "Timeout waiting for PHTW_DWEN and/or PHTW_CWEN\n"); return -ETIMEDOUT; } static int rcsi2_phtw_write_array(struct rcar_csi2 *priv, const struct phtw_value *values) { const struct phtw_value *value; int ret; for (value = values; value->data || value->code; value++) { ret = rcsi2_phtw_write(priv, value->data, value->code); if (ret) return ret; } return 0; } static int rcsi2_phtw_write_mbps(struct rcar_csi2 *priv, unsigned int mbps, const struct rcsi2_mbps_reg *values, u16 code) { const struct rcsi2_mbps_reg *value; for (value = values; value->mbps; value++) if (value->mbps >= mbps) break; if (!value->mbps) { dev_err(priv->dev, "Unsupported PHY speed (%u Mbps)", mbps); return -ERANGE; } return rcsi2_phtw_write(priv, value->reg, code); } static int __rcsi2_init_phtw_h3_v3h_m3n(struct rcar_csi2 *priv, unsigned int mbps) { static const struct phtw_value step1[] = { { .data = 0xcc, .code = 0xe2 }, { .data = 0x01, .code = 0xe3 }, { .data = 0x11, .code = 0xe4 }, { .data = 0x01, .code = 0xe5 }, { .data = 0x10, .code = 0x04 }, { /* sentinel */ }, }; static const struct phtw_value step2[] = { { .data = 0x38, .code = 0x08 }, { .data = 0x01, .code = 0x00 }, { .data = 0x4b, .code = 0xac }, { .data = 0x03, .code = 0x00 }, { .data = 0x80, .code = 0x07 }, { /* sentinel */ }, }; int ret; ret = rcsi2_phtw_write_array(priv, step1); if (ret) return ret; if (mbps != 0 && mbps <= 250) { ret = rcsi2_phtw_write(priv, 0x39, 0x05); if (ret) return ret; ret = rcsi2_phtw_write_mbps(priv, mbps, phtw_mbps_h3_v3h_m3n, 0xf1); if (ret) return ret; } return rcsi2_phtw_write_array(priv, step2); } static int rcsi2_init_phtw_h3_v3h_m3n(struct rcar_csi2 *priv, unsigned int mbps) { return __rcsi2_init_phtw_h3_v3h_m3n(priv, mbps); } static int rcsi2_init_phtw_h3es2(struct rcar_csi2 *priv, unsigned int mbps) { return __rcsi2_init_phtw_h3_v3h_m3n(priv, 0); } static int rcsi2_init_phtw_v3m_e3(struct rcar_csi2 *priv, unsigned int mbps) { return rcsi2_phtw_write_mbps(priv, mbps, phtw_mbps_v3m_e3, 0x44); } static int rcsi2_confirm_start_v3m_e3(struct rcar_csi2 *priv) { static const struct phtw_value step1[] = { { .data = 0xee, .code = 0x34 }, { .data = 0xee, .code = 0x44 }, { .data = 0xee, .code = 0x54 }, { .data = 0xee, .code = 0x84 }, { .data = 0xee, .code = 0x94 }, { /* sentinel */ }, }; return rcsi2_phtw_write_array(priv, step1); } /* ----------------------------------------------------------------------------- * Platform Device Driver. 
*/ static const struct media_entity_operations rcar_csi2_entity_ops = { .link_validate = v4l2_subdev_link_validate, }; static int rcsi2_probe_resources(struct rcar_csi2 *priv, struct platform_device *pdev) { struct resource *res; int irq, ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); priv->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; ret = devm_request_threaded_irq(&pdev->dev, irq, rcsi2_irq, rcsi2_irq_thread, IRQF_SHARED, KBUILD_MODNAME, priv); if (ret) return ret; priv->rstc = devm_reset_control_get(&pdev->dev, NULL); return PTR_ERR_OR_ZERO(priv->rstc); } static const struct rcar_csi2_info rcar_csi2_info_r8a7795 = { .init_phtw = rcsi2_init_phtw_h3_v3h_m3n, .hsfreqrange = hsfreqrange_h3_v3h_m3n, .csi0clkfreqrange = 0x20, .num_channels = 4, .clear_ulps = true, }; static const struct rcar_csi2_info rcar_csi2_info_r8a7795es1 = { .hsfreqrange = hsfreqrange_m3w_h3es1, .num_channels = 4, }; static const struct rcar_csi2_info rcar_csi2_info_r8a7795es2 = { .init_phtw = rcsi2_init_phtw_h3es2, .hsfreqrange = hsfreqrange_h3_v3h_m3n, .csi0clkfreqrange = 0x20, .num_channels = 4, .clear_ulps = true, }; static const struct rcar_csi2_info rcar_csi2_info_r8a7796 = { .hsfreqrange = hsfreqrange_m3w_h3es1, .num_channels = 4, }; static const struct rcar_csi2_info rcar_csi2_info_r8a77965 = { .init_phtw = rcsi2_init_phtw_h3_v3h_m3n, .hsfreqrange = hsfreqrange_h3_v3h_m3n, .csi0clkfreqrange = 0x20, .num_channels = 4, .clear_ulps = true, }; static const struct rcar_csi2_info rcar_csi2_info_r8a77970 = { .init_phtw = rcsi2_init_phtw_v3m_e3, .confirm_start = rcsi2_confirm_start_v3m_e3, .num_channels = 4, }; static const struct rcar_csi2_info rcar_csi2_info_r8a77980 = { .init_phtw = rcsi2_init_phtw_h3_v3h_m3n, .hsfreqrange = hsfreqrange_h3_v3h_m3n, .csi0clkfreqrange = 0x20, .clear_ulps = true, }; static const struct rcar_csi2_info rcar_csi2_info_r8a77990 = { .init_phtw = rcsi2_init_phtw_v3m_e3, .confirm_start = rcsi2_confirm_start_v3m_e3, .num_channels = 2, }; static const struct of_device_id rcar_csi2_of_table[] = { { .compatible = "renesas,r8a774a1-csi2", .data = &rcar_csi2_info_r8a7796, }, { .compatible = "renesas,r8a774c0-csi2", .data = &rcar_csi2_info_r8a77990, }, { .compatible = "renesas,r8a7795-csi2", .data = &rcar_csi2_info_r8a7795, }, { .compatible = "renesas,r8a7796-csi2", .data = &rcar_csi2_info_r8a7796, }, { .compatible = "renesas,r8a77965-csi2", .data = &rcar_csi2_info_r8a77965, }, { .compatible = "renesas,r8a77970-csi2", .data = &rcar_csi2_info_r8a77970, }, { .compatible = "renesas,r8a77980-csi2", .data = &rcar_csi2_info_r8a77980, }, { .compatible = "renesas,r8a77990-csi2", .data = &rcar_csi2_info_r8a77990, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, rcar_csi2_of_table); static const struct soc_device_attribute r8a7795[] = { { .soc_id = "r8a7795", .revision = "ES1.*", .data = &rcar_csi2_info_r8a7795es1, }, { .soc_id = "r8a7795", .revision = "ES2.*", .data = &rcar_csi2_info_r8a7795es2, }, { /* sentinel */ }, }; static int rcsi2_probe(struct platform_device *pdev) { const struct soc_device_attribute *attr; struct rcar_csi2 *priv; unsigned int i; int ret; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->info = of_device_get_match_data(&pdev->dev); /* * The different ES versions of r8a7795 (H3) behave differently but * share the same compatible string. 
*/ attr = soc_device_match(r8a7795); if (attr) priv->info = attr->data; priv->dev = &pdev->dev; mutex_init(&priv->lock); priv->stream_count = 0; ret = rcsi2_probe_resources(priv, pdev); if (ret) { dev_err(priv->dev, "Failed to get resources\n"); return ret; } platform_set_drvdata(pdev, priv); ret = rcsi2_parse_dt(priv); if (ret) return ret; priv->subdev.owner = THIS_MODULE; priv->subdev.dev = &pdev->dev; v4l2_subdev_init(&priv->subdev, &rcar_csi2_subdev_ops); v4l2_set_subdevdata(&priv->subdev, &pdev->dev); snprintf(priv->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s %s", KBUILD_MODNAME, dev_name(&pdev->dev)); priv->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE; priv->subdev.entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER; priv->subdev.entity.ops = &rcar_csi2_entity_ops; priv->pads[RCAR_CSI2_SINK].flags = MEDIA_PAD_FL_SINK; for (i = RCAR_CSI2_SOURCE_VC0; i < NR_OF_RCAR_CSI2_PAD; i++) priv->pads[i].flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_pads_init(&priv->subdev.entity, NR_OF_RCAR_CSI2_PAD, priv->pads); if (ret) goto error; pm_runtime_enable(&pdev->dev); ret = v4l2_async_register_subdev(&priv->subdev); if (ret < 0) goto error; dev_info(priv->dev, "%d lanes found\n", priv->lanes); return 0; error: v4l2_async_notifier_unregister(&priv->notifier); v4l2_async_notifier_cleanup(&priv->notifier); return ret; } static int rcsi2_remove(struct platform_device *pdev) { struct rcar_csi2 *priv = platform_get_drvdata(pdev); v4l2_async_notifier_unregister(&priv->notifier); v4l2_async_notifier_cleanup(&priv->notifier); v4l2_async_unregister_subdev(&priv->subdev); pm_runtime_disable(&pdev->dev); return 0; } static struct platform_driver rcar_csi2_pdrv = { .remove = rcsi2_remove, .probe = rcsi2_probe, .driver = { .name = "rcar-csi2", .of_match_table = rcar_csi2_of_table, }, }; module_platform_driver(rcar_csi2_pdrv); MODULE_AUTHOR("Niklas Söderlund <niklas.soderlund@ragnatech.se>"); MODULE_DESCRIPTION("Renesas R-Car MIPI CSI-2 receiver driver"); MODULE_LICENSE("GPL");
gpl-2.0
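A brief sketch of the link-rate math behind rcsi2_calc_mbps() in the R-Car CSI-2 content above: following the driver's own comment, link_freq = pixel_rate * bits_per_sample / (2 * lanes) and the D-PHY carries two bits per clock, so the per-lane rate reduces to pixel_rate * bpp / lanes. The sensor figures below are hypothetical.

#include <stdint.h>
#include <stdio.h>

static unsigned int csi2_mbps_per_lane(uint64_t pixel_rate, unsigned int bpp,
					unsigned int lanes)
{
	/* pixel_rate [pixels/s] * bits-per-pixel, spread across the data lanes */
	return (unsigned int)(pixel_rate * bpp / lanes / 1000000);
}

int main(void)
{
	/* hypothetical 1080p60 stream: ~148.5 Mpix/s, YUV422 at 16 bpp, 4 lanes */
	uint64_t pixel_rate = 148500000;

	printf("%u Mbps per lane\n", csi2_mbps_per_lane(pixel_rate, 16, 4));
	/* -> 594 Mbps per lane */
	return 0;
}

The driver then walks its hsfreqrange table for the first entry whose .mbps is at or above this value, so 594 Mbps would select the 600 Mbps row of hsfreqrange_h3_v3h_m3n.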
mtmichaelson/LG_Spectrum_Kernel
drivers/power/msm_battery.c
39
42062
/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * */ /* * this needs to be before <linux/kernel.h> is loaded, * and <linux/sched.h> loads <linux/kernel.h> */ #define DEBUG 1 #include <linux/slab.h> #include <linux/earlysuspend.h> #include <linux/err.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/power_supply.h> #include <linux/sched.h> #include <linux/signal.h> #include <linux/uaccess.h> #include <linux/wait.h> #include <linux/workqueue.h> #include <asm/atomic.h> #include <mach/msm_rpcrouter.h> #include <mach/msm_battery.h> #define BATTERY_RPC_PROG 0x30000089 #define BATTERY_RPC_VER_1_1 0x00010001 #define BATTERY_RPC_VER_2_1 0x00020001 #define BATTERY_RPC_VER_4_1 0x00040001 #define BATTERY_RPC_VER_5_1 0x00050001 #define BATTERY_RPC_CB_PROG (BATTERY_RPC_PROG | 0x01000000) #define CHG_RPC_PROG 0x3000001a #define CHG_RPC_VER_1_1 0x00010001 #define CHG_RPC_VER_1_3 0x00010003 #define CHG_RPC_VER_2_2 0x00020002 #define CHG_RPC_VER_3_1 0x00030001 #define CHG_RPC_VER_4_1 0x00040001 #define BATTERY_REGISTER_PROC 2 #define BATTERY_MODIFY_CLIENT_PROC 4 #define BATTERY_DEREGISTER_CLIENT_PROC 5 #define BATTERY_READ_MV_PROC 12 #define BATTERY_ENABLE_DISABLE_FILTER_PROC 14 #define VBATT_FILTER 2 #define BATTERY_CB_TYPE_PROC 1 #define BATTERY_CB_ID_ALL_ACTIV 1 #define BATTERY_CB_ID_LOW_VOL 2 #define BATTERY_LOW 3200 #define BATTERY_HIGH 4300 #define ONCRPC_CHG_GET_GENERAL_STATUS_PROC 12 #define ONCRPC_CHARGER_API_VERSIONS_PROC 0xffffffff #define BATT_RPC_TIMEOUT 5000 /* 5 sec */ #define INVALID_BATT_HANDLE -1 #define RPC_TYPE_REQ 0 #define RPC_TYPE_REPLY 1 #define RPC_REQ_REPLY_COMMON_HEADER_SIZE (3 * sizeof(uint32_t)) #if DEBUG #define DBG_LIMIT(x...) do {if (printk_ratelimit()) pr_debug(x); } while (0) #else #define DBG_LIMIT(x...) 
do {} while (0) #endif enum { BATTERY_REGISTRATION_SUCCESSFUL = 0, BATTERY_DEREGISTRATION_SUCCESSFUL = BATTERY_REGISTRATION_SUCCESSFUL, BATTERY_MODIFICATION_SUCCESSFUL = BATTERY_REGISTRATION_SUCCESSFUL, BATTERY_INTERROGATION_SUCCESSFUL = BATTERY_REGISTRATION_SUCCESSFUL, BATTERY_CLIENT_TABLE_FULL = 1, BATTERY_REG_PARAMS_WRONG = 2, BATTERY_DEREGISTRATION_FAILED = 4, BATTERY_MODIFICATION_FAILED = 8, BATTERY_INTERROGATION_FAILED = 16, /* Client's filter could not be set because perhaps it does not exist */ BATTERY_SET_FILTER_FAILED = 32, /* Client's could not be found for enabling or disabling the individual * client */ BATTERY_ENABLE_DISABLE_INDIVIDUAL_CLIENT_FAILED = 64, BATTERY_LAST_ERROR = 128, }; enum { BATTERY_VOLTAGE_UP = 0, BATTERY_VOLTAGE_DOWN, BATTERY_VOLTAGE_ABOVE_THIS_LEVEL, BATTERY_VOLTAGE_BELOW_THIS_LEVEL, BATTERY_VOLTAGE_LEVEL, BATTERY_ALL_ACTIVITY, VBATT_CHG_EVENTS, BATTERY_VOLTAGE_UNKNOWN, }; /* * This enum contains defintions of the charger hardware status */ enum chg_charger_status_type { /* The charger is good */ CHARGER_STATUS_GOOD, /* The charger is bad */ CHARGER_STATUS_BAD, /* The charger is weak */ CHARGER_STATUS_WEAK, /* Invalid charger status. */ CHARGER_STATUS_INVALID }; /* *This enum contains defintions of the charger hardware type */ enum chg_charger_hardware_type { /* The charger is removed */ CHARGER_TYPE_NONE, /* The charger is a regular wall charger */ CHARGER_TYPE_WALL, /* The charger is a PC USB */ CHARGER_TYPE_USB_PC, /* The charger is a wall USB charger */ CHARGER_TYPE_USB_WALL, /* The charger is a USB carkit */ CHARGER_TYPE_USB_CARKIT, /* Invalid charger hardware status. */ CHARGER_TYPE_INVALID }; /* * This enum contains defintions of the battery status */ enum chg_battery_status_type { /* The battery is good */ BATTERY_STATUS_GOOD, /* The battery is cold/hot */ BATTERY_STATUS_BAD_TEMP, /* The battery is bad */ BATTERY_STATUS_BAD, /* The battery is removed */ BATTERY_STATUS_REMOVED, /* on v2.2 only */ BATTERY_STATUS_INVALID_v1 = BATTERY_STATUS_REMOVED, /* Invalid battery status. */ BATTERY_STATUS_INVALID }; /* *This enum contains defintions of the battery voltage level */ enum chg_battery_level_type { /* The battery voltage is dead/very low (less than 3.2V) */ BATTERY_LEVEL_DEAD, /* The battery voltage is weak/low (between 3.2V and 3.4V) */ BATTERY_LEVEL_WEAK, /* The battery voltage is good/normal(between 3.4V and 4.2V) */ BATTERY_LEVEL_GOOD, /* The battery voltage is up to full (close to 4.2V) */ BATTERY_LEVEL_FULL, /* Invalid battery voltage level. 
*/ BATTERY_LEVEL_INVALID }; #ifndef CONFIG_BATTERY_MSM_FAKE struct rpc_reply_batt_chg_v1 { struct rpc_reply_hdr hdr; u32 more_data; u32 charger_status; u32 charger_type; u32 battery_status; u32 battery_level; u32 battery_voltage; u32 battery_temp; }; struct rpc_reply_batt_chg_v2 { struct rpc_reply_batt_chg_v1 v1; u32 is_charger_valid; u32 is_charging; u32 is_battery_valid; u32 ui_event; }; union rpc_reply_batt_chg { struct rpc_reply_batt_chg_v1 v1; struct rpc_reply_batt_chg_v2 v2; }; static union rpc_reply_batt_chg rep_batt_chg; #endif struct msm_battery_info { u32 voltage_max_design; u32 voltage_min_design; u32 chg_api_version; u32 batt_technology; u32 batt_api_version; u32 avail_chg_sources; u32 current_chg_source; u32 batt_status; u32 batt_health; u32 charger_valid; u32 batt_valid; u32 batt_capacity; /* in percentage */ u32 charger_status; u32 charger_type; u32 battery_status; u32 battery_level; u32 battery_voltage; /* in millie volts */ u32 battery_temp; /* in celsius */ u32(*calculate_capacity) (u32 voltage); s32 batt_handle; struct power_supply *msm_psy_ac; struct power_supply *msm_psy_usb; struct power_supply *msm_psy_batt; struct power_supply *current_ps; struct msm_rpc_client *batt_client; struct msm_rpc_endpoint *chg_ep; wait_queue_head_t wait_q; u32 vbatt_modify_reply_avail; struct early_suspend early_suspend; }; static struct msm_battery_info msm_batt_info = { .batt_handle = INVALID_BATT_HANDLE, .charger_status = CHARGER_STATUS_BAD, .charger_type = CHARGER_TYPE_INVALID, .battery_status = BATTERY_STATUS_GOOD, .battery_level = BATTERY_LEVEL_FULL, .battery_voltage = BATTERY_HIGH, .batt_capacity = 100, .batt_status = POWER_SUPPLY_STATUS_DISCHARGING, .batt_health = POWER_SUPPLY_HEALTH_GOOD, .batt_valid = 1, .battery_temp = 23, .vbatt_modify_reply_avail = 0, }; static enum power_supply_property msm_power_props[] = { POWER_SUPPLY_PROP_ONLINE, }; static char *msm_power_supplied_to[] = { "battery", }; static int msm_power_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { switch (psp) { case POWER_SUPPLY_PROP_ONLINE: if (psy->type == POWER_SUPPLY_TYPE_MAINS) { val->intval = msm_batt_info.current_chg_source & AC_CHG ? 1 : 0; } if (psy->type == POWER_SUPPLY_TYPE_USB) { val->intval = msm_batt_info.current_chg_source & USB_CHG ? 
1 : 0; } break; default: return -EINVAL; } return 0; } static struct power_supply msm_psy_ac = { .name = "ac", .type = POWER_SUPPLY_TYPE_MAINS, .supplied_to = msm_power_supplied_to, .num_supplicants = ARRAY_SIZE(msm_power_supplied_to), .properties = msm_power_props, .num_properties = ARRAY_SIZE(msm_power_props), .get_property = msm_power_get_property, }; static struct power_supply msm_psy_usb = { .name = "usb", .type = POWER_SUPPLY_TYPE_USB, .supplied_to = msm_power_supplied_to, .num_supplicants = ARRAY_SIZE(msm_power_supplied_to), .properties = msm_power_props, .num_properties = ARRAY_SIZE(msm_power_props), .get_property = msm_power_get_property, }; static enum power_supply_property msm_batt_power_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_TECHNOLOGY, POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_CAPACITY, }; static int msm_batt_power_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { switch (psp) { case POWER_SUPPLY_PROP_STATUS: val->intval = msm_batt_info.batt_status; break; case POWER_SUPPLY_PROP_HEALTH: val->intval = msm_batt_info.batt_health; break; case POWER_SUPPLY_PROP_PRESENT: val->intval = msm_batt_info.batt_valid; break; case POWER_SUPPLY_PROP_TECHNOLOGY: val->intval = msm_batt_info.batt_technology; break; case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN: val->intval = msm_batt_info.voltage_max_design; break; case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: val->intval = msm_batt_info.voltage_min_design; break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: val->intval = msm_batt_info.battery_voltage; break; case POWER_SUPPLY_PROP_CAPACITY: val->intval = msm_batt_info.batt_capacity; break; default: return -EINVAL; } return 0; } static struct power_supply msm_psy_batt = { .name = "battery", .type = POWER_SUPPLY_TYPE_BATTERY, .properties = msm_batt_power_props, .num_properties = ARRAY_SIZE(msm_batt_power_props), .get_property = msm_batt_power_get_property, }; #ifndef CONFIG_BATTERY_MSM_FAKE struct msm_batt_get_volt_ret_data { u32 battery_voltage; }; static int msm_batt_get_volt_ret_func(struct msm_rpc_client *batt_client, void *buf, void *data) { struct msm_batt_get_volt_ret_data *data_ptr, *buf_ptr; data_ptr = (struct msm_batt_get_volt_ret_data *)data; buf_ptr = (struct msm_batt_get_volt_ret_data *)buf; data_ptr->battery_voltage = be32_to_cpu(buf_ptr->battery_voltage); return 0; } static u32 msm_batt_get_vbatt_voltage(void) { int rc; struct msm_batt_get_volt_ret_data rep; rc = msm_rpc_client_req(msm_batt_info.batt_client, BATTERY_READ_MV_PROC, NULL, NULL, msm_batt_get_volt_ret_func, &rep, msecs_to_jiffies(BATT_RPC_TIMEOUT)); if (rc < 0) { pr_err("%s: FAIL: vbatt get volt. rc=%d\n", __func__, rc); return 0; } return rep.battery_voltage; } #define be32_to_cpu_self(v) (v = be32_to_cpu(v)) static int msm_batt_get_batt_chg_status(void) { int rc; struct rpc_req_batt_chg { struct rpc_request_hdr hdr; u32 more_data; } req_batt_chg; struct rpc_reply_batt_chg_v1 *v1p; req_batt_chg.more_data = cpu_to_be32(1); memset(&rep_batt_chg, 0, sizeof(rep_batt_chg)); v1p = &rep_batt_chg.v1; rc = msm_rpc_call_reply(msm_batt_info.chg_ep, ONCRPC_CHG_GET_GENERAL_STATUS_PROC, &req_batt_chg, sizeof(req_batt_chg), &rep_batt_chg, sizeof(rep_batt_chg), msecs_to_jiffies(BATT_RPC_TIMEOUT)); if (rc < 0) { pr_err("%s: ERROR. msm_rpc_call_reply failed! 
proc=%d rc=%d\n", __func__, ONCRPC_CHG_GET_GENERAL_STATUS_PROC, rc); return rc; } else if (be32_to_cpu(v1p->more_data)) { be32_to_cpu_self(v1p->charger_status); be32_to_cpu_self(v1p->charger_type); be32_to_cpu_self(v1p->battery_status); be32_to_cpu_self(v1p->battery_level); be32_to_cpu_self(v1p->battery_voltage); be32_to_cpu_self(v1p->battery_temp); } else { pr_err("%s: No battery/charger data in RPC reply\n", __func__); return -EIO; } return 0; } static void msm_batt_update_psy_status(void) { static u32 unnecessary_event_count; u32 charger_status; u32 charger_type; u32 battery_status; u32 battery_level; u32 battery_voltage; u32 battery_temp; struct power_supply *supp; if (msm_batt_get_batt_chg_status()) return; charger_status = rep_batt_chg.v1.charger_status; charger_type = rep_batt_chg.v1.charger_type; battery_status = rep_batt_chg.v1.battery_status; battery_level = rep_batt_chg.v1.battery_level; battery_voltage = rep_batt_chg.v1.battery_voltage; battery_temp = rep_batt_chg.v1.battery_temp; /* Make correction for battery status */ if (battery_status == BATTERY_STATUS_INVALID_v1) { if (msm_batt_info.chg_api_version < CHG_RPC_VER_3_1) battery_status = BATTERY_STATUS_INVALID; } if (charger_status == msm_batt_info.charger_status && charger_type == msm_batt_info.charger_type && battery_status == msm_batt_info.battery_status && battery_level == msm_batt_info.battery_level && battery_voltage == msm_batt_info.battery_voltage && battery_temp == msm_batt_info.battery_temp) { /* Got unnecessary event from Modem PMIC VBATT driver. * Nothing changed in Battery or charger status. */ unnecessary_event_count++; if ((unnecessary_event_count % 20) == 1) DBG_LIMIT("BATT: same event count = %u\n", unnecessary_event_count); return; } unnecessary_event_count = 0; DBG_LIMIT("BATT: rcvd: %d, %d, %d, %d; %d, %d\n", charger_status, charger_type, battery_status, battery_level, battery_voltage, battery_temp); if (battery_status == BATTERY_STATUS_INVALID && battery_level != BATTERY_LEVEL_INVALID) { DBG_LIMIT("BATT: change status(%d) to (%d) for level=%d\n", battery_status, BATTERY_STATUS_GOOD, battery_level); battery_status = BATTERY_STATUS_GOOD; } if (msm_batt_info.charger_type != charger_type) { if (charger_type == CHARGER_TYPE_USB_WALL || charger_type == CHARGER_TYPE_USB_PC || charger_type == CHARGER_TYPE_USB_CARKIT) { DBG_LIMIT("BATT: USB charger plugged in\n"); msm_batt_info.current_chg_source = USB_CHG; supp = &msm_psy_usb; } else if (charger_type == CHARGER_TYPE_WALL) { DBG_LIMIT("BATT: AC Wall changer plugged in\n"); msm_batt_info.current_chg_source = AC_CHG; supp = &msm_psy_ac; } else { if (msm_batt_info.current_chg_source & AC_CHG) DBG_LIMIT("BATT: AC Wall charger removed\n"); else if (msm_batt_info.current_chg_source & USB_CHG) DBG_LIMIT("BATT: USB charger removed\n"); else DBG_LIMIT("BATT: No charger present\n"); msm_batt_info.current_chg_source = 0; supp = &msm_psy_batt; /* Correct charger status */ if (charger_status != CHARGER_STATUS_INVALID) { DBG_LIMIT("BATT: No charging!\n"); charger_status = CHARGER_STATUS_INVALID; msm_batt_info.batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING; } } } else supp = NULL; if (msm_batt_info.charger_status != charger_status) { if (charger_status == CHARGER_STATUS_GOOD || charger_status == CHARGER_STATUS_WEAK) { if (msm_batt_info.current_chg_source) { DBG_LIMIT("BATT: Charging.\n"); msm_batt_info.batt_status = POWER_SUPPLY_STATUS_CHARGING; /* Correct when supp==NULL */ if (msm_batt_info.current_chg_source & AC_CHG) supp = &msm_psy_ac; else supp = &msm_psy_usb; } } else { 
DBG_LIMIT("BATT: No charging.\n"); msm_batt_info.batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING; supp = &msm_psy_batt; } } else { /* Correct charger status */ if (charger_type != CHARGER_TYPE_INVALID && charger_status == CHARGER_STATUS_GOOD) { DBG_LIMIT("BATT: In charging\n"); msm_batt_info.batt_status = POWER_SUPPLY_STATUS_CHARGING; } } /* Correct battery voltage and status */ if (!battery_voltage) { if (charger_status == CHARGER_STATUS_INVALID) { DBG_LIMIT("BATT: Read VBATT\n"); battery_voltage = msm_batt_get_vbatt_voltage(); } else /* Use previous */ battery_voltage = msm_batt_info.battery_voltage; } if (battery_status == BATTERY_STATUS_INVALID) { if (battery_voltage >= msm_batt_info.voltage_min_design && battery_voltage <= msm_batt_info.voltage_max_design) { DBG_LIMIT("BATT: Battery valid\n"); msm_batt_info.batt_valid = 1; battery_status = BATTERY_STATUS_GOOD; } } if (msm_batt_info.battery_status != battery_status) { if (battery_status != BATTERY_STATUS_INVALID) { msm_batt_info.batt_valid = 1; if (battery_status == BATTERY_STATUS_BAD) { DBG_LIMIT("BATT: Battery bad.\n"); msm_batt_info.batt_health = POWER_SUPPLY_HEALTH_DEAD; } else if (battery_status == BATTERY_STATUS_BAD_TEMP) { DBG_LIMIT("BATT: Battery overheat.\n"); msm_batt_info.batt_health = POWER_SUPPLY_HEALTH_OVERHEAT; } else { DBG_LIMIT("BATT: Battery good.\n"); msm_batt_info.batt_health = POWER_SUPPLY_HEALTH_GOOD; } } else { msm_batt_info.batt_valid = 0; DBG_LIMIT("BATT: Battery invalid.\n"); msm_batt_info.batt_health = POWER_SUPPLY_HEALTH_UNKNOWN; } if (msm_batt_info.batt_status != POWER_SUPPLY_STATUS_CHARGING) { if (battery_status == BATTERY_STATUS_INVALID) { DBG_LIMIT("BATT: Battery -> unknown\n"); msm_batt_info.batt_status = POWER_SUPPLY_STATUS_UNKNOWN; } else { DBG_LIMIT("BATT: Battery -> discharging\n"); msm_batt_info.batt_status = POWER_SUPPLY_STATUS_DISCHARGING; } } if (!supp) { if (msm_batt_info.current_chg_source) { if (msm_batt_info.current_chg_source & AC_CHG) supp = &msm_psy_ac; else supp = &msm_psy_usb; } else supp = &msm_psy_batt; } } msm_batt_info.charger_status = charger_status; msm_batt_info.charger_type = charger_type; msm_batt_info.battery_status = battery_status; msm_batt_info.battery_level = battery_level; msm_batt_info.battery_temp = battery_temp; if (msm_batt_info.battery_voltage != battery_voltage) { msm_batt_info.battery_voltage = battery_voltage; msm_batt_info.batt_capacity = msm_batt_info.calculate_capacity(battery_voltage); DBG_LIMIT("BATT: voltage = %u mV [capacity = %d%%]\n", battery_voltage, msm_batt_info.batt_capacity); if (!supp) supp = msm_batt_info.current_ps; } if (supp) { msm_batt_info.current_ps = supp; DBG_LIMIT("BATT: Supply = %s\n", supp->name); power_supply_changed(supp); } } #ifdef CONFIG_HAS_EARLYSUSPEND struct batt_modify_client_req { u32 client_handle; /* The voltage at which callback (CB) should be called. */ u32 desired_batt_voltage; /* The direction when the CB should be called. */ u32 voltage_direction; /* The registered callback to be called when voltage and * direction specs are met. 
*/ u32 batt_cb_id; /* The call back data */ u32 cb_data; }; struct batt_modify_client_rep { u32 result; }; static int msm_batt_modify_client_arg_func(struct msm_rpc_client *batt_client, void *buf, void *data) { struct batt_modify_client_req *batt_modify_client_req = (struct batt_modify_client_req *)data; u32 *req = (u32 *)buf; int size = 0; *req = cpu_to_be32(batt_modify_client_req->client_handle); size += sizeof(u32); req++; *req = cpu_to_be32(batt_modify_client_req->desired_batt_voltage); size += sizeof(u32); req++; *req = cpu_to_be32(batt_modify_client_req->voltage_direction); size += sizeof(u32); req++; *req = cpu_to_be32(batt_modify_client_req->batt_cb_id); size += sizeof(u32); req++; *req = cpu_to_be32(batt_modify_client_req->cb_data); size += sizeof(u32); return size; } static int msm_batt_modify_client_ret_func(struct msm_rpc_client *batt_client, void *buf, void *data) { struct batt_modify_client_rep *data_ptr, *buf_ptr; data_ptr = (struct batt_modify_client_rep *)data; buf_ptr = (struct batt_modify_client_rep *)buf; data_ptr->result = be32_to_cpu(buf_ptr->result); return 0; } static int msm_batt_modify_client(u32 client_handle, u32 desired_batt_voltage, u32 voltage_direction, u32 batt_cb_id, u32 cb_data) { int rc; struct batt_modify_client_req req; struct batt_modify_client_rep rep; req.client_handle = client_handle; req.desired_batt_voltage = desired_batt_voltage; req.voltage_direction = voltage_direction; req.batt_cb_id = batt_cb_id; req.cb_data = cb_data; rc = msm_rpc_client_req(msm_batt_info.batt_client, BATTERY_MODIFY_CLIENT_PROC, msm_batt_modify_client_arg_func, &req, msm_batt_modify_client_ret_func, &rep, msecs_to_jiffies(BATT_RPC_TIMEOUT)); if (rc < 0) { pr_err("%s: ERROR. failed to modify Vbatt client\n", __func__); return rc; } if (rep.result != BATTERY_MODIFICATION_SUCCESSFUL) { pr_err("%s: ERROR. modify client failed. result = %u\n", __func__, rep.result); return -EIO; } return 0; } void msm_batt_early_suspend(struct early_suspend *h) { int rc; pr_debug("%s: enter\n", __func__); if (msm_batt_info.batt_handle != INVALID_BATT_HANDLE) { rc = msm_batt_modify_client(msm_batt_info.batt_handle, BATTERY_LOW, BATTERY_VOLTAGE_BELOW_THIS_LEVEL, BATTERY_CB_ID_LOW_VOL, BATTERY_LOW); if (rc < 0) { pr_err("%s: msm_batt_modify_client. rc=%d\n", __func__, rc); return; } } else { pr_err("%s: ERROR. invalid batt_handle\n", __func__); return; } pr_debug("%s: exit\n", __func__); } void msm_batt_late_resume(struct early_suspend *h) { int rc; pr_debug("%s: enter\n", __func__); if (msm_batt_info.batt_handle != INVALID_BATT_HANDLE) { rc = msm_batt_modify_client(msm_batt_info.batt_handle, BATTERY_LOW, BATTERY_ALL_ACTIVITY, BATTERY_CB_ID_ALL_ACTIV, BATTERY_ALL_ACTIVITY); if (rc < 0) { pr_err("%s: msm_batt_modify_client FAIL rc=%d\n", __func__, rc); return; } } else { pr_err("%s: ERROR. 
invalid batt_handle\n", __func__); return; } msm_batt_update_psy_status(); pr_debug("%s: exit\n", __func__); } #endif struct msm_batt_vbatt_filter_req { u32 batt_handle; u32 enable_filter; u32 vbatt_filter; }; struct msm_batt_vbatt_filter_rep { u32 result; }; static int msm_batt_filter_arg_func(struct msm_rpc_client *batt_client, void *buf, void *data) { struct msm_batt_vbatt_filter_req *vbatt_filter_req = (struct msm_batt_vbatt_filter_req *)data; u32 *req = (u32 *)buf; int size = 0; *req = cpu_to_be32(vbatt_filter_req->batt_handle); size += sizeof(u32); req++; *req = cpu_to_be32(vbatt_filter_req->enable_filter); size += sizeof(u32); req++; *req = cpu_to_be32(vbatt_filter_req->vbatt_filter); size += sizeof(u32); return size; } static int msm_batt_filter_ret_func(struct msm_rpc_client *batt_client, void *buf, void *data) { struct msm_batt_vbatt_filter_rep *data_ptr, *buf_ptr; data_ptr = (struct msm_batt_vbatt_filter_rep *)data; buf_ptr = (struct msm_batt_vbatt_filter_rep *)buf; data_ptr->result = be32_to_cpu(buf_ptr->result); return 0; } static int msm_batt_enable_filter(u32 vbatt_filter) { int rc; struct msm_batt_vbatt_filter_req vbatt_filter_req; struct msm_batt_vbatt_filter_rep vbatt_filter_rep; vbatt_filter_req.batt_handle = msm_batt_info.batt_handle; vbatt_filter_req.enable_filter = 1; vbatt_filter_req.vbatt_filter = vbatt_filter; rc = msm_rpc_client_req(msm_batt_info.batt_client, BATTERY_ENABLE_DISABLE_FILTER_PROC, msm_batt_filter_arg_func, &vbatt_filter_req, msm_batt_filter_ret_func, &vbatt_filter_rep, msecs_to_jiffies(BATT_RPC_TIMEOUT)); if (rc < 0) { pr_err("%s: FAIL: enable vbatt filter. rc=%d\n", __func__, rc); return rc; } if (vbatt_filter_rep.result != BATTERY_DEREGISTRATION_SUCCESSFUL) { pr_err("%s: FAIL: enable vbatt filter: result=%d\n", __func__, vbatt_filter_rep.result); return -EIO; } pr_debug("%s: enable vbatt filter: OK\n", __func__); return rc; } struct batt_client_registration_req { /* The voltage at which callback (CB) should be called. */ u32 desired_batt_voltage; /* The direction when the CB should be called. */ u32 voltage_direction; /* The registered callback to be called when voltage and * direction specs are met. */ u32 batt_cb_id; /* The call back data */ u32 cb_data; u32 more_data; u32 batt_error; }; struct batt_client_registration_req_4_1 { /* The voltage at which callback (CB) should be called. */ u32 desired_batt_voltage; /* The direction when the CB should be called. */ u32 voltage_direction; /* The registered callback to be called when voltage and * direction specs are met. 
*/ u32 batt_cb_id; /* The call back data */ u32 cb_data; u32 batt_error; }; struct batt_client_registration_rep { u32 batt_handle; }; struct batt_client_registration_rep_4_1 { u32 batt_handle; u32 more_data; u32 err; }; static int msm_batt_register_arg_func(struct msm_rpc_client *batt_client, void *buf, void *data) { struct batt_client_registration_req *batt_reg_req = (struct batt_client_registration_req *)data; u32 *req = (u32 *)buf; int size = 0; if (msm_batt_info.batt_api_version == BATTERY_RPC_VER_4_1) { *req = cpu_to_be32(batt_reg_req->desired_batt_voltage); size += sizeof(u32); req++; *req = cpu_to_be32(batt_reg_req->voltage_direction); size += sizeof(u32); req++; *req = cpu_to_be32(batt_reg_req->batt_cb_id); size += sizeof(u32); req++; *req = cpu_to_be32(batt_reg_req->cb_data); size += sizeof(u32); req++; *req = cpu_to_be32(batt_reg_req->batt_error); size += sizeof(u32); return size; } else { *req = cpu_to_be32(batt_reg_req->desired_batt_voltage); size += sizeof(u32); req++; *req = cpu_to_be32(batt_reg_req->voltage_direction); size += sizeof(u32); req++; *req = cpu_to_be32(batt_reg_req->batt_cb_id); size += sizeof(u32); req++; *req = cpu_to_be32(batt_reg_req->cb_data); size += sizeof(u32); req++; *req = cpu_to_be32(batt_reg_req->more_data); size += sizeof(u32); req++; *req = cpu_to_be32(batt_reg_req->batt_error); size += sizeof(u32); return size; } } static int msm_batt_register_ret_func(struct msm_rpc_client *batt_client, void *buf, void *data) { struct batt_client_registration_rep *data_ptr, *buf_ptr; struct batt_client_registration_rep_4_1 *data_ptr_4_1, *buf_ptr_4_1; if (msm_batt_info.batt_api_version == BATTERY_RPC_VER_4_1) { data_ptr_4_1 = (struct batt_client_registration_rep_4_1 *)data; buf_ptr_4_1 = (struct batt_client_registration_rep_4_1 *)buf; data_ptr_4_1->batt_handle = be32_to_cpu(buf_ptr_4_1->batt_handle); data_ptr_4_1->more_data = be32_to_cpu(buf_ptr_4_1->more_data); data_ptr_4_1->err = be32_to_cpu(buf_ptr_4_1->err); return 0; } else { data_ptr = (struct batt_client_registration_rep *)data; buf_ptr = (struct batt_client_registration_rep *)buf; data_ptr->batt_handle = be32_to_cpu(buf_ptr->batt_handle); return 0; } } static int msm_batt_register(u32 desired_batt_voltage, u32 voltage_direction, u32 batt_cb_id, u32 cb_data) { struct batt_client_registration_req batt_reg_req; struct batt_client_registration_req_4_1 batt_reg_req_4_1; struct batt_client_registration_rep batt_reg_rep; struct batt_client_registration_rep_4_1 batt_reg_rep_4_1; void *request; void *reply; int rc; if (msm_batt_info.batt_api_version == BATTERY_RPC_VER_4_1) { batt_reg_req_4_1.desired_batt_voltage = desired_batt_voltage; batt_reg_req_4_1.voltage_direction = voltage_direction; batt_reg_req_4_1.batt_cb_id = batt_cb_id; batt_reg_req_4_1.cb_data = cb_data; batt_reg_req_4_1.batt_error = 1; request = &batt_reg_req_4_1; } else { batt_reg_req.desired_batt_voltage = desired_batt_voltage; batt_reg_req.voltage_direction = voltage_direction; batt_reg_req.batt_cb_id = batt_cb_id; batt_reg_req.cb_data = cb_data; batt_reg_req.more_data = 1; batt_reg_req.batt_error = 0; request = &batt_reg_req; } if (msm_batt_info.batt_api_version == BATTERY_RPC_VER_4_1) reply = &batt_reg_rep_4_1; else reply = &batt_reg_rep; rc = msm_rpc_client_req(msm_batt_info.batt_client, BATTERY_REGISTER_PROC, msm_batt_register_arg_func, request, msm_batt_register_ret_func, reply, msecs_to_jiffies(BATT_RPC_TIMEOUT)); if (rc < 0) { pr_err("%s: FAIL: vbatt register. 
rc=%d\n", __func__, rc); return rc; } if (msm_batt_info.batt_api_version == BATTERY_RPC_VER_4_1) { if (batt_reg_rep_4_1.more_data != 0 && batt_reg_rep_4_1.err != BATTERY_REGISTRATION_SUCCESSFUL) { pr_err("%s: vBatt Registration Failed proc_num=%d\n" , __func__, BATTERY_REGISTER_PROC); return -EIO; } msm_batt_info.batt_handle = batt_reg_rep_4_1.batt_handle; } else msm_batt_info.batt_handle = batt_reg_rep.batt_handle; return 0; } struct batt_client_deregister_req { u32 batt_handle; }; struct batt_client_deregister_rep { u32 batt_error; }; static int msm_batt_deregister_arg_func(struct msm_rpc_client *batt_client, void *buf, void *data) { struct batt_client_deregister_req *deregister_req = (struct batt_client_deregister_req *)data; u32 *req = (u32 *)buf; int size = 0; *req = cpu_to_be32(deregister_req->batt_handle); size += sizeof(u32); return size; } static int msm_batt_deregister_ret_func(struct msm_rpc_client *batt_client, void *buf, void *data) { struct batt_client_deregister_rep *data_ptr, *buf_ptr; data_ptr = (struct batt_client_deregister_rep *)data; buf_ptr = (struct batt_client_deregister_rep *)buf; data_ptr->batt_error = be32_to_cpu(buf_ptr->batt_error); return 0; } static int msm_batt_deregister(u32 batt_handle) { int rc; struct batt_client_deregister_req req; struct batt_client_deregister_rep rep; req.batt_handle = batt_handle; rc = msm_rpc_client_req(msm_batt_info.batt_client, BATTERY_DEREGISTER_CLIENT_PROC, msm_batt_deregister_arg_func, &req, msm_batt_deregister_ret_func, &rep, msecs_to_jiffies(BATT_RPC_TIMEOUT)); if (rc < 0) { pr_err("%s: FAIL: vbatt deregister. rc=%d\n", __func__, rc); return rc; } if (rep.batt_error != BATTERY_DEREGISTRATION_SUCCESSFUL) { pr_err("%s: vbatt deregistration FAIL. error=%d, handle=%d\n", __func__, rep.batt_error, batt_handle); return -EIO; } return 0; } #endif /* CONFIG_BATTERY_MSM_FAKE */ static int msm_batt_cleanup(void) { int rc = 0; #ifndef CONFIG_BATTERY_MSM_FAKE if (msm_batt_info.batt_handle != INVALID_BATT_HANDLE) { rc = msm_batt_deregister(msm_batt_info.batt_handle); if (rc < 0) pr_err("%s: FAIL: msm_batt_deregister. rc=%d\n", __func__, rc); } msm_batt_info.batt_handle = INVALID_BATT_HANDLE; if (msm_batt_info.batt_client) msm_rpc_unregister_client(msm_batt_info.batt_client); #endif /* CONFIG_BATTERY_MSM_FAKE */ if (msm_batt_info.msm_psy_ac) power_supply_unregister(msm_batt_info.msm_psy_ac); if (msm_batt_info.msm_psy_usb) power_supply_unregister(msm_batt_info.msm_psy_usb); if (msm_batt_info.msm_psy_batt) power_supply_unregister(msm_batt_info.msm_psy_batt); #ifndef CONFIG_BATTERY_MSM_FAKE if (msm_batt_info.chg_ep) { rc = msm_rpc_close(msm_batt_info.chg_ep); if (rc < 0) { pr_err("%s: FAIL. msm_rpc_close(chg_ep). 
rc=%d\n", __func__, rc); } } #ifdef CONFIG_HAS_EARLYSUSPEND if (msm_batt_info.early_suspend.suspend == msm_batt_early_suspend) unregister_early_suspend(&msm_batt_info.early_suspend); #endif #endif return rc; } static u32 msm_batt_capacity(u32 current_voltage) { u32 low_voltage = msm_batt_info.voltage_min_design; u32 high_voltage = msm_batt_info.voltage_max_design; if (current_voltage <= low_voltage) return 0; else if (current_voltage >= high_voltage) return 100; else return (current_voltage - low_voltage) * 100 / (high_voltage - low_voltage); } #ifndef CONFIG_BATTERY_MSM_FAKE int msm_batt_get_charger_api_version(void) { int rc ; struct rpc_reply_hdr *reply; struct rpc_req_chg_api_ver { struct rpc_request_hdr hdr; u32 more_data; } req_chg_api_ver; struct rpc_rep_chg_api_ver { struct rpc_reply_hdr hdr; u32 num_of_chg_api_versions; u32 *chg_api_versions; }; u32 num_of_versions; struct rpc_rep_chg_api_ver *rep_chg_api_ver; req_chg_api_ver.more_data = cpu_to_be32(1); msm_rpc_setup_req(&req_chg_api_ver.hdr, CHG_RPC_PROG, CHG_RPC_VER_1_1, ONCRPC_CHARGER_API_VERSIONS_PROC); rc = msm_rpc_write(msm_batt_info.chg_ep, &req_chg_api_ver, sizeof(req_chg_api_ver)); if (rc < 0) { pr_err("%s: FAIL: msm_rpc_write. proc=0x%08x, rc=%d\n", __func__, ONCRPC_CHARGER_API_VERSIONS_PROC, rc); return rc; } for (;;) { rc = msm_rpc_read(msm_batt_info.chg_ep, (void *) &reply, -1, BATT_RPC_TIMEOUT); if (rc < 0) return rc; if (rc < RPC_REQ_REPLY_COMMON_HEADER_SIZE) { pr_err("%s: LENGTH ERR: msm_rpc_read. rc=%d (<%d)\n", __func__, rc, RPC_REQ_REPLY_COMMON_HEADER_SIZE); rc = -EIO; break; } /* we should not get RPC REQ or call packets -- ignore them */ if (reply->type == RPC_TYPE_REQ) { pr_err("%s: TYPE ERR: type=%d (!=%d)\n", __func__, reply->type, RPC_TYPE_REQ); kfree(reply); continue; } /* If an earlier call timed out, we could get the (no * longer wanted) reply for it. Ignore replies that * we don't expect */ if (reply->xid != req_chg_api_ver.hdr.xid) { pr_err("%s: XID ERR: xid=%d (!=%d)\n", __func__, reply->xid, req_chg_api_ver.hdr.xid); kfree(reply); continue; } if (reply->reply_stat != RPCMSG_REPLYSTAT_ACCEPTED) { rc = -EPERM; break; } if (reply->data.acc_hdr.accept_stat != RPC_ACCEPTSTAT_SUCCESS) { rc = -EINVAL; break; } rep_chg_api_ver = (struct rpc_rep_chg_api_ver *)reply; num_of_versions = be32_to_cpu(rep_chg_api_ver->num_of_chg_api_versions); rep_chg_api_ver->chg_api_versions = (u32 *) ((u8 *) reply + sizeof(struct rpc_reply_hdr) + sizeof(rep_chg_api_ver->num_of_chg_api_versions)); rc = be32_to_cpu( rep_chg_api_ver->chg_api_versions[num_of_versions - 1]); pr_debug("%s: num_of_chg_api_versions = %u. " "The chg api version = 0x%08x\n", __func__, num_of_versions, rc); break; } kfree(reply); return rc; } static int msm_batt_cb_func(struct msm_rpc_client *client, void *buffer, int in_size) { int rc = 0; struct rpc_request_hdr *req; u32 procedure; u32 accept_status; req = (struct rpc_request_hdr *)buffer; procedure = be32_to_cpu(req->procedure); switch (procedure) { case BATTERY_CB_TYPE_PROC: accept_status = RPC_ACCEPTSTAT_SUCCESS; break; default: accept_status = RPC_ACCEPTSTAT_PROC_UNAVAIL; pr_err("%s: ERROR. procedure (%d) not supported\n", __func__, procedure); break; } msm_rpc_start_accepted_reply(msm_batt_info.batt_client, be32_to_cpu(req->xid), accept_status); rc = msm_rpc_send_accepted_reply(msm_batt_info.batt_client, 0); if (rc) pr_err("%s: FAIL: sending reply. 
rc=%d\n", __func__, rc); if (accept_status == RPC_ACCEPTSTAT_SUCCESS) msm_batt_update_psy_status(); return rc; } #endif /* CONFIG_BATTERY_MSM_FAKE */ static int __devinit msm_batt_probe(struct platform_device *pdev) { int rc; struct msm_psy_batt_pdata *pdata = pdev->dev.platform_data; if (pdev->id != -1) { dev_err(&pdev->dev, "%s: MSM chipsets Can only support one" " battery ", __func__); return -EINVAL; } #ifndef CONFIG_BATTERY_MSM_FAKE if (pdata->avail_chg_sources & AC_CHG) { #else { #endif rc = power_supply_register(&pdev->dev, &msm_psy_ac); if (rc < 0) { dev_err(&pdev->dev, "%s: power_supply_register failed" " rc = %d\n", __func__, rc); msm_batt_cleanup(); return rc; } msm_batt_info.msm_psy_ac = &msm_psy_ac; msm_batt_info.avail_chg_sources |= AC_CHG; } if (pdata->avail_chg_sources & USB_CHG) { rc = power_supply_register(&pdev->dev, &msm_psy_usb); if (rc < 0) { dev_err(&pdev->dev, "%s: power_supply_register failed" " rc = %d\n", __func__, rc); msm_batt_cleanup(); return rc; } msm_batt_info.msm_psy_usb = &msm_psy_usb; msm_batt_info.avail_chg_sources |= USB_CHG; } if (!msm_batt_info.msm_psy_ac && !msm_batt_info.msm_psy_usb) { dev_err(&pdev->dev, "%s: No external Power supply(AC or USB)" "is avilable\n", __func__); msm_batt_cleanup(); return -ENODEV; } msm_batt_info.voltage_max_design = pdata->voltage_max_design; msm_batt_info.voltage_min_design = pdata->voltage_min_design; msm_batt_info.batt_technology = pdata->batt_technology; msm_batt_info.calculate_capacity = pdata->calculate_capacity; if (!msm_batt_info.voltage_min_design) msm_batt_info.voltage_min_design = BATTERY_LOW; if (!msm_batt_info.voltage_max_design) msm_batt_info.voltage_max_design = BATTERY_HIGH; if (msm_batt_info.batt_technology == POWER_SUPPLY_TECHNOLOGY_UNKNOWN) msm_batt_info.batt_technology = POWER_SUPPLY_TECHNOLOGY_LION; if (!msm_batt_info.calculate_capacity) msm_batt_info.calculate_capacity = msm_batt_capacity; rc = power_supply_register(&pdev->dev, &msm_psy_batt); if (rc < 0) { dev_err(&pdev->dev, "%s: power_supply_register failed" " rc=%d\n", __func__, rc); msm_batt_cleanup(); return rc; } msm_batt_info.msm_psy_batt = &msm_psy_batt; #ifndef CONFIG_BATTERY_MSM_FAKE rc = msm_batt_register(BATTERY_LOW, BATTERY_ALL_ACTIVITY, BATTERY_CB_ID_ALL_ACTIV, BATTERY_ALL_ACTIVITY); if (rc < 0) { dev_err(&pdev->dev, "%s: msm_batt_register failed rc = %d\n", __func__, rc); msm_batt_cleanup(); return rc; } rc = msm_batt_enable_filter(VBATT_FILTER); if (rc < 0) { dev_err(&pdev->dev, "%s: msm_batt_enable_filter failed rc = %d\n", __func__, rc); msm_batt_cleanup(); return rc; } #ifdef CONFIG_HAS_EARLYSUSPEND msm_batt_info.early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN; msm_batt_info.early_suspend.suspend = msm_batt_early_suspend; msm_batt_info.early_suspend.resume = msm_batt_late_resume; register_early_suspend(&msm_batt_info.early_suspend); #endif msm_batt_update_psy_status(); #else power_supply_changed(&msm_psy_ac); #endif /* CONFIG_BATTERY_MSM_FAKE */ return 0; } static int __devexit msm_batt_remove(struct platform_device *pdev) { int rc; rc = msm_batt_cleanup(); if (rc < 0) { dev_err(&pdev->dev, "%s: msm_batt_cleanup failed rc=%d\n", __func__, rc); return rc; } return 0; } static struct platform_driver msm_batt_driver = { .probe = msm_batt_probe, .remove = __devexit_p(msm_batt_remove), .driver = { .name = "msm-battery", .owner = THIS_MODULE, }, }; static int __devinit msm_batt_init_rpc(void) { int rc; #ifdef CONFIG_BATTERY_MSM_FAKE pr_info("Faking MSM battery\n"); #else msm_batt_info.chg_ep = 
msm_rpc_connect_compatible(CHG_RPC_PROG, CHG_RPC_VER_4_1, 0); msm_batt_info.chg_api_version = CHG_RPC_VER_4_1; if (msm_batt_info.chg_ep == NULL) { pr_err("%s: rpc connect CHG_RPC_PROG = NULL\n", __func__); return -ENODEV; } if (IS_ERR(msm_batt_info.chg_ep)) { msm_batt_info.chg_ep = msm_rpc_connect_compatible( CHG_RPC_PROG, CHG_RPC_VER_3_1, 0); msm_batt_info.chg_api_version = CHG_RPC_VER_3_1; } if (IS_ERR(msm_batt_info.chg_ep)) { msm_batt_info.chg_ep = msm_rpc_connect_compatible( CHG_RPC_PROG, CHG_RPC_VER_1_1, 0); msm_batt_info.chg_api_version = CHG_RPC_VER_1_1; } if (IS_ERR(msm_batt_info.chg_ep)) { msm_batt_info.chg_ep = msm_rpc_connect_compatible( CHG_RPC_PROG, CHG_RPC_VER_1_3, 0); msm_batt_info.chg_api_version = CHG_RPC_VER_1_3; } if (IS_ERR(msm_batt_info.chg_ep)) { msm_batt_info.chg_ep = msm_rpc_connect_compatible( CHG_RPC_PROG, CHG_RPC_VER_2_2, 0); msm_batt_info.chg_api_version = CHG_RPC_VER_2_2; } if (IS_ERR(msm_batt_info.chg_ep)) { rc = PTR_ERR(msm_batt_info.chg_ep); pr_err("%s: FAIL: rpc connect for CHG_RPC_PROG. rc=%d\n", __func__, rc); msm_batt_info.chg_ep = NULL; return rc; } /* Get the real 1.x version */ if (msm_batt_info.chg_api_version == CHG_RPC_VER_1_1) msm_batt_info.chg_api_version = msm_batt_get_charger_api_version(); /* Fall back to 1.1 for default */ if (msm_batt_info.chg_api_version < 0) msm_batt_info.chg_api_version = CHG_RPC_VER_1_1; msm_batt_info.batt_api_version = BATTERY_RPC_VER_4_1; msm_batt_info.batt_client = msm_rpc_register_client("battery", BATTERY_RPC_PROG, BATTERY_RPC_VER_4_1, 1, msm_batt_cb_func); if (msm_batt_info.batt_client == NULL) { pr_err("%s: FAIL: rpc_register_client. batt_client=NULL\n", __func__); return -ENODEV; } if (IS_ERR(msm_batt_info.batt_client)) { msm_batt_info.batt_client = msm_rpc_register_client("battery", BATTERY_RPC_PROG, BATTERY_RPC_VER_1_1, 1, msm_batt_cb_func); msm_batt_info.batt_api_version = BATTERY_RPC_VER_1_1; } if (IS_ERR(msm_batt_info.batt_client)) { msm_batt_info.batt_client = msm_rpc_register_client("battery", BATTERY_RPC_PROG, BATTERY_RPC_VER_2_1, 1, msm_batt_cb_func); msm_batt_info.batt_api_version = BATTERY_RPC_VER_2_1; } if (IS_ERR(msm_batt_info.batt_client)) { msm_batt_info.batt_client = msm_rpc_register_client("battery", BATTERY_RPC_PROG, BATTERY_RPC_VER_5_1, 1, msm_batt_cb_func); msm_batt_info.batt_api_version = BATTERY_RPC_VER_5_1; } if (IS_ERR(msm_batt_info.batt_client)) { rc = PTR_ERR(msm_batt_info.batt_client); pr_err("%s: ERROR: rpc_register_client: rc = %d\n ", __func__, rc); msm_batt_info.batt_client = NULL; return rc; } #endif /* CONFIG_BATTERY_MSM_FAKE */ rc = platform_driver_register(&msm_batt_driver); if (rc < 0) pr_err("%s: FAIL: platform_driver_register. rc = %d\n", __func__, rc); return rc; } static int __init msm_batt_init(void) { int rc; pr_debug("%s: enter\n", __func__); rc = msm_batt_init_rpc(); if (rc < 0) { pr_err("%s: FAIL: msm_batt_init_rpc. rc=%d\n", __func__, rc); msm_batt_cleanup(); return rc; } pr_info("%s: Charger/Battery = 0x%08x/0x%08x (RPC version)\n", __func__, msm_batt_info.chg_api_version, msm_batt_info.batt_api_version); return 0; } static void __exit msm_batt_exit(void) { platform_driver_unregister(&msm_batt_driver); } module_init(msm_batt_init); module_exit(msm_batt_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Kiran Kandi, Qualcomm Innovation Center, Inc."); MODULE_DESCRIPTION("Battery driver for Qualcomm MSM chipsets."); MODULE_VERSION("1.0"); MODULE_ALIAS("platform:msm_battery");
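/*
 * Illustrative sketch, not part of the upstream msm_battery.c above: the
 * driver's default capacity estimate (msm_batt_capacity) is a plain linear
 * interpolation between voltage_min_design and voltage_max_design. With the
 * fallback limits BATTERY_LOW = 3200 mV and BATTERY_HIGH = 4300 mV that the
 * probe routine uses when the platform data supplies none, a reading of
 * 3750 mV works out to (3750 - 3200) * 100 / (4300 - 3200) = 50%. The small
 * standalone, userspace-only program below (hypothetical, for reference)
 * just repeats that arithmetic so the mapping can be checked in isolation.
 */
#include <stdio.h>

static unsigned int example_capacity(unsigned int mv,
				     unsigned int low_mv, unsigned int high_mv)
{
	if (mv <= low_mv)
		return 0;		/* clamp at empty */
	if (mv >= high_mv)
		return 100;		/* clamp at full */
	return (mv - low_mv) * 100 / (high_mv - low_mv);
}

int main(void)
{
	/* expected: "3750 mV -> 50%" with the driver's default limits */
	printf("3750 mV -> %u%%\n", example_capacity(3750, 3200, 4300));
	return 0;
}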
gpl-2.0
LinuxKernelDevelopment/linux-3.17-with_my_system_call
arch/arm/mach-at91/clock.c
295
24033
/* * linux/arch/arm/mach-at91/clock.c * * Copyright (C) 2005 David Brownell * Copyright (C) 2005 Ivan Kokshaysky * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/list.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/of_address.h> #include <linux/clk/at91_pmc.h> #include <mach/hardware.h> #include <mach/cpu.h> #include <asm/proc-fns.h> #include "clock.h" #include "generic.h" void __iomem *at91_pmc_base; EXPORT_SYMBOL_GPL(at91_pmc_base); /* * There's a lot more which can be done with clocks, including cpufreq * integration, slow clock mode support (for system suspend), letting * PLLB be used at other rates (on boards that don't need USB), etc. */ #define clk_is_primary(x) ((x)->type & CLK_TYPE_PRIMARY) #define clk_is_programmable(x) ((x)->type & CLK_TYPE_PROGRAMMABLE) #define clk_is_peripheral(x) ((x)->type & CLK_TYPE_PERIPHERAL) #define clk_is_sys(x) ((x)->type & CLK_TYPE_SYSTEM) /* * Chips have some kind of clocks : group them by functionality */ #define cpu_has_utmi() ( cpu_is_at91sam9rl() \ || cpu_is_at91sam9g45() \ || cpu_is_at91sam9x5() \ || cpu_is_sama5d3()) #define cpu_has_1056M_plla() (cpu_is_sama5d3()) #define cpu_has_800M_plla() ( cpu_is_at91sam9g20() \ || cpu_is_at91sam9g45() \ || cpu_is_at91sam9x5() \ || cpu_is_at91sam9n12()) #define cpu_has_300M_plla() (cpu_is_at91sam9g10()) #define cpu_has_240M_plla() (cpu_is_at91sam9261() \ || cpu_is_at91sam9263() \ || cpu_is_at91sam9rl()) #define cpu_has_210M_plla() (cpu_is_at91sam9260()) #define cpu_has_pllb() (!(cpu_is_at91sam9rl() \ || cpu_is_at91sam9g45() \ || cpu_is_at91sam9x5() \ || cpu_is_sama5d3())) #define cpu_has_upll() (cpu_is_at91sam9g45() \ || cpu_is_at91sam9x5() \ || cpu_is_sama5d3()) /* USB host HS & FS */ #define cpu_has_uhp() (!cpu_is_at91sam9rl()) /* USB device FS only */ #define cpu_has_udpfs() (!(cpu_is_at91sam9rl() \ || cpu_is_at91sam9g45() \ || cpu_is_at91sam9x5() \ || cpu_is_sama5d3())) #define cpu_has_plladiv2() (cpu_is_at91sam9g45() \ || cpu_is_at91sam9x5() \ || cpu_is_at91sam9n12() \ || cpu_is_sama5d3()) #define cpu_has_mdiv3() (cpu_is_at91sam9g45() \ || cpu_is_at91sam9x5() \ || cpu_is_at91sam9n12() \ || cpu_is_sama5d3()) #define cpu_has_alt_prescaler() (cpu_is_at91sam9x5() \ || cpu_is_at91sam9n12() \ || cpu_is_sama5d3()) static LIST_HEAD(clocks); static DEFINE_SPINLOCK(clk_lock); static u32 at91_pllb_usb_init; /* * Four primary clock sources: two crystal oscillators (32K, main), and * two PLLs. PLLA usually runs the master clock; and PLLB must run at * 48 MHz (unless no USB function clocks are needed). The main clock and * both PLLs are turned off to run in "slow clock mode" (system suspend). 
*/ static struct clk clk32k = { .name = "clk32k", .rate_hz = AT91_SLOW_CLOCK, .users = 1, /* always on */ .id = 0, .type = CLK_TYPE_PRIMARY, }; static struct clk main_clk = { .name = "main", .pmc_mask = AT91_PMC_MOSCS, /* in PMC_SR */ .id = 1, .type = CLK_TYPE_PRIMARY, }; static struct clk plla = { .name = "plla", .parent = &main_clk, .pmc_mask = AT91_PMC_LOCKA, /* in PMC_SR */ .id = 2, .type = CLK_TYPE_PRIMARY | CLK_TYPE_PLL, }; static void pllb_mode(struct clk *clk, int is_on) { u32 value; if (is_on) { is_on = AT91_PMC_LOCKB; value = at91_pllb_usb_init; } else value = 0; // REVISIT: Add work-around for AT91RM9200 Errata #26 ? at91_pmc_write(AT91_CKGR_PLLBR, value); do { cpu_relax(); } while ((at91_pmc_read(AT91_PMC_SR) & AT91_PMC_LOCKB) != is_on); } static struct clk pllb = { .name = "pllb", .parent = &main_clk, .pmc_mask = AT91_PMC_LOCKB, /* in PMC_SR */ .mode = pllb_mode, .id = 3, .type = CLK_TYPE_PRIMARY | CLK_TYPE_PLL, }; static void pmc_sys_mode(struct clk *clk, int is_on) { if (is_on) at91_pmc_write(AT91_PMC_SCER, clk->pmc_mask); else at91_pmc_write(AT91_PMC_SCDR, clk->pmc_mask); } static void pmc_uckr_mode(struct clk *clk, int is_on) { unsigned int uckr = at91_pmc_read(AT91_CKGR_UCKR); if (is_on) { is_on = AT91_PMC_LOCKU; at91_pmc_write(AT91_CKGR_UCKR, uckr | clk->pmc_mask); } else at91_pmc_write(AT91_CKGR_UCKR, uckr & ~(clk->pmc_mask)); do { cpu_relax(); } while ((at91_pmc_read(AT91_PMC_SR) & AT91_PMC_LOCKU) != is_on); } /* USB function clocks (PLLB must be 48 MHz) */ static struct clk udpck = { .name = "udpck", .parent = &pllb, .mode = pmc_sys_mode, }; struct clk utmi_clk = { .name = "utmi_clk", .parent = &main_clk, .pmc_mask = AT91_PMC_UPLLEN, /* in CKGR_UCKR */ .mode = pmc_uckr_mode, .type = CLK_TYPE_PLL, }; static struct clk uhpck = { .name = "uhpck", /*.parent = ... we choose parent at runtime */ .mode = pmc_sys_mode, }; /* * The master clock is divided from the CPU clock (by 1-4). It's used for * memory, interfaces to on-chip peripherals, the AIC, and sometimes more * (e.g baud rate generation). It's sourced from one of the primary clocks. */ struct clk mck = { .name = "mck", .pmc_mask = AT91_PMC_MCKRDY, /* in PMC_SR */ }; static void pmc_periph_mode(struct clk *clk, int is_on) { u32 regval = 0; /* * With sama5d3 devices, we are managing clock division so we have to * use the Peripheral Control Register introduced from at91sam9x5 * devices. 
*/ if (cpu_is_sama5d3()) { regval |= AT91_PMC_PCR_CMD; /* write command */ regval |= clk->pid & AT91_PMC_PCR_PID; /* peripheral selection */ regval |= AT91_PMC_PCR_DIV(clk->div); if (is_on) regval |= AT91_PMC_PCR_EN; /* enable clock */ at91_pmc_write(AT91_PMC_PCR, regval); } else { if (is_on) at91_pmc_write(AT91_PMC_PCER, clk->pmc_mask); else at91_pmc_write(AT91_PMC_PCDR, clk->pmc_mask); } } static struct clk __init *at91_css_to_clk(unsigned long css) { switch (css) { case AT91_PMC_CSS_SLOW: return &clk32k; case AT91_PMC_CSS_MAIN: return &main_clk; case AT91_PMC_CSS_PLLA: return &plla; case AT91_PMC_CSS_PLLB: if (cpu_has_upll()) /* CSS_PLLB == CSS_UPLL */ return &utmi_clk; else if (cpu_has_pllb()) return &pllb; break; /* alternate PMC: can use master clock */ case AT91_PMC_CSS_MASTER: return &mck; } return NULL; } static int pmc_prescaler_divider(u32 reg) { if (cpu_has_alt_prescaler()) { return 1 << ((reg & AT91_PMC_ALT_PRES) >> PMC_ALT_PRES_OFFSET); } else { return 1 << ((reg & AT91_PMC_PRES) >> PMC_PRES_OFFSET); } } static void __clk_enable(struct clk *clk) { if (clk->parent) __clk_enable(clk->parent); if (clk->users++ == 0 && clk->mode) clk->mode(clk, 1); } int clk_enable(struct clk *clk) { unsigned long flags; spin_lock_irqsave(&clk_lock, flags); __clk_enable(clk); spin_unlock_irqrestore(&clk_lock, flags); return 0; } EXPORT_SYMBOL(clk_enable); static void __clk_disable(struct clk *clk) { BUG_ON(clk->users == 0); if (--clk->users == 0 && clk->mode) clk->mode(clk, 0); if (clk->parent) __clk_disable(clk->parent); } void clk_disable(struct clk *clk) { unsigned long flags; spin_lock_irqsave(&clk_lock, flags); __clk_disable(clk); spin_unlock_irqrestore(&clk_lock, flags); } EXPORT_SYMBOL(clk_disable); unsigned long clk_get_rate(struct clk *clk) { unsigned long flags; unsigned long rate; spin_lock_irqsave(&clk_lock, flags); for (;;) { rate = clk->rate_hz; if (rate || !clk->parent) break; clk = clk->parent; } spin_unlock_irqrestore(&clk_lock, flags); return rate; } EXPORT_SYMBOL(clk_get_rate); /*------------------------------------------------------------------------*/ /* * For now, only the programmable clocks support reparenting (MCK could * do this too, with care) or rate changing (the PLLs could do this too, * ditto MCK but that's more for cpufreq). Drivers may reparent to get * a better rate match; we don't. */ long clk_round_rate(struct clk *clk, unsigned long rate) { unsigned long flags; unsigned prescale; unsigned long actual; unsigned long prev = ULONG_MAX; if (!clk_is_programmable(clk)) return -EINVAL; spin_lock_irqsave(&clk_lock, flags); actual = clk->parent->rate_hz; for (prescale = 0; prescale < 7; prescale++) { if (actual > rate) prev = actual; if (actual && actual <= rate) { if ((prev - rate) < (rate - actual)) { actual = prev; prescale--; } break; } actual >>= 1; } spin_unlock_irqrestore(&clk_lock, flags); return (prescale < 7) ? 
actual : -ENOENT; } EXPORT_SYMBOL(clk_round_rate); int clk_set_rate(struct clk *clk, unsigned long rate) { unsigned long flags; unsigned prescale; unsigned long prescale_offset, css_mask; unsigned long actual; if (!clk_is_programmable(clk)) return -EINVAL; if (clk->users) return -EBUSY; if (cpu_has_alt_prescaler()) { prescale_offset = PMC_ALT_PRES_OFFSET; css_mask = AT91_PMC_ALT_PCKR_CSS; } else { prescale_offset = PMC_PRES_OFFSET; css_mask = AT91_PMC_CSS; } spin_lock_irqsave(&clk_lock, flags); actual = clk->parent->rate_hz; for (prescale = 0; prescale < 7; prescale++) { if (actual && actual <= rate) { u32 pckr; pckr = at91_pmc_read(AT91_PMC_PCKR(clk->id)); pckr &= css_mask; /* keep clock selection */ pckr |= prescale << prescale_offset; at91_pmc_write(AT91_PMC_PCKR(clk->id), pckr); clk->rate_hz = actual; break; } actual >>= 1; } spin_unlock_irqrestore(&clk_lock, flags); return (prescale < 7) ? actual : -ENOENT; } EXPORT_SYMBOL(clk_set_rate); struct clk *clk_get_parent(struct clk *clk) { return clk->parent; } EXPORT_SYMBOL(clk_get_parent); int clk_set_parent(struct clk *clk, struct clk *parent) { unsigned long flags; if (clk->users) return -EBUSY; if (!clk_is_primary(parent) || !clk_is_programmable(clk)) return -EINVAL; if (cpu_is_at91sam9rl() && parent->id == AT91_PMC_CSS_PLLB) return -EINVAL; spin_lock_irqsave(&clk_lock, flags); clk->rate_hz = parent->rate_hz; clk->parent = parent; at91_pmc_write(AT91_PMC_PCKR(clk->id), parent->id); spin_unlock_irqrestore(&clk_lock, flags); return 0; } EXPORT_SYMBOL(clk_set_parent); /* establish PCK0..PCKN parentage and rate */ static void __init init_programmable_clock(struct clk *clk) { struct clk *parent; u32 pckr; unsigned int css_mask; if (cpu_has_alt_prescaler()) css_mask = AT91_PMC_ALT_PCKR_CSS; else css_mask = AT91_PMC_CSS; pckr = at91_pmc_read(AT91_PMC_PCKR(clk->id)); parent = at91_css_to_clk(pckr & css_mask); clk->parent = parent; clk->rate_hz = parent->rate_hz / pmc_prescaler_divider(pckr); } /*------------------------------------------------------------------------*/ #ifdef CONFIG_DEBUG_FS static int at91_clk_show(struct seq_file *s, void *unused) { u32 scsr, pcsr, pcsr1 = 0, uckr = 0, sr; struct clk *clk; scsr = at91_pmc_read(AT91_PMC_SCSR); pcsr = at91_pmc_read(AT91_PMC_PCSR); if (cpu_is_sama5d3()) pcsr1 = at91_pmc_read(AT91_PMC_PCSR1); sr = at91_pmc_read(AT91_PMC_SR); seq_printf(s, "SCSR = %8x\n", scsr); seq_printf(s, "PCSR = %8x\n", pcsr); if (cpu_is_sama5d3()) seq_printf(s, "PCSR1 = %8x\n", pcsr1); seq_printf(s, "MOR = %8x\n", at91_pmc_read(AT91_CKGR_MOR)); seq_printf(s, "MCFR = %8x\n", at91_pmc_read(AT91_CKGR_MCFR)); seq_printf(s, "PLLA = %8x\n", at91_pmc_read(AT91_CKGR_PLLAR)); if (cpu_has_pllb()) seq_printf(s, "PLLB = %8x\n", at91_pmc_read(AT91_CKGR_PLLBR)); if (cpu_has_utmi()) { uckr = at91_pmc_read(AT91_CKGR_UCKR); seq_printf(s, "UCKR = %8x\n", uckr); } seq_printf(s, "MCKR = %8x\n", at91_pmc_read(AT91_PMC_MCKR)); if (cpu_has_upll() || cpu_is_at91sam9n12()) seq_printf(s, "USB = %8x\n", at91_pmc_read(AT91_PMC_USB)); seq_printf(s, "SR = %8x\n", sr); seq_printf(s, "\n"); list_for_each_entry(clk, &clocks, node) { char *state; if (clk->mode == pmc_sys_mode) { state = (scsr & clk->pmc_mask) ? "on" : "off"; } else if (clk->mode == pmc_periph_mode) { if (cpu_is_sama5d3()) { u32 pmc_mask = 1 << (clk->pid % 32); if (clk->pid > 31) state = (pcsr1 & pmc_mask) ? "on" : "off"; else state = (pcsr & pmc_mask) ? "on" : "off"; } else { state = (pcsr & clk->pmc_mask) ? 
"on" : "off"; } } else if (clk->mode == pmc_uckr_mode) { state = (uckr & clk->pmc_mask) ? "on" : "off"; } else if (clk->pmc_mask) { state = (sr & clk->pmc_mask) ? "on" : "off"; } else if (clk == &clk32k || clk == &main_clk) { state = "on"; } else { state = ""; } seq_printf(s, "%-10s users=%2d %-3s %9lu Hz %s\n", clk->name, clk->users, state, clk_get_rate(clk), clk->parent ? clk->parent->name : ""); } return 0; } static int at91_clk_open(struct inode *inode, struct file *file) { return single_open(file, at91_clk_show, NULL); } static const struct file_operations at91_clk_operations = { .open = at91_clk_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init at91_clk_debugfs_init(void) { /* /sys/kernel/debug/at91_clk */ (void) debugfs_create_file("at91_clk", S_IFREG | S_IRUGO, NULL, NULL, &at91_clk_operations); return 0; } postcore_initcall(at91_clk_debugfs_init); #endif /*------------------------------------------------------------------------*/ /* Register a new clock */ static void __init at91_clk_add(struct clk *clk) { list_add_tail(&clk->node, &clocks); clk->cl.con_id = clk->name; clk->cl.clk = clk; clkdev_add(&clk->cl); } int __init clk_register(struct clk *clk) { if (clk_is_peripheral(clk)) { if (!clk->parent) clk->parent = &mck; if (cpu_is_sama5d3()) clk->rate_hz = DIV_ROUND_UP(clk->parent->rate_hz, 1 << clk->div); clk->mode = pmc_periph_mode; } else if (clk_is_sys(clk)) { clk->parent = &mck; clk->mode = pmc_sys_mode; } else if (clk_is_programmable(clk)) { clk->mode = pmc_sys_mode; init_programmable_clock(clk); } at91_clk_add(clk); return 0; } /*------------------------------------------------------------------------*/ static u32 __init at91_pll_rate(struct clk *pll, u32 freq, u32 reg) { unsigned mul, div; div = reg & 0xff; if (cpu_is_sama5d3()) mul = AT91_PMC3_MUL_GET(reg); else mul = AT91_PMC_MUL_GET(reg); if (div && mul) { freq /= div; freq *= mul + 1; } else freq = 0; return freq; } static u32 __init at91_usb_rate(struct clk *pll, u32 freq, u32 reg) { if (pll == &pllb && (reg & AT91_PMC_USB96M)) return freq / 2; else if (pll == &utmi_clk || cpu_is_at91sam9n12()) return freq / (1 + ((reg & AT91_PMC_OHCIUSBDIV) >> 8)); else return freq; } static unsigned __init at91_pll_calc(unsigned main_freq, unsigned out_freq) { unsigned i, div = 0, mul = 0, diff = 1 << 30; unsigned ret = (out_freq > 155000000) ? 0xbe00 : 0x3e00; /* PLL output max 240 MHz (or 180 MHz per errata) */ if (out_freq > 240000000) goto fail; for (i = 1; i < 256; i++) { int diff1; unsigned input, mul1; /* * PLL input between 1MHz and 32MHz per spec, but lower * frequences seem necessary in some cases so allow 100K. * Warning: some newer products need 2MHz min. 
*/ input = main_freq / i; if (cpu_is_at91sam9g20() && input < 2000000) continue; if (input < 100000) continue; if (input > 32000000) continue; mul1 = out_freq / input; if (cpu_is_at91sam9g20() && mul > 63) continue; if (mul1 > 2048) continue; if (mul1 < 2) goto fail; diff1 = out_freq - input * mul1; if (diff1 < 0) diff1 = -diff1; if (diff > diff1) { diff = diff1; div = i; mul = mul1; if (diff == 0) break; } } if (i == 256 && diff > (out_freq >> 5)) goto fail; return ret | ((mul - 1) << 16) | div; fail: return 0; } static struct clk *const standard_pmc_clocks[] __initconst = { /* four primary clocks */ &clk32k, &main_clk, &plla, /* MCK */ &mck }; /* PLLB generated USB full speed clock init */ static void __init at91_pllb_usbfs_clock_init(unsigned long main_clock) { unsigned int reg; /* * USB clock init: choose 48 MHz PLLB value, * disable 48MHz clock during usb peripheral suspend. * * REVISIT: assumes MCK doesn't derive from PLLB! */ uhpck.parent = &pllb; reg = at91_pllb_usb_init = at91_pll_calc(main_clock, 48000000 * 2); pllb.rate_hz = at91_pll_rate(&pllb, main_clock, at91_pllb_usb_init); if (cpu_is_at91rm9200()) { reg = at91_pllb_usb_init |= AT91_PMC_USB96M; uhpck.pmc_mask = AT91RM9200_PMC_UHP; udpck.pmc_mask = AT91RM9200_PMC_UDP; at91_pmc_write(AT91_PMC_SCER, AT91RM9200_PMC_MCKUDP); } else if (cpu_is_at91sam9260() || cpu_is_at91sam9261() || cpu_is_at91sam9263() || cpu_is_at91sam9g20() || cpu_is_at91sam9g10()) { reg = at91_pllb_usb_init |= AT91_PMC_USB96M; uhpck.pmc_mask = AT91SAM926x_PMC_UHP; udpck.pmc_mask = AT91SAM926x_PMC_UDP; } else if (cpu_is_at91sam9n12()) { /* Divider for USB clock is in USB clock register for 9n12 */ reg = AT91_PMC_USBS_PLLB; /* For PLLB output 96M, set usb divider 2 (USBDIV + 1) */ reg |= AT91_PMC_OHCIUSBDIV_2; at91_pmc_write(AT91_PMC_USB, reg); /* Still setup masks */ uhpck.pmc_mask = AT91SAM926x_PMC_UHP; udpck.pmc_mask = AT91SAM926x_PMC_UDP; } at91_pmc_write(AT91_CKGR_PLLBR, 0); udpck.rate_hz = at91_usb_rate(&pllb, pllb.rate_hz, reg); uhpck.rate_hz = at91_usb_rate(&pllb, pllb.rate_hz, reg); } /* UPLL generated USB full speed clock init */ static void __init at91_upll_usbfs_clock_init(unsigned long main_clock) { /* * USB clock init: choose 480 MHz from UPLL, */ unsigned int usbr = AT91_PMC_USBS_UPLL; /* Setup divider by 10 to reach 48 MHz */ usbr |= ((10 - 1) << 8) & AT91_PMC_OHCIUSBDIV; at91_pmc_write(AT91_PMC_USB, usbr); /* Now set uhpck values */ uhpck.parent = &utmi_clk; uhpck.pmc_mask = AT91SAM926x_PMC_UHP; uhpck.rate_hz = at91_usb_rate(&utmi_clk, utmi_clk.rate_hz, usbr); } static int __init at91_pmc_init(unsigned long main_clock) { unsigned tmp, freq, mckr; int i; int pll_overclock = false; /* * When the bootloader initialized the main oscillator correctly, * there's no problem using the cycle counter. But if it didn't, * or when using oscillator bypass mode, we must be told the speed * of the main clock. 
*/ if (!main_clock) { do { tmp = at91_pmc_read(AT91_CKGR_MCFR); } while (!(tmp & AT91_PMC_MAINRDY)); main_clock = (tmp & AT91_PMC_MAINF) * (AT91_SLOW_CLOCK / 16); } main_clk.rate_hz = main_clock; /* report if PLLA is more than mildly overclocked */ plla.rate_hz = at91_pll_rate(&plla, main_clock, at91_pmc_read(AT91_CKGR_PLLAR)); if (cpu_has_1056M_plla()) { if (plla.rate_hz > 1056000000) pll_overclock = true; } else if (cpu_has_800M_plla()) { if (plla.rate_hz > 800000000) pll_overclock = true; } else if (cpu_has_300M_plla()) { if (plla.rate_hz > 300000000) pll_overclock = true; } else if (cpu_has_240M_plla()) { if (plla.rate_hz > 240000000) pll_overclock = true; } else if (cpu_has_210M_plla()) { if (plla.rate_hz > 210000000) pll_overclock = true; } else { if (plla.rate_hz > 209000000) pll_overclock = true; } if (pll_overclock) pr_info("Clocks: PLLA overclocked, %ld MHz\n", plla.rate_hz / 1000000); if (cpu_has_plladiv2()) { mckr = at91_pmc_read(AT91_PMC_MCKR); plla.rate_hz /= (1 << ((mckr & AT91_PMC_PLLADIV2) >> 12)); /* plla divisor by 2 */ } if (!cpu_has_pllb() && cpu_has_upll()) { /* setup UTMI clock as the fourth primary clock * (instead of pllb) */ utmi_clk.type |= CLK_TYPE_PRIMARY; utmi_clk.id = 3; } /* * USB HS clock init */ if (cpu_has_utmi()) { /* * multiplier is hard-wired to 40 * (obtain the USB High Speed 480 MHz when input is 12 MHz) */ utmi_clk.rate_hz = 40 * utmi_clk.parent->rate_hz; /* UTMI bias and PLL are managed at the same time */ if (cpu_has_upll()) utmi_clk.pmc_mask |= AT91_PMC_BIASEN; } /* * USB FS clock init */ if (cpu_has_pllb()) at91_pllb_usbfs_clock_init(main_clock); if (cpu_has_upll()) /* assumes that we choose UPLL for USB and not PLLA */ at91_upll_usbfs_clock_init(main_clock); /* * MCK and CPU derive from one of those primary clocks. * For now, assume this parentage won't change. */ mckr = at91_pmc_read(AT91_PMC_MCKR); mck.parent = at91_css_to_clk(mckr & AT91_PMC_CSS); freq = mck.parent->rate_hz; freq /= pmc_prescaler_divider(mckr); /* prescale */ if (cpu_is_at91rm9200()) { mck.rate_hz = freq / (1 + ((mckr & AT91_PMC_MDIV) >> 8)); /* mdiv */ } else if (cpu_is_at91sam9g20()) { mck.rate_hz = (mckr & AT91_PMC_MDIV) ? freq / ((mckr & AT91_PMC_MDIV) >> 7) : freq; /* mdiv ; (x >> 7) = ((x >> 8) * 2) */ if (mckr & AT91_PMC_PDIV) freq /= 2; /* processor clock division */ } else if (cpu_has_mdiv3()) { mck.rate_hz = (mckr & AT91_PMC_MDIV) == AT91SAM9_PMC_MDIV_3 ? 
freq / 3 : freq / (1 << ((mckr & AT91_PMC_MDIV) >> 8)); /* mdiv */ } else { mck.rate_hz = freq / (1 << ((mckr & AT91_PMC_MDIV) >> 8)); /* mdiv */ } if (cpu_has_alt_prescaler()) { /* Programmable clocks can use MCK */ mck.type |= CLK_TYPE_PRIMARY; mck.id = 4; } /* Register the PMC's standard clocks */ for (i = 0; i < ARRAY_SIZE(standard_pmc_clocks); i++) at91_clk_add(standard_pmc_clocks[i]); if (cpu_has_pllb()) at91_clk_add(&pllb); if (cpu_has_uhp()) at91_clk_add(&uhpck); if (cpu_has_udpfs()) at91_clk_add(&udpck); if (cpu_has_utmi()) at91_clk_add(&utmi_clk); /* MCK and CPU clock are "always on" */ clk_enable(&mck); printk("Clocks: CPU %u MHz, master %u MHz, main %u.%03u MHz\n", freq / 1000000, (unsigned) mck.rate_hz / 1000000, (unsigned) main_clock / 1000000, ((unsigned) main_clock % 1000000) / 1000); return 0; } #if defined(CONFIG_OF) static struct of_device_id pmc_ids[] = { { .compatible = "atmel,at91rm9200-pmc" }, { .compatible = "atmel,at91sam9260-pmc" }, { .compatible = "atmel,at91sam9g45-pmc" }, { .compatible = "atmel,at91sam9n12-pmc" }, { .compatible = "atmel,at91sam9x5-pmc" }, { .compatible = "atmel,sama5d3-pmc" }, { /*sentinel*/ } }; static struct of_device_id osc_ids[] = { { .compatible = "atmel,osc" }, { /*sentinel*/ } }; int __init at91_dt_clock_init(void) { struct device_node *np; u32 main_clock = 0; np = of_find_matching_node(NULL, pmc_ids); if (!np) panic("unable to find compatible pmc node in dtb\n"); at91_pmc_base = of_iomap(np, 0); if (!at91_pmc_base) panic("unable to map pmc cpu registers\n"); of_node_put(np); /* retrieve the freqency of fixed clocks from device tree */ np = of_find_matching_node(NULL, osc_ids); if (np) { u32 rate; if (!of_property_read_u32(np, "clock-frequency", &rate)) main_clock = rate; } of_node_put(np); return at91_pmc_init(main_clock); } #endif int __init at91_clock_init(unsigned long main_clock) { at91_pmc_base = ioremap(AT91_PMC, 256); if (!at91_pmc_base) panic("Impossible to ioremap AT91_PMC 0x%x\n", AT91_PMC); return at91_pmc_init(main_clock); } /* * Several unused clocks may be active. Turn them off. */ static int __init at91_clock_reset(void) { unsigned long pcdr = 0; unsigned long pcdr1 = 0; unsigned long scdr = 0; struct clk *clk; list_for_each_entry(clk, &clocks, node) { if (clk->users > 0) continue; if (clk->mode == pmc_periph_mode) { if (cpu_is_sama5d3()) { u32 pmc_mask = 1 << (clk->pid % 32); if (clk->pid > 31) pcdr1 |= pmc_mask; else pcdr |= pmc_mask; } else pcdr |= clk->pmc_mask; } if (clk->mode == pmc_sys_mode) scdr |= clk->pmc_mask; pr_debug("Clocks: disable unused %s\n", clk->name); } at91_pmc_write(AT91_PMC_SCDR, scdr); if (cpu_is_sama5d3()) at91_pmc_write(AT91_PMC_PCDR1, pcdr1); return 0; } late_initcall(at91_clock_reset); void at91sam9_idle(void) { at91_pmc_write(AT91_PMC_SCDR, AT91_PMC_PCK); cpu_do_idle(); }
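/*
 * Illustrative sketch, not part of the upstream clock.c above: at91_pll_rate()
 * derives a PLL output as (input / DIV) * (MUL + 1), with DIV taken from the
 * low byte of CKGR_PLLxR and MUL from the multiplier field. Assuming the
 * common 18.432 MHz main crystal with DIVA = 9 and MULA = 96 (a typical
 * AT91SAM9260 PLLA configuration; these particular register values are an
 * assumption, not taken from this file), that gives
 * 18432000 / 9 * 97 = 198656000 Hz, i.e. about 198.656 MHz. The hypothetical
 * userspace helper below only repeats that arithmetic for reference; register
 * field extraction is left out.
 */
#include <stdio.h>

static unsigned long example_pll_rate(unsigned long main_hz,
				      unsigned int div, unsigned int mul)
{
	if (!div || !mul)
		return 0;	/* mirrors the driver: treat as "PLL off" */
	return main_hz / div * (mul + 1);
}

int main(void)
{
	/* expected: "198656000 Hz" for the 18.432 MHz, DIVA=9, MULA=96 case */
	printf("%lu Hz\n", example_pll_rate(18432000UL, 9, 96));
	return 0;
}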
gpl-2.0
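The master-clock derivation in the clock code above packs the prescaler and MDIV fields into one MCKR read, and the at91sam9g20 branch relies on the identity (x >> 7) == ((x >> 8) * 2) to turn the MDIV field directly into a divisor. A small standalone sketch of that branch (plain userspace C; the mask value is an assumption standing in for the kernel's AT91_PMC_MDIV definition):

#include <stdio.h>
#include <stdint.h>

/* Assumed layout: bits [9:8] of MCKR hold MDIV, as used in the code above. */
#define PMC_MDIV_MASK 0x300u

static unsigned long sam9g20_mck(unsigned long prescaled_hz, uint32_t mckr)
{
	uint32_t mdiv = mckr & PMC_MDIV_MASK;

	/* (x >> 7) == ((x >> 8) * 2): effective divisor of 2, 4 or 6 */
	return mdiv ? prescaled_hz / (mdiv >> 7) : prescaled_hz;
}

int main(void)
{
	/* 132.096 MHz prescaled clock with MDIV = 1 gives a 66.048 MHz MCK. */
	printf("MCK = %lu Hz\n", sam9g20_mck(132096000UL, 0x100));
	return 0;
}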
bleached/android_kernel_samsung_msm7x27a
sound/pci/asihpi/hpidebug.c
295
5980
/************************************************************************ AudioScience HPI driver Copyright (C) 1997-2010 AudioScience Inc. <support@audioscience.com> This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as published by the Free Software Foundation; This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Debug macro translation. ************************************************************************/ #include "hpi_internal.h" #include "hpidebug.h" /* Debug level; 0 quiet; 1 informative, 2 debug, 3 verbose debug. */ int hpi_debug_level = HPI_DEBUG_LEVEL_DEFAULT; void hpi_debug_init(void) { printk(KERN_INFO "debug start\n"); } int hpi_debug_level_set(int level) { int old_level; old_level = hpi_debug_level; hpi_debug_level = level; return old_level; } int hpi_debug_level_get(void) { return hpi_debug_level; } #ifdef HPIOS_DEBUG_PRINT /* implies OS has no printf-like function */ #include <stdarg.h> void hpi_debug_printf(char *fmt, ...) { va_list arglist; char buffer[128]; va_start(arglist, fmt); if (buffer[0]) HPIOS_DEBUG_PRINT(buffer); va_end(arglist); } #endif struct treenode { void *array; unsigned int num_elements; }; #define make_treenode_from_array(nodename, array) \ static void *tmp_strarray_##nodename[] = array; \ static struct treenode nodename = { \ &tmp_strarray_##nodename, \ ARRAY_SIZE(tmp_strarray_##nodename) \ }; #define get_treenode_elem(node_ptr, idx, type) \ (&(*((type *)(node_ptr)->array)[idx])) make_treenode_from_array(hpi_control_type_strings, HPI_CONTROL_TYPE_STRINGS) make_treenode_from_array(hpi_subsys_strings, HPI_SUBSYS_STRINGS) make_treenode_from_array(hpi_adapter_strings, HPI_ADAPTER_STRINGS) make_treenode_from_array(hpi_istream_strings, HPI_ISTREAM_STRINGS) make_treenode_from_array(hpi_ostream_strings, HPI_OSTREAM_STRINGS) make_treenode_from_array(hpi_mixer_strings, HPI_MIXER_STRINGS) make_treenode_from_array(hpi_node_strings, { "NODE is invalid object"}) make_treenode_from_array(hpi_control_strings, HPI_CONTROL_STRINGS) make_treenode_from_array(hpi_nvmemory_strings, HPI_OBJ_STRINGS) make_treenode_from_array(hpi_digitalio_strings, HPI_DIGITALIO_STRINGS) make_treenode_from_array(hpi_watchdog_strings, HPI_WATCHDOG_STRINGS) make_treenode_from_array(hpi_clock_strings, HPI_CLOCK_STRINGS) make_treenode_from_array(hpi_profile_strings, HPI_PROFILE_STRINGS) make_treenode_from_array(hpi_asyncevent_strings, HPI_ASYNCEVENT_STRINGS) #define HPI_FUNCTION_STRINGS \ { \ &hpi_subsys_strings,\ &hpi_adapter_strings,\ &hpi_ostream_strings,\ &hpi_istream_strings,\ &hpi_mixer_strings,\ &hpi_node_strings,\ &hpi_control_strings,\ &hpi_nvmemory_strings,\ &hpi_digitalio_strings,\ &hpi_watchdog_strings,\ &hpi_clock_strings,\ &hpi_profile_strings,\ &hpi_control_strings, \ &hpi_asyncevent_strings \ } make_treenode_from_array(hpi_function_strings, HPI_FUNCTION_STRINGS) compile_time_assert(HPI_OBJ_MAXINDEX == 14, obj_list_doesnt_match); static char *hpi_function_string(unsigned int function) { unsigned int object; struct treenode *tmp; object = function / HPI_OBJ_FUNCTION_SPACING; function = function - object * 
HPI_OBJ_FUNCTION_SPACING; if (object == 0 || object == HPI_OBJ_NODE || object > hpi_function_strings.num_elements) return "invalid object"; tmp = get_treenode_elem(&hpi_function_strings, object - 1, struct treenode *); if (function == 0 || function > tmp->num_elements) return "invalid function"; return get_treenode_elem(tmp, function - 1, char *); } void hpi_debug_message(struct hpi_message *phm, char *sz_fileline) { if (phm) { if ((phm->object <= HPI_OBJ_MAXINDEX) && phm->object) { u16 index = 0; u16 attrib = 0; int is_control = 0; index = phm->obj_index; switch (phm->object) { case HPI_OBJ_ADAPTER: case HPI_OBJ_PROFILE: break; case HPI_OBJ_MIXER: if (phm->function == HPI_MIXER_GET_CONTROL_BY_INDEX) index = phm->u.m.control_index; break; case HPI_OBJ_OSTREAM: case HPI_OBJ_ISTREAM: break; case HPI_OBJ_CONTROLEX: case HPI_OBJ_CONTROL: if (phm->version == 1) attrib = HPI_CTL_ATTR(UNIVERSAL, 1); else attrib = phm->u.c.attribute; is_control = 1; break; default: break; } if (is_control && (attrib & 0xFF00)) { int control_type = (attrib & 0xFF00) >> 8; int attr_index = HPI_CTL_ATTR_INDEX(attrib); /* note the KERN facility level is in szFileline already */ printk("%s adapter %d %s " "ctrl_index x%04x %s %d\n", sz_fileline, phm->adapter_index, hpi_function_string(phm->function), index, get_treenode_elem (&hpi_control_type_strings, control_type, char *), attr_index); } else printk("%s adapter %d %s " "idx x%04x attr x%04x \n", sz_fileline, phm->adapter_index, hpi_function_string(phm->function), index, attrib); } else { printk("adap=%d, invalid obj=%d, func=0x%x\n", phm->adapter_index, phm->object, phm->function); } } else printk(KERN_ERR "NULL message pointer to hpi_debug_message!\n"); } void hpi_debug_data(u16 *pdata, u32 len) { u32 i; int j; int k; int lines; int cols = 8; lines = (len + cols - 1) / cols; if (lines > 8) lines = 8; for (i = 0, j = 0; j < lines; j++) { printk(KERN_DEBUG "%p:", (pdata + i)); for (k = 0; k < cols && i < len; i++, k++) printk("%s%04x", k == 0 ? "" : " ", pdata[i]); printk("\n"); } }
gpl-2.0
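hpi_debug_printf() in the file above declares buffer[] and immediately tests buffer[0] without ever formatting fmt into it, so at best it prints uninitialised stack data. A corrected standalone sketch (assuming only the C library; HPIOS_DEBUG_PRINT is stubbed here because the real macro is platform-provided):

#include <stdarg.h>
#include <stdio.h>

/* Stand-in for the platform's HPIOS_DEBUG_PRINT macro. */
#define HPIOS_DEBUG_PRINT(s) fputs((s), stderr)

static void hpi_debug_printf_fixed(char *fmt, ...)
{
	va_list arglist;
	char buffer[128];

	va_start(arglist, fmt);
	vsnprintf(buffer, sizeof(buffer), fmt, arglist);	/* fill buffer first */
	va_end(arglist);

	if (buffer[0])
		HPIOS_DEBUG_PRINT(buffer);
}

int main(void)
{
	hpi_debug_printf_fixed("adapter %d: debug level %d\n", 0, 2);
	return 0;
}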
falaze/nexus5n
arch/arm/mach-msm/pil-gss.c
295
15437
/* * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/err.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/smp.h> #include <linux/miscdevice.h> #include <linux/reboot.h> #include <linux/interrupt.h> #include <mach/msm_xo.h> #include <mach/socinfo.h> #include <mach/msm_bus_board.h> #include <mach/msm_bus.h> #include <mach/subsystem_restart.h> #include <mach/ramdump.h> #include <mach/msm_smem.h> #include "peripheral-loader.h" #include "scm-pas.h" #include "smd_private.h" #define GSS_CSR_AHB_CLK_SEL 0x0 #define GSS_CSR_RESET 0x4 #define GSS_CSR_CLK_BLK_CONFIG 0x8 #define GSS_CSR_CLK_ENABLE 0xC #define GSS_CSR_BOOT_REMAP 0x14 #define GSS_CSR_POWER_UP_DOWN 0x18 #define GSS_CSR_CFG_HID 0x2C #define GSS_SLP_CLK_CTL 0x2C60 #define GSS_RESET 0x2C64 #define GSS_CLAMP_ENA 0x2C68 #define GSS_CXO_SRC_CTL 0x2C74 #define PLL5_STATUS 0x30F8 #define PLL_ENA_GSS 0x3480 #define PLL5_VOTE BIT(5) #define PLL_STATUS BIT(16) #define REMAP_ENABLE BIT(16) #define A5_POWER_STATUS BIT(4) #define A5_POWER_ENA BIT(0) #define NAV_POWER_ENA BIT(1) #define XO_CLK_BRANCH_ENA BIT(0) #define SLP_CLK_BRANCH_ENA BIT(4) #define A5_RESET BIT(0) struct gss_data { void __iomem *base; void __iomem *qgic2_base; void __iomem *cbase; struct clk *xo; struct pil_desc pil_desc; struct miscdevice misc_dev; struct subsys_device *subsys; struct subsys_desc subsys_desc; int crash_shutdown; int irq; void *subsys_handle; struct ramdump_device *ramdump_dev; struct ramdump_device *smem_ramdump_dev; }; static int make_gss_proxy_votes(struct pil_desc *pil) { int ret; struct gss_data *drv = dev_get_drvdata(pil->dev); ret = clk_prepare_enable(drv->xo); if (ret) { dev_err(pil->dev, "Failed to enable XO\n"); return ret; } return 0; } static void remove_gss_proxy_votes(struct pil_desc *pil) { struct gss_data *drv = dev_get_drvdata(pil->dev); clk_disable_unprepare(drv->xo); } static void gss_init(struct gss_data *drv) { void __iomem *base = drv->base; void __iomem *cbase = drv->cbase; /* Supply clocks to GSS. */ writel_relaxed(XO_CLK_BRANCH_ENA, cbase + GSS_CXO_SRC_CTL); writel_relaxed(SLP_CLK_BRANCH_ENA, cbase + GSS_SLP_CLK_CTL); /* Deassert GSS reset and clamps. */ writel_relaxed(0x0, cbase + GSS_RESET); writel_relaxed(0x0, cbase + GSS_CLAMP_ENA); mb(); /* * Configure clock source and dividers for 288MHz core, 144MHz AXI and * 72MHz AHB, all derived from the 288MHz PLL. */ writel_relaxed(0x341, base + GSS_CSR_CLK_BLK_CONFIG); writel_relaxed(0x1, base + GSS_CSR_AHB_CLK_SEL); /* Assert all GSS resets. */ writel_relaxed(0x7F, base + GSS_CSR_RESET); /* Enable all bus clocks and wait for resets to propagate. */ writel_relaxed(0x1F, base + GSS_CSR_CLK_ENABLE); mb(); udelay(1); /* Release subsystem from reset, but leave A5 in reset. */ writel_relaxed(A5_RESET, base + GSS_CSR_RESET); } static void cfg_qgic2_bus_access(void *data) { struct gss_data *drv = data; int i; /* * Apply a 8064 v1.0 workaround to configure QGIC bus access. 
* This must be done from Krait 0 to configure the Master ID * correctly. */ writel_relaxed(0x2, drv->base + GSS_CSR_CFG_HID); for (i = 0; i <= 3; i++) readl_relaxed(drv->qgic2_base); } static int pil_gss_shutdown(struct pil_desc *pil) { struct gss_data *drv = dev_get_drvdata(pil->dev); void __iomem *base = drv->base; void __iomem *cbase = drv->cbase; u32 regval; int ret; ret = clk_prepare_enable(drv->xo); if (ret) { dev_err(pil->dev, "Failed to enable XO\n"); return ret; } /* Make sure bus port is halted. */ msm_bus_axi_porthalt(MSM_BUS_MASTER_GSS_NAV); /* * Vote PLL on in GSS's voting register and wait for it to enable. * The PLL must be enable to switch the GFMUX to a low-power source. */ writel_relaxed(PLL5_VOTE, cbase + PLL_ENA_GSS); while ((readl_relaxed(cbase + PLL5_STATUS) & PLL_STATUS) == 0) cpu_relax(); /* Perform one-time GSS initialization. */ gss_init(drv); /* Assert A5 reset. */ regval = readl_relaxed(base + GSS_CSR_RESET); regval |= A5_RESET; writel_relaxed(regval, base + GSS_CSR_RESET); /* Power down A5 and NAV. */ regval = readl_relaxed(base + GSS_CSR_POWER_UP_DOWN); regval &= ~(A5_POWER_ENA|NAV_POWER_ENA); writel_relaxed(regval, base + GSS_CSR_POWER_UP_DOWN); /* Select XO clock source and increase dividers to save power. */ regval = readl_relaxed(base + GSS_CSR_CLK_BLK_CONFIG); regval |= 0x3FF; writel_relaxed(regval, base + GSS_CSR_CLK_BLK_CONFIG); /* Disable bus clocks. */ writel_relaxed(0x1F, base + GSS_CSR_CLK_ENABLE); /* Clear GSS PLL votes. */ writel_relaxed(0, cbase + PLL_ENA_GSS); mb(); clk_disable_unprepare(drv->xo); return 0; } static int pil_gss_reset(struct pil_desc *pil) { struct gss_data *drv = dev_get_drvdata(pil->dev); void __iomem *base = drv->base; phys_addr_t start_addr = pil_get_entry_addr(pil); void __iomem *cbase = drv->cbase; int ret; /* Unhalt bus port. */ ret = msm_bus_axi_portunhalt(MSM_BUS_MASTER_GSS_NAV); if (ret) { dev_err(pil->dev, "Failed to unhalt bus port\n"); return ret; } /* Vote PLL on in GSS's voting register and wait for it to enable. */ writel_relaxed(PLL5_VOTE, cbase + PLL_ENA_GSS); while ((readl_relaxed(cbase + PLL5_STATUS) & PLL_STATUS) == 0) cpu_relax(); /* Perform GSS initialization. */ gss_init(drv); /* Configure boot address and enable remap. */ writel_relaxed(REMAP_ENABLE | (start_addr >> 16), base + GSS_CSR_BOOT_REMAP); /* Power up A5 core. */ writel_relaxed(A5_POWER_ENA, base + GSS_CSR_POWER_UP_DOWN); while (!(readl_relaxed(base + GSS_CSR_POWER_UP_DOWN) & A5_POWER_STATUS)) cpu_relax(); if (cpu_is_apq8064() && ((SOCINFO_VERSION_MAJOR(socinfo_get_version()) == 1) && (SOCINFO_VERSION_MINOR(socinfo_get_version()) == 0))) { ret = smp_call_function_single(0, cfg_qgic2_bus_access, drv, 1); if (ret) { pr_err("Failed to configure QGIC2 bus access\n"); pil_gss_shutdown(pil); return ret; } } /* Release A5 from reset. */ writel_relaxed(0x0, base + GSS_CSR_RESET); return 0; } static struct pil_reset_ops pil_gss_ops = { .auth_and_reset = pil_gss_reset, .shutdown = pil_gss_shutdown, .proxy_vote = make_gss_proxy_votes, .proxy_unvote = remove_gss_proxy_votes, }; static int pil_gss_init_image_trusted(struct pil_desc *pil, const u8 *metadata, size_t size) { return pas_init_image(PAS_GSS, metadata, size); } static int pil_gss_shutdown_trusted(struct pil_desc *pil) { struct gss_data *drv = dev_get_drvdata(pil->dev); int ret; /* * CXO is used in the secure shutdown code to configure the processor * for low power mode. 
*/ ret = clk_prepare_enable(drv->xo); if (ret) { dev_err(pil->dev, "Failed to enable XO\n"); return ret; } msm_bus_axi_porthalt(MSM_BUS_MASTER_GSS_NAV); ret = pas_shutdown(PAS_GSS); clk_disable_unprepare(drv->xo); return ret; } static int pil_gss_reset_trusted(struct pil_desc *pil) { int err; err = msm_bus_axi_portunhalt(MSM_BUS_MASTER_GSS_NAV); if (err) { dev_err(pil->dev, "Failed to unhalt bus port\n"); goto out; } err = pas_auth_and_reset(PAS_GSS); if (err) goto halt_port; return 0; halt_port: msm_bus_axi_porthalt(MSM_BUS_MASTER_GSS_NAV); out: return err; } static struct pil_reset_ops pil_gss_ops_trusted = { .init_image = pil_gss_init_image_trusted, .auth_and_reset = pil_gss_reset_trusted, .shutdown = pil_gss_shutdown_trusted, .proxy_vote = make_gss_proxy_votes, .proxy_unvote = remove_gss_proxy_votes, }; #define MAX_SSR_REASON_LEN 81U static void log_gss_sfr(void) { u32 size; char *smem_reason, reason[MAX_SSR_REASON_LEN]; smem_reason = smem_get_entry(SMEM_SSR_REASON_MSS0, &size); if (!smem_reason || !size) { pr_err("GSS subsystem failure reason: (unknown, smem_get_entry failed).\n"); return; } if (!smem_reason[0]) { pr_err("GSS subsystem failure reason: (unknown, init string found).\n"); return; } size = min(size, MAX_SSR_REASON_LEN-1); memcpy(reason, smem_reason, size); reason[size] = '\0'; pr_err("GSS subsystem failure reason: %s.\n", reason); smem_reason[0] = '\0'; wmb(); } static void restart_gss(struct gss_data *drv) { log_gss_sfr(); subsystem_restart_dev(drv->subsys); } static void smsm_state_cb(void *data, uint32_t old_state, uint32_t new_state) { struct gss_data *drv = data; /* Ignore if we're the one that set SMSM_RESET */ if (drv->crash_shutdown) return; if (new_state & SMSM_RESET) { pr_err("GSS SMSM state changed to SMSM_RESET.\n" "Probable err_fatal on the GSS. 
" "Calling subsystem restart...\n"); restart_gss(drv); } } static int gss_start(const struct subsys_desc *desc) { struct gss_data *drv; drv = container_of(desc, struct gss_data, subsys_desc); return pil_boot(&drv->pil_desc); } static void gss_stop(const struct subsys_desc *desc) { struct gss_data *drv; drv = container_of(desc, struct gss_data, subsys_desc); pil_shutdown(&drv->pil_desc); } static int gss_shutdown(const struct subsys_desc *desc) { struct gss_data *drv = container_of(desc, struct gss_data, subsys_desc); pil_shutdown(&drv->pil_desc); disable_irq_nosync(drv->irq); return 0; } static int gss_powerup(const struct subsys_desc *desc) { struct gss_data *drv = container_of(desc, struct gss_data, subsys_desc); pil_boot(&drv->pil_desc); enable_irq(drv->irq); return 0; } void gss_crash_shutdown(const struct subsys_desc *desc) { struct gss_data *drv = container_of(desc, struct gss_data, subsys_desc); drv->crash_shutdown = 1; smsm_reset_modem(SMSM_RESET); } static struct ramdump_segment smem_segments[] = { {0x80000000, 0x00200000}, }; static int gss_ramdump(int enable, const struct subsys_desc *desc) { int ret; struct gss_data *drv = container_of(desc, struct gss_data, subsys_desc); if (!enable) return 0; ret = pil_do_ramdump(&drv->pil_desc, drv->ramdump_dev); if (ret < 0) { pr_err("Unable to dump gss memory\n"); return ret; } ret = do_elf_ramdump(drv->smem_ramdump_dev, smem_segments, ARRAY_SIZE(smem_segments)); if (ret < 0) { pr_err("Unable to dump smem memory (rc = %d).\n", ret); return ret; } return 0; } static irqreturn_t gss_wdog_bite_irq(int irq, void *dev_id) { struct gss_data *drv = dev_id; pr_err("Watchdog bite received from GSS!\n"); restart_gss(drv); return IRQ_HANDLED; } static int gss_open(struct inode *inode, struct file *filp) { struct miscdevice *c = filp->private_data; struct gss_data *drv = container_of(c, struct gss_data, misc_dev); drv->subsys_handle = subsystem_get("gss"); if (IS_ERR(drv->subsys_handle)) { pr_debug("%s - subsystem_get returned error\n", __func__); return PTR_ERR(drv->subsys_handle); } return 0; } static int gss_release(struct inode *inode, struct file *filp) { struct miscdevice *c = filp->private_data; struct gss_data *drv = container_of(c, struct gss_data, misc_dev); subsystem_put(drv->subsys_handle); pr_debug("%s subsystem_put called on GSS\n", __func__); return 0; } const struct file_operations gss_file_ops = { .open = gss_open, .release = gss_release, .owner = THIS_MODULE, }; static int __devinit pil_gss_probe(struct platform_device *pdev) { struct gss_data *drv; struct resource *res; struct pil_desc *desc; int ret; drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL); if (!drv) return -ENOMEM; platform_set_drvdata(pdev, drv); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); drv->base = devm_request_and_ioremap(&pdev->dev, res); if (!drv->base) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 1); drv->qgic2_base = devm_request_and_ioremap(&pdev->dev, res); if (!drv->qgic2_base) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 2); if (!res) return -EINVAL; drv->cbase = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!drv->cbase) return -ENOMEM; drv->xo = devm_clk_get(&pdev->dev, "xo"); if (IS_ERR(drv->xo)) return PTR_ERR(drv->xo); drv->irq = platform_get_irq(pdev, 0); if (drv->irq < 0) return drv->irq; desc = &drv->pil_desc; desc->name = "gss"; desc->dev = &pdev->dev; desc->owner = THIS_MODULE; desc->proxy_timeout = 10000; if (pas_supported(PAS_GSS) > 0) { desc->ops = &pil_gss_ops_trusted; 
dev_info(&pdev->dev, "using secure boot\n"); } else { desc->ops = &pil_gss_ops; dev_info(&pdev->dev, "using non-secure boot\n"); } ret = pil_desc_init(desc); if (ret) return ret; /* Force into low power mode because hardware doesn't do this */ desc->ops->shutdown(desc); ret = smsm_state_cb_register(SMSM_MODEM_STATE, SMSM_RESET, smsm_state_cb, drv); if (ret < 0) dev_warn(&pdev->dev, "Unable to register SMSM callback\n"); drv->subsys_desc.name = "gss"; drv->subsys_desc.dev = &pdev->dev; drv->subsys_desc.owner = THIS_MODULE; drv->subsys_desc.start = gss_start; drv->subsys_desc.stop = gss_stop; drv->subsys_desc.shutdown = gss_shutdown; drv->subsys_desc.powerup = gss_powerup; drv->subsys_desc.ramdump = gss_ramdump; drv->subsys_desc.crash_shutdown = gss_crash_shutdown; drv->subsys = subsys_register(&drv->subsys_desc); if (IS_ERR(drv->subsys)) { ret = PTR_ERR(drv->subsys); goto err_subsys; } drv->misc_dev.minor = MISC_DYNAMIC_MINOR; drv->misc_dev.name = "gss"; drv->misc_dev.fops = &gss_file_ops; ret = misc_register(&drv->misc_dev); if (ret) goto err_misc; drv->ramdump_dev = create_ramdump_device("gss", &pdev->dev); if (!drv->ramdump_dev) { ret = -ENOMEM; goto err_ramdump; } drv->smem_ramdump_dev = create_ramdump_device("smem-gss", &pdev->dev); if (!drv->smem_ramdump_dev) { ret = -ENOMEM; goto err_smem; } ret = devm_request_irq(&pdev->dev, drv->irq, gss_wdog_bite_irq, IRQF_TRIGGER_RISING, "gss_a5_wdog", drv); if (ret < 0) goto err; return 0; err: destroy_ramdump_device(drv->smem_ramdump_dev); err_smem: destroy_ramdump_device(drv->ramdump_dev); err_ramdump: misc_deregister(&drv->misc_dev); err_misc: subsys_unregister(drv->subsys); err_subsys: pil_desc_release(desc); return ret; } static int __devexit pil_gss_remove(struct platform_device *pdev) { struct gss_data *drv = platform_get_drvdata(pdev); destroy_ramdump_device(drv->smem_ramdump_dev); destroy_ramdump_device(drv->ramdump_dev); misc_deregister(&drv->misc_dev); subsys_unregister(drv->subsys); pil_desc_release(&drv->pil_desc); return 0; } static struct platform_driver pil_gss_driver = { .probe = pil_gss_probe, .remove = __devexit_p(pil_gss_remove), .driver = { .name = "pil_gss", .owner = THIS_MODULE, }, }; static int __init pil_gss_init(void) { return platform_driver_register(&pil_gss_driver); } module_init(pil_gss_init); static void __exit pil_gss_exit(void) { platform_driver_unregister(&pil_gss_driver); } module_exit(pil_gss_exit); MODULE_DESCRIPTION("Support for booting the GSS processor"); MODULE_LICENSE("GPL v2");
gpl-2.0
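pil_gss_reset() and pil_gss_shutdown() above both write a PLL vote and then busy-wait on a status bit with no bound on the spin. A minimal sketch of that vote-then-poll idiom with a bounded retry added (an ordinary volatile variable stands in for readl_relaxed()/writel_relaxed(); the bit position is copied from the file, the retry count is an assumption):

#include <stdio.h>
#include <stdint.h>

#define PLL_STATUS (1u << 16)

/* Pretend MMIO status register; hardware would set the bit after the vote. */
static volatile uint32_t pll5_status = PLL_STATUS;

/* Return 0 once the bit is observed, -1 after max_tries polls. */
static int wait_for_bit(volatile uint32_t *reg, uint32_t bit, int max_tries)
{
	while (max_tries-- > 0) {
		if (*reg & bit)
			return 0;
		/* kernel code would cpu_relax() or udelay() here */
	}
	return -1;
}

int main(void)
{
	if (wait_for_bit(&pll5_status, PLL_STATUS, 1000) == 0)
		printf("PLL locked\n");
	else
		printf("PLL lock timed out\n");
	return 0;
}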
tjcgj/busybox-xlnx-soc
archival/libarchive/header_verbose_list.c
295
1607
/* vi: set sw=4 ts=4: */ /* * Licensed under GPLv2 or later, see file LICENSE in this source tree. */ #include "libbb.h" #include "bb_archive.h" void FAST_FUNC header_verbose_list(const file_header_t *file_header) { struct tm tm_time; struct tm *ptm = &tm_time; //localtime(&file_header->mtime); #if ENABLE_FEATURE_TAR_UNAME_GNAME char uid[sizeof(int)*3 + 2]; /*char gid[sizeof(int)*3 + 2];*/ char *user; char *group; localtime_r(&file_header->mtime, ptm); user = file_header->tar__uname; if (user == NULL) { sprintf(uid, "%u", (unsigned)file_header->uid); user = uid; } group = file_header->tar__gname; if (group == NULL) { /*sprintf(gid, "%u", (unsigned)file_header->gid);*/ group = utoa(file_header->gid); } printf("%s %s/%s %9"OFF_FMT"u %4u-%02u-%02u %02u:%02u:%02u %s", bb_mode_string(file_header->mode), user, group, file_header->size, 1900 + ptm->tm_year, 1 + ptm->tm_mon, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, file_header->name); #else /* !FEATURE_TAR_UNAME_GNAME */ localtime_r(&file_header->mtime, ptm); printf("%s %u/%u %9"OFF_FMT"u %4u-%02u-%02u %02u:%02u:%02u %s", bb_mode_string(file_header->mode), (unsigned)file_header->uid, (unsigned)file_header->gid, file_header->size, 1900 + ptm->tm_year, 1 + ptm->tm_mon, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec, file_header->name); #endif /* FEATURE_TAR_UNAME_GNAME */ /* NB: GNU tar shows "->" for symlinks and "link to" for hardlinks */ if (file_header->link_target) { printf(" -> %s", file_header->link_target); } bb_putchar('\n'); }
gpl-2.0
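header_verbose_list() above prints the entry's mtime by decomposing struct tm by hand rather than via strftime(). The same field arithmetic can be tried standalone (plain C, no busybox headers; the current time stands in for file_header->mtime):

#include <stdio.h>
#include <time.h>

int main(void)
{
	time_t mtime = time(NULL);
	struct tm tm_time;

	localtime_r(&mtime, &tm_time);

	/* tm_year counts from 1900 and tm_mon is zero-based, hence the
	 * offsets, exactly as in header_verbose_list(). */
	printf("%4d-%02d-%02d %02d:%02d:%02d\n",
	       1900 + tm_time.tm_year, 1 + tm_time.tm_mon, tm_time.tm_mday,
	       tm_time.tm_hour, tm_time.tm_min, tm_time.tm_sec);
	return 0;
}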
Smartandroidtech/platform_kernel_lge_hammerhead
arch/arm/mach-msm/qdsp5/adsp_rm.c
1063
4933
/* Copyright (c) 2010, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/cdev.h> #include <linux/fs.h> #include <linux/list.h> #include <linux/platform_device.h> #include <linux/sched.h> #include <linux/uaccess.h> #include <linux/msm_adsp.h> #include <linux/module.h> #include <mach/qdsp5/qdsp5rmtcmdi.h> #include <mach/qdsp5/qdsp5rmtmsg.h> #include <mach/debug_mm.h> #include "adsp.h" #define MAX_CLIENTS 5 #define MAX_AUDIO_CLIENTS 5 #define MAX_RM_CLIENTS MAX_AUDIO_CLIENTS static char *rm_errs[] = { "", "PCM Blocks not Sufficient", "TASK is already occupied", "Concurrency not supported", "MIPS not sufficient", "DDP invalid/no licence" }; static struct client { wait_queue_head_t wait; unsigned int wait_state; struct aud_codec_config_ack cfg_msg; } rmclient[MAX_RM_CLIENTS]; static struct rm { struct msm_adsp_module *mod; int cnt; int state; struct aud_codec_config_ack cfg_msg; struct mutex lock; } rmtask; static void rm_dsp_event(void *data, unsigned id, size_t len, void (*getevent) (void *ptr, size_t len)); static struct msm_adsp_ops rm_ops = { .event = rm_dsp_event, }; int32_t get_adsp_resource(unsigned short client_id, void *cmd_buf, size_t cmd_size) { int rc = 0; int client_idx; client_idx = ((client_id >> 8) * MAX_CLIENTS) + (client_id & 0xFF); if (client_idx >= MAX_RM_CLIENTS) return -EINVAL; mutex_lock(&rmtask.lock); if (rmtask.state != ADSP_STATE_ENABLED) { rc = msm_adsp_get("RMTASK", &rmtask.mod, &rm_ops, NULL); if (rc) { MM_ERR("Failed to get module RMTASK\n"); mutex_unlock(&rmtask.lock); return rc; } rc = msm_adsp_enable(rmtask.mod); if (rc) { MM_ERR("RMTASK enable Failed\n"); msm_adsp_put(rmtask.mod); mutex_unlock(&rmtask.lock); return rc; } rmtask.state = ADSP_STATE_ENABLED; } rmclient[client_idx].wait_state = -1; mutex_unlock(&rmtask.lock); msm_adsp_write(rmtask.mod, QDSP_apuRmtQueue, cmd_buf, cmd_size); rc = wait_event_interruptible_timeout(rmclient[client_idx].wait, rmclient[client_idx].wait_state != -1, 5 * HZ); mutex_lock(&rmtask.lock); if (unlikely(rc < 0)) { if (rc == -ERESTARTSYS) MM_ERR("wait_event_interruptible " "returned -ERESTARTSYS\n"); else MM_ERR("wait_event_interruptible " "returned error\n"); if (!rmtask.cnt) goto disable_rm; goto unlock; } else if (rc == 0) { MM_ERR("RMTASK Msg not received\n"); rc = -ETIMEDOUT; if (!rmtask.cnt) goto disable_rm; goto unlock; } if (!(rmclient[client_idx].cfg_msg.enable)) { MM_ERR("Reason for failure: %s\n", rm_errs[rmclient[client_idx].cfg_msg.reason]); rc = -EBUSY; if (!rmtask.cnt) goto disable_rm; goto unlock; } rmtask.cnt++; mutex_unlock(&rmtask.lock); return 0; disable_rm: msm_adsp_disable(rmtask.mod); msm_adsp_put(rmtask.mod); rmtask.state = ADSP_STATE_DISABLED; unlock: mutex_unlock(&rmtask.lock); return rc; } EXPORT_SYMBOL(get_adsp_resource); int32_t put_adsp_resource(unsigned short client_id, void *cmd_buf, size_t cmd_size) { mutex_lock(&rmtask.lock); if (rmtask.state != ADSP_STATE_ENABLED) { mutex_unlock(&rmtask.lock); return 0; } msm_adsp_write(rmtask.mod, QDSP_apuRmtQueue, cmd_buf, cmd_size); rmtask.cnt--; if (!rmtask.cnt) { msm_adsp_disable(rmtask.mod); 
msm_adsp_put(rmtask.mod); rmtask.state = ADSP_STATE_DISABLED; } mutex_unlock(&rmtask.lock); return 0; } EXPORT_SYMBOL(put_adsp_resource); static void rm_dsp_event(void *data, unsigned id, size_t len, void (*getevent) (void *ptr, size_t len)) { unsigned short client_id; int client_idx; MM_DBG("Msg ID = %d\n", id); switch (id) { case RMT_CODEC_CONFIG_ACK: { getevent(&rmtask.cfg_msg, sizeof(rmtask.cfg_msg)); client_id = ((rmtask.cfg_msg.client_id << 8) | rmtask.cfg_msg.task_id); client_idx = ((client_id >> 8) * MAX_CLIENTS) + (client_id & 0xFF); memcpy(&rmclient[client_idx].cfg_msg, &rmtask.cfg_msg, sizeof(rmtask.cfg_msg)); rmclient[client_idx].wait_state = 1; wake_up(&rmclient[client_idx].wait); break; } case RMT_DSP_OUT_OF_MIPS: { struct rmt_dsp_out_of_mips msg; getevent(&msg, sizeof(msg)); MM_ERR("RMT_DSP_OUT_OF_MIPS: Not enough resorces in ADSP \ to handle all sessions :%hx\n", msg.dec_info); break; } default: MM_DBG("Unknown Msg Id\n"); break; } } void rmtask_init(void) { int i; for (i = 0; i < MAX_RM_CLIENTS; i++) init_waitqueue_head(&rmclient[i].wait); mutex_init(&rmtask.lock); }
gpl-2.0
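get_adsp_resource() and rm_dsp_event() above both recover a client-table index from a 16-bit id whose high byte is the client number and low byte the task id. A tiny standalone sketch of that packing (MAX_CLIENTS copied from the file; the example ids are made up):

#include <stdio.h>

#define MAX_CLIENTS 5

static unsigned pack_id(unsigned client, unsigned task)
{
	return (client << 8) | task;
}

static unsigned index_of(unsigned client_id)
{
	/* Same arithmetic as the driver: high byte picks the client row,
	 * low byte the task within it. */
	return ((client_id >> 8) * MAX_CLIENTS) + (client_id & 0xFF);
}

int main(void)
{
	unsigned id = pack_id(0, 3);

	printf("client_id 0x%04x -> index %u\n", id, index_of(id));
	return 0;
}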
GeyerA/kernel_shamu
net/dccp/ipv6.c
1575
31246
/* * DCCP over IPv6 * Linux INET6 implementation * * Based on net/dccp6/ipv6.c * * Arnaldo Carvalho de Melo <acme@ghostprotocols.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/random.h> #include <linux/slab.h> #include <linux/xfrm.h> #include <net/addrconf.h> #include <net/inet_common.h> #include <net/inet_hashtables.h> #include <net/inet_sock.h> #include <net/inet6_connection_sock.h> #include <net/inet6_hashtables.h> #include <net/ip6_route.h> #include <net/ipv6.h> #include <net/protocol.h> #include <net/transp_v6.h> #include <net/ip6_checksum.h> #include <net/xfrm.h> #include <net/secure_seq.h> #include "dccp.h" #include "ipv6.h" #include "feat.h" /* The per-net dccp.v6_ctl_sk is used for sending RSTs and ACKs */ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped; static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops; static void dccp_v6_hash(struct sock *sk) { if (sk->sk_state != DCCP_CLOSED) { if (inet_csk(sk)->icsk_af_ops == &dccp_ipv6_mapped) { inet_hash(sk); return; } local_bh_disable(); __inet6_hash(sk, NULL); local_bh_enable(); } } /* add pseudo-header to DCCP checksum stored in skb->csum */ static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb, const struct in6_addr *saddr, const struct in6_addr *daddr) { return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum); } static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb) { struct ipv6_pinfo *np = inet6_sk(sk); struct dccp_hdr *dh = dccp_hdr(skb); dccp_csum_outgoing(skb); dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr); } static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb) { return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32, ipv6_hdr(skb)->saddr.s6_addr32, dccp_hdr(skb)->dccph_dport, dccp_hdr(skb)->dccph_sport ); } static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info) { const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); struct dccp_sock *dp; struct ipv6_pinfo *np; struct sock *sk; int err; __u64 seq; struct net *net = dev_net(skb->dev); if (skb->len < offset + sizeof(*dh) || skb->len < offset + __dccp_basic_hdr_len(dh)) { ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS); return; } sk = inet6_lookup(net, &dccp_hashinfo, &hdr->daddr, dh->dccph_dport, &hdr->saddr, dh->dccph_sport, inet6_iif(skb)); if (sk == NULL) { ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS); return; } if (sk->sk_state == DCCP_TIME_WAIT) { inet_twsk_put(inet_twsk(sk)); return; } bh_lock_sock(sk); if (sock_owned_by_user(sk)) NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS); if (sk->sk_state == DCCP_CLOSED) goto out; dp = dccp_sk(sk); seq = dccp_hdr_seq(dh); if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) && !between48(seq, dp->dccps_awl, dp->dccps_awh)) { NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); goto out; } np = inet6_sk(sk); if (type == NDISC_REDIRECT) { struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie); if (dst) dst->ops->redirect(dst, sk, skb); goto out; } if (type == ICMPV6_PKT_TOOBIG) { struct dst_entry *dst = NULL; if (sock_owned_by_user(sk)) goto out; if ((1 << sk->sk_state) & 
(DCCPF_LISTEN | DCCPF_CLOSED)) goto out; dst = inet6_csk_update_pmtu(sk, ntohl(info)); if (!dst) goto out; if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) dccp_sync_mss(sk, dst_mtu(dst)); goto out; } icmpv6_err_convert(type, code, &err); /* Might be for an request_sock */ switch (sk->sk_state) { struct request_sock *req, **prev; case DCCP_LISTEN: if (sock_owned_by_user(sk)) goto out; req = inet6_csk_search_req(sk, &prev, dh->dccph_dport, &hdr->daddr, &hdr->saddr, inet6_iif(skb)); if (req == NULL) goto out; /* * ICMPs are not backlogged, hence we cannot get an established * socket here. */ WARN_ON(req->sk != NULL); if (!between48(seq, dccp_rsk(req)->dreq_iss, dccp_rsk(req)->dreq_gss)) { NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); goto out; } inet_csk_reqsk_queue_drop(sk, req, prev); goto out; case DCCP_REQUESTING: case DCCP_RESPOND: /* Cannot happen. It can, it SYNs are crossed. --ANK */ if (!sock_owned_by_user(sk)) { DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS); sk->sk_err = err; /* * Wake people up to see the error * (see connect in sock.c) */ sk->sk_error_report(sk); dccp_done(sk); } else sk->sk_err_soft = err; goto out; } if (!sock_owned_by_user(sk) && np->recverr) { sk->sk_err = err; sk->sk_error_report(sk); } else sk->sk_err_soft = err; out: bh_unlock_sock(sk); sock_put(sk); } static int dccp_v6_send_response(struct sock *sk, struct request_sock *req) { struct inet6_request_sock *ireq6 = inet6_rsk(req); struct ipv6_pinfo *np = inet6_sk(sk); struct sk_buff *skb; struct in6_addr *final_p, final; struct flowi6 fl6; int err = -1; struct dst_entry *dst; memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_DCCP; fl6.daddr = ireq6->rmt_addr; fl6.saddr = ireq6->loc_addr; fl6.flowlabel = 0; fl6.flowi6_oif = ireq6->iif; fl6.fl6_dport = inet_rsk(req)->rmt_port; fl6.fl6_sport = inet_rsk(req)->loc_port; security_req_classify_flow(req, flowi6_to_flowi(&fl6)); final_p = fl6_update_dst(&fl6, np->opt, &final); dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false); if (IS_ERR(dst)) { err = PTR_ERR(dst); dst = NULL; goto done; } skb = dccp_make_response(sk, dst, req); if (skb != NULL) { struct dccp_hdr *dh = dccp_hdr(skb); dh->dccph_checksum = dccp_v6_csum_finish(skb, &ireq6->loc_addr, &ireq6->rmt_addr); fl6.daddr = ireq6->rmt_addr; err = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass); err = net_xmit_eval(err); } done: dst_release(dst); return err; } static void dccp_v6_reqsk_destructor(struct request_sock *req) { dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg); if (inet6_rsk(req)->pktopts != NULL) kfree_skb(inet6_rsk(req)->pktopts); } static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) { const struct ipv6hdr *rxip6h; struct sk_buff *skb; struct flowi6 fl6; struct net *net = dev_net(skb_dst(rxskb)->dev); struct sock *ctl_sk = net->dccp.v6_ctl_sk; struct dst_entry *dst; if (dccp_hdr(rxskb)->dccph_type == DCCP_PKT_RESET) return; if (!ipv6_unicast_destination(rxskb)) return; skb = dccp_ctl_make_reset(ctl_sk, rxskb); if (skb == NULL) return; rxip6h = ipv6_hdr(rxskb); dccp_hdr(skb)->dccph_checksum = dccp_v6_csum_finish(skb, &rxip6h->saddr, &rxip6h->daddr); memset(&fl6, 0, sizeof(fl6)); fl6.daddr = rxip6h->saddr; fl6.saddr = rxip6h->daddr; fl6.flowi6_proto = IPPROTO_DCCP; fl6.flowi6_oif = inet6_iif(rxskb); fl6.fl6_dport = dccp_hdr(skb)->dccph_dport; fl6.fl6_sport = dccp_hdr(skb)->dccph_sport; security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6)); /* sk = NULL, but it is safe for now. RST socket required. 
*/ dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL, false); if (!IS_ERR(dst)) { skb_dst_set(skb, dst); ip6_xmit(ctl_sk, skb, &fl6, NULL, 0); DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); return; } kfree_skb(skb); } static struct request_sock_ops dccp6_request_sock_ops = { .family = AF_INET6, .obj_size = sizeof(struct dccp6_request_sock), .rtx_syn_ack = dccp_v6_send_response, .send_ack = dccp_reqsk_send_ack, .destructor = dccp_v6_reqsk_destructor, .send_reset = dccp_v6_ctl_send_reset, .syn_ack_timeout = dccp_syn_ack_timeout, }; static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) { const struct dccp_hdr *dh = dccp_hdr(skb); const struct ipv6hdr *iph = ipv6_hdr(skb); struct sock *nsk; struct request_sock **prev; /* Find possible connection requests. */ struct request_sock *req = inet6_csk_search_req(sk, &prev, dh->dccph_sport, &iph->saddr, &iph->daddr, inet6_iif(skb)); if (req != NULL) return dccp_check_req(sk, skb, req, prev); nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo, &iph->saddr, dh->dccph_sport, &iph->daddr, ntohs(dh->dccph_dport), inet6_iif(skb)); if (nsk != NULL) { if (nsk->sk_state != DCCP_TIME_WAIT) { bh_lock_sock(nsk); return nsk; } inet_twsk_put(inet_twsk(nsk)); return NULL; } return sk; } static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) { struct request_sock *req; struct dccp_request_sock *dreq; struct inet6_request_sock *ireq6; struct ipv6_pinfo *np = inet6_sk(sk); const __be32 service = dccp_hdr_request(skb)->dccph_req_service; struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); if (skb->protocol == htons(ETH_P_IP)) return dccp_v4_conn_request(sk, skb); if (!ipv6_unicast_destination(skb)) return 0; /* discard, don't send a reset here */ if (dccp_bad_service_code(sk, service)) { dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE; goto drop; } /* * There are no SYN attacks on IPv6, yet... */ dcb->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY; if (inet_csk_reqsk_queue_is_full(sk)) goto drop; if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) goto drop; req = inet6_reqsk_alloc(&dccp6_request_sock_ops); if (req == NULL) goto drop; if (dccp_reqsk_init(req, dccp_sk(sk), skb)) goto drop_and_free; dreq = dccp_rsk(req); if (dccp_parse_options(sk, dreq, skb)) goto drop_and_free; if (security_inet_conn_request(sk, skb, req)) goto drop_and_free; ireq6 = inet6_rsk(req); ireq6->rmt_addr = ipv6_hdr(skb)->saddr; ireq6->loc_addr = ipv6_hdr(skb)->daddr; if (ipv6_opt_accepted(sk, skb) || np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { atomic_inc(&skb->users); ireq6->pktopts = skb; } ireq6->iif = sk->sk_bound_dev_if; /* So that link locals have meaning */ if (!sk->sk_bound_dev_if && ipv6_addr_type(&ireq6->rmt_addr) & IPV6_ADDR_LINKLOCAL) ireq6->iif = inet6_iif(skb); /* * Step 3: Process LISTEN state * * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookie * * Setting S.SWL/S.SWH to is deferred to dccp_create_openreq_child(). 
*/ dreq->dreq_isr = dcb->dccpd_seq; dreq->dreq_gsr = dreq->dreq_isr; dreq->dreq_iss = dccp_v6_init_sequence(skb); dreq->dreq_gss = dreq->dreq_iss; dreq->dreq_service = service; if (dccp_v6_send_response(sk, req)) goto drop_and_free; inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); return 0; drop_and_free: reqsk_free(req); drop: DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS); return -1; } static struct sock *dccp_v6_request_recv_sock(struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst) { struct inet6_request_sock *ireq6 = inet6_rsk(req); struct ipv6_pinfo *newnp, *np = inet6_sk(sk); struct inet_sock *newinet; struct dccp6_sock *newdp6; struct sock *newsk; if (skb->protocol == htons(ETH_P_IP)) { /* * v6 mapped */ newsk = dccp_v4_request_recv_sock(sk, skb, req, dst); if (newsk == NULL) return NULL; newdp6 = (struct dccp6_sock *)newsk; newinet = inet_sk(newsk); newinet->pinet6 = &newdp6->inet6; newnp = inet6_sk(newsk); memcpy(newnp, np, sizeof(struct ipv6_pinfo)); ipv6_addr_set_v4mapped(newinet->inet_daddr, &newnp->daddr); ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr); newnp->rcv_saddr = newnp->saddr; inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped; newsk->sk_backlog_rcv = dccp_v4_do_rcv; newnp->pktoptions = NULL; newnp->opt = NULL; newnp->mcast_oif = inet6_iif(skb); newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; /* * No need to charge this sock to the relevant IPv6 refcnt debug socks count * here, dccp_create_openreq_child now does this for us, see the comment in * that function for the gory details. -acme */ /* It is tricky place. Until this moment IPv4 tcp worked with IPv6 icsk.icsk_af_ops. Sync it now. */ dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie); return newsk; } if (sk_acceptq_is_full(sk)) goto out_overflow; if (dst == NULL) { struct in6_addr *final_p, final; struct flowi6 fl6; memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_proto = IPPROTO_DCCP; fl6.daddr = ireq6->rmt_addr; final_p = fl6_update_dst(&fl6, np->opt, &final); fl6.saddr = ireq6->loc_addr; fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.fl6_dport = inet_rsk(req)->rmt_port; fl6.fl6_sport = inet_rsk(req)->loc_port; security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false); if (IS_ERR(dst)) goto out; } newsk = dccp_create_openreq_child(sk, req, skb); if (newsk == NULL) goto out_nonewsk; /* * No need to charge this sock to the relevant IPv6 refcnt debug socks * count here, dccp_create_openreq_child now does this for us, see the * comment in that function for the gory details. -acme */ __ip6_dst_store(newsk, dst, NULL, NULL); newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM | NETIF_F_TSO); newdp6 = (struct dccp6_sock *)newsk; newinet = inet_sk(newsk); newinet->pinet6 = &newdp6->inet6; newnp = inet6_sk(newsk); memcpy(newnp, np, sizeof(struct ipv6_pinfo)); newnp->daddr = ireq6->rmt_addr; newnp->saddr = ireq6->loc_addr; newnp->rcv_saddr = ireq6->loc_addr; newsk->sk_bound_dev_if = ireq6->iif; /* Now IPv6 options... First: no IPv4 options. 
*/ newinet->inet_opt = NULL; /* Clone RX bits */ newnp->rxopt.all = np->rxopt.all; /* Clone pktoptions received with SYN */ newnp->pktoptions = NULL; if (ireq6->pktopts != NULL) { newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC); consume_skb(ireq6->pktopts); ireq6->pktopts = NULL; if (newnp->pktoptions) skb_set_owner_r(newnp->pktoptions, newsk); } newnp->opt = NULL; newnp->mcast_oif = inet6_iif(skb); newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; /* * Clone native IPv6 options from listening socket (if any) * * Yes, keeping reference count would be much more clever, but we make * one more one thing there: reattach optmem to newsk. */ if (np->opt != NULL) newnp->opt = ipv6_dup_options(newsk, np->opt); inet_csk(newsk)->icsk_ext_hdr_len = 0; if (newnp->opt != NULL) inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen + newnp->opt->opt_flen); dccp_sync_mss(newsk, dst_mtu(dst)); newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6; newinet->inet_rcv_saddr = LOOPBACK4_IPV6; if (__inet_inherit_port(sk, newsk) < 0) { inet_csk_prepare_forced_close(newsk); dccp_done(newsk); goto out; } __inet6_hash(newsk, NULL); return newsk; out_overflow: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); out_nonewsk: dst_release(dst); out: NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); return NULL; } /* The socket must have it's spinlock held when we get * here. * * We have a potential double-lock case here, so even when * doing backlog processing we use the BH locking scheme. * This is because we cannot sleep with the original spinlock * held. */ static int dccp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) { struct ipv6_pinfo *np = inet6_sk(sk); struct sk_buff *opt_skb = NULL; /* Imagine: socket is IPv6. IPv4 packet arrives, goes to IPv4 receive handler and backlogged. From backlog it always goes here. Kerboom... Fortunately, dccp_rcv_established and rcv_established handle them correctly, but it is not case with dccp_v6_hnd_req and dccp_v6_ctl_send_reset(). --ANK */ if (skb->protocol == htons(ETH_P_IP)) return dccp_v4_do_rcv(sk, skb); if (sk_filter(sk, skb)) goto discard; /* * socket locking is here for SMP purposes as backlog rcv is currently * called with bh processing disabled. */ /* Do Stevens' IPV6_PKTOPTIONS. Yes, guys, it is the only place in our code, where we may make it not affecting IPv4. The rest of code is protocol independent, and I do not like idea to uglify IPv4. Actually, all the idea behind IPV6_PKTOPTIONS looks not very well thought. For now we latch options, received in the last packet, enqueued by tcp. Feel free to propose better solution. --ANK (980728) */ if (np->rxopt.all) /* * FIXME: Add handling of IPV6_PKTOPTIONS skb. See the comments below * (wrt ipv6_pktopions) and net/ipv6/tcp_ipv6.c for an example. */ opt_skb = skb_clone(skb, GFP_ATOMIC); if (sk->sk_state == DCCP_OPEN) { /* Fast path */ if (dccp_rcv_established(sk, skb, dccp_hdr(skb), skb->len)) goto reset; if (opt_skb) { /* XXX This is where we would goto ipv6_pktoptions. */ __kfree_skb(opt_skb); } return 0; } /* * Step 3: Process LISTEN state * If S.state == LISTEN, * If P.type == Request or P contains a valid Init Cookie option, * (* Must scan the packet's options to check for Init * Cookies. Only Init Cookies are processed here, * however; other options are processed in Step 8. 
This * scan need only be performed if the endpoint uses Init * Cookies *) * (* Generate a new socket and switch to that socket *) * Set S := new socket for this port pair * S.state = RESPOND * Choose S.ISS (initial seqno) or set from Init Cookies * Initialize S.GAR := S.ISS * Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies * Continue with S.state == RESPOND * (* A Response packet will be generated in Step 11 *) * Otherwise, * Generate Reset(No Connection) unless P.type == Reset * Drop packet and return * * NOTE: the check for the packet types is done in * dccp_rcv_state_process */ if (sk->sk_state == DCCP_LISTEN) { struct sock *nsk = dccp_v6_hnd_req(sk, skb); if (nsk == NULL) goto discard; /* * Queue it on the new socket if the new socket is active, * otherwise we just shortcircuit this and continue with * the new socket.. */ if (nsk != sk) { if (dccp_child_process(sk, nsk, skb)) goto reset; if (opt_skb != NULL) __kfree_skb(opt_skb); return 0; } } if (dccp_rcv_state_process(sk, skb, dccp_hdr(skb), skb->len)) goto reset; if (opt_skb) { /* XXX This is where we would goto ipv6_pktoptions. */ __kfree_skb(opt_skb); } return 0; reset: dccp_v6_ctl_send_reset(sk, skb); discard: if (opt_skb != NULL) __kfree_skb(opt_skb); kfree_skb(skb); return 0; } static int dccp_v6_rcv(struct sk_buff *skb) { const struct dccp_hdr *dh; struct sock *sk; int min_cov; /* Step 1: Check header basics */ if (dccp_invalid_packet(skb)) goto discard_it; /* Step 1: If header checksum is incorrect, drop packet and return. */ if (dccp_v6_csum_finish(skb, &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr)) { DCCP_WARN("dropped packet with invalid checksum\n"); goto discard_it; } dh = dccp_hdr(skb); DCCP_SKB_CB(skb)->dccpd_seq = dccp_hdr_seq(dh); DCCP_SKB_CB(skb)->dccpd_type = dh->dccph_type; if (dccp_packet_without_ack(skb)) DCCP_SKB_CB(skb)->dccpd_ack_seq = DCCP_PKT_WITHOUT_ACK_SEQ; else DCCP_SKB_CB(skb)->dccpd_ack_seq = dccp_hdr_ack_seq(skb); /* Step 2: * Look up flow ID in table and get corresponding socket */ sk = __inet6_lookup_skb(&dccp_hashinfo, skb, dh->dccph_sport, dh->dccph_dport); /* * Step 2: * If no socket ... */ if (sk == NULL) { dccp_pr_debug("failed to look up flow ID in table and " "get corresponding socket\n"); goto no_dccp_socket; } /* * Step 2: * ... or S.state == TIMEWAIT, * Generate Reset(No Connection) unless P.type == Reset * Drop packet and return */ if (sk->sk_state == DCCP_TIME_WAIT) { dccp_pr_debug("sk->sk_state == DCCP_TIME_WAIT: do_time_wait\n"); inet_twsk_put(inet_twsk(sk)); goto no_dccp_socket; } /* * RFC 4340, sec. 9.2.1: Minimum Checksum Coverage * o if MinCsCov = 0, only packets with CsCov = 0 are accepted * o if MinCsCov > 0, also accept packets with CsCov >= MinCsCov */ min_cov = dccp_sk(sk)->dccps_pcrlen; if (dh->dccph_cscov && (min_cov == 0 || dh->dccph_cscov < min_cov)) { dccp_pr_debug("Packet CsCov %d does not satisfy MinCsCov %d\n", dh->dccph_cscov, min_cov); /* FIXME: send Data Dropped option (see also dccp_v4_rcv) */ goto discard_and_relse; } if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) goto discard_and_relse; return sk_receive_skb(sk, skb, 1) ? -1 : 0; no_dccp_socket: if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard_it; /* * Step 2: * If no socket ... 
* Generate Reset(No Connection) unless P.type == Reset * Drop packet and return */ if (dh->dccph_type != DCCP_PKT_RESET) { DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; dccp_v6_ctl_send_reset(sk, skb); } discard_it: kfree_skb(skb); return 0; discard_and_relse: sock_put(sk); goto discard_it; } static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr; struct inet_connection_sock *icsk = inet_csk(sk); struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct dccp_sock *dp = dccp_sk(sk); struct in6_addr *saddr = NULL, *final_p, final; struct flowi6 fl6; struct dst_entry *dst; int addr_type; int err; dp->dccps_role = DCCP_ROLE_CLIENT; if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; if (usin->sin6_family != AF_INET6) return -EAFNOSUPPORT; memset(&fl6, 0, sizeof(fl6)); if (np->sndflow) { fl6.flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK; IP6_ECN_flow_init(fl6.flowlabel); if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) { struct ip6_flowlabel *flowlabel; flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); if (flowlabel == NULL) return -EINVAL; usin->sin6_addr = flowlabel->dst; fl6_sock_release(flowlabel); } } /* * connect() to INADDR_ANY means loopback (BSD'ism). */ if (ipv6_addr_any(&usin->sin6_addr)) usin->sin6_addr.s6_addr[15] = 1; addr_type = ipv6_addr_type(&usin->sin6_addr); if (addr_type & IPV6_ADDR_MULTICAST) return -ENETUNREACH; if (addr_type & IPV6_ADDR_LINKLOCAL) { if (addr_len >= sizeof(struct sockaddr_in6) && usin->sin6_scope_id) { /* If interface is set while binding, indices * must coincide. */ if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != usin->sin6_scope_id) return -EINVAL; sk->sk_bound_dev_if = usin->sin6_scope_id; } /* Connect to link-local address requires an interface */ if (!sk->sk_bound_dev_if) return -EINVAL; } np->daddr = usin->sin6_addr; np->flow_label = fl6.flowlabel; /* * DCCP over IPv4 */ if (addr_type == IPV6_ADDR_MAPPED) { u32 exthdrlen = icsk->icsk_ext_hdr_len; struct sockaddr_in sin; SOCK_DEBUG(sk, "connect: ipv4 mapped\n"); if (__ipv6_only_sock(sk)) return -ENETUNREACH; sin.sin_family = AF_INET; sin.sin_port = usin->sin6_port; sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3]; icsk->icsk_af_ops = &dccp_ipv6_mapped; sk->sk_backlog_rcv = dccp_v4_do_rcv; err = dccp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin)); if (err) { icsk->icsk_ext_hdr_len = exthdrlen; icsk->icsk_af_ops = &dccp_ipv6_af_ops; sk->sk_backlog_rcv = dccp_v6_do_rcv; goto failure; } ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr); ipv6_addr_set_v4mapped(inet->inet_rcv_saddr, &np->rcv_saddr); return err; } if (!ipv6_addr_any(&np->rcv_saddr)) saddr = &np->rcv_saddr; fl6.flowi6_proto = IPPROTO_DCCP; fl6.daddr = np->daddr; fl6.saddr = saddr ? 
*saddr : np->saddr; fl6.flowi6_oif = sk->sk_bound_dev_if; fl6.fl6_dport = usin->sin6_port; fl6.fl6_sport = inet->inet_sport; security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); final_p = fl6_update_dst(&fl6, np->opt, &final); dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto failure; } if (saddr == NULL) { saddr = &fl6.saddr; np->rcv_saddr = *saddr; } /* set the source address */ np->saddr = *saddr; inet->inet_rcv_saddr = LOOPBACK4_IPV6; __ip6_dst_store(sk, dst, NULL, NULL); icsk->icsk_ext_hdr_len = 0; if (np->opt != NULL) icsk->icsk_ext_hdr_len = (np->opt->opt_flen + np->opt->opt_nflen); inet->inet_dport = usin->sin6_port; dccp_set_state(sk, DCCP_REQUESTING); err = inet6_hash_connect(&dccp_death_row, sk); if (err) goto late_failure; dp->dccps_iss = secure_dccpv6_sequence_number(np->saddr.s6_addr32, np->daddr.s6_addr32, inet->inet_sport, inet->inet_dport); err = dccp_connect(sk); if (err) goto late_failure; return 0; late_failure: dccp_set_state(sk, DCCP_CLOSED); __sk_dst_reset(sk); failure: inet->inet_dport = 0; sk->sk_route_caps = 0; return err; } static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = { .queue_xmit = inet6_csk_xmit, .send_check = dccp_v6_send_check, .rebuild_header = inet6_sk_rebuild_header, .conn_request = dccp_v6_conn_request, .syn_recv_sock = dccp_v6_request_recv_sock, .net_header_len = sizeof(struct ipv6hdr), .setsockopt = ipv6_setsockopt, .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in6), .bind_conflict = inet6_csk_bind_conflict, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ipv6_setsockopt, .compat_getsockopt = compat_ipv6_getsockopt, #endif }; /* * DCCP over IPv4 via INET6 API */ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = { .queue_xmit = ip_queue_xmit, .send_check = dccp_v4_send_check, .rebuild_header = inet_sk_rebuild_header, .conn_request = dccp_v6_conn_request, .syn_recv_sock = dccp_v6_request_recv_sock, .net_header_len = sizeof(struct iphdr), .setsockopt = ipv6_setsockopt, .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in6), #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ipv6_setsockopt, .compat_getsockopt = compat_ipv6_getsockopt, #endif }; /* NOTE: A lot of things set to zero explicitly by call to * sk_alloc() so need not be done here. 
*/ static int dccp_v6_init_sock(struct sock *sk) { static __u8 dccp_v6_ctl_sock_initialized; int err = dccp_init_sock(sk, dccp_v6_ctl_sock_initialized); if (err == 0) { if (unlikely(!dccp_v6_ctl_sock_initialized)) dccp_v6_ctl_sock_initialized = 1; inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops; } return err; } static void dccp_v6_destroy_sock(struct sock *sk) { dccp_destroy_sock(sk); inet6_destroy_sock(sk); } static struct timewait_sock_ops dccp6_timewait_sock_ops = { .twsk_obj_size = sizeof(struct dccp6_timewait_sock), }; static struct proto dccp_v6_prot = { .name = "DCCPv6", .owner = THIS_MODULE, .close = dccp_close, .connect = dccp_v6_connect, .disconnect = dccp_disconnect, .ioctl = dccp_ioctl, .init = dccp_v6_init_sock, .setsockopt = dccp_setsockopt, .getsockopt = dccp_getsockopt, .sendmsg = dccp_sendmsg, .recvmsg = dccp_recvmsg, .backlog_rcv = dccp_v6_do_rcv, .hash = dccp_v6_hash, .unhash = inet_unhash, .accept = inet_csk_accept, .get_port = inet_csk_get_port, .shutdown = dccp_shutdown, .destroy = dccp_v6_destroy_sock, .orphan_count = &dccp_orphan_count, .max_header = MAX_DCCP_HEADER, .obj_size = sizeof(struct dccp6_sock), .slab_flags = SLAB_DESTROY_BY_RCU, .rsk_prot = &dccp6_request_sock_ops, .twsk_prot = &dccp6_timewait_sock_ops, .h.hashinfo = &dccp_hashinfo, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_dccp_setsockopt, .compat_getsockopt = compat_dccp_getsockopt, #endif }; static const struct inet6_protocol dccp_v6_protocol = { .handler = dccp_v6_rcv, .err_handler = dccp_v6_err, .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL, }; static const struct proto_ops inet6_dccp_ops = { .family = PF_INET6, .owner = THIS_MODULE, .release = inet6_release, .bind = inet6_bind, .connect = inet_stream_connect, .socketpair = sock_no_socketpair, .accept = inet_accept, .getname = inet6_getname, .poll = dccp_poll, .ioctl = inet6_ioctl, .listen = inet_dccp_listen, .shutdown = inet_shutdown, .setsockopt = sock_common_setsockopt, .getsockopt = sock_common_getsockopt, .sendmsg = inet_sendmsg, .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_sock_common_setsockopt, .compat_getsockopt = compat_sock_common_getsockopt, #endif }; static struct inet_protosw dccp_v6_protosw = { .type = SOCK_DCCP, .protocol = IPPROTO_DCCP, .prot = &dccp_v6_prot, .ops = &inet6_dccp_ops, .flags = INET_PROTOSW_ICSK, }; static int __net_init dccp_v6_init_net(struct net *net) { if (dccp_hashinfo.bhash == NULL) return -ESOCKTNOSUPPORT; return inet_ctl_sock_create(&net->dccp.v6_ctl_sk, PF_INET6, SOCK_DCCP, IPPROTO_DCCP, net); } static void __net_exit dccp_v6_exit_net(struct net *net) { inet_ctl_sock_destroy(net->dccp.v6_ctl_sk); } static struct pernet_operations dccp_v6_ops = { .init = dccp_v6_init_net, .exit = dccp_v6_exit_net, }; static int __init dccp_v6_init(void) { int err = proto_register(&dccp_v6_prot, 1); if (err != 0) goto out; err = inet6_add_protocol(&dccp_v6_protocol, IPPROTO_DCCP); if (err != 0) goto out_unregister_proto; inet6_register_protosw(&dccp_v6_protosw); err = register_pernet_subsys(&dccp_v6_ops); if (err != 0) goto out_destroy_ctl_sock; out: return err; out_destroy_ctl_sock: inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP); inet6_unregister_protosw(&dccp_v6_protosw); out_unregister_proto: proto_unregister(&dccp_v6_prot); goto out; } static void __exit dccp_v6_exit(void) { unregister_pernet_subsys(&dccp_v6_ops); inet6_del_protocol(&dccp_v6_protocol, IPPROTO_DCCP); inet6_unregister_protosw(&dccp_v6_protosw); 
proto_unregister(&dccp_v6_prot); } module_init(dccp_v6_init); module_exit(dccp_v6_exit); /* * __stringify doesn't likes enums, so use SOCK_DCCP (6) and IPPROTO_DCCP (33) * values directly, Also cover the case where the protocol is not specified, * i.e. net-pf-PF_INET6-proto-0-type-SOCK_DCCP */ MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 33, 6); MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 0, 6); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@mandriva.com>"); MODULE_DESCRIPTION("DCCPv6 - Datagram Congestion Controlled Protocol");
gpl-2.0
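dccp_v6_connect() above detects a v4-mapped peer (addr_type == IPV6_ADDR_MAPPED) and switches to the IPv4 code path, and dccp_v6_request_recv_sock() builds such addresses with ipv6_addr_set_v4mapped(). A standalone sketch of constructing and recognising a ::ffff:a.b.c.d address with the ordinary sockets API (the literal address is just an example):

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
	struct in_addr v4;
	struct in6_addr v6;
	char buf[INET6_ADDRSTRLEN];

	inet_pton(AF_INET, "192.0.2.1", &v4);

	/* Userspace equivalent of ipv6_addr_set_v4mapped(): ::ffff:192.0.2.1 */
	memset(&v6, 0, sizeof(v6));
	v6.s6_addr[10] = 0xff;
	v6.s6_addr[11] = 0xff;
	memcpy(&v6.s6_addr[12], &v4, sizeof(v4));

	printf("%s v4-mapped: %d\n",
	       inet_ntop(AF_INET6, &v6, buf, sizeof(buf)),
	       IN6_IS_ADDR_V4MAPPED(&v6));
	return 0;
}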
TeamWin/android_kernel_htc_a32e
drivers/clk/tegra/clk.c
2087
2622
/* * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/of.h> #include <linux/clk/tegra.h> #include "clk.h" /* Global data of Tegra CPU CAR ops */ static struct tegra_cpu_car_ops dummy_car_ops; struct tegra_cpu_car_ops *tegra_cpu_car_ops = &dummy_car_ops; void __init tegra_init_dup_clks(struct tegra_clk_duplicate *dup_list, struct clk *clks[], int clk_max) { struct clk *clk; for (; dup_list->clk_id < clk_max; dup_list++) { clk = clks[dup_list->clk_id]; dup_list->lookup.clk = clk; clkdev_add(&dup_list->lookup); } } void __init tegra_init_from_table(struct tegra_clk_init_table *tbl, struct clk *clks[], int clk_max) { struct clk *clk; for (; tbl->clk_id < clk_max; tbl++) { clk = clks[tbl->clk_id]; if (IS_ERR_OR_NULL(clk)) return; if (tbl->parent_id < clk_max) { struct clk *parent = clks[tbl->parent_id]; if (clk_set_parent(clk, parent)) { pr_err("%s: Failed to set parent %s of %s\n", __func__, __clk_get_name(parent), __clk_get_name(clk)); WARN_ON(1); } } if (tbl->rate) if (clk_set_rate(clk, tbl->rate)) { pr_err("%s: Failed to set rate %lu of %s\n", __func__, tbl->rate, __clk_get_name(clk)); WARN_ON(1); } if (tbl->state) if (clk_prepare_enable(clk)) { pr_err("%s: Failed to enable %s\n", __func__, __clk_get_name(clk)); WARN_ON(1); } } } static const struct of_device_id tegra_dt_clk_match[] = { { .compatible = "nvidia,tegra20-car", .data = tegra20_clock_init }, { .compatible = "nvidia,tegra30-car", .data = tegra30_clock_init }, { .compatible = "nvidia,tegra114-car", .data = tegra114_clock_init }, { } }; void __init tegra_clocks_init(void) { of_clk_init(tegra_dt_clk_match); } tegra_clk_apply_init_table_func tegra_clk_apply_init_table; void __init tegra_clocks_apply_init_table(void) { if (!tegra_clk_apply_init_table) return; tegra_clk_apply_init_table(); }
gpl-2.0
yuzaipiaofei/android_kernel_cyanogen_msm8916
drivers/staging/media/solo6x10/solo6x10-tw28.c
2087
26067
/* * Copyright (C) 2010-2013 Bluecherry, LLC <http://www.bluecherrydvr.com> * * Original author: * Ben Collins <bcollins@ubuntu.com> * * Additional work by: * John Brooks <john.brooks@bluecherry.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/kernel.h> #include <linux/delay.h> #include "solo6x10.h" #include "solo6x10-tw28.h" #define DEFAULT_HDELAY_NTSC (32 - 8) #define DEFAULT_HACTIVE_NTSC (720 + 16) #define DEFAULT_VDELAY_NTSC (7 - 2) #define DEFAULT_VACTIVE_NTSC (240 + 4) #define DEFAULT_HDELAY_PAL (32 + 4) #define DEFAULT_HACTIVE_PAL (864-DEFAULT_HDELAY_PAL) #define DEFAULT_VDELAY_PAL (6) #define DEFAULT_VACTIVE_PAL (312-DEFAULT_VDELAY_PAL) static const u8 tbl_tw2864_ntsc_template[] = { 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, /* 0x00 */ 0x12, 0xf5, 0x0c, 0xd0, 0x00, 0x00, 0x00, 0x7f, 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, /* 0x10 */ 0x12, 0xf5, 0x0c, 0xd0, 0x00, 0x00, 0x00, 0x7f, 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, /* 0x20 */ 0x12, 0xf5, 0x0c, 0xd0, 0x00, 0x00, 0x00, 0x7f, 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, /* 0x30 */ 0x12, 0xf5, 0x0c, 0xd0, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA3, 0x00, 0x00, 0x02, 0x00, 0xcc, 0x00, 0x80, 0x44, 0x50, /* 0x80 */ 0x22, 0x01, 0xd8, 0xbc, 0xb8, 0x44, 0x38, 0x00, 0x00, 0x78, 0x72, 0x3e, 0x14, 0xa5, 0xe4, 0x05, /* 0x90 */ 0x00, 0x28, 0x44, 0x44, 0xa0, 0x88, 0x5a, 0x01, 0x08, 0x08, 0x08, 0x08, 0x1a, 0x1a, 0x1a, 0x1a, /* 0xa0 */ 0x00, 0x00, 0x00, 0xf0, 0xf0, 0xf0, 0xf0, 0x44, 0x44, 0x0a, 0x00, 0xff, 0xef, 0xef, 0xef, 0xef, /* 0xb0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0 */ 0x00, 0x00, 0x55, 0x00, 0xb1, 0xe4, 0x40, 0x00, 0x77, 0x77, 0x01, 0x13, 0x57, 0x9b, 0xdf, 0x20, /* 0xd0 */ 0x64, 0xa8, 0xec, 0xc1, 0x0f, 0x11, 0x11, 0x81, 0x00, 0xe0, 0xbb, 0xbb, 0x00, 0x11, 0x00, 0x00, /* 0xe0 */ 0x11, 0x00, 0x00, 0x11, 0x00, 0x00, 0x11, 0x00, 0x83, 0xb5, 0x09, 0x78, 0x85, 0x00, 0x01, 0x20, /* 0xf0 */ 0x64, 0x11, 0x40, 0xaf, 0xff, 0x00, 0x00, 0x00, }; static const u8 tbl_tw2864_pal_template[] = { 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x00 */ 0x18, 0xf5, 0x0c, 0xd0, 0x00, 0x00, 0x01, 0x7f, 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x10 */ 0x18, 0xf5, 0x0c, 0xd0, 0x00, 0x00, 0x01, 0x7f, 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x20 */ 0x18, 0xf5, 0x0c, 0xd0, 0x00, 0x00, 0x01, 0x7f, 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x30 */ 0x18, 0xf5, 0x0c, 0xd0, 0x00, 0x00, 
0x01, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xA3, 0x00, 0x00, 0x02, 0x00, 0xcc, 0x00, 0x80, 0x44, 0x50, /* 0x80 */ 0x22, 0x01, 0xd8, 0xbc, 0xb8, 0x44, 0x38, 0x00, 0x00, 0x78, 0x72, 0x3e, 0x14, 0xa5, 0xe4, 0x05, /* 0x90 */ 0x00, 0x28, 0x44, 0x44, 0xa0, 0x90, 0x5a, 0x01, 0x0a, 0x0a, 0x0a, 0x0a, 0x1a, 0x1a, 0x1a, 0x1a, /* 0xa0 */ 0x00, 0x00, 0x00, 0xf0, 0xf0, 0xf0, 0xf0, 0x44, 0x44, 0x0a, 0x00, 0xff, 0xef, 0xef, 0xef, 0xef, /* 0xb0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0 */ 0x00, 0x00, 0x55, 0x00, 0xb1, 0xe4, 0x40, 0x00, 0x77, 0x77, 0x01, 0x13, 0x57, 0x9b, 0xdf, 0x20, /* 0xd0 */ 0x64, 0xa8, 0xec, 0xc1, 0x0f, 0x11, 0x11, 0x81, 0x00, 0xe0, 0xbb, 0xbb, 0x00, 0x11, 0x00, 0x00, /* 0xe0 */ 0x11, 0x00, 0x00, 0x11, 0x00, 0x00, 0x11, 0x00, 0x83, 0xb5, 0x09, 0x00, 0xa0, 0x00, 0x01, 0x20, /* 0xf0 */ 0x64, 0x11, 0x40, 0xaf, 0xff, 0x00, 0x00, 0x00, }; static const u8 tbl_tw2865_ntsc_template[] = { 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, /* 0x00 */ 0x12, 0xff, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f, 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, /* 0x10 */ 0x12, 0xff, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f, 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x02, /* 0x20 */ 0x12, 0xff, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f, 0x00, 0xf0, 0x70, 0x48, 0x80, 0x80, 0x00, 0x02, /* 0x30 */ 0x12, 0xff, 0x09, 0xd0, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x00, 0x90, 0x68, 0x00, 0x38, 0x80, 0x80, /* 0x40 */ 0x80, 0x80, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x43, 0x08, 0x00, 0x00, 0x01, 0xf1, 0x03, 0xEF, 0x03, /* 0x70 */ 0xE9, 0x03, 0xD9, 0x15, 0x15, 0xE4, 0xA3, 0x80, 0x00, 0x02, 0x00, 0xCC, 0x00, 0x80, 0x44, 0x50, /* 0x80 */ 0x22, 0x01, 0xD8, 0xBC, 0xB8, 0x44, 0x38, 0x00, 0x00, 0x78, 0x44, 0x3D, 0x14, 0xA5, 0xE0, 0x05, /* 0x90 */ 0x00, 0x28, 0x44, 0x44, 0xA0, 0x90, 0x52, 0x13, 0x08, 0x08, 0x08, 0x08, 0x1A, 0x1A, 0x1B, 0x1A, /* 0xa0 */ 0x00, 0x00, 0x00, 0xF0, 0xF0, 0xF0, 0xF0, 0x44, 0x44, 0x4A, 0x00, 0xFF, 0xEF, 0xEF, 0xEF, 0xEF, /* 0xb0 */ 0xFF, 0xE7, 0xE9, 0xE9, 0xEB, 0xFF, 0xD6, 0xD8, 0xD8, 0xD7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0 */ 0x00, 0x00, 0x55, 0x00, 0xE4, 0x39, 0x00, 0x80, 0x77, 0x77, 0x03, 0x20, 0x57, 0x9b, 0xdf, 0x31, /* 0xd0 */ 0x64, 0xa8, 0xec, 0xd1, 0x0f, 0x11, 0x11, 0x81, 0x10, 0xC0, 0xAA, 0xAA, 0x00, 0x11, 0x00, 0x00, /* 0xe0 */ 0x11, 0x00, 0x00, 0x11, 0x00, 0x00, 0x11, 0x00, 0x83, 0xB5, 0x09, 0x78, 0x85, 0x00, 0x01, 0x20, /* 0xf0 */ 0x64, 0x51, 0x40, 0xaf, 0xFF, 0xF0, 0x00, 0xC0, }; static const u8 tbl_tw2865_pal_template[] = { 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x00 */ 0x11, 0xff, 0x01, 0xc3, 0x00, 0x00, 0x01, 0x7f, 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x10 */ 0x11, 0xff, 0x01, 0xc3, 0x00, 0x00, 0x01, 0x7f, 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x20 */ 0x11, 0xff, 0x01, 0xc3, 0x00, 0x00, 0x01, 0x7f, 0x00, 0xf0, 0x70, 0x30, 0x80, 0x80, 0x00, 0x12, /* 0x30 */ 0x11, 0xff, 0x01, 0xc3, 0x00, 0x00, 0x01, 0x7f, 0x00, 0x94, 0x90, 
0x48, 0x00, 0x38, 0x7F, 0x80, /* 0x40 */ 0x80, 0x80, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x43, 0x08, 0x00, 0x00, 0x01, 0xf1, 0x03, 0xEF, 0x03, /* 0x70 */ 0xEA, 0x03, 0xD9, 0x15, 0x15, 0xE4, 0xA3, 0x80, 0x00, 0x02, 0x00, 0xCC, 0x00, 0x80, 0x44, 0x50, /* 0x80 */ 0x22, 0x01, 0xD8, 0xBC, 0xB8, 0x44, 0x38, 0x00, 0x00, 0x78, 0x44, 0x3D, 0x14, 0xA5, 0xE0, 0x05, /* 0x90 */ 0x00, 0x28, 0x44, 0x44, 0xA0, 0x90, 0x52, 0x13, 0x08, 0x08, 0x08, 0x08, 0x1A, 0x1A, 0x1A, 0x1A, /* 0xa0 */ 0x00, 0x00, 0x00, 0xF0, 0xF0, 0xF0, 0xF0, 0x44, 0x44, 0x4A, 0x00, 0xFF, 0xEF, 0xEF, 0xEF, 0xEF, /* 0xb0 */ 0xFF, 0xE7, 0xE9, 0xE9, 0xE9, 0xFF, 0xD7, 0xD8, 0xD9, 0xD8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0 */ 0x00, 0x00, 0x55, 0x00, 0xE4, 0x39, 0x00, 0x80, 0x77, 0x77, 0x03, 0x20, 0x57, 0x9b, 0xdf, 0x31, /* 0xd0 */ 0x64, 0xa8, 0xec, 0xd1, 0x0f, 0x11, 0x11, 0x81, 0x10, 0xC0, 0xAA, 0xAA, 0x00, 0x11, 0x00, 0x00, /* 0xe0 */ 0x11, 0x00, 0x00, 0x11, 0x00, 0x00, 0x11, 0x00, 0x83, 0xB5, 0x09, 0x00, 0xA0, 0x00, 0x01, 0x20, /* 0xf0 */ 0x64, 0x51, 0x40, 0xaf, 0xFF, 0xF0, 0x00, 0xC0, }; #define is_tw286x(__solo, __id) (!(__solo->tw2815 & (1 << __id))) static u8 tw_readbyte(struct solo_dev *solo_dev, int chip_id, u8 tw6x_off, u8 tw_off) { if (is_tw286x(solo_dev, chip_id)) return solo_i2c_readbyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(chip_id), tw6x_off); else return solo_i2c_readbyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(chip_id), tw_off); } static void tw_writebyte(struct solo_dev *solo_dev, int chip_id, u8 tw6x_off, u8 tw_off, u8 val) { if (is_tw286x(solo_dev, chip_id)) solo_i2c_writebyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(chip_id), tw6x_off, val); else solo_i2c_writebyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(chip_id), tw_off, val); } static void tw_write_and_verify(struct solo_dev *solo_dev, u8 addr, u8 off, u8 val) { int i; for (i = 0; i < 5; i++) { u8 rval = solo_i2c_readbyte(solo_dev, SOLO_I2C_TW, addr, off); if (rval == val) return; solo_i2c_writebyte(solo_dev, SOLO_I2C_TW, addr, off, val); msleep_interruptible(1); } /* printk("solo6x10/tw28: Error writing register: %02x->%02x [%02x]\n", */ /* addr, off, val); */ } static int tw2865_setup(struct solo_dev *solo_dev, u8 dev_addr) { u8 tbl_tw2865_common[256]; int i; if (solo_dev->video_type == SOLO_VO_FMT_TYPE_PAL) memcpy(tbl_tw2865_common, tbl_tw2865_pal_template, sizeof(tbl_tw2865_common)); else memcpy(tbl_tw2865_common, tbl_tw2865_ntsc_template, sizeof(tbl_tw2865_common)); /* ALINK Mode */ if (solo_dev->nr_chans == 4) { tbl_tw2865_common[0xd2] = 0x01; tbl_tw2865_common[0xcf] = 0x00; } else if (solo_dev->nr_chans == 8) { tbl_tw2865_common[0xd2] = 0x02; if (dev_addr == TW_CHIP_OFFSET_ADDR(1)) tbl_tw2865_common[0xcf] = 0x80; } else if (solo_dev->nr_chans == 16) { tbl_tw2865_common[0xd2] = 0x03; if (dev_addr == TW_CHIP_OFFSET_ADDR(1)) tbl_tw2865_common[0xcf] = 0x83; else if (dev_addr == TW_CHIP_OFFSET_ADDR(2)) tbl_tw2865_common[0xcf] = 0x83; else if (dev_addr == TW_CHIP_OFFSET_ADDR(3)) tbl_tw2865_common[0xcf] = 0x80; } for (i = 0; i < 0xff; i++) { /* Skip read only registers */ switch (i) { case 0xb8 ... 0xc1: case 0xc4 ... 0xc7: case 0xfd: continue; } switch (i & ~0x30) { case 0x00: case 0x0c ... 
0x0d: continue; } tw_write_and_verify(solo_dev, dev_addr, i, tbl_tw2865_common[i]); } return 0; } static int tw2864_setup(struct solo_dev *solo_dev, u8 dev_addr) { u8 tbl_tw2864_common[256]; int i; if (solo_dev->video_type == SOLO_VO_FMT_TYPE_PAL) memcpy(tbl_tw2864_common, tbl_tw2864_pal_template, sizeof(tbl_tw2864_common)); else memcpy(tbl_tw2864_common, tbl_tw2864_ntsc_template, sizeof(tbl_tw2864_common)); if (solo_dev->tw2865 == 0) { /* IRQ Mode */ if (solo_dev->nr_chans == 4) { tbl_tw2864_common[0xd2] = 0x01; tbl_tw2864_common[0xcf] = 0x00; } else if (solo_dev->nr_chans == 8) { tbl_tw2864_common[0xd2] = 0x02; if (dev_addr == TW_CHIP_OFFSET_ADDR(0)) tbl_tw2864_common[0xcf] = 0x43; else if (dev_addr == TW_CHIP_OFFSET_ADDR(1)) tbl_tw2864_common[0xcf] = 0x40; } else if (solo_dev->nr_chans == 16) { tbl_tw2864_common[0xd2] = 0x03; if (dev_addr == TW_CHIP_OFFSET_ADDR(0)) tbl_tw2864_common[0xcf] = 0x43; else if (dev_addr == TW_CHIP_OFFSET_ADDR(1)) tbl_tw2864_common[0xcf] = 0x43; else if (dev_addr == TW_CHIP_OFFSET_ADDR(2)) tbl_tw2864_common[0xcf] = 0x43; else if (dev_addr == TW_CHIP_OFFSET_ADDR(3)) tbl_tw2864_common[0xcf] = 0x40; } } else { /* ALINK Mode. Assumes that the first tw28xx is a * 2865 and these are in cascade. */ for (i = 0; i <= 4; i++) tbl_tw2864_common[0x08 | i << 4] = 0x12; if (solo_dev->nr_chans == 8) { tbl_tw2864_common[0xd2] = 0x02; if (dev_addr == TW_CHIP_OFFSET_ADDR(1)) tbl_tw2864_common[0xcf] = 0x80; } else if (solo_dev->nr_chans == 16) { tbl_tw2864_common[0xd2] = 0x03; if (dev_addr == TW_CHIP_OFFSET_ADDR(1)) tbl_tw2864_common[0xcf] = 0x83; else if (dev_addr == TW_CHIP_OFFSET_ADDR(2)) tbl_tw2864_common[0xcf] = 0x83; else if (dev_addr == TW_CHIP_OFFSET_ADDR(3)) tbl_tw2864_common[0xcf] = 0x80; } } for (i = 0; i < 0xff; i++) { /* Skip read only registers */ switch (i) { case 0xb8 ... 
0xc1: case 0xfd: continue; } switch (i & ~0x30) { case 0x00: case 0x0c: case 0x0d: continue; } tw_write_and_verify(solo_dev, dev_addr, i, tbl_tw2864_common[i]); } return 0; } static int tw2815_setup(struct solo_dev *solo_dev, u8 dev_addr) { u8 tbl_ntsc_tw2815_common[] = { 0x00, 0xc8, 0x20, 0xd0, 0x06, 0xf0, 0x08, 0x80, 0x80, 0x80, 0x80, 0x02, 0x06, 0x00, 0x11, }; u8 tbl_pal_tw2815_common[] = { 0x00, 0x88, 0x20, 0xd0, 0x05, 0x20, 0x28, 0x80, 0x80, 0x80, 0x80, 0x82, 0x06, 0x00, 0x11, }; u8 tbl_tw2815_sfr[] = { 0x00, 0x00, 0x00, 0xc0, 0x45, 0xa0, 0xd0, 0x2f, /* 0x00 */ 0x64, 0x80, 0x80, 0x82, 0x82, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x05, 0x00, 0x00, 0x80, 0x06, 0x00, /* 0x10 */ 0x00, 0x00, 0x00, 0xff, 0x8f, 0x00, 0x00, 0x00, 0x88, 0x88, 0xc0, 0x00, 0x20, 0x64, 0xa8, 0xec, /* 0x20 */ 0x31, 0x75, 0xb9, 0xfd, 0x00, 0x00, 0x88, 0x88, 0x88, 0x11, 0x00, 0x88, 0x88, 0x00, /* 0x30 */ }; u8 *tbl_tw2815_common; int i; int ch; tbl_ntsc_tw2815_common[0x06] = 0; /* Horizontal Delay Control */ tbl_ntsc_tw2815_common[0x02] = DEFAULT_HDELAY_NTSC & 0xff; tbl_ntsc_tw2815_common[0x06] |= 0x03 & (DEFAULT_HDELAY_NTSC >> 8); /* Horizontal Active Control */ tbl_ntsc_tw2815_common[0x03] = DEFAULT_HACTIVE_NTSC & 0xff; tbl_ntsc_tw2815_common[0x06] |= ((0x03 & (DEFAULT_HACTIVE_NTSC >> 8)) << 2); /* Vertical Delay Control */ tbl_ntsc_tw2815_common[0x04] = DEFAULT_VDELAY_NTSC & 0xff; tbl_ntsc_tw2815_common[0x06] |= ((0x01 & (DEFAULT_VDELAY_NTSC >> 8)) << 4); /* Vertical Active Control */ tbl_ntsc_tw2815_common[0x05] = DEFAULT_VACTIVE_NTSC & 0xff; tbl_ntsc_tw2815_common[0x06] |= ((0x01 & (DEFAULT_VACTIVE_NTSC >> 8)) << 5); tbl_pal_tw2815_common[0x06] = 0; /* Horizontal Delay Control */ tbl_pal_tw2815_common[0x02] = DEFAULT_HDELAY_PAL & 0xff; tbl_pal_tw2815_common[0x06] |= 0x03 & (DEFAULT_HDELAY_PAL >> 8); /* Horizontal Active Control */ tbl_pal_tw2815_common[0x03] = DEFAULT_HACTIVE_PAL & 0xff; tbl_pal_tw2815_common[0x06] |= ((0x03 & (DEFAULT_HACTIVE_PAL >> 8)) << 2); /* Vertical Delay Control */ tbl_pal_tw2815_common[0x04] = DEFAULT_VDELAY_PAL & 0xff; tbl_pal_tw2815_common[0x06] |= ((0x01 & (DEFAULT_VDELAY_PAL >> 8)) << 4); /* Vertical Active Control */ tbl_pal_tw2815_common[0x05] = DEFAULT_VACTIVE_PAL & 0xff; tbl_pal_tw2815_common[0x06] |= ((0x01 & (DEFAULT_VACTIVE_PAL >> 8)) << 5); tbl_tw2815_common = (solo_dev->video_type == SOLO_VO_FMT_TYPE_NTSC) ? 
tbl_ntsc_tw2815_common : tbl_pal_tw2815_common; /* Dual ITU-R BT.656 format */ tbl_tw2815_common[0x0d] |= 0x04; /* Audio configuration */ tbl_tw2815_sfr[0x62 - 0x40] &= ~(3 << 6); if (solo_dev->nr_chans == 4) { tbl_tw2815_sfr[0x63 - 0x40] |= 1; tbl_tw2815_sfr[0x62 - 0x40] |= 3 << 6; } else if (solo_dev->nr_chans == 8) { tbl_tw2815_sfr[0x63 - 0x40] |= 2; if (dev_addr == TW_CHIP_OFFSET_ADDR(0)) tbl_tw2815_sfr[0x62 - 0x40] |= 1 << 6; else if (dev_addr == TW_CHIP_OFFSET_ADDR(1)) tbl_tw2815_sfr[0x62 - 0x40] |= 2 << 6; } else if (solo_dev->nr_chans == 16) { tbl_tw2815_sfr[0x63 - 0x40] |= 3; if (dev_addr == TW_CHIP_OFFSET_ADDR(0)) tbl_tw2815_sfr[0x62 - 0x40] |= 1 << 6; else if (dev_addr == TW_CHIP_OFFSET_ADDR(1)) tbl_tw2815_sfr[0x62 - 0x40] |= 0 << 6; else if (dev_addr == TW_CHIP_OFFSET_ADDR(2)) tbl_tw2815_sfr[0x62 - 0x40] |= 0 << 6; else if (dev_addr == TW_CHIP_OFFSET_ADDR(3)) tbl_tw2815_sfr[0x62 - 0x40] |= 2 << 6; } /* Output mode of R_ADATM pin (0 mixing, 1 record) */ /* tbl_tw2815_sfr[0x63 - 0x40] |= 0 << 2; */ /* 8KHz, used to be 16KHz, but changed for remote client compat */ tbl_tw2815_sfr[0x62 - 0x40] |= 0 << 2; tbl_tw2815_sfr[0x6c - 0x40] |= 0 << 2; /* Playback of right channel */ tbl_tw2815_sfr[0x6c - 0x40] |= 1 << 5; /* Reserved value (XXX ??) */ tbl_tw2815_sfr[0x5c - 0x40] |= 1 << 5; /* Analog output gain and mix ratio playback on full */ tbl_tw2815_sfr[0x70 - 0x40] |= 0xff; /* Select playback audio and mute all except */ tbl_tw2815_sfr[0x71 - 0x40] |= 0x10; tbl_tw2815_sfr[0x6d - 0x40] |= 0x0f; /* End of audio configuration */ for (ch = 0; ch < 4; ch++) { tbl_tw2815_common[0x0d] &= ~3; switch (ch) { case 0: tbl_tw2815_common[0x0d] |= 0x21; break; case 1: tbl_tw2815_common[0x0d] |= 0x20; break; case 2: tbl_tw2815_common[0x0d] |= 0x23; break; case 3: tbl_tw2815_common[0x0d] |= 0x22; break; } for (i = 0; i < 0x0f; i++) { if (i == 0x00) continue; /* read-only */ solo_i2c_writebyte(solo_dev, SOLO_I2C_TW, dev_addr, (ch * 0x10) + i, tbl_tw2815_common[i]); } } for (i = 0x40; i < 0x76; i++) { /* Skip read-only and nop registers */ if (i == 0x40 || i == 0x59 || i == 0x5a || i == 0x5d || i == 0x5e || i == 0x5f) continue; solo_i2c_writebyte(solo_dev, SOLO_I2C_TW, dev_addr, i, tbl_tw2815_sfr[i - 0x40]); } return 0; } #define FIRST_ACTIVE_LINE 0x0008 #define LAST_ACTIVE_LINE 0x0102 static void saa7128_setup(struct solo_dev *solo_dev) { int i; unsigned char regs[128] = { 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1C, 0x2B, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x00, 0x59, 0x1d, 0x75, 0x3f, 0x06, 0x3f, 0x00, 0x00, 0x1c, 0x33, 0x00, 0x3f, 0x00, 0x00, 0x3f, 0x00, 0x1a, 0x1a, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0x10, 0x97, 0x4c, 0x18, 0x9b, 0x93, 0x9f, 0xff, 0x7c, 0x34, 0x3f, 0x3f, 0x3f, 0x83, 0x83, 0x80, 0x0d, 0x0f, 0xc3, 0x06, 0x02, 0x80, 0x71, 0x77, 0xa7, 0x67, 0x66, 0x2e, 0x7b, 0x11, 0x4f, 0x1f, 0x7c, 0xf0, 0x21, 0x77, 0x41, 0x88, 0x41, 0x12, 0xed, 0x10, 0x10, 0x00, 0x41, 0xc3, 0x00, 0x3e, 0xb8, 0x02, 0x00, 0x00, 0x00, 0x00, 0x08, 0xff, 0x80, 0x00, 0xff, 0xff, }; regs[0x7A] = FIRST_ACTIVE_LINE & 0xff; regs[0x7B] = LAST_ACTIVE_LINE & 0xff; regs[0x7C] = ((1 << 7) | (((LAST_ACTIVE_LINE >> 8) & 1) << 6) | (((FIRST_ACTIVE_LINE >> 8) & 1) << 4)); /* PAL: XXX: We could do a second set of regs to avoid this */ if (solo_dev->video_type != SOLO_VO_FMT_TYPE_NTSC) { regs[0x28] = 0xE1; regs[0x5A] = 0x0F; regs[0x61] = 0x02; regs[0x62] = 0x35; 
regs[0x63] = 0xCB; regs[0x64] = 0x8A; regs[0x65] = 0x09; regs[0x66] = 0x2A; regs[0x6C] = 0xf1; regs[0x6E] = 0x20; regs[0x7A] = 0x06 + 12; regs[0x7b] = 0x24 + 12; regs[0x7c] |= 1 << 6; } /* First 0x25 bytes are read-only? */ for (i = 0x26; i < 128; i++) { if (i == 0x60 || i == 0x7D) continue; solo_i2c_writebyte(solo_dev, SOLO_I2C_SAA, 0x46, i, regs[i]); } return; } int solo_tw28_init(struct solo_dev *solo_dev) { int i; u8 value; solo_dev->tw28_cnt = 0; /* Detect techwell chip type(s) */ for (i = 0; i < solo_dev->nr_chans / 4; i++) { value = solo_i2c_readbyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(i), 0xFF); switch (value >> 3) { case 0x18: solo_dev->tw2865 |= 1 << i; solo_dev->tw28_cnt++; break; case 0x0c: solo_dev->tw2864 |= 1 << i; solo_dev->tw28_cnt++; break; default: value = solo_i2c_readbyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(i), 0x59); if ((value >> 3) == 0x04) { solo_dev->tw2815 |= 1 << i; solo_dev->tw28_cnt++; } } } if (solo_dev->tw28_cnt != (solo_dev->nr_chans >> 2)) { dev_err(&solo_dev->pdev->dev, "Could not initialize any techwell chips\n"); return -EINVAL; } saa7128_setup(solo_dev); for (i = 0; i < solo_dev->tw28_cnt; i++) { if ((solo_dev->tw2865 & (1 << i))) tw2865_setup(solo_dev, TW_CHIP_OFFSET_ADDR(i)); else if ((solo_dev->tw2864 & (1 << i))) tw2864_setup(solo_dev, TW_CHIP_OFFSET_ADDR(i)); else tw2815_setup(solo_dev, TW_CHIP_OFFSET_ADDR(i)); } return 0; } /* * We accessed the video status signal in the Techwell chip through * iic/i2c because the video status reported by register REG_VI_STATUS1 * (address 0x012C) of the SOLO6010 chip doesn't give the correct video * status signal values. */ int tw28_get_video_status(struct solo_dev *solo_dev, u8 ch) { u8 val, chip_num; /* Get the right chip and on-chip channel */ chip_num = ch / 4; ch %= 4; val = tw_readbyte(solo_dev, chip_num, TW286x_AV_STAT_ADDR, TW_AV_STAT_ADDR) & 0x0f; return val & (1 << ch) ? 1 : 0; } #if 0 /* Status of audio from up to 4 techwell chips are combined into 1 variable. * See techwell datasheet for details. 
*/ u16 tw28_get_audio_status(struct solo_dev *solo_dev) { u8 val; u16 status = 0; int i; for (i = 0; i < solo_dev->tw28_cnt; i++) { val = (tw_readbyte(solo_dev, i, TW286x_AV_STAT_ADDR, TW_AV_STAT_ADDR) & 0xf0) >> 4; status |= val << (i * 4); } return status; } #endif bool tw28_has_sharpness(struct solo_dev *solo_dev, u8 ch) { return is_tw286x(solo_dev, ch / 4); } int tw28_set_ctrl_val(struct solo_dev *solo_dev, u32 ctrl, u8 ch, s32 val) { char sval; u8 chip_num; /* Get the right chip and on-chip channel */ chip_num = ch / 4; ch %= 4; if (val > 255 || val < 0) return -ERANGE; switch (ctrl) { case V4L2_CID_SHARPNESS: /* Only 286x has sharpness */ if (is_tw286x(solo_dev, chip_num)) { u8 v = solo_i2c_readbyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(chip_num), TW286x_SHARPNESS(chip_num)); v &= 0xf0; v |= val; solo_i2c_writebyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(chip_num), TW286x_SHARPNESS(chip_num), v); } else { return -EINVAL; } break; case V4L2_CID_HUE: if (is_tw286x(solo_dev, chip_num)) sval = val - 128; else sval = (char)val; tw_writebyte(solo_dev, chip_num, TW286x_HUE_ADDR(ch), TW_HUE_ADDR(ch), sval); break; case V4L2_CID_SATURATION: /* 286x chips have a U and V component for saturation */ if (is_tw286x(solo_dev, chip_num)) { solo_i2c_writebyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(chip_num), TW286x_SATURATIONU_ADDR(ch), val); } tw_writebyte(solo_dev, chip_num, TW286x_SATURATIONV_ADDR(ch), TW_SATURATION_ADDR(ch), val); break; case V4L2_CID_CONTRAST: tw_writebyte(solo_dev, chip_num, TW286x_CONTRAST_ADDR(ch), TW_CONTRAST_ADDR(ch), val); break; case V4L2_CID_BRIGHTNESS: if (is_tw286x(solo_dev, chip_num)) sval = val - 128; else sval = (char)val; tw_writebyte(solo_dev, chip_num, TW286x_BRIGHTNESS_ADDR(ch), TW_BRIGHTNESS_ADDR(ch), sval); break; default: return -EINVAL; } return 0; } int tw28_get_ctrl_val(struct solo_dev *solo_dev, u32 ctrl, u8 ch, s32 *val) { u8 rval, chip_num; /* Get the right chip and on-chip channel */ chip_num = ch / 4; ch %= 4; switch (ctrl) { case V4L2_CID_SHARPNESS: /* Only 286x has sharpness */ if (is_tw286x(solo_dev, chip_num)) { rval = solo_i2c_readbyte(solo_dev, SOLO_I2C_TW, TW_CHIP_OFFSET_ADDR(chip_num), TW286x_SHARPNESS(chip_num)); *val = rval & 0x0f; } else *val = 0; break; case V4L2_CID_HUE: rval = tw_readbyte(solo_dev, chip_num, TW286x_HUE_ADDR(ch), TW_HUE_ADDR(ch)); if (is_tw286x(solo_dev, chip_num)) *val = (s32)((char)rval) + 128; else *val = rval; break; case V4L2_CID_SATURATION: *val = tw_readbyte(solo_dev, chip_num, TW286x_SATURATIONU_ADDR(ch), TW_SATURATION_ADDR(ch)); break; case V4L2_CID_CONTRAST: *val = tw_readbyte(solo_dev, chip_num, TW286x_CONTRAST_ADDR(ch), TW_CONTRAST_ADDR(ch)); break; case V4L2_CID_BRIGHTNESS: rval = tw_readbyte(solo_dev, chip_num, TW286x_BRIGHTNESS_ADDR(ch), TW_BRIGHTNESS_ADDR(ch)); if (is_tw286x(solo_dev, chip_num)) *val = (s32)((char)rval) + 128; else *val = rval; break; default: return -EINVAL; } return 0; } #if 0 /* * For audio output volume, the output channel is only 1. In this case we * don't need to offset TW_CHIP_OFFSET_ADDR. The TW_CHIP_OFFSET_ADDR used * is the base address of the techwell chip. 
*/ void tw2815_Set_AudioOutVol(struct solo_dev *solo_dev, unsigned int u_val) { unsigned int val; unsigned int chip_num; chip_num = (solo_dev->nr_chans - 1) / 4; val = tw_readbyte(solo_dev, chip_num, TW286x_AUDIO_OUTPUT_VOL_ADDR, TW_AUDIO_OUTPUT_VOL_ADDR); u_val = (val & 0x0f) | (u_val << 4); tw_writebyte(solo_dev, chip_num, TW286x_AUDIO_OUTPUT_VOL_ADDR, TW_AUDIO_OUTPUT_VOL_ADDR, u_val); } #endif u8 tw28_get_audio_gain(struct solo_dev *solo_dev, u8 ch) { u8 val; u8 chip_num; /* Get the right chip and on-chip channel */ chip_num = ch / 4; ch %= 4; val = tw_readbyte(solo_dev, chip_num, TW286x_AUDIO_INPUT_GAIN_ADDR(ch), TW_AUDIO_INPUT_GAIN_ADDR(ch)); return (ch % 2) ? (val >> 4) : (val & 0x0f); } void tw28_set_audio_gain(struct solo_dev *solo_dev, u8 ch, u8 val) { u8 old_val; u8 chip_num; /* Get the right chip and on-chip channel */ chip_num = ch / 4; ch %= 4; old_val = tw_readbyte(solo_dev, chip_num, TW286x_AUDIO_INPUT_GAIN_ADDR(ch), TW_AUDIO_INPUT_GAIN_ADDR(ch)); val = (old_val & ((ch % 2) ? 0x0f : 0xf0)) | ((ch % 2) ? (val << 4) : val); tw_writebyte(solo_dev, chip_num, TW286x_AUDIO_INPUT_GAIN_ADDR(ch), TW_AUDIO_INPUT_GAIN_ADDR(ch), val); }
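/*
 * Editor's sketch (not part of the original driver): tw28_get_audio_gain()
 * and tw28_set_audio_gain() above pack the 4-bit gains of two channels into
 * one register byte -- the even channel in the low nibble, the odd channel
 * in the high nibble.  The pure bit manipulation is reproduced below in user
 * space so it can be checked without hardware.
 */
#include <assert.h>
#include <stdio.h>

static unsigned char pack_gain(unsigned char old_val, int ch, unsigned char val)
{
	/* same expression as the driver: preserve the other channel's nibble */
	return (old_val & ((ch % 2) ? 0x0f : 0xf0)) |
	       ((ch % 2) ? (val << 4) : val);
}

static unsigned char unpack_gain(unsigned char reg, int ch)
{
	return (ch % 2) ? (reg >> 4) : (reg & 0x0f);
}

int main(void)
{
	unsigned char reg = 0x00;

	reg = pack_gain(reg, 0, 0x9);	/* channel 0 lands in the low nibble  */
	reg = pack_gain(reg, 1, 0x3);	/* channel 1 lands in the high nibble */
	assert(reg == 0x39);
	assert(unpack_gain(reg, 0) == 0x9);
	assert(unpack_gain(reg, 1) == 0x3);
	printf("packed gain register: 0x%02x\n", reg);
	return 0;
}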
gpl-2.0
C-Aniruddh/ace-kernel_lettuce
sound/soc/omap/omap3pandora.c
2599
9161
/* * omap3pandora.c -- SoC audio for Pandora Handheld Console * * Author: Gražvydas Ignotas <notasas@gmail.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <linux/clk.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/regulator/consumer.h> #include <linux/module.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <asm/mach-types.h> #include <linux/platform_data/asoc-ti-mcbsp.h> #include "omap-mcbsp.h" #define OMAP3_PANDORA_DAC_POWER_GPIO 118 #define OMAP3_PANDORA_AMP_POWER_GPIO 14 #define PREFIX "ASoC omap3pandora: " static struct regulator *omap3pandora_dac_reg; static int omap3pandora_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; int ret; /* Set the codec system clock for DAC and ADC */ ret = snd_soc_dai_set_sysclk(codec_dai, 0, 26000000, SND_SOC_CLOCK_IN); if (ret < 0) { pr_err(PREFIX "can't set codec system clock\n"); return ret; } /* Set McBSP clock to external */ ret = snd_soc_dai_set_sysclk(cpu_dai, OMAP_MCBSP_SYSCLK_CLKS_EXT, 256 * params_rate(params), SND_SOC_CLOCK_IN); if (ret < 0) { pr_err(PREFIX "can't set cpu system clock\n"); return ret; } ret = snd_soc_dai_set_clkdiv(cpu_dai, OMAP_MCBSP_CLKGDV, 8); if (ret < 0) { pr_err(PREFIX "can't set SRG clock divider\n"); return ret; } return 0; } static int omap3pandora_dac_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { int ret; /* * The PCM1773 DAC datasheet requires 1ms delay between switching * VCC power on/off and /PD pin high/low */ if (SND_SOC_DAPM_EVENT_ON(event)) { ret = regulator_enable(omap3pandora_dac_reg); if (ret) { dev_err(w->dapm->dev, "Failed to power DAC: %d\n", ret); return ret; } mdelay(1); gpio_set_value(OMAP3_PANDORA_DAC_POWER_GPIO, 1); } else { gpio_set_value(OMAP3_PANDORA_DAC_POWER_GPIO, 0); mdelay(1); regulator_disable(omap3pandora_dac_reg); } return 0; } static int omap3pandora_hp_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { if (SND_SOC_DAPM_EVENT_ON(event)) gpio_set_value(OMAP3_PANDORA_AMP_POWER_GPIO, 1); else gpio_set_value(OMAP3_PANDORA_AMP_POWER_GPIO, 0); return 0; } /* * Audio paths on Pandora board: * * |O| ---> PCM DAC +-> AMP -> Headphone Jack * |M| A +--------> Line Out * |A| <~~clk~~+ * |P| <--- TWL4030 <--------- Line In and MICs */ static const struct snd_soc_dapm_widget omap3pandora_out_dapm_widgets[] = { SND_SOC_DAPM_DAC_E("PCM DAC", "HiFi Playback", SND_SOC_NOPM, 0, 0, omap3pandora_dac_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), SND_SOC_DAPM_PGA_E("Headphone Amplifier", SND_SOC_NOPM, 0, 0, NULL, 0, omap3pandora_hp_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), SND_SOC_DAPM_HP("Headphone Jack", NULL), SND_SOC_DAPM_LINE("Line Out", NULL), }; static const struct 
snd_soc_dapm_widget omap3pandora_in_dapm_widgets[] = { SND_SOC_DAPM_MIC("Mic (internal)", NULL), SND_SOC_DAPM_MIC("Mic (external)", NULL), SND_SOC_DAPM_LINE("Line In", NULL), }; static const struct snd_soc_dapm_route omap3pandora_out_map[] = { {"PCM DAC", NULL, "APLL Enable"}, {"Headphone Amplifier", NULL, "PCM DAC"}, {"Line Out", NULL, "PCM DAC"}, {"Headphone Jack", NULL, "Headphone Amplifier"}, }; static const struct snd_soc_dapm_route omap3pandora_in_map[] = { {"AUXL", NULL, "Line In"}, {"AUXR", NULL, "Line In"}, {"MAINMIC", NULL, "Mic (internal)"}, {"Mic (internal)", NULL, "Mic Bias 1"}, {"SUBMIC", NULL, "Mic (external)"}, {"Mic (external)", NULL, "Mic Bias 2"}, }; static int omap3pandora_out_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; struct snd_soc_dapm_context *dapm = &codec->dapm; int ret; /* All TWL4030 output pins are floating */ snd_soc_dapm_nc_pin(dapm, "EARPIECE"); snd_soc_dapm_nc_pin(dapm, "PREDRIVEL"); snd_soc_dapm_nc_pin(dapm, "PREDRIVER"); snd_soc_dapm_nc_pin(dapm, "HSOL"); snd_soc_dapm_nc_pin(dapm, "HSOR"); snd_soc_dapm_nc_pin(dapm, "CARKITL"); snd_soc_dapm_nc_pin(dapm, "CARKITR"); snd_soc_dapm_nc_pin(dapm, "HFL"); snd_soc_dapm_nc_pin(dapm, "HFR"); snd_soc_dapm_nc_pin(dapm, "VIBRA"); ret = snd_soc_dapm_new_controls(dapm, omap3pandora_out_dapm_widgets, ARRAY_SIZE(omap3pandora_out_dapm_widgets)); if (ret < 0) return ret; return snd_soc_dapm_add_routes(dapm, omap3pandora_out_map, ARRAY_SIZE(omap3pandora_out_map)); } static int omap3pandora_in_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; struct snd_soc_dapm_context *dapm = &codec->dapm; int ret; /* Not connected */ snd_soc_dapm_nc_pin(dapm, "HSMIC"); snd_soc_dapm_nc_pin(dapm, "CARKITMIC"); snd_soc_dapm_nc_pin(dapm, "DIGIMIC0"); snd_soc_dapm_nc_pin(dapm, "DIGIMIC1"); ret = snd_soc_dapm_new_controls(dapm, omap3pandora_in_dapm_widgets, ARRAY_SIZE(omap3pandora_in_dapm_widgets)); if (ret < 0) return ret; return snd_soc_dapm_add_routes(dapm, omap3pandora_in_map, ARRAY_SIZE(omap3pandora_in_map)); } static struct snd_soc_ops omap3pandora_ops = { .hw_params = omap3pandora_hw_params, }; /* Digital audio interface glue - connects codec <--> CPU */ static struct snd_soc_dai_link omap3pandora_dai[] = { { .name = "PCM1773", .stream_name = "HiFi Out", .cpu_dai_name = "omap-mcbsp.2", .codec_dai_name = "twl4030-hifi", .platform_name = "omap-pcm-audio", .codec_name = "twl4030-codec", .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS, .ops = &omap3pandora_ops, .init = omap3pandora_out_init, }, { .name = "TWL4030", .stream_name = "Line/Mic In", .cpu_dai_name = "omap-mcbsp.4", .codec_dai_name = "twl4030-hifi", .platform_name = "omap-pcm-audio", .codec_name = "twl4030-codec", .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS, .ops = &omap3pandora_ops, .init = omap3pandora_in_init, } }; /* SoC card */ static struct snd_soc_card snd_soc_card_omap3pandora = { .name = "omap3pandora", .owner = THIS_MODULE, .dai_link = omap3pandora_dai, .num_links = ARRAY_SIZE(omap3pandora_dai), }; static struct platform_device *omap3pandora_snd_device; static int __init omap3pandora_soc_init(void) { int ret; if (!machine_is_omap3_pandora()) return -ENODEV; pr_info("OMAP3 Pandora SoC init\n"); ret = gpio_request(OMAP3_PANDORA_DAC_POWER_GPIO, "dac_power"); if (ret) { pr_err(PREFIX "Failed to get DAC power GPIO\n"); return ret; } ret = gpio_direction_output(OMAP3_PANDORA_DAC_POWER_GPIO, 0); if (ret) { pr_err(PREFIX "Failed to set DAC power 
GPIO direction\n"); goto fail0; } ret = gpio_request(OMAP3_PANDORA_AMP_POWER_GPIO, "amp_power"); if (ret) { pr_err(PREFIX "Failed to get amp power GPIO\n"); goto fail0; } ret = gpio_direction_output(OMAP3_PANDORA_AMP_POWER_GPIO, 0); if (ret) { pr_err(PREFIX "Failed to set amp power GPIO direction\n"); goto fail1; } omap3pandora_snd_device = platform_device_alloc("soc-audio", -1); if (omap3pandora_snd_device == NULL) { pr_err(PREFIX "Platform device allocation failed\n"); ret = -ENOMEM; goto fail1; } platform_set_drvdata(omap3pandora_snd_device, &snd_soc_card_omap3pandora); ret = platform_device_add(omap3pandora_snd_device); if (ret) { pr_err(PREFIX "Unable to add platform device\n"); goto fail2; } omap3pandora_dac_reg = regulator_get(&omap3pandora_snd_device->dev, "vcc"); if (IS_ERR(omap3pandora_dac_reg)) { pr_err(PREFIX "Failed to get DAC regulator from %s: %ld\n", dev_name(&omap3pandora_snd_device->dev), PTR_ERR(omap3pandora_dac_reg)); ret = PTR_ERR(omap3pandora_dac_reg); goto fail3; } return 0; fail3: platform_device_del(omap3pandora_snd_device); fail2: platform_device_put(omap3pandora_snd_device); fail1: gpio_free(OMAP3_PANDORA_AMP_POWER_GPIO); fail0: gpio_free(OMAP3_PANDORA_DAC_POWER_GPIO); return ret; } module_init(omap3pandora_soc_init); static void __exit omap3pandora_soc_exit(void) { regulator_put(omap3pandora_dac_reg); platform_device_unregister(omap3pandora_snd_device); gpio_free(OMAP3_PANDORA_AMP_POWER_GPIO); gpio_free(OMAP3_PANDORA_DAC_POWER_GPIO); } module_exit(omap3pandora_soc_exit); MODULE_AUTHOR("Grazvydas Ignotas <notasas@gmail.com>"); MODULE_DESCRIPTION("ALSA SoC OMAP3 Pandora"); MODULE_LICENSE("GPL");
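/*
 * Editor's note (illustration only, not part of the original file):
 * omap3pandora_hw_params() above asks the McBSP for an external clock of
 * 256 * fs and then programs a divider of 8, leaving a 32 * fs bit clock,
 * which is consistent with 16-bit stereo I2S.  The arithmetic is shown
 * standalone below; the sample rates are just examples.
 */
#include <stdio.h>

int main(void)
{
	const unsigned long rates[] = { 44100, 48000 };
	size_t i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		unsigned long sysclk = 256 * rates[i];	/* rate passed as SND_SOC_CLOCK_IN */
		unsigned long bclk = sysclk / 8;	/* OMAP_MCBSP_CLKGDV divider of 8  */

		printf("fs=%lu Hz -> sysclk=%lu Hz, bit clock=%lu Hz (%lu * fs)\n",
		       rates[i], sysclk, bclk, bclk / rates[i]);
	}
	return 0;
}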
gpl-2.0
d3trax/asuswrt-merlin
release/src/router/samba36/lib/zlib/inftrees.c
2855
13756
/* inftrees.c -- generate Huffman trees for efficient decoding * Copyright (C) 1995-2005 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ #include "zutil.h" #include "inftrees.h" #define MAXBITS 15 const char inflate_copyright[] = " inflate 1.2.3 Copyright 1995-2005 Mark Adler "; /* If you use the zlib library in a product, an acknowledgment is welcome in the documentation of your product. If for some reason you cannot include such an acknowledgment, I would appreciate that you keep this copyright string in the executable of your product. */ /* Build a set of tables to decode the provided canonical Huffman code. The code lengths are lens[0..codes-1]. The result starts at *table, whose indices are 0..2^bits-1. work is a writable array of at least lens shorts, which is used as a work area. type is the type of code to be generated, CODES, LENS, or DISTS. On return, zero is success, -1 is an invalid code, and +1 means that ENOUGH isn't enough. table on return points to the next available entry's address. bits is the requested root table index bits, and on return it is the actual root table index bits. It will differ if the request is greater than the longest code or if it is less than the shortest code. */ int inflate_table(type, lens, codes, table, bits, work) codetype type; unsigned short FAR *lens; unsigned codes; code FAR * FAR *table; unsigned FAR *bits; unsigned short FAR *work; { unsigned len; /* a code's length in bits */ unsigned sym; /* index of code symbols */ unsigned min, max; /* minimum and maximum code lengths */ unsigned root; /* number of index bits for root table */ unsigned curr; /* number of index bits for current table */ unsigned drop; /* code bits to drop for sub-table */ int left; /* number of prefix codes available */ unsigned used; /* code entries in table used */ unsigned huff; /* Huffman code */ unsigned incr; /* for incrementing code, index */ unsigned fill; /* index for replicating entries */ unsigned low; /* low bits for current root entry */ unsigned mask; /* mask for low root bits */ code this; /* table entry for duplication */ code FAR *next; /* next available space in table */ const unsigned short FAR *base; /* base value table to use */ const unsigned short FAR *extra; /* extra bits table to use */ int end; /* use base and extra for symbol > end */ unsigned short count[MAXBITS+1]; /* number of codes of each length */ unsigned short offs[MAXBITS+1]; /* offsets in table for each length */ static const unsigned short lbase[31] = { /* Length codes 257..285 base */ 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; static const unsigned short lext[31] = { /* Length codes 257..285 extra */ 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18, 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 201, 196}; static const unsigned short dbase[32] = { /* Distance codes 0..29 base */ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0}; static const unsigned short dext[32] = { /* Distance codes 0..29 extra */ 16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 64, 64}; /* Process a set of code lengths to create a canonical Huffman code. The code lengths are lens[0..codes-1]. Each length corresponds to the symbols 0..codes-1. 
The Huffman code is generated by first sorting the symbols by length from short to long, and retaining the symbol order for codes with equal lengths. Then the code starts with all zero bits for the first code of the shortest length, and the codes are integer increments for the same length, and zeros are appended as the length increases. For the deflate format, these bits are stored backwards from their more natural integer increment ordering, and so when the decoding tables are built in the large loop below, the integer codes are incremented backwards. This routine assumes, but does not check, that all of the entries in lens[] are in the range 0..MAXBITS. The caller must assure this. 1..MAXBITS is interpreted as that code length. zero means that that symbol does not occur in this code. The codes are sorted by computing a count of codes for each length, creating from that a table of starting indices for each length in the sorted table, and then entering the symbols in order in the sorted table. The sorted table is work[], with that space being provided by the caller. The length counts are used for other purposes as well, i.e. finding the minimum and maximum length codes, determining if there are any codes at all, checking for a valid set of lengths, and looking ahead at length counts to determine sub-table sizes when building the decoding tables. */ /* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */ for (len = 0; len <= MAXBITS; len++) count[len] = 0; for (sym = 0; sym < codes; sym++) count[lens[sym]]++; /* bound code lengths, force root to be within code lengths */ root = *bits; for (max = MAXBITS; max >= 1; max--) if (count[max] != 0) break; if (root > max) root = max; if (max == 0) { /* no symbols to code at all */ this.op = (unsigned char)64; /* invalid code marker */ this.bits = (unsigned char)1; this.val = (unsigned short)0; *(*table)++ = this; /* make a table to force an error */ *(*table)++ = this; *bits = 1; return 0; /* no symbols, but wait for decoding to report error */ } for (min = 1; min <= MAXBITS; min++) if (count[min] != 0) break; if (root < min) root = min; /* check for an over-subscribed or incomplete set of lengths */ left = 1; for (len = 1; len <= MAXBITS; len++) { left <<= 1; left -= count[len]; if (left < 0) return -1; /* over-subscribed */ } if (left > 0 && (type == CODES || max != 1)) return -1; /* incomplete set */ /* generate offsets into symbol table for each length for sorting */ offs[1] = 0; for (len = 1; len < MAXBITS; len++) offs[len + 1] = offs[len] + count[len]; /* sort symbols by length, by symbol order within each length */ for (sym = 0; sym < codes; sym++) if (lens[sym] != 0) work[offs[lens[sym]]++] = (unsigned short)sym; /* Create and fill in decoding tables. In this loop, the table being filled is at next and has curr index bits. The code being used is huff with length len. That code is converted to an index by dropping drop bits off of the bottom. For codes where len is less than drop + curr, those top drop + curr - len bits are incremented through all values to fill the table with replicated entries. root is the number of index bits for the root table. When len exceeds root, sub-tables are created pointed to by the root entry with an index of the low root bits of huff. This is saved in low to check for when a new sub-table should be started. drop is zero when the root table is being filled, and drop is root when sub-tables are being filled. 
When a new sub-table is needed, it is necessary to look ahead in the code lengths to determine what size sub-table is needed. The length counts are used for this, and so count[] is decremented as codes are entered in the tables. used keeps track of how many table entries have been allocated from the provided *table space. It is checked when a LENS table is being made against the space in *table, ENOUGH, minus the maximum space needed by the worst case distance code, MAXD. This should never happen, but the sufficiency of ENOUGH has not been proven exhaustively, hence the check. This assumes that when type == LENS, bits == 9. sym increments through all symbols, and the loop terminates when all codes of length max, i.e. all codes, have been processed. This routine permits incomplete codes, so another loop after this one fills in the rest of the decoding tables with invalid code markers. */ /* set up for code type */ switch (type) { case CODES: base = extra = work; /* dummy value--not used */ end = 19; break; case LENS: base = lbase; base -= 257; extra = lext; extra -= 257; end = 256; break; default: /* DISTS */ base = dbase; extra = dext; end = -1; } /* initialize state for loop */ huff = 0; /* starting code */ sym = 0; /* starting code symbol */ len = min; /* starting code length */ next = *table; /* current table to fill in */ curr = root; /* current table index bits */ drop = 0; /* current bits to drop from code for index */ low = (unsigned)(-1); /* trigger new sub-table when len > root */ used = 1U << root; /* use root table entries */ mask = used - 1; /* mask for comparing low */ /* check available table space */ if (type == LENS && used >= ENOUGH - MAXD) return 1; /* process all codes and make table entries */ for (;;) { /* create table entry */ this.bits = (unsigned char)(len - drop); if ((int)(work[sym]) < end) { this.op = (unsigned char)0; this.val = work[sym]; } else if ((int)(work[sym]) > end) { this.op = (unsigned char)(extra[work[sym]]); this.val = base[work[sym]]; } else { this.op = (unsigned char)(32 + 64); /* end of block */ this.val = 0; } /* replicate for those indices with low len bits equal to huff */ incr = 1U << (len - drop); fill = 1U << curr; min = fill; /* save offset to next table */ do { fill -= incr; next[(huff >> drop) + fill] = this; } while (fill != 0); /* backwards increment the len-bit code huff */ incr = 1U << (len - 1); while (huff & incr) incr >>= 1; if (incr != 0) { huff &= incr - 1; huff += incr; } else huff = 0; /* go to next symbol, update count, len */ sym++; if (--(count[len]) == 0) { if (len == max) break; len = lens[work[sym]]; } /* create new sub-table if needed */ if (len > root && (huff & mask) != low) { /* if first time, transition to sub-tables */ if (drop == 0) drop = root; /* increment past last table */ next += min; /* here min is 1 << curr */ /* determine length of next table */ curr = len - drop; left = (int)(1 << curr); while (curr + drop < max) { left -= count[curr + drop]; if (left <= 0) break; curr++; left <<= 1; } /* check for enough space */ used += 1U << curr; if (type == LENS && used >= ENOUGH - MAXD) return 1; /* point entry in root table to sub-table */ low = huff & mask; (*table)[low].op = (unsigned char)curr; (*table)[low].bits = (unsigned char)root; (*table)[low].val = (unsigned short)(next - *table); } } /* Fill in rest of table for incomplete codes. This loop is similar to the loop above in incrementing huff for table indices. 
It is assumed that len is equal to curr + drop, so there is no loop needed to increment through high index bits. When the current sub-table is filled, the loop drops back to the root table to fill in any remaining entries there. */ this.op = (unsigned char)64; /* invalid code marker */ this.bits = (unsigned char)(len - drop); this.val = (unsigned short)0; while (huff != 0) { /* when done with sub-table, drop back to root table */ if (drop != 0 && (huff & mask) != low) { drop = 0; len = root; next = *table; this.bits = (unsigned char)len; } /* put invalid code marker in table */ next[huff >> drop] = this; /* backwards increment the len-bit code huff */ incr = 1U << (len - 1); while (huff & incr) incr >>= 1; if (incr != 0) { huff &= incr - 1; huff += incr; } else huff = 0; } /* set return parameters */ *table += used; *bits = root; return 0; }
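/*
 * Editor's sketch (not part of zlib): the first stage of inflate_table()
 * above counts how many codes exist at each length, turns those counts into
 * starting offsets, and then sorts the symbols by code length while keeping
 * symbol order within each length.  A tiny standalone reproduction with a
 * made-up 4-symbol code:
 */
#include <stdio.h>

#define MAXBITS 15

int main(void)
{
	/* lens[sym] = code length of symbol sym; 0 would mean "symbol unused" */
	const unsigned short lens[] = { 2, 1, 3, 3 };
	const unsigned codes = sizeof(lens) / sizeof(lens[0]);
	unsigned short count[MAXBITS + 1] = { 0 };
	unsigned short offs[MAXBITS + 1];
	unsigned short work[sizeof(lens) / sizeof(lens[0])];
	unsigned sym, len;

	for (sym = 0; sym < codes; sym++)		/* accumulate lengths */
		count[lens[sym]]++;

	offs[1] = 0;					/* starting offset per length */
	for (len = 1; len < MAXBITS; len++)
		offs[len + 1] = offs[len] + count[len];

	for (sym = 0; sym < codes; sym++)		/* stable sort by length */
		if (lens[sym] != 0)
			work[offs[lens[sym]]++] = (unsigned short)sym;

	/* expected order: symbol 1 (len 1), symbol 0 (len 2), symbols 2, 3 (len 3) */
	for (sym = 0; sym < codes; sym++)
		printf("work[%u] = symbol %u (length %u)\n",
		       sym, (unsigned)work[sym], (unsigned)lens[work[sym]]);
	return 0;
}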
gpl-2.0
bigbiff/android_kernel_samsung_zeroflte
arch/arm/mach-msm/gpiomux-v1.c
4903
1207
/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ #include <linux/kernel.h> #include "gpiomux.h" #include "proc_comm.h" void __msm_gpiomux_write(unsigned gpio, gpiomux_config_t val) { unsigned tlmm_config = (val & ~GPIOMUX_CTL_MASK) | ((gpio & 0x3ff) << 4); unsigned tlmm_disable = 0; int rc; rc = msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &tlmm_config, &tlmm_disable); if (rc) pr_err("%s: unexpected proc_comm failure %d: %08x %08x\n", __func__, rc, tlmm_config, tlmm_disable); }
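/*
 * Editor's sketch (illustration only, not part of the original file):
 * __msm_gpiomux_write() above strips the control bits out of 'val' and folds
 * the GPIO number into bits [13:4] of the word handed to proc_comm.  The
 * packing is reproduced below in user space; the real GPIOMUX_CTL_MASK lives
 * in gpiomux.h, so a placeholder mask is used here purely for the demo.
 */
#include <stdio.h>

#define DEMO_CTL_MASK 0x7u	/* placeholder, NOT the real GPIOMUX_CTL_MASK */

static unsigned pack_tlmm(unsigned gpio, unsigned val)
{
	return (val & ~DEMO_CTL_MASK) | ((gpio & 0x3ff) << 4);
}

int main(void)
{
	/* GPIO 86: the GPIO number ends up in bits [13:4] of the config word */
	unsigned cfg = pack_tlmm(86, 0x3);

	printf("tlmm_config = 0x%08x (gpio field = %u)\n", cfg, (cfg >> 4) & 0x3ff);
	return 0;
}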
gpl-2.0
bju2000/Sense7_Kernel_b2wlj
drivers/media/video/ivtv/ivtv-firmware.c
7975
11880
/* ivtv firmware functions. Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com> Copyright (C) 2004 Chris Kennedy <c@groovy.org> Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "ivtv-driver.h" #include "ivtv-mailbox.h" #include "ivtv-firmware.h" #include "ivtv-yuv.h" #include "ivtv-ioctl.h" #include "ivtv-cards.h" #include <linux/firmware.h> #include <media/saa7127.h> #define IVTV_MASK_SPU_ENABLE 0xFFFFFFFE #define IVTV_MASK_VPU_ENABLE15 0xFFFFFFF6 #define IVTV_MASK_VPU_ENABLE16 0xFFFFFFFB #define IVTV_CMD_VDM_STOP 0x00000000 #define IVTV_CMD_AO_STOP 0x00000005 #define IVTV_CMD_APU_PING 0x00000000 #define IVTV_CMD_VPU_STOP15 0xFFFFFFFE #define IVTV_CMD_VPU_STOP16 0xFFFFFFEE #define IVTV_CMD_HW_BLOCKS_RST 0xFFFFFFFF #define IVTV_CMD_SPU_STOP 0x00000001 #define IVTV_CMD_SDRAM_PRECHARGE_INIT 0x0000001A #define IVTV_CMD_SDRAM_REFRESH_INIT 0x80000640 #define IVTV_SDRAM_SLEEPTIME 600 #define IVTV_DECODE_INIT_MPEG_FILENAME "v4l-cx2341x-init.mpg" #define IVTV_DECODE_INIT_MPEG_SIZE (152*1024) /* Encoder/decoder firmware sizes */ #define IVTV_FW_ENC_SIZE (376836) #define IVTV_FW_DEC_SIZE (256*1024) static int load_fw_direct(const char *fn, volatile u8 __iomem *mem, struct ivtv *itv, long size) { const struct firmware *fw = NULL; int retries = 3; retry: if (retries && request_firmware(&fw, fn, &itv->pdev->dev) == 0) { int i; volatile u32 __iomem *dst = (volatile u32 __iomem *)mem; const u32 *src = (const u32 *)fw->data; if (fw->size != size) { /* Due to race conditions in firmware loading (esp. with udev <0.95) the wrong file was sometimes loaded. So we check filesizes to see if at least the right-sized file was loaded. If not, then we retry. */ IVTV_INFO("Retry: file loaded was not %s (expected size %ld, got %zd)\n", fn, size, fw->size); release_firmware(fw); retries--; goto retry; } for (i = 0; i < fw->size; i += 4) { /* no need for endianness conversion on the ppc */ __raw_writel(*src, dst); dst++; src++; } IVTV_INFO("Loaded %s firmware (%zd bytes)\n", fn, fw->size); release_firmware(fw); return size; } IVTV_ERR("Unable to open firmware %s (must be %ld bytes)\n", fn, size); IVTV_ERR("Did you put the firmware in the hotplug firmware directory?\n"); return -ENOMEM; } void ivtv_halt_firmware(struct ivtv *itv) { IVTV_DEBUG_INFO("Preparing for firmware halt.\n"); if (itv->has_cx23415 && itv->dec_mbox.mbox) ivtv_vapi(itv, CX2341X_DEC_HALT_FW, 0); if (itv->enc_mbox.mbox) ivtv_vapi(itv, CX2341X_ENC_HALT_FW, 0); ivtv_msleep_timeout(10, 0); itv->enc_mbox.mbox = itv->dec_mbox.mbox = NULL; IVTV_DEBUG_INFO("Stopping VDM\n"); write_reg(IVTV_CMD_VDM_STOP, IVTV_REG_VDM); IVTV_DEBUG_INFO("Stopping AO\n"); write_reg(IVTV_CMD_AO_STOP, IVTV_REG_AO); IVTV_DEBUG_INFO("pinging (?) 
APU\n"); write_reg(IVTV_CMD_APU_PING, IVTV_REG_APU); IVTV_DEBUG_INFO("Stopping VPU\n"); if (!itv->has_cx23415) write_reg(IVTV_CMD_VPU_STOP16, IVTV_REG_VPU); else write_reg(IVTV_CMD_VPU_STOP15, IVTV_REG_VPU); IVTV_DEBUG_INFO("Resetting Hw Blocks\n"); write_reg(IVTV_CMD_HW_BLOCKS_RST, IVTV_REG_HW_BLOCKS); IVTV_DEBUG_INFO("Stopping SPU\n"); write_reg(IVTV_CMD_SPU_STOP, IVTV_REG_SPU); ivtv_msleep_timeout(10, 0); IVTV_DEBUG_INFO("init Encoder SDRAM pre-charge\n"); write_reg(IVTV_CMD_SDRAM_PRECHARGE_INIT, IVTV_REG_ENC_SDRAM_PRECHARGE); IVTV_DEBUG_INFO("init Encoder SDRAM refresh to 1us\n"); write_reg(IVTV_CMD_SDRAM_REFRESH_INIT, IVTV_REG_ENC_SDRAM_REFRESH); if (itv->has_cx23415) { IVTV_DEBUG_INFO("init Decoder SDRAM pre-charge\n"); write_reg(IVTV_CMD_SDRAM_PRECHARGE_INIT, IVTV_REG_DEC_SDRAM_PRECHARGE); IVTV_DEBUG_INFO("init Decoder SDRAM refresh to 1us\n"); write_reg(IVTV_CMD_SDRAM_REFRESH_INIT, IVTV_REG_DEC_SDRAM_REFRESH); } IVTV_DEBUG_INFO("Sleeping for %dms\n", IVTV_SDRAM_SLEEPTIME); ivtv_msleep_timeout(IVTV_SDRAM_SLEEPTIME, 0); } void ivtv_firmware_versions(struct ivtv *itv) { u32 data[CX2341X_MBOX_MAX_DATA]; /* Encoder */ ivtv_vapi_result(itv, data, CX2341X_ENC_GET_VERSION, 0); IVTV_INFO("Encoder revision: 0x%08x\n", data[0]); if (data[0] != 0x02060039) IVTV_WARN("Recommended firmware version is 0x02060039.\n"); if (itv->has_cx23415) { /* Decoder */ ivtv_vapi_result(itv, data, CX2341X_DEC_GET_VERSION, 0); IVTV_INFO("Decoder revision: 0x%08x\n", data[0]); } } static int ivtv_firmware_copy(struct ivtv *itv) { IVTV_DEBUG_INFO("Loading encoder image\n"); if (load_fw_direct(CX2341X_FIRM_ENC_FILENAME, itv->enc_mem, itv, IVTV_FW_ENC_SIZE) != IVTV_FW_ENC_SIZE) { IVTV_DEBUG_WARN("failed loading encoder firmware\n"); return -3; } if (!itv->has_cx23415) return 0; IVTV_DEBUG_INFO("Loading decoder image\n"); if (load_fw_direct(CX2341X_FIRM_DEC_FILENAME, itv->dec_mem, itv, IVTV_FW_DEC_SIZE) != IVTV_FW_DEC_SIZE) { IVTV_DEBUG_WARN("failed loading decoder firmware\n"); return -1; } return 0; } static volatile struct ivtv_mailbox __iomem *ivtv_search_mailbox(const volatile u8 __iomem *mem, u32 size) { int i; /* mailbox is preceded by a 16 byte 'magic cookie' starting at a 256-byte address boundary */ for (i = 0; i < size; i += 0x100) { if (readl(mem + i) == 0x12345678 && readl(mem + i + 4) == 0x34567812 && readl(mem + i + 8) == 0x56781234 && readl(mem + i + 12) == 0x78123456) { return (volatile struct ivtv_mailbox __iomem *)(mem + i + 16); } } return NULL; } int ivtv_firmware_init(struct ivtv *itv) { int err; ivtv_halt_firmware(itv); /* load firmware */ err = ivtv_firmware_copy(itv); if (err) { IVTV_DEBUG_WARN("Error %d loading firmware\n", err); return err; } /* start firmware */ write_reg(read_reg(IVTV_REG_SPU) & IVTV_MASK_SPU_ENABLE, IVTV_REG_SPU); ivtv_msleep_timeout(100, 0); if (itv->has_cx23415) write_reg(read_reg(IVTV_REG_VPU) & IVTV_MASK_VPU_ENABLE15, IVTV_REG_VPU); else write_reg(read_reg(IVTV_REG_VPU) & IVTV_MASK_VPU_ENABLE16, IVTV_REG_VPU); ivtv_msleep_timeout(100, 0); /* find mailboxes and ping firmware */ itv->enc_mbox.mbox = ivtv_search_mailbox(itv->enc_mem, IVTV_ENCODER_SIZE); if (itv->enc_mbox.mbox == NULL) IVTV_ERR("Encoder mailbox not found\n"); else if (ivtv_vapi(itv, CX2341X_ENC_PING_FW, 0)) { IVTV_ERR("Encoder firmware dead!\n"); itv->enc_mbox.mbox = NULL; } if (itv->enc_mbox.mbox == NULL) return -ENODEV; if (!itv->has_cx23415) return 0; itv->dec_mbox.mbox = ivtv_search_mailbox(itv->dec_mem, IVTV_DECODER_SIZE); if (itv->dec_mbox.mbox == NULL) { IVTV_ERR("Decoder mailbox not found\n"); } 
else if (itv->has_cx23415 && ivtv_vapi(itv, CX2341X_DEC_PING_FW, 0)) { IVTV_ERR("Decoder firmware dead!\n"); itv->dec_mbox.mbox = NULL; } else { /* Firmware okay, so check yuv output filter table */ ivtv_yuv_filter_check(itv); } return itv->dec_mbox.mbox ? 0 : -ENODEV; } void ivtv_init_mpeg_decoder(struct ivtv *itv) { u32 data[CX2341X_MBOX_MAX_DATA]; long readbytes; volatile u8 __iomem *mem_offset; data[0] = 0; data[1] = itv->cxhdl.width; /* YUV source width */ data[2] = itv->cxhdl.height; data[3] = itv->cxhdl.audio_properties; /* Audio settings to use, bitmap. see docs. */ if (ivtv_api(itv, CX2341X_DEC_SET_DECODER_SOURCE, 4, data)) { IVTV_ERR("ivtv_init_mpeg_decoder failed to set decoder source\n"); return; } if (ivtv_vapi(itv, CX2341X_DEC_START_PLAYBACK, 2, 0, 1) != 0) { IVTV_ERR("ivtv_init_mpeg_decoder failed to start playback\n"); return; } ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 2, data); mem_offset = itv->dec_mem + data[1]; if ((readbytes = load_fw_direct(IVTV_DECODE_INIT_MPEG_FILENAME, mem_offset, itv, IVTV_DECODE_INIT_MPEG_SIZE)) <= 0) { IVTV_DEBUG_WARN("failed to read mpeg decoder initialisation file %s\n", IVTV_DECODE_INIT_MPEG_FILENAME); } else { ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, readbytes, 0); ivtv_msleep_timeout(100, 0); } ivtv_vapi(itv, CX2341X_DEC_STOP_PLAYBACK, 4, 0, 0, 0, 1); } /* Try to restart the card & restore previous settings */ int ivtv_firmware_restart(struct ivtv *itv) { int rc = 0; v4l2_std_id std; if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) /* Display test image during restart */ ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_routing, SAA7127_INPUT_TYPE_TEST_IMAGE, itv->card->video_outputs[itv->active_output].video_output, 0); mutex_lock(&itv->udma.lock); rc = ivtv_firmware_init(itv); if (rc) { mutex_unlock(&itv->udma.lock); return rc; } /* Allow settings to reload */ ivtv_mailbox_cache_invalidate(itv); /* Restore encoder video standard */ std = itv->std; itv->std = 0; ivtv_s_std_enc(itv, &std); if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) { ivtv_init_mpeg_decoder(itv); /* Restore decoder video standard */ std = itv->std_out; itv->std_out = 0; ivtv_s_std_dec(itv, &std); /* Restore framebuffer if active */ if (itv->ivtvfb_restore) itv->ivtvfb_restore(itv); /* Restore alpha settings */ ivtv_set_osd_alpha(itv); /* Restore normal output */ ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_routing, SAA7127_INPUT_TYPE_NORMAL, itv->card->video_outputs[itv->active_output].video_output, 0); } mutex_unlock(&itv->udma.lock); return rc; } /* Check firmware running state. The checks fall through allowing multiple failures to be logged. */ int ivtv_firmware_check(struct ivtv *itv, char *where) { int res = 0; /* Check encoder is still running */ if (ivtv_vapi(itv, CX2341X_ENC_PING_FW, 0) < 0) { IVTV_WARN("Encoder has died : %s\n", where); res = -1; } /* Also check audio. Only check if not in use & encoder is okay */ if (!res && !atomic_read(&itv->capturing) && (!atomic_read(&itv->decoding) || (atomic_read(&itv->decoding) < 2 && test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)))) { if (ivtv_vapi(itv, CX2341X_ENC_MISC, 1, 12) < 0) { IVTV_WARN("Audio has died (Encoder OK) : %s\n", where); res = -2; } } if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) { /* Second audio check. Skip if audio already failed */ if (res != -2 && read_dec(0x100) != read_dec(0x104)) { /* Wait & try again to be certain. 
*/ ivtv_msleep_timeout(14, 0); if (read_dec(0x100) != read_dec(0x104)) { IVTV_WARN("Audio has died (Decoder) : %s\n", where); res = -1; } } /* Check decoder is still running */ if (ivtv_vapi(itv, CX2341X_DEC_PING_FW, 0) < 0) { IVTV_WARN("Decoder has died : %s\n", where); res = -1; } } /* If something failed & currently idle, try to reload */ if (res && !atomic_read(&itv->capturing) && !atomic_read(&itv->decoding)) { IVTV_INFO("Detected in %s that firmware had failed - " "Reloading\n", where); res = ivtv_firmware_restart(itv); /* * Even if restarted ok, still signal a problem had occurred. * The caller can come through this function again to check * if things are really ok after the restart. */ if (!res) { IVTV_INFO("Firmware restart okay\n"); res = -EAGAIN; } else { IVTV_INFO("Firmware restart failed\n"); } } else if (res) { res = -EIO; } return res; }
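/*
 * Editor's sketch (not part of the driver): ivtv_search_mailbox() above scans
 * firmware memory in 0x100-byte steps for the 16-byte magic cookie
 * 0x12345678 0x34567812 0x56781234 0x78123456 and returns the address just
 * past it.  The same search over an ordinary buffer, without the __iomem
 * accessors (readl()'s little-endian semantics are glossed over here):
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const uint32_t magic[4] = { 0x12345678, 0x34567812, 0x56781234, 0x78123456 };

static const uint8_t *search_mailbox(const uint8_t *mem, size_t size)
{
	size_t i;

	for (i = 0; i + sizeof(magic) <= size; i += 0x100)
		if (memcmp(mem + i, magic, sizeof(magic)) == 0)
			return mem + i + 16;	/* mailbox starts right after the cookie */
	return NULL;
}

int main(void)
{
	static uint8_t fw_mem[0x1000];
	const uint8_t *mbox;

	memcpy(fw_mem + 0x300, magic, sizeof(magic));	/* plant a cookie at a 256-byte boundary */
	mbox = search_mailbox(fw_mem, sizeof(fw_mem));
	if (mbox)
		printf("mailbox found at offset 0x%lx\n", (unsigned long)(mbox - fw_mem));
	else
		printf("mailbox not found\n");
	return 0;
}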
gpl-2.0
TeamJB/kernel_samsung_smdk4412
drivers/media/video/ivtv/ivtv-firmware.c
7975
11880
/* ivtv firmware functions. Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com> Copyright (C) 2004 Chris Kennedy <c@groovy.org> Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "ivtv-driver.h" #include "ivtv-mailbox.h" #include "ivtv-firmware.h" #include "ivtv-yuv.h" #include "ivtv-ioctl.h" #include "ivtv-cards.h" #include <linux/firmware.h> #include <media/saa7127.h> #define IVTV_MASK_SPU_ENABLE 0xFFFFFFFE #define IVTV_MASK_VPU_ENABLE15 0xFFFFFFF6 #define IVTV_MASK_VPU_ENABLE16 0xFFFFFFFB #define IVTV_CMD_VDM_STOP 0x00000000 #define IVTV_CMD_AO_STOP 0x00000005 #define IVTV_CMD_APU_PING 0x00000000 #define IVTV_CMD_VPU_STOP15 0xFFFFFFFE #define IVTV_CMD_VPU_STOP16 0xFFFFFFEE #define IVTV_CMD_HW_BLOCKS_RST 0xFFFFFFFF #define IVTV_CMD_SPU_STOP 0x00000001 #define IVTV_CMD_SDRAM_PRECHARGE_INIT 0x0000001A #define IVTV_CMD_SDRAM_REFRESH_INIT 0x80000640 #define IVTV_SDRAM_SLEEPTIME 600 #define IVTV_DECODE_INIT_MPEG_FILENAME "v4l-cx2341x-init.mpg" #define IVTV_DECODE_INIT_MPEG_SIZE (152*1024) /* Encoder/decoder firmware sizes */ #define IVTV_FW_ENC_SIZE (376836) #define IVTV_FW_DEC_SIZE (256*1024) static int load_fw_direct(const char *fn, volatile u8 __iomem *mem, struct ivtv *itv, long size) { const struct firmware *fw = NULL; int retries = 3; retry: if (retries && request_firmware(&fw, fn, &itv->pdev->dev) == 0) { int i; volatile u32 __iomem *dst = (volatile u32 __iomem *)mem; const u32 *src = (const u32 *)fw->data; if (fw->size != size) { /* Due to race conditions in firmware loading (esp. with udev <0.95) the wrong file was sometimes loaded. So we check filesizes to see if at least the right-sized file was loaded. If not, then we retry. */ IVTV_INFO("Retry: file loaded was not %s (expected size %ld, got %zd)\n", fn, size, fw->size); release_firmware(fw); retries--; goto retry; } for (i = 0; i < fw->size; i += 4) { /* no need for endianness conversion on the ppc */ __raw_writel(*src, dst); dst++; src++; } IVTV_INFO("Loaded %s firmware (%zd bytes)\n", fn, fw->size); release_firmware(fw); return size; } IVTV_ERR("Unable to open firmware %s (must be %ld bytes)\n", fn, size); IVTV_ERR("Did you put the firmware in the hotplug firmware directory?\n"); return -ENOMEM; } void ivtv_halt_firmware(struct ivtv *itv) { IVTV_DEBUG_INFO("Preparing for firmware halt.\n"); if (itv->has_cx23415 && itv->dec_mbox.mbox) ivtv_vapi(itv, CX2341X_DEC_HALT_FW, 0); if (itv->enc_mbox.mbox) ivtv_vapi(itv, CX2341X_ENC_HALT_FW, 0); ivtv_msleep_timeout(10, 0); itv->enc_mbox.mbox = itv->dec_mbox.mbox = NULL; IVTV_DEBUG_INFO("Stopping VDM\n"); write_reg(IVTV_CMD_VDM_STOP, IVTV_REG_VDM); IVTV_DEBUG_INFO("Stopping AO\n"); write_reg(IVTV_CMD_AO_STOP, IVTV_REG_AO); IVTV_DEBUG_INFO("pinging (?) 
APU\n"); write_reg(IVTV_CMD_APU_PING, IVTV_REG_APU); IVTV_DEBUG_INFO("Stopping VPU\n"); if (!itv->has_cx23415) write_reg(IVTV_CMD_VPU_STOP16, IVTV_REG_VPU); else write_reg(IVTV_CMD_VPU_STOP15, IVTV_REG_VPU); IVTV_DEBUG_INFO("Resetting Hw Blocks\n"); write_reg(IVTV_CMD_HW_BLOCKS_RST, IVTV_REG_HW_BLOCKS); IVTV_DEBUG_INFO("Stopping SPU\n"); write_reg(IVTV_CMD_SPU_STOP, IVTV_REG_SPU); ivtv_msleep_timeout(10, 0); IVTV_DEBUG_INFO("init Encoder SDRAM pre-charge\n"); write_reg(IVTV_CMD_SDRAM_PRECHARGE_INIT, IVTV_REG_ENC_SDRAM_PRECHARGE); IVTV_DEBUG_INFO("init Encoder SDRAM refresh to 1us\n"); write_reg(IVTV_CMD_SDRAM_REFRESH_INIT, IVTV_REG_ENC_SDRAM_REFRESH); if (itv->has_cx23415) { IVTV_DEBUG_INFO("init Decoder SDRAM pre-charge\n"); write_reg(IVTV_CMD_SDRAM_PRECHARGE_INIT, IVTV_REG_DEC_SDRAM_PRECHARGE); IVTV_DEBUG_INFO("init Decoder SDRAM refresh to 1us\n"); write_reg(IVTV_CMD_SDRAM_REFRESH_INIT, IVTV_REG_DEC_SDRAM_REFRESH); } IVTV_DEBUG_INFO("Sleeping for %dms\n", IVTV_SDRAM_SLEEPTIME); ivtv_msleep_timeout(IVTV_SDRAM_SLEEPTIME, 0); } void ivtv_firmware_versions(struct ivtv *itv) { u32 data[CX2341X_MBOX_MAX_DATA]; /* Encoder */ ivtv_vapi_result(itv, data, CX2341X_ENC_GET_VERSION, 0); IVTV_INFO("Encoder revision: 0x%08x\n", data[0]); if (data[0] != 0x02060039) IVTV_WARN("Recommended firmware version is 0x02060039.\n"); if (itv->has_cx23415) { /* Decoder */ ivtv_vapi_result(itv, data, CX2341X_DEC_GET_VERSION, 0); IVTV_INFO("Decoder revision: 0x%08x\n", data[0]); } } static int ivtv_firmware_copy(struct ivtv *itv) { IVTV_DEBUG_INFO("Loading encoder image\n"); if (load_fw_direct(CX2341X_FIRM_ENC_FILENAME, itv->enc_mem, itv, IVTV_FW_ENC_SIZE) != IVTV_FW_ENC_SIZE) { IVTV_DEBUG_WARN("failed loading encoder firmware\n"); return -3; } if (!itv->has_cx23415) return 0; IVTV_DEBUG_INFO("Loading decoder image\n"); if (load_fw_direct(CX2341X_FIRM_DEC_FILENAME, itv->dec_mem, itv, IVTV_FW_DEC_SIZE) != IVTV_FW_DEC_SIZE) { IVTV_DEBUG_WARN("failed loading decoder firmware\n"); return -1; } return 0; } static volatile struct ivtv_mailbox __iomem *ivtv_search_mailbox(const volatile u8 __iomem *mem, u32 size) { int i; /* mailbox is preceded by a 16 byte 'magic cookie' starting at a 256-byte address boundary */ for (i = 0; i < size; i += 0x100) { if (readl(mem + i) == 0x12345678 && readl(mem + i + 4) == 0x34567812 && readl(mem + i + 8) == 0x56781234 && readl(mem + i + 12) == 0x78123456) { return (volatile struct ivtv_mailbox __iomem *)(mem + i + 16); } } return NULL; } int ivtv_firmware_init(struct ivtv *itv) { int err; ivtv_halt_firmware(itv); /* load firmware */ err = ivtv_firmware_copy(itv); if (err) { IVTV_DEBUG_WARN("Error %d loading firmware\n", err); return err; } /* start firmware */ write_reg(read_reg(IVTV_REG_SPU) & IVTV_MASK_SPU_ENABLE, IVTV_REG_SPU); ivtv_msleep_timeout(100, 0); if (itv->has_cx23415) write_reg(read_reg(IVTV_REG_VPU) & IVTV_MASK_VPU_ENABLE15, IVTV_REG_VPU); else write_reg(read_reg(IVTV_REG_VPU) & IVTV_MASK_VPU_ENABLE16, IVTV_REG_VPU); ivtv_msleep_timeout(100, 0); /* find mailboxes and ping firmware */ itv->enc_mbox.mbox = ivtv_search_mailbox(itv->enc_mem, IVTV_ENCODER_SIZE); if (itv->enc_mbox.mbox == NULL) IVTV_ERR("Encoder mailbox not found\n"); else if (ivtv_vapi(itv, CX2341X_ENC_PING_FW, 0)) { IVTV_ERR("Encoder firmware dead!\n"); itv->enc_mbox.mbox = NULL; } if (itv->enc_mbox.mbox == NULL) return -ENODEV; if (!itv->has_cx23415) return 0; itv->dec_mbox.mbox = ivtv_search_mailbox(itv->dec_mem, IVTV_DECODER_SIZE); if (itv->dec_mbox.mbox == NULL) { IVTV_ERR("Decoder mailbox not found\n"); } 
else if (itv->has_cx23415 && ivtv_vapi(itv, CX2341X_DEC_PING_FW, 0)) { IVTV_ERR("Decoder firmware dead!\n"); itv->dec_mbox.mbox = NULL; } else { /* Firmware okay, so check yuv output filter table */ ivtv_yuv_filter_check(itv); } return itv->dec_mbox.mbox ? 0 : -ENODEV; } void ivtv_init_mpeg_decoder(struct ivtv *itv) { u32 data[CX2341X_MBOX_MAX_DATA]; long readbytes; volatile u8 __iomem *mem_offset; data[0] = 0; data[1] = itv->cxhdl.width; /* YUV source width */ data[2] = itv->cxhdl.height; data[3] = itv->cxhdl.audio_properties; /* Audio settings to use, bitmap. see docs. */ if (ivtv_api(itv, CX2341X_DEC_SET_DECODER_SOURCE, 4, data)) { IVTV_ERR("ivtv_init_mpeg_decoder failed to set decoder source\n"); return; } if (ivtv_vapi(itv, CX2341X_DEC_START_PLAYBACK, 2, 0, 1) != 0) { IVTV_ERR("ivtv_init_mpeg_decoder failed to start playback\n"); return; } ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 2, data); mem_offset = itv->dec_mem + data[1]; if ((readbytes = load_fw_direct(IVTV_DECODE_INIT_MPEG_FILENAME, mem_offset, itv, IVTV_DECODE_INIT_MPEG_SIZE)) <= 0) { IVTV_DEBUG_WARN("failed to read mpeg decoder initialisation file %s\n", IVTV_DECODE_INIT_MPEG_FILENAME); } else { ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, readbytes, 0); ivtv_msleep_timeout(100, 0); } ivtv_vapi(itv, CX2341X_DEC_STOP_PLAYBACK, 4, 0, 0, 0, 1); } /* Try to restart the card & restore previous settings */ int ivtv_firmware_restart(struct ivtv *itv) { int rc = 0; v4l2_std_id std; if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) /* Display test image during restart */ ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_routing, SAA7127_INPUT_TYPE_TEST_IMAGE, itv->card->video_outputs[itv->active_output].video_output, 0); mutex_lock(&itv->udma.lock); rc = ivtv_firmware_init(itv); if (rc) { mutex_unlock(&itv->udma.lock); return rc; } /* Allow settings to reload */ ivtv_mailbox_cache_invalidate(itv); /* Restore encoder video standard */ std = itv->std; itv->std = 0; ivtv_s_std_enc(itv, &std); if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) { ivtv_init_mpeg_decoder(itv); /* Restore decoder video standard */ std = itv->std_out; itv->std_out = 0; ivtv_s_std_dec(itv, &std); /* Restore framebuffer if active */ if (itv->ivtvfb_restore) itv->ivtvfb_restore(itv); /* Restore alpha settings */ ivtv_set_osd_alpha(itv); /* Restore normal output */ ivtv_call_hw(itv, IVTV_HW_SAA7127, video, s_routing, SAA7127_INPUT_TYPE_NORMAL, itv->card->video_outputs[itv->active_output].video_output, 0); } mutex_unlock(&itv->udma.lock); return rc; } /* Check firmware running state. The checks fall through allowing multiple failures to be logged. */ int ivtv_firmware_check(struct ivtv *itv, char *where) { int res = 0; /* Check encoder is still running */ if (ivtv_vapi(itv, CX2341X_ENC_PING_FW, 0) < 0) { IVTV_WARN("Encoder has died : %s\n", where); res = -1; } /* Also check audio. Only check if not in use & encoder is okay */ if (!res && !atomic_read(&itv->capturing) && (!atomic_read(&itv->decoding) || (atomic_read(&itv->decoding) < 2 && test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)))) { if (ivtv_vapi(itv, CX2341X_ENC_MISC, 1, 12) < 0) { IVTV_WARN("Audio has died (Encoder OK) : %s\n", where); res = -2; } } if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) { /* Second audio check. Skip if audio already failed */ if (res != -2 && read_dec(0x100) != read_dec(0x104)) { /* Wait & try again to be certain. 
*/ ivtv_msleep_timeout(14, 0); if (read_dec(0x100) != read_dec(0x104)) { IVTV_WARN("Audio has died (Decoder) : %s\n", where); res = -1; } } /* Check decoder is still running */ if (ivtv_vapi(itv, CX2341X_DEC_PING_FW, 0) < 0) { IVTV_WARN("Decoder has died : %s\n", where); res = -1; } } /* If something failed & currently idle, try to reload */ if (res && !atomic_read(&itv->capturing) && !atomic_read(&itv->decoding)) { IVTV_INFO("Detected in %s that firmware had failed - " "Reloading\n", where); res = ivtv_firmware_restart(itv); /* * Even if restarted ok, still signal a problem had occurred. * The caller can come through this function again to check * if things are really ok after the restart. */ if (!res) { IVTV_INFO("Firmware restart okay\n"); res = -EAGAIN; } else { IVTV_INFO("Firmware restart failed\n"); } } else if (res) { res = -EIO; } return res; }
gpl-2.0
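ivtv_search_mailbox() above locates the firmware mailbox by scanning the on-card memory window at 256-byte boundaries for a fixed 16-byte magic cookie and returning the address just past it. The fragment below is a minimal host-memory sketch of that same scan, not driver code: rd32() and find_mailbox() are made-up names, and a plain memcpy stands in for readl().

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Illustrative stand-in for readl(): fetch one 32-bit word (host endian). */
static uint32_t rd32(const uint8_t *p)
{
        uint32_t v;
        memcpy(&v, p, sizeof(v));
        return v;
}

/* Illustrative mirror of ivtv_search_mailbox(): the mailbox begins 16 bytes
 * after a magic cookie that sits on a 256-byte boundary. */
static const uint8_t *find_mailbox(const uint8_t *mem, size_t size)
{
        size_t i;

        for (i = 0; i + 16 <= size; i += 0x100) {
                if (rd32(mem + i)      == 0x12345678 &&
                    rd32(mem + i + 4)  == 0x34567812 &&
                    rd32(mem + i + 8)  == 0x56781234 &&
                    rd32(mem + i + 12) == 0x78123456)
                        return mem + i + 16;    /* mailbox follows the cookie */
        }
        return NULL;                            /* cookie not found */
}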
GustavoRD78/78Kernel-5.1.1-23.4.A.0.546
drivers/media/rc/keymaps/rc-snapstream-firefly.c
9511
2662
/*
 * SnapStream Firefly X10 RF remote keytable
 *
 * Copyright (C) 2011 Anssi Hannula <anssi.hannula@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/module.h>
#include <media/rc-map.h>

static struct rc_map_table snapstream_firefly[] = {
	{ 0x2c, KEY_ZOOM },       /* Maximize */
	{ 0x02, KEY_CLOSE },

	{ 0x0d, KEY_1 },
	{ 0x0e, KEY_2 },
	{ 0x0f, KEY_3 },
	{ 0x10, KEY_4 },
	{ 0x11, KEY_5 },
	{ 0x12, KEY_6 },
	{ 0x13, KEY_7 },
	{ 0x14, KEY_8 },
	{ 0x15, KEY_9 },
	{ 0x17, KEY_0 },

	{ 0x16, KEY_BACK },
	{ 0x18, KEY_KPENTER },    /* ent */

	{ 0x09, KEY_VOLUMEUP },
	{ 0x08, KEY_VOLUMEDOWN },
	{ 0x0a, KEY_MUTE },
	{ 0x0b, KEY_CHANNELUP },
	{ 0x0c, KEY_CHANNELDOWN },

	{ 0x00, KEY_VENDOR },     /* firefly */
	{ 0x2e, KEY_INFO },
	{ 0x2f, KEY_OPTION },

	{ 0x1d, KEY_LEFT },
	{ 0x1f, KEY_RIGHT },
	{ 0x22, KEY_DOWN },
	{ 0x1a, KEY_UP },
	{ 0x1e, KEY_OK },

	{ 0x1c, KEY_MENU },
	{ 0x20, KEY_EXIT },

	{ 0x27, KEY_RECORD },
	{ 0x25, KEY_PLAY },
	{ 0x28, KEY_STOP },
	{ 0x24, KEY_REWIND },
	{ 0x26, KEY_FORWARD },
	{ 0x29, KEY_PAUSE },
	{ 0x2b, KEY_PREVIOUS },
	{ 0x2a, KEY_NEXT },

	{ 0x06, KEY_AUDIO },      /* Music */
	{ 0x05, KEY_IMAGES },     /* Photos */
	{ 0x04, KEY_DVD },
	{ 0x03, KEY_TV },
	{ 0x07, KEY_VIDEO },

	{ 0x01, KEY_HELP },
	{ 0x2d, KEY_MODE },       /* Mouse */

	{ 0x19, KEY_A },
	{ 0x1b, KEY_B },
	{ 0x21, KEY_C },
	{ 0x23, KEY_D },
};

static struct rc_map_list snapstream_firefly_map = {
	.map = {
		.scan    = snapstream_firefly,
		.size    = ARRAY_SIZE(snapstream_firefly),
		.rc_type = RC_TYPE_OTHER,
		.name    = RC_MAP_SNAPSTREAM_FIREFLY,
	}
};

static int __init init_rc_map_snapstream_firefly(void)
{
	return rc_map_register(&snapstream_firefly_map);
}

static void __exit exit_rc_map_snapstream_firefly(void)
{
	rc_map_unregister(&snapstream_firefly_map);
}

module_init(init_rc_map_snapstream_firefly)
module_exit(exit_rc_map_snapstream_firefly)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
gpl-2.0
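The keymap above is plain data: each rc_map_table entry pairs an X10 scancode with an input-layer keycode, and rc_map_register() hands the whole array to the RC core. As a rough illustration of how such a table is consumed, here is a linear scancode lookup over an array of the same type; example_lookup_keycode() is a hypothetical helper, not part of the RC core or of this keymap module.

#include <media/rc-map.h>

/* Illustrative helper (not in the driver): resolve a scancode against an
 * rc_map_table array such as snapstream_firefly[] above. */
static u32 example_lookup_keycode(const struct rc_map_table *map,
                                  unsigned int size, u32 scancode)
{
        unsigned int i;

        for (i = 0; i < size; i++)
                if (map[i].scancode == scancode)
                        return map[i].keycode;
        return KEY_RESERVED;    /* no mapping found */
}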
TheTypoMaster/android_kernel_htc_a31ul
drivers/isdn/hisax/st5481_b.c
9511
9957
/* * Driver for ST5481 USB ISDN modem * * Author Frode Isaksen * Copyright 2001 by Frode Isaksen <fisaksen@bewan.com> * 2001 by Kai Germaschewski <kai.germaschewski@gmx.de> * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/init.h> #include <linux/gfp.h> #include <linux/usb.h> #include <linux/netdevice.h> #include <linux/bitrev.h> #include "st5481.h" static inline void B_L1L2(struct st5481_bcs *bcs, int pr, void *arg) { struct hisax_if *ifc = (struct hisax_if *) &bcs->b_if; ifc->l1l2(ifc, pr, arg); } /* * Encode and transmit next frame. */ static void usb_b_out(struct st5481_bcs *bcs, int buf_nr) { struct st5481_b_out *b_out = &bcs->b_out; struct st5481_adapter *adapter = bcs->adapter; struct urb *urb; unsigned int packet_size, offset; int len, buf_size, bytes_sent; int i; struct sk_buff *skb; if (test_and_set_bit(buf_nr, &b_out->busy)) { DBG(4, "ep %d urb %d busy", (bcs->channel + 1) * 2, buf_nr); return; } urb = b_out->urb[buf_nr]; // Adjust isoc buffer size according to flow state if (b_out->flow_event & (OUT_DOWN | OUT_UNDERRUN)) { buf_size = NUM_ISO_PACKETS_B * SIZE_ISO_PACKETS_B_OUT + B_FLOW_ADJUST; packet_size = SIZE_ISO_PACKETS_B_OUT + B_FLOW_ADJUST; DBG(4, "B%d,adjust flow,add %d bytes", bcs->channel + 1, B_FLOW_ADJUST); } else if (b_out->flow_event & OUT_UP) { buf_size = NUM_ISO_PACKETS_B * SIZE_ISO_PACKETS_B_OUT - B_FLOW_ADJUST; packet_size = SIZE_ISO_PACKETS_B_OUT - B_FLOW_ADJUST; DBG(4, "B%d,adjust flow,remove %d bytes", bcs->channel + 1, B_FLOW_ADJUST); } else { buf_size = NUM_ISO_PACKETS_B * SIZE_ISO_PACKETS_B_OUT; packet_size = 8; } b_out->flow_event = 0; len = 0; while (len < buf_size) { if ((skb = b_out->tx_skb)) { DBG_SKB(0x100, skb); DBG(4, "B%d,len=%d", bcs->channel + 1, skb->len); if (bcs->mode == L1_MODE_TRANS) { bytes_sent = buf_size - len; if (skb->len < bytes_sent) bytes_sent = skb->len; { /* swap tx bytes to get hearable audio data */ register unsigned char *src = skb->data; register unsigned char *dest = urb->transfer_buffer + len; register unsigned int count; for (count = 0; count < bytes_sent; count++) *dest++ = bitrev8(*src++); } len += bytes_sent; } else { len += isdnhdlc_encode(&b_out->hdlc_state, skb->data, skb->len, &bytes_sent, urb->transfer_buffer + len, buf_size-len); } skb_pull(skb, bytes_sent); if (!skb->len) { // Frame sent b_out->tx_skb = NULL; B_L1L2(bcs, PH_DATA | CONFIRM, (void *)(unsigned long) skb->truesize); dev_kfree_skb_any(skb); /* if (!(bcs->tx_skb = skb_dequeue(&bcs->sq))) { */ /* st5481B_sched_event(bcs, B_XMTBUFREADY); */ /* } */ } } else { if (bcs->mode == L1_MODE_TRANS) { memset(urb->transfer_buffer + len, 0xff, buf_size-len); len = buf_size; } else { // Send flags len += isdnhdlc_encode(&b_out->hdlc_state, NULL, 0, &bytes_sent, urb->transfer_buffer + len, buf_size-len); } } } // Prepare the URB for (i = 0, offset = 0; offset < len; i++) { urb->iso_frame_desc[i].offset = offset; urb->iso_frame_desc[i].length = packet_size; offset += packet_size; packet_size = SIZE_ISO_PACKETS_B_OUT; } urb->transfer_buffer_length = len; urb->number_of_packets = i; urb->dev = adapter->usb_dev; DBG_ISO_PACKET(0x200, urb); SUBMIT_URB(urb, GFP_NOIO); } /* * Start transferring (flags or data) on the B channel, since * FIFO counters has been set to a non-zero value. 
*/ static void st5481B_start_xfer(void *context) { struct st5481_bcs *bcs = context; DBG(4, "B%d", bcs->channel + 1); // Start transmitting (flags or data) on B channel usb_b_out(bcs, 0); usb_b_out(bcs, 1); } /* * If the adapter has only 2 LEDs, the green * LED will blink with a rate depending * on the number of channels opened. */ static void led_blink(struct st5481_adapter *adapter) { u_char leds = adapter->leds; // 50 frames/sec for each channel if (++adapter->led_counter % 50) { return; } if (adapter->led_counter % 100) { leds |= GREEN_LED; } else { leds &= ~GREEN_LED; } st5481_usb_device_ctrl_msg(adapter, GPIO_OUT, leds, NULL, NULL); } static void usb_b_out_complete(struct urb *urb) { struct st5481_bcs *bcs = urb->context; struct st5481_b_out *b_out = &bcs->b_out; struct st5481_adapter *adapter = bcs->adapter; int buf_nr; buf_nr = get_buf_nr(b_out->urb, urb); test_and_clear_bit(buf_nr, &b_out->busy); if (unlikely(urb->status < 0)) { switch (urb->status) { case -ENOENT: case -ESHUTDOWN: case -ECONNRESET: DBG(4, "urb killed status %d", urb->status); return; // Give up default: WARNING("urb status %d", urb->status); if (b_out->busy == 0) { st5481_usb_pipe_reset(adapter, (bcs->channel + 1) * 2 | USB_DIR_OUT, NULL, NULL); } break; } } usb_b_out(bcs, buf_nr); if (adapter->number_of_leds == 2) led_blink(adapter); } /* * Start or stop the transfer on the B channel. */ static void st5481B_mode(struct st5481_bcs *bcs, int mode) { struct st5481_b_out *b_out = &bcs->b_out; struct st5481_adapter *adapter = bcs->adapter; DBG(4, "B%d,mode=%d", bcs->channel + 1, mode); if (bcs->mode == mode) return; bcs->mode = mode; // Cancel all USB transfers on this B channel usb_unlink_urb(b_out->urb[0]); usb_unlink_urb(b_out->urb[1]); b_out->busy = 0; st5481_in_mode(&bcs->b_in, mode); if (bcs->mode != L1_MODE_NULL) { // Open the B channel if (bcs->mode != L1_MODE_TRANS) { u32 features = HDLC_BITREVERSE; if (bcs->mode == L1_MODE_HDLC_56K) features |= HDLC_56KBIT; isdnhdlc_out_init(&b_out->hdlc_state, features); } st5481_usb_pipe_reset(adapter, (bcs->channel + 1) * 2, NULL, NULL); // Enable B channel interrupts st5481_usb_device_ctrl_msg(adapter, FFMSK_B1 + (bcs->channel * 2), OUT_UP + OUT_DOWN + OUT_UNDERRUN, NULL, NULL); // Enable B channel FIFOs st5481_usb_device_ctrl_msg(adapter, OUT_B1_COUNTER+(bcs->channel * 2), 32, st5481B_start_xfer, bcs); if (adapter->number_of_leds == 4) { if (bcs->channel == 0) { adapter->leds |= B1_LED; } else { adapter->leds |= B2_LED; } } } else { // Disble B channel interrupts st5481_usb_device_ctrl_msg(adapter, FFMSK_B1+(bcs->channel * 2), 0, NULL, NULL); // Disable B channel FIFOs st5481_usb_device_ctrl_msg(adapter, OUT_B1_COUNTER+(bcs->channel * 2), 0, NULL, NULL); if (adapter->number_of_leds == 4) { if (bcs->channel == 0) { adapter->leds &= ~B1_LED; } else { adapter->leds &= ~B2_LED; } } else { st5481_usb_device_ctrl_msg(adapter, GPIO_OUT, adapter->leds, NULL, NULL); } if (b_out->tx_skb) { dev_kfree_skb_any(b_out->tx_skb); b_out->tx_skb = NULL; } } } static int st5481_setup_b_out(struct st5481_bcs *bcs) { struct usb_device *dev = bcs->adapter->usb_dev; struct usb_interface *intf; struct usb_host_interface *altsetting = NULL; struct usb_host_endpoint *endpoint; struct st5481_b_out *b_out = &bcs->b_out; DBG(4, ""); intf = usb_ifnum_to_if(dev, 0); if (intf) altsetting = usb_altnum_to_altsetting(intf, 3); if (!altsetting) return -ENXIO; // Allocate URBs and buffers for the B channel out endpoint = &altsetting->endpoint[EP_B1_OUT - 1 + bcs->channel * 2]; DBG(4, "endpoint 
address=%02x,packet size=%d", endpoint->desc.bEndpointAddress, le16_to_cpu(endpoint->desc.wMaxPacketSize)); // Allocate memory for 8000bytes/sec + extra bytes if underrun return st5481_setup_isocpipes(b_out->urb, dev, usb_sndisocpipe(dev, endpoint->desc.bEndpointAddress), NUM_ISO_PACKETS_B, SIZE_ISO_PACKETS_B_OUT, NUM_ISO_PACKETS_B * SIZE_ISO_PACKETS_B_OUT + B_FLOW_ADJUST, usb_b_out_complete, bcs); } static void st5481_release_b_out(struct st5481_bcs *bcs) { struct st5481_b_out *b_out = &bcs->b_out; DBG(4, ""); st5481_release_isocpipes(b_out->urb); } int st5481_setup_b(struct st5481_bcs *bcs) { int retval; DBG(4, ""); retval = st5481_setup_b_out(bcs); if (retval) goto err; bcs->b_in.bufsize = HSCX_BUFMAX; bcs->b_in.num_packets = NUM_ISO_PACKETS_B; bcs->b_in.packet_size = SIZE_ISO_PACKETS_B_IN; bcs->b_in.ep = (bcs->channel ? EP_B2_IN : EP_B1_IN) | USB_DIR_IN; bcs->b_in.counter = bcs->channel ? IN_B2_COUNTER : IN_B1_COUNTER; bcs->b_in.adapter = bcs->adapter; bcs->b_in.hisax_if = &bcs->b_if.ifc; retval = st5481_setup_in(&bcs->b_in); if (retval) goto err_b_out; return 0; err_b_out: st5481_release_b_out(bcs); err: return retval; } /* * Release buffers and URBs for the B channels */ void st5481_release_b(struct st5481_bcs *bcs) { DBG(4, ""); st5481_release_in(&bcs->b_in); st5481_release_b_out(bcs); } /* * st5481_b_l2l1 is the entry point for upper layer routines that want to * transmit on the B channel. PH_DATA | REQUEST is a normal packet that * we either start transmitting (if idle) or queue (if busy). * PH_PULL | REQUEST can be called to request a callback message * (PH_PULL | CONFIRM) * once the link is idle. After a "pull" callback, the upper layer * routines can use PH_PULL | INDICATION to send data. */ void st5481_b_l2l1(struct hisax_if *ifc, int pr, void *arg) { struct st5481_bcs *bcs = ifc->priv; struct sk_buff *skb = arg; long mode; DBG(4, ""); switch (pr) { case PH_DATA | REQUEST: BUG_ON(bcs->b_out.tx_skb); bcs->b_out.tx_skb = skb; break; case PH_ACTIVATE | REQUEST: mode = (long) arg; DBG(4, "B%d,PH_ACTIVATE_REQUEST %ld", bcs->channel + 1, mode); st5481B_mode(bcs, mode); B_L1L2(bcs, PH_ACTIVATE | INDICATION, NULL); break; case PH_DEACTIVATE | REQUEST: DBG(4, "B%d,PH_DEACTIVATE_REQUEST", bcs->channel + 1); st5481B_mode(bcs, L1_MODE_NULL); B_L1L2(bcs, PH_DEACTIVATE | INDICATION, NULL); break; default: WARNING("pr %#x\n", pr); } }
gpl-2.0
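In usb_b_out() above, transparent-mode (audio) payloads are copied with each byte bit-reversed via bitrev8() so the transmitted B-channel data comes out in the expected bit order. Below is a small self-contained sketch of that per-byte reversal; bitrev8_sketch() and copy_bitreversed() are illustrative stand-ins, the driver itself uses bitrev8() from <linux/bitrev.h>.

#include <stdint.h>
#include <stddef.h>

/* Illustrative stand-in for bitrev8(): reverse the bit order of one byte
 * by swapping nibbles, then bit pairs, then adjacent bits. */
static uint8_t bitrev8_sketch(uint8_t b)
{
        b = (uint8_t)(((b & 0xF0) >> 4) | ((b & 0x0F) << 4));
        b = (uint8_t)(((b & 0xCC) >> 2) | ((b & 0x33) << 2));
        b = (uint8_t)(((b & 0xAA) >> 1) | ((b & 0x55) << 1));
        return b;
}

/* Mirror of the copy loop in usb_b_out(): reverse each payload byte while
 * copying it into the URB transfer buffer. */
static void copy_bitreversed(uint8_t *dst, const uint8_t *src, size_t len)
{
        while (len--)
                *dst++ = bitrev8_sketch(*src++);
}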
uplusplus/ls300_smdkc110
drivers/net/chelsio/mv88x201x.c
12327
8786
/***************************************************************************** * * * File: mv88x201x.c * * $Revision: 1.12 $ * * $Date: 2005/04/15 19:27:14 $ * * Description: * * Marvell PHY (mv88x201x) functionality. * * part of the Chelsio 10Gb Ethernet Driver. * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License, version 2, as * * published by the Free Software Foundation. * * * * You should have received a copy of the GNU General Public License along * * with this program; if not, write to the Free Software Foundation, Inc., * * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED * * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF * * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * * * http://www.chelsio.com * * * * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. * * All rights reserved. * * * * Maintainers: maintainers@chelsio.com * * * * Authors: Dimitrios Michailidis <dm@chelsio.com> * * Tina Yang <tainay@chelsio.com> * * Felix Marti <felix@chelsio.com> * * Scott Bardone <sbardone@chelsio.com> * * Kurt Ottaway <kottaway@chelsio.com> * * Frank DiMambro <frank@chelsio.com> * * * * History: * * * ****************************************************************************/ #include "cphy.h" #include "elmer0.h" /* * The 88x2010 Rev C. requires some link status registers * to be read * twice in order to get the right values. Future * revisions will fix * this problem and then this macro * can disappear. */ #define MV88x2010_LINK_STATUS_BUGS 1 static int led_init(struct cphy *cphy) { /* Setup the LED registers so we can turn on/off. * Writing these bits maps control to another * register. mmd(0x1) addr(0x7) */ cphy_mdio_write(cphy, MDIO_MMD_PCS, 0x8304, 0xdddd); return 0; } static int led_link(struct cphy *cphy, u32 do_enable) { u32 led = 0; #define LINK_ENABLE_BIT 0x1 cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, &led); if (do_enable & LINK_ENABLE_BIT) { led |= LINK_ENABLE_BIT; cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, led); } else { led &= ~LINK_ENABLE_BIT; cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, led); } return 0; } /* Port Reset */ static int mv88x201x_reset(struct cphy *cphy, int wait) { /* This can be done through registers. It is not required since * a full chip reset is used. */ return 0; } static int mv88x201x_interrupt_enable(struct cphy *cphy) { /* Enable PHY LASI interrupts. */ cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, MDIO_PMA_LASI_LSALARM); /* Enable Marvell interrupts through Elmer0. */ if (t1_is_asic(cphy->adapter)) { u32 elmer; t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); elmer |= ELMER0_GP_BIT6; t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); } return 0; } static int mv88x201x_interrupt_disable(struct cphy *cphy) { /* Disable PHY LASI interrupts. */ cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0x0); /* Disable Marvell interrupts through Elmer0. */ if (t1_is_asic(cphy->adapter)) { u32 elmer; t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer); elmer &= ~ELMER0_GP_BIT6; t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer); } return 0; } static int mv88x201x_interrupt_clear(struct cphy *cphy) { u32 elmer; u32 val; #ifdef MV88x2010_LINK_STATUS_BUGS /* Required to read twice before clear takes affect. 
*/ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_RXSTAT, &val); cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_TXSTAT, &val); cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val); /* Read this register after the others above it else * the register doesn't clear correctly. */ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val); #endif /* Clear link status. */ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val); /* Clear PHY LASI interrupts. */ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val); #ifdef MV88x2010_LINK_STATUS_BUGS /* Do it again. */ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_RXSTAT, &val); cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_TXSTAT, &val); #endif /* Clear Marvell interrupts through Elmer0. */ if (t1_is_asic(cphy->adapter)) { t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer); elmer |= ELMER0_GP_BIT6; t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer); } return 0; } static int mv88x201x_interrupt_handler(struct cphy *cphy) { /* Clear interrupts */ mv88x201x_interrupt_clear(cphy); /* We have only enabled link change interrupts and so * cphy_cause must be a link change interrupt. */ return cphy_cause_link_change; } static int mv88x201x_set_loopback(struct cphy *cphy, int on) { return 0; } static int mv88x201x_get_link_status(struct cphy *cphy, int *link_ok, int *speed, int *duplex, int *fc) { u32 val = 0; if (link_ok) { /* Read link status. */ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val); val &= MDIO_STAT1_LSTATUS; *link_ok = (val == MDIO_STAT1_LSTATUS); /* Turn on/off Link LED */ led_link(cphy, *link_ok); } if (speed) *speed = SPEED_10000; if (duplex) *duplex = DUPLEX_FULL; if (fc) *fc = PAUSE_RX | PAUSE_TX; return 0; } static void mv88x201x_destroy(struct cphy *cphy) { kfree(cphy); } static struct cphy_ops mv88x201x_ops = { .destroy = mv88x201x_destroy, .reset = mv88x201x_reset, .interrupt_enable = mv88x201x_interrupt_enable, .interrupt_disable = mv88x201x_interrupt_disable, .interrupt_clear = mv88x201x_interrupt_clear, .interrupt_handler = mv88x201x_interrupt_handler, .get_link_status = mv88x201x_get_link_status, .set_loopback = mv88x201x_set_loopback, .mmds = (MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_PHYXS | MDIO_DEVS_WIS), }; static struct cphy *mv88x201x_phy_create(struct net_device *dev, int phy_addr, const struct mdio_ops *mdio_ops) { u32 val; struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL); if (!cphy) return NULL; cphy_init(cphy, dev, phy_addr, &mv88x201x_ops, mdio_ops); /* Commands the PHY to enable XFP's clock. */ cphy_mdio_read(cphy, MDIO_MMD_PCS, 0x8300, &val); cphy_mdio_write(cphy, MDIO_MMD_PCS, 0x8300, val | 1); /* Clear link status. Required because of a bug in the PHY. */ cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT2, &val); cphy_mdio_read(cphy, MDIO_MMD_PCS, MDIO_STAT2, &val); /* Allows for Link,Ack LED turn on/off */ led_init(cphy); return cphy; } /* Chip Reset */ static int mv88x201x_phy_reset(adapter_t *adapter) { u32 val; t1_tpi_read(adapter, A_ELMER0_GPO, &val); val &= ~4; t1_tpi_write(adapter, A_ELMER0_GPO, val); msleep(100); t1_tpi_write(adapter, A_ELMER0_GPO, val | 4); msleep(1000); /* Now lets enable the Laser. Delay 100us */ t1_tpi_read(adapter, A_ELMER0_GPO, &val); val |= 0x8000; t1_tpi_write(adapter, A_ELMER0_GPO, val); udelay(100); return 0; } const struct gphy t1_mv88x201x_ops = { .create = mv88x201x_phy_create, .reset = mv88x201x_phy_reset };
gpl-2.0
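mv88x201x_interrupt_clear() above works around the 88x2010 Rev. C erratum (MV88x2010_LINK_STATUS_BUGS) by reading the latched LASI and link status registers twice before the clear-on-read behaviour actually takes effect. A compressed sketch of that read-twice idiom follows, assuming cphy_mdio_read() keeps the (phy, mmd, reg, &val) shape used in the driver; read_latched_status_twice() is an illustrative name only.

#include "cphy.h"

/* Illustrative only: for registers affected by the Rev. C bug, the value
 * of interest is the one returned by the second read, and only then is
 * the latched status considered cleared. */
static u32 read_latched_status_twice(struct cphy *cphy, int mmd, int reg)
{
        u32 val;

        cphy_mdio_read(cphy, mmd, reg, &val);   /* first read arms the clear */
        cphy_mdio_read(cphy, mmd, reg, &val);   /* second read returns/clears */
        return val;
}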
moonlightly/android_kernel_huawei_msm7x27a
drivers/media/dvb/b2c2/flexcop-dma.c
14375
4316
/* * Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III * flexcop-dma.c - configuring and controlling the DMA of the FlexCop * see flexcop.c for copyright information */ #include "flexcop.h" int flexcop_dma_allocate(struct pci_dev *pdev, struct flexcop_dma *dma, u32 size) { u8 *tcpu; dma_addr_t tdma = 0; if (size % 2) { err("dma buffersize has to be even."); return -EINVAL; } if ((tcpu = pci_alloc_consistent(pdev, size, &tdma)) != NULL) { dma->pdev = pdev; dma->cpu_addr0 = tcpu; dma->dma_addr0 = tdma; dma->cpu_addr1 = tcpu + size/2; dma->dma_addr1 = tdma + size/2; dma->size = size/2; return 0; } return -ENOMEM; } EXPORT_SYMBOL(flexcop_dma_allocate); void flexcop_dma_free(struct flexcop_dma *dma) { pci_free_consistent(dma->pdev, dma->size*2, dma->cpu_addr0, dma->dma_addr0); memset(dma,0,sizeof(struct flexcop_dma)); } EXPORT_SYMBOL(flexcop_dma_free); int flexcop_dma_config(struct flexcop_device *fc, struct flexcop_dma *dma, flexcop_dma_index_t dma_idx) { flexcop_ibi_value v0x0,v0x4,v0xc; v0x0.raw = v0x4.raw = v0xc.raw = 0; v0x0.dma_0x0.dma_address0 = dma->dma_addr0 >> 2; v0xc.dma_0xc.dma_address1 = dma->dma_addr1 >> 2; v0x4.dma_0x4_write.dma_addr_size = dma->size / 4; if ((dma_idx & FC_DMA_1) == dma_idx) { fc->write_ibi_reg(fc,dma1_000,v0x0); fc->write_ibi_reg(fc,dma1_004,v0x4); fc->write_ibi_reg(fc,dma1_00c,v0xc); } else if ((dma_idx & FC_DMA_2) == dma_idx) { fc->write_ibi_reg(fc,dma2_010,v0x0); fc->write_ibi_reg(fc,dma2_014,v0x4); fc->write_ibi_reg(fc,dma2_01c,v0xc); } else { err("either DMA1 or DMA2 can be configured within one " "flexcop_dma_config call."); return -EINVAL; } return 0; } EXPORT_SYMBOL(flexcop_dma_config); /* start the DMA transfers, but not the DMA IRQs */ int flexcop_dma_xfer_control(struct flexcop_device *fc, flexcop_dma_index_t dma_idx, flexcop_dma_addr_index_t index, int onoff) { flexcop_ibi_value v0x0,v0xc; flexcop_ibi_register r0x0,r0xc; if ((dma_idx & FC_DMA_1) == dma_idx) { r0x0 = dma1_000; r0xc = dma1_00c; } else if ((dma_idx & FC_DMA_2) == dma_idx) { r0x0 = dma2_010; r0xc = dma2_01c; } else { err("either transfer DMA1 or DMA2 can be started within one " "flexcop_dma_xfer_control call."); return -EINVAL; } v0x0 = fc->read_ibi_reg(fc,r0x0); v0xc = fc->read_ibi_reg(fc,r0xc); deb_rdump("reg: %03x: %x\n",r0x0,v0x0.raw); deb_rdump("reg: %03x: %x\n",r0xc,v0xc.raw); if (index & FC_DMA_SUBADDR_0) v0x0.dma_0x0.dma_0start = onoff; if (index & FC_DMA_SUBADDR_1) v0xc.dma_0xc.dma_1start = onoff; fc->write_ibi_reg(fc,r0x0,v0x0); fc->write_ibi_reg(fc,r0xc,v0xc); deb_rdump("reg: %03x: %x\n",r0x0,v0x0.raw); deb_rdump("reg: %03x: %x\n",r0xc,v0xc.raw); return 0; } EXPORT_SYMBOL(flexcop_dma_xfer_control); static int flexcop_dma_remap(struct flexcop_device *fc, flexcop_dma_index_t dma_idx, int onoff) { flexcop_ibi_register r = (dma_idx & FC_DMA_1) ? 
dma1_00c : dma2_01c; flexcop_ibi_value v = fc->read_ibi_reg(fc,r); deb_info("%s\n",__func__); v.dma_0xc.remap_enable = onoff; fc->write_ibi_reg(fc,r,v); return 0; } int flexcop_dma_control_size_irq(struct flexcop_device *fc, flexcop_dma_index_t no, int onoff) { flexcop_ibi_value v = fc->read_ibi_reg(fc,ctrl_208); if (no & FC_DMA_1) v.ctrl_208.DMA1_IRQ_Enable_sig = onoff; if (no & FC_DMA_2) v.ctrl_208.DMA2_IRQ_Enable_sig = onoff; fc->write_ibi_reg(fc,ctrl_208,v); return 0; } EXPORT_SYMBOL(flexcop_dma_control_size_irq); int flexcop_dma_control_timer_irq(struct flexcop_device *fc, flexcop_dma_index_t no, int onoff) { flexcop_ibi_value v = fc->read_ibi_reg(fc,ctrl_208); if (no & FC_DMA_1) v.ctrl_208.DMA1_Timer_Enable_sig = onoff; if (no & FC_DMA_2) v.ctrl_208.DMA2_Timer_Enable_sig = onoff; fc->write_ibi_reg(fc,ctrl_208,v); return 0; } EXPORT_SYMBOL(flexcop_dma_control_timer_irq); /* 1 cycles = 1.97 msec */ int flexcop_dma_config_timer(struct flexcop_device *fc, flexcop_dma_index_t dma_idx, u8 cycles) { flexcop_ibi_register r = (dma_idx & FC_DMA_1) ? dma1_004 : dma2_014; flexcop_ibi_value v = fc->read_ibi_reg(fc,r); flexcop_dma_remap(fc,dma_idx,0); deb_info("%s\n",__func__); v.dma_0x4_write.dmatimer = cycles; fc->write_ibi_reg(fc,r,v); return 0; } EXPORT_SYMBOL(flexcop_dma_config_timer);
gpl-2.0
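flexcop_dma_allocate() above asks for one even-sized coherent buffer and then splits it in half, so cpu_addr0/dma_addr0 and cpu_addr1/dma_addr1 describe two independent sub-buffers the hardware can ping-pong between. The fragment below restates just that address arithmetic outside the PCI API; struct dma_halves and split_dma_buffer() are illustrative, not part of the driver.

#include <stdint.h>
#include <stddef.h>

/* Illustrative mirror of the split done in flexcop_dma_allocate(): one
 * allocation of 'size' bytes becomes two independent halves of size/2. */
struct dma_halves {
        uint8_t  *cpu0, *cpu1;
        uintptr_t bus0,  bus1;
        size_t    half;
};

static int split_dma_buffer(uint8_t *cpu, uintptr_t bus, size_t size,
                            struct dma_halves *h)
{
        if (size % 2)
                return -1;      /* the driver insists on an even total size */
        h->cpu0 = cpu;
        h->bus0 = bus;
        h->cpu1 = cpu + size / 2;
        h->bus1 = bus + size / 2;
        h->half = size / 2;
        return 0;
}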
tibere86/CM12-M8
drivers/media/platform/msm/camera_v2/yushanII/ilp0100_customer_platform.c
40
12774
/******************************************************************************* ################################################################################ # (C) STMicroelectronics 2012 # # This program is free software; you can redistribute it and/or modify it under # the terms of the GNU General Public License version 2 and only version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # #------------------------------------------------------------------------------ # Imaging Division ################################################################################ ********************************************************************************/ /*! * \file ilp0100_customer_platform.c * \brief definition of platform specific functions * \author sheena jain */ #include "ilp0100_customer_platform.h" #include <linux/errno.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <asm/segment.h> #include <asm/uaccess.h> #include <linux/buffer_head.h> #include "file_operation.h" #include <linux/firmware.h> #ifdef LINUX_TEST char gbuf[1000]; #endif #define READ_FIRMWARE_COUNT 3 static int fw_count = 0; static const struct firmware *fw[READ_FIRMWARE_COUNT]; //! \defgroup Platform_Functions /**************************************************************/ /* Platform Functions */ /**************************************************************/ /*! * \fn ilp0100_error Ilp0100_readFirmware(Ilp0100_structInitFirmware *InitFirmwareData) * \brief Ilp0100 Read firmware function. * \ingroup Platform_Functions * \param[out] InitFirmware * \retval ILP0100_ERROR_NONE : Success * \retval "Other Error Code" : Failure */ ilp0100_error Ilp0100_readFirmware(struct msm_sensor_ctrl_t *s_ctrl, Ilp0100_structInitFirmware *InitFirmwareData) { ilp0100_error Ret=ILP0100_ERROR_NONE; #ifdef ST_SPECIFIC uint8_t *pFileName[3]; #else uint8_t *pFileName[READ_FIRMWARE_COUNT]; #endif /*ST_SPECIFIC*/ ILP0100_LOG_FUNCTION_START((void*)&InitFirmwareData); #ifdef ST_SPECIFIC //! Fimware Binary to be read //pFileName[0]="/prj/imgsw/users/sheenaj/SB_1308/CVerifEnv/Ztrans/Csource/api/Ilp0100_Basic_IPM_Code_out.bin"; pFileName[0]=".\\ILP0100_IPM_Code_out.bin"; //! Copy the contents of Firmware Binary in a Buffer Ret=Ilp0100_readFileInBuffer(pFileName[0],&InitFirmwareData->pIlp0100Firmware, &InitFirmwareData->Ilp0100FirmwareSize ); if(Ret!=ILP0100_ERROR_NONE) { ILP0100_ERROR_LOG("\n Error in Reading Firmware code Binary"); ILP0100_LOG_FUNCTION_END(Ret); return Ret; } //! Calib Data Binary to be read pFileName[1]=".\\ILP0100_IPM_Data_out.bin"; //! Copy the contents of Calib Data Binary in a Buffer Ret=Ilp0100_readFileInBuffer(pFileName[1],&InitFirmwareData->pIlp0100SensorGenericCalibData, &InitFirmwareData->Ilp0100SensorGenericCalibDataSize ); if (Ret!=ILP0100_ERROR_NONE) { ILP0100_ERROR_LOG("\n Error in Reading Calib Data"); ILP0100_LOG_FUNCTION_END(Ret); return Ret; } //! 
For part to part Calibration Data /*InitFirmwareData->pIlp0100SensorRawPart2PartCalibData =0x0; InitFirmwareData->Ilp0100SensorRawPart2PartCalibDataSize= 0; */ //! Part to Part Calib Data Binary to be read pFileName[2]=".\\ILP0100_lscbuffer_out.bin"; //! Copy the contents of Part to PArt Calib Data Binary in a Buffer Ret=Ilp0100_readFileInBuffer(pFileName[2],&InitFirmwareData->pIlp0100SensorRawPart2PartCalibData, &InitFirmwareData->Ilp0100SensorRawPart2PartCalibDataSize ); if(Ret!=ILP0100_ERROR_NONE) { ILP0100_ERROR_LOG("\n Error in Reading Part to Part Calib Data Binary"); ILP0100_LOG_FUNCTION_END(Ret); return Ret; } #else /*hTC*/ /*Fimware Binary to be read*/ pFileName[0]="ILP0100_IPM_Code_out.bin"; /*Copy the contents of Firmware Binary in a Buffer*/ Ret=Ilp0100_readFileInBuffer(s_ctrl, pFileName[0], &InitFirmwareData->pIlp0100Firmware, &(InitFirmwareData->Ilp0100FirmwareSize)); if(Ret!=ILP0100_ERROR_NONE) { ILP0100_ERROR_LOG("\n Error in Reading Firmware code Binary"); ILP0100_LOG_FUNCTION_END(Ret); return Ret; } /*Calib Data Binary to be read*/ pFileName[1]="ILP0100_IPM_Data_out.bin"; /*Copy the contents of Calib Data Binary in a Buffer*/ Ret=Ilp0100_readFileInBuffer(s_ctrl, pFileName[1], &InitFirmwareData->pIlp0100SensorGenericCalibData, &(InitFirmwareData->Ilp0100SensorGenericCalibDataSize)); if (Ret!=ILP0100_ERROR_NONE) { ILP0100_ERROR_LOG("\n Error in Reading Calib Data"); ILP0100_LOG_FUNCTION_END(Ret); return Ret; } pFileName[2]="lscbuffer_rev2.bin"; //! Copy the contents of Part to PArt Calib Data Binary in a Buffer Ret=Ilp0100_readFileInBuffer(s_ctrl, pFileName[2],&InitFirmwareData->pIlp0100SensorRawPart2PartCalibData, &InitFirmwareData->Ilp0100SensorRawPart2PartCalibDataSize ); if(Ret!=ILP0100_ERROR_NONE) { ILP0100_ERROR_LOG("\n Error in Reading Part to Part Calib Data Binary"); ILP0100_LOG_FUNCTION_END(Ret); return Ret; } #endif /*ST_SPECIFIC*/ ILP0100_LOG_FUNCTION_END(Ret); return Ret; } void Ilp0100_store_firmwarwe(const struct firmware *fw_yushanII){ fw[fw_count]=fw_yushanII; fw_count ++; return; } void Ilp0100_release_firmware(void){ int i; for(i = 0; i< fw_count;i++){ release_firmware(fw[i]); } fw_count = 0; return; } /*! * \fn ilp0100_error Ilp0100_readFileInBuffer(uint8_t *pFileName,uint8_t **pAdd) * \brief Ilp0100 Read File In buffer function. * \ingroup Platform_Functions * \param[in] pFileName : Pointer to the filename * \param[out] pAdd : Address of the Buffer containing File Data * \retval ILP0100_ERROR_NONE : Success * \retval "Other Error Code" : Failure */ ilp0100_error Ilp0100_readFileInBuffer(struct msm_sensor_ctrl_t *s_ctrl, uint8_t *pFileName,uint8_t **pAdd, uint32_t* SizeOfBuffer) { ilp0100_error Ret=ILP0100_ERROR_NONE; #ifdef ST_SPECIFIC FILE *pFile; uint8_t *pBuffer; uint32_t Result; uint32_t Size; #else const struct firmware *fw_rawchip2 = NULL; int rc = 0; unsigned char *rawchipII_data_fw; u32 rawchipII_fw_size; #endif /*ST_SPECIFIC*/ ILP0100_LOG_FUNCTION_START((void*)&pFileName, (void*)&pAdd); #ifdef ST_SPECIFIC //! Open the binary File pFile = fopen ( pFileName , "rb" ); //ILP0100_DEBUG_LOG("FileName %s",pFileName); if (pFile==NULL) { ILP0100_ERROR_LOG("\nFile error "); ILP0100_LOG_FUNCTION_END(Ret); return ILP0100_ERROR; } //! Obtain file size: fseek (pFile , 0 , SEEK_END); Size = ftell (pFile); rewind (pFile); //! allocate buffer of file size pBuffer= (uint8_t*) malloc (sizeof(uint8_t)*Size); if (pBuffer == NULL) { ILP0100_ERROR_LOG ("\n Memory allocation failed"); ILP0100_LOG_FUNCTION_END(Ret); return ILP0100_ERROR; } //! 
Load the file in Buffer Result = fread (pBuffer,1,Size,pFile); if (Result != Size) { Ret=ILP0100_ERROR; ILP0100_ERROR_LOG("\n File Reading to buffer error"); ILP0100_ERROR_LOG("\n Result read=%d", Result); free(pBuffer); //! Free the allocated buffer in case of error ILP0100_LOG_FUNCTION_END(Ret); return Ret; } *pAdd= (uint8_t*)pBuffer; *SizeOfBuffer=Size; fclose(pFile); #else/*hTC*//*TODO:use request firmware*/ //HTC_CAM chuck add the mechanism of request FW. rc = request_firmware(&fw_rawchip2, pFileName, &(s_ctrl->pdev->dev)); if (rc!=0) { pr_info("request_firmware for error %d\n", rc); Ret = ILP0100_ERROR; return Ret; } Ilp0100_store_firmwarwe(fw_rawchip2); rawchipII_data_fw = (unsigned char *)fw_rawchip2->data; rawchipII_fw_size = (u32) fw_rawchip2->size; *pAdd = (uint8_t*)rawchipII_data_fw; *SizeOfBuffer = rawchipII_fw_size; //HTC_CAM_END #endif /*ST_SPECIFIC*/ ILP0100_LOG_FUNCTION_END(Ret); return Ret; } /*! * \fn ilp0100_error Ilp0100_interruptHandler() * \brief Ilp0100 function to handle Interrupts, Checks Status Register on both the Pins. * Calls the Interrupt manager to service the Interrupt. * \ingroup Platform_Functions * \retval ILP0100_ERROR_NONE : Success * \retval "Other Error Code" : Failure */ ilp0100_error Ilp0100_interruptHandler() { ilp0100_error Ret= ILP0100_ERROR_NONE; #ifdef ST_SPECIFIC bool_t Pin=INTR_PIN_0; uint32_t InterruptReadStatusPin0, InterruptReadStatusPin1; #else /* Should contain customer's variables */ #endif /*ST_SPECIFIC*/ ILP0100_LOG_FUNCTION_START(NULL); #ifdef ST_SPECIFIC //! Read the Interrupt Status Register on Pin0 Pin=INTR_PIN_0; Ret= Ilp0100_interruptReadStatus(&InterruptReadStatusPin0, Pin); if (Ret!=ILP0100_ERROR_NONE) { ILP0100_ERROR_LOG("Ilp0100_core_interruptReadStatus failed"); ILP0100_LOG_FUNCTION_END(Ret); return Ret; } //! If any Interrupt is there on Pin 0. Call Interrupt manger with Pin as Pin0. if(InterruptReadStatusPin0) { Ret= Ilp0100_interruptManager(InterruptReadStatusPin0, Pin); if (Ret!=ILP0100_ERROR_NONE) { ILP0100_ERROR_LOG("Ilp0100_interruptManagerPin0 failed"); ILP0100_LOG_FUNCTION_END(Ret); return Ret; } } Pin=INTR_PIN_1; //! Read the Interrupt Status Register on Pin1 Ret= Ilp0100_interruptReadStatus(&InterruptReadStatusPin1, Pin); if (Ret!=ILP0100_ERROR_NONE) { ILP0100_ERROR_LOG("Ilp0100_core_interruptReadStatus failed"); ILP0100_LOG_FUNCTION_END(Ret); return Ret; } //! If any Interrupt is there on Pin 1. Call Interrupt manger with Pin as Pin1. if(InterruptReadStatusPin1) { Ret= Ilp0100_interruptManager(InterruptReadStatusPin1, Pin); if (Ret!=ILP0100_ERROR_NONE) { ILP0100_ERROR_LOG("Ilp0100_interruptManagerPin1 failed"); ILP0100_LOG_FUNCTION_END(Ret); return Ret; } } #else /* Should contain customer's implementation */ /* Guidelines: */ /* Read the interrupt status and call the interrupt manager for each identified interrupt */ /* in order of priority */ #endif /*ST_SPECIFIC*/ ILP0100_LOG_FUNCTION_END(Ret); return Ret; } /*! * \fn ilp0100_error Ilp0100_interruptManager(uint32_t InterruptId, bool_t Pin) * \brief Ilp0100 function manage Interrupts. It calls the ISR as per the Interrupt Id. * Once the Interrupt is served, it calls Core function interruptClearStatus to clear * the status. 
* \ingroup Platform_Functions * \param[in] InterruptId : Id of the Interrupt * \param[in] Pin : Pin Value of GPIO Pin * \retval ILP0100_ERROR_NONE : Success * \retval "Other Error Code" : Failure */ ilp0100_error Ilp0100_interruptManager(uint32_t InterruptId, bool_t Pin) { ilp0100_error Ret=ILP0100_ERROR_NONE; ILP0100_LOG_FUNCTION_START((void*)&InterruptId, (void*)&Pin); #ifdef ST_SPECIFIC if(Pin==INTR_PIN_0) { for(i=0;i<32;i++) { //Switch Case for interrupt Id on Pin0 switch (InterruptId&(0x01<<i)) { case INTR_LONGEXP_GLACE_STATS_READY: break; case INTR_LONGEXP_HISTOGRAM_STATS_READY: break; case INTR_SHORTEXP_GLACE_STATS_READY: break; case INTR_SHORTEXP_HISTOGRAM_STATS_READY: break; default: break; } } } else { //Switch Case for interrupt Id on Pin1 } //! Clear the Interrupt Status Ret= Ilp0100_interruptClearStatus(InterruptId, Pin); if (Ret!=ILP0100_ERROR_NONE) { ILP0100_ERROR_LOG("Ilp0100_core_interruptClearStatus failed"); ILP0100_LOG_FUNCTION_END(Ret); return Ret; } #else /* Should contain customer's implementation */ /* Guidelines: */ /* Serve InterruptId interrupt */ #endif /*ST_SPECIFIC*/ ILP0100_LOG_FUNCTION_END(Ret); return Ret; } ilp0100_error Ilp0100_GetTimeStamp(uint32_t *pTimeStamp) { ilp0100_error Ret=ILP0100_ERROR_NONE; #ifdef ST_SPECIFIC *pTimeStamp=0x00; #else /* Should contain customer's implementation */ /* Guidelines: */ /* Get platform's time and return it */ #endif /*ST_SPECIFIC*/ return Ret; } #if 0 ilp0100_error Ilp0100_DumpLogInFile(uint8_t *pFileName) { ilp0100_error Ret=ILP0100_ERROR_NONE; uint8_t *pBuffer; FILE *pFile; uint32_t BufferSize; Ret= Ilp0100_loggingGetSize(&BufferSize); if(Ret==ILP0100_ERROR_NONE){ pBuffer = kmalloc(size_t size, gfp_t flags)(sizeof(uint8_t)*BufferSize); Ret = Ilp0100_loggingReadBack(pBuffer, &BufferSize); } /* Dump buffer in file */ /* dump_in_file is customer defined function to dump in ilp0100_log.bin file content of pBuffer */ if(Ret==ILP0100_ERROR_NONE){ pFile = fopen ( pFileName , "wb" ); fwrite(pBuffer, sizeof(uint8_t), BufferSize, pFile); fclose(pFile); } return Ret; } #endif
gpl-2.0
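Ilp0100_readFileInBuffer() above (in its non-ST_SPECIFIC branch) is a thin wrapper around the kernel firmware loader: request_firmware() fetches a named blob, the data pointer and size are handed back to the caller, and the struct firmware is cached until Ilp0100_release_firmware() calls release_firmware(). A minimal sketch of that pattern is below; example_load_blob() and its parameter names are assumptions for illustration.

#include <linux/firmware.h>
#include <linux/device.h>

/* Illustrative only: load a named firmware blob and expose its payload.
 * The caller must keep 'fw' and call release_firmware(*fw) when done,
 * just as the driver does through its small firmware cache. */
static int example_load_blob(struct device *dev, const char *name,
                             const struct firmware **fw,
                             const u8 **data, size_t *size)
{
        int rc = request_firmware(fw, name, dev);

        if (rc)
                return rc;      /* e.g. -ENOENT if the file is missing */
        *data = (*fw)->data;
        *size = (*fw)->size;
        return 0;
}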
jaredjones/TrinityCore
src/server/ipc/Commands.cpp
40
2189
/* * Copyright (C) 2008-2015 TrinityCore <http://www.trinitycore.org/> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along * with this program. If not, see <http://www.gnu.org/licenses/>. */ #include "Commands.h" #include <zmqpp/message.hpp> zmqpp::message& operator>>(zmqpp::message& msg, IPCHeader& header) { msg >> header.Channel; msg >> header.Command; return msg; } zmqpp::message& operator>>(zmqpp::message& msg, Battlenet::RealmHandle& realm) { msg >> realm.Region; msg >> realm.Battlegroup; msg >> realm.Index; return msg; } zmqpp::message& operator>>(zmqpp::message& msg, Battlenet::Header& header) { msg >> header.Ipc; msg >> header.Realm; return msg; } zmqpp::message& operator>>(zmqpp::message& msg, Battlenet::ToonHandle& toonHandle) { msg >> toonHandle.AccountId; msg >> toonHandle.GameAccountId; msg >> toonHandle.Guid; msg >> toonHandle.Name; return msg; } zmqpp::message& operator<<(zmqpp::message& msg, IPCHeader const& header) { msg << header.Channel; msg << header.Command; return msg; } zmqpp::message& operator<<(zmqpp::message& msg, Battlenet::RealmHandle const& realm) { msg << realm.Region; msg << realm.Battlegroup; msg << realm.Index; return msg; } zmqpp::message& operator<<(zmqpp::message& msg, Battlenet::Header const& header) { msg << header.Ipc; msg << header.Realm; return msg; } zmqpp::message& operator<<(zmqpp::message& msg, Battlenet::ToonHandle const& toonHandle) { msg << toonHandle.AccountId; msg << toonHandle.GameAccountId; msg << toonHandle.Guid; msg << toonHandle.Name; return msg; }
gpl-2.0
bq-dev/android_kernel_bq_msm8976
drivers/spi/spi_qsd.c
40
75604
/* Copyright (c) 2008-2015, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ /* * SPI driver for Qualcomm MSM platforms * */ #include <linux/version.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/list.h> #include <linux/irq.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/interrupt.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/workqueue.h> #include <linux/io.h> #include <linux/debugfs.h> #include <linux/gpio.h> #include <linux/of.h> #include <linux/of_gpio.h> #include <linux/dma-mapping.h> #include <linux/sched.h> #include <linux/mutex.h> #include <linux/atomic.h> #include <linux/pm_runtime.h> #include <linux/qcom-spi.h> #include <linux/msm-sps.h> #include <linux/msm-bus.h> #include <linux/msm-bus-board.h> #include "spi_qsd.h" static int msm_spi_pm_resume_runtime(struct device *device); static int msm_spi_pm_suspend_runtime(struct device *device); static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd); static inline int msm_spi_configure_gsbi(struct msm_spi *dd, struct platform_device *pdev) { struct resource *resource; unsigned long gsbi_mem_phys_addr; size_t gsbi_mem_size; void __iomem *gsbi_base; resource = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!resource) return 0; gsbi_mem_phys_addr = resource->start; gsbi_mem_size = resource_size(resource); if (!devm_request_mem_region(&pdev->dev, gsbi_mem_phys_addr, gsbi_mem_size, SPI_DRV_NAME)) return -ENXIO; gsbi_base = devm_ioremap(&pdev->dev, gsbi_mem_phys_addr, gsbi_mem_size); if (!gsbi_base) return -ENXIO; /* Set GSBI to SPI mode */ writel_relaxed(GSBI_SPI_CONFIG, gsbi_base + GSBI_CTRL_REG); return 0; } static inline void msm_spi_register_init(struct msm_spi *dd) { writel_relaxed(0x00000001, dd->base + SPI_SW_RESET); msm_spi_set_state(dd, SPI_OP_STATE_RESET); writel_relaxed(0x00000000, dd->base + SPI_OPERATIONAL); writel_relaxed(0x00000000, dd->base + SPI_CONFIG); writel_relaxed(0x00000000, dd->base + SPI_IO_MODES); if (dd->qup_ver) writel_relaxed(0x00000000, dd->base + QUP_OPERATIONAL_MASK); } static int msm_spi_pinctrl_init(struct msm_spi *dd) { dd->pinctrl = devm_pinctrl_get(dd->dev); if (IS_ERR_OR_NULL(dd->pinctrl)) { dev_err(dd->dev, "Failed to get pin ctrl\n"); return PTR_ERR(dd->pinctrl); } dd->pins_active = pinctrl_lookup_state(dd->pinctrl, SPI_PINCTRL_STATE_DEFAULT); if (IS_ERR_OR_NULL(dd->pins_active)) { dev_err(dd->dev, "Failed to lookup pinctrl default state\n"); return PTR_ERR(dd->pins_active); } dd->pins_sleep = pinctrl_lookup_state(dd->pinctrl, SPI_PINCTRL_STATE_SLEEP); if (IS_ERR_OR_NULL(dd->pins_sleep)) { dev_err(dd->dev, "Failed to lookup pinctrl sleep state\n"); return PTR_ERR(dd->pins_sleep); } return 0; } static inline int msm_spi_request_gpios(struct msm_spi *dd) { int i = 0; int result = 0; if (!dd->pdata->use_pinctrl) { for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) { if (dd->spi_gpios[i] >= 0) { result = gpio_request(dd->spi_gpios[i], spi_rsrcs[i]); if (result) { dev_err(dd->dev, "%s: gpio_request for pin %d " 
"failed with error %d\n" , __func__, dd->spi_gpios[i], result); goto error; } } } } else { result = pinctrl_select_state(dd->pinctrl, dd->pins_active); if (result) { dev_err(dd->dev, "%s: Can not set %s pins\n", __func__, SPI_PINCTRL_STATE_DEFAULT); goto error; } } return 0; error: if (!dd->pdata->use_pinctrl) { for (; --i >= 0;) { if (dd->spi_gpios[i] >= 0) gpio_free(dd->spi_gpios[i]); } } return result; } static inline void msm_spi_free_gpios(struct msm_spi *dd) { int i; int result = 0; if (!dd->pdata->use_pinctrl) { for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) { if (dd->spi_gpios[i] >= 0) gpio_free(dd->spi_gpios[i]); } for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) { if (dd->cs_gpios[i].valid) { gpio_free(dd->cs_gpios[i].gpio_num); dd->cs_gpios[i].valid = 0; } } } else { result = pinctrl_select_state(dd->pinctrl, dd->pins_sleep); if (result) dev_err(dd->dev, "%s: Can not set %s pins\n", __func__, SPI_PINCTRL_STATE_SLEEP); } } static inline int msm_spi_request_cs_gpio(struct msm_spi *dd) { int cs_num; int rc; cs_num = dd->cur_msg->spi->chip_select; if (!(dd->cur_msg->spi->mode & SPI_LOOP)) { if (!dd->pdata->use_pinctrl) { if ((!(dd->cs_gpios[cs_num].valid)) && (dd->cs_gpios[cs_num].gpio_num >= 0)) { rc = gpio_request(dd->cs_gpios[cs_num].gpio_num, spi_cs_rsrcs[cs_num]); if (rc) { dev_err(dd->dev, "gpio_request for pin %d failed,error %d\n", dd->cs_gpios[cs_num].gpio_num, rc); return rc; } dd->cs_gpios[cs_num].valid = 1; } } } return 0; } static inline void msm_spi_free_cs_gpio(struct msm_spi *dd) { int cs_num; cs_num = dd->cur_msg->spi->chip_select; if (!dd->pdata->use_pinctrl) { if (dd->cs_gpios[cs_num].valid) { gpio_free(dd->cs_gpios[cs_num].gpio_num); dd->cs_gpios[cs_num].valid = 0; } } } /** * msm_spi_clk_max_rate: finds the nearest lower rate for a clk * @clk the clock for which to find nearest lower rate * @rate clock frequency in Hz * @return nearest lower rate or negative error value * * Public clock API extends clk_round_rate which is a ceiling function. This * function is a floor function implemented as a binary search using the * ceiling function. 
*/ static long msm_spi_clk_max_rate(struct clk *clk, unsigned long rate) { long lowest_available, nearest_low, step_size, cur; long step_direction = -1; long guess = rate; int max_steps = 10; cur = clk_round_rate(clk, rate); if (cur == rate) return rate; /* if we got here then: cur > rate */ lowest_available = clk_round_rate(clk, 0); if (lowest_available > rate) return -EINVAL; step_size = (rate - lowest_available) >> 1; nearest_low = lowest_available; while (max_steps-- && step_size) { guess += step_size * step_direction; cur = clk_round_rate(clk, guess); if ((cur < rate) && (cur > nearest_low)) nearest_low = cur; /* * if we stepped too far, then start stepping in the other * direction with half the step size */ if (((cur > rate) && (step_direction > 0)) || ((cur < rate) && (step_direction < 0))) { step_direction = -step_direction; step_size >>= 1; } } return nearest_low; } static void msm_spi_clock_set(struct msm_spi *dd, int speed) { long rate; int rc; rate = msm_spi_clk_max_rate(dd->clk, speed); if (rate < 0) { dev_err(dd->dev, "%s: no match found for requested clock frequency:%d", __func__, speed); return; } rc = clk_set_rate(dd->clk, rate); if (!rc) dd->clock_speed = rate; } static void msm_spi_clk_path_vote(struct msm_spi *dd) { if (dd->clk_path_vote.client_hdl) msm_bus_scale_client_update_request( dd->clk_path_vote.client_hdl, MSM_SPI_CLK_PATH_RESUME_VEC); } static void msm_spi_clk_path_unvote(struct msm_spi *dd) { if (dd->clk_path_vote.client_hdl) msm_bus_scale_client_update_request( dd->clk_path_vote.client_hdl, MSM_SPI_CLK_PATH_SUSPEND_VEC); } static void msm_spi_clk_path_teardown(struct msm_spi *dd) { if (dd->pdata->active_only) msm_spi_clk_path_unvote(dd); if (dd->clk_path_vote.client_hdl) { msm_bus_scale_unregister_client(dd->clk_path_vote.client_hdl); dd->clk_path_vote.client_hdl = 0; } } /** * msm_spi_clk_path_init_structs: internal impl detail of msm_spi_clk_path_init * * allocates and initilizes the bus scaling vectors. 
*/ static int msm_spi_clk_path_init_structs(struct msm_spi *dd) { struct msm_bus_vectors *paths = NULL; struct msm_bus_paths *usecases = NULL; dev_dbg(dd->dev, "initialises path clock voting structs"); paths = devm_kzalloc(dd->dev, sizeof(*paths) * 2, GFP_KERNEL); if (!paths) { dev_err(dd->dev, "msm_bus_paths.paths memory allocation failed"); return -ENOMEM; } usecases = devm_kzalloc(dd->dev, sizeof(*usecases) * 2, GFP_KERNEL); if (!usecases) { dev_err(dd->dev, "msm_bus_scale_pdata.usecases memory allocation failed"); goto path_init_err; } dd->clk_path_vote.pdata = devm_kzalloc(dd->dev, sizeof(*dd->clk_path_vote.pdata), GFP_KERNEL); if (!dd->clk_path_vote.pdata) { dev_err(dd->dev, "msm_bus_scale_pdata memory allocation failed"); goto path_init_err; } paths[MSM_SPI_CLK_PATH_SUSPEND_VEC] = (struct msm_bus_vectors) { .src = dd->pdata->master_id, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }; paths[MSM_SPI_CLK_PATH_RESUME_VEC] = (struct msm_bus_vectors) { .src = dd->pdata->master_id, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = MSM_SPI_CLK_PATH_AVRG_BW(dd), .ib = MSM_SPI_CLK_PATH_BRST_BW(dd), }; usecases[MSM_SPI_CLK_PATH_SUSPEND_VEC] = (struct msm_bus_paths) { .num_paths = 1, .vectors = &paths[MSM_SPI_CLK_PATH_SUSPEND_VEC], }; usecases[MSM_SPI_CLK_PATH_RESUME_VEC] = (struct msm_bus_paths) { .num_paths = 1, .vectors = &paths[MSM_SPI_CLK_PATH_RESUME_VEC], }; *dd->clk_path_vote.pdata = (struct msm_bus_scale_pdata) { .active_only = dd->pdata->active_only, .name = dev_name(dd->dev), .num_usecases = 2, .usecase = usecases, }; return 0; path_init_err: devm_kfree(dd->dev, paths); devm_kfree(dd->dev, usecases); devm_kfree(dd->dev, dd->clk_path_vote.pdata); dd->clk_path_vote.pdata = NULL; return -ENOMEM; } /** * msm_spi_clk_path_postponed_register: reg with bus-scaling after it is probed * * @return zero on success * * Workaround: SPI driver may be probed before the bus scaling driver. Calling * msm_bus_scale_register_client() will fail if the bus scaling driver is not * ready yet. Thus, this function should be called not from probe but from a * later context. Also, this function may be called more then once before * register succeed. At this case only one error message will be logged. At boot * time all clocks are on, so earlier SPI transactions should succeed. */ static int msm_spi_clk_path_postponed_register(struct msm_spi *dd) { dd->clk_path_vote.client_hdl = msm_bus_scale_register_client( dd->clk_path_vote.pdata); if (dd->clk_path_vote.client_hdl) { if (dd->clk_path_vote.reg_err) { /* log a success message if an error msg was logged */ dd->clk_path_vote.reg_err = false; dev_info(dd->dev, "msm_bus_scale_register_client(mstr-id:%d " "actv-only:%d):0x%x", dd->pdata->master_id, dd->pdata->active_only, dd->clk_path_vote.client_hdl); } if (dd->pdata->active_only) msm_spi_clk_path_vote(dd); } else { /* guard to log only one error on multiple failure */ if (!dd->clk_path_vote.reg_err) { dd->clk_path_vote.reg_err = true; dev_info(dd->dev, "msm_bus_scale_register_client(mstr-id:%d " "actv-only:%d):0", dd->pdata->master_id, dd->pdata->active_only); } } return dd->clk_path_vote.client_hdl ? 
0 : -EAGAIN; } static void msm_spi_clk_path_init(struct msm_spi *dd) { /* * bail out if path voting is disabled (master_id == 0) or if it is * already registered (client_hdl != 0) */ if (!dd->pdata->master_id || dd->clk_path_vote.client_hdl) return; /* if init failed once, do not try again */ if (!dd->clk_path_vote.pdata && msm_spi_clk_path_init_structs(dd)) { dd->pdata->master_id = 0; return; } /* on failure try again later */ if (msm_spi_clk_path_postponed_register(dd)) return; if (dd->pdata->active_only) msm_spi_clk_path_vote(dd); } static int msm_spi_calculate_size(int *fifo_size, int *block_size, int block, int mult) { int words; switch (block) { case 0: words = 1; /* 4 bytes */ break; case 1: words = 4; /* 16 bytes */ break; case 2: words = 8; /* 32 bytes */ break; default: return -EINVAL; } switch (mult) { case 0: *fifo_size = words * 2; break; case 1: *fifo_size = words * 4; break; case 2: *fifo_size = words * 8; break; case 3: *fifo_size = words * 16; break; default: return -EINVAL; } *block_size = words * sizeof(u32); /* in bytes */ return 0; } static void msm_spi_calculate_fifo_size(struct msm_spi *dd) { u32 spi_iom; int block; int mult; spi_iom = readl_relaxed(dd->base + SPI_IO_MODES); block = (spi_iom & SPI_IO_M_INPUT_BLOCK_SIZE) >> INPUT_BLOCK_SZ_SHIFT; mult = (spi_iom & SPI_IO_M_INPUT_FIFO_SIZE) >> INPUT_FIFO_SZ_SHIFT; if (msm_spi_calculate_size(&dd->input_fifo_size, &dd->input_block_size, block, mult)) { goto fifo_size_err; } block = (spi_iom & SPI_IO_M_OUTPUT_BLOCK_SIZE) >> OUTPUT_BLOCK_SZ_SHIFT; mult = (spi_iom & SPI_IO_M_OUTPUT_FIFO_SIZE) >> OUTPUT_FIFO_SZ_SHIFT; if (msm_spi_calculate_size(&dd->output_fifo_size, &dd->output_block_size, block, mult)) { goto fifo_size_err; } if (dd->qup_ver == SPI_QUP_VERSION_NONE) { /* DM mode is not available for this block size */ if (dd->input_block_size == 4 || dd->output_block_size == 4) dd->use_dma = 0; if (dd->use_dma) { dd->input_burst_size = max(dd->input_block_size, DM_BURST_SIZE); dd->output_burst_size = max(dd->output_block_size, DM_BURST_SIZE); } } return; fifo_size_err: dd->use_dma = 0; pr_err("%s: invalid FIFO size, SPI_IO_MODES=0x%x\n", __func__, spi_iom); return; } static void msm_spi_read_word_from_fifo(struct msm_spi *dd) { u32 data_in; int i; int shift; data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO); if (dd->read_buf) { for (i = 0; (i < dd->bytes_per_word) && dd->rx_bytes_remaining; i++) { /* The data format depends on bytes_per_word: 4 bytes: 0x12345678 3 bytes: 0x00123456 2 bytes: 0x00001234 1 byte : 0x00000012 */ shift = 8 * (dd->bytes_per_word - i - 1); *dd->read_buf++ = (data_in & (0xFF << shift)) >> shift; dd->rx_bytes_remaining--; } } else { if (dd->rx_bytes_remaining >= dd->bytes_per_word) dd->rx_bytes_remaining -= dd->bytes_per_word; else dd->rx_bytes_remaining = 0; } dd->read_xfr_cnt++; if (dd->multi_xfr) { if (!dd->rx_bytes_remaining) dd->read_xfr_cnt = 0; else if ((dd->read_xfr_cnt * dd->bytes_per_word) == dd->read_len) { struct spi_transfer *t = dd->cur_rx_transfer; if (t->transfer_list.next != &dd->cur_msg->transfers) { t = list_entry(t->transfer_list.next, struct spi_transfer, transfer_list); dd->read_buf = t->rx_buf; dd->read_len = t->len; dd->read_xfr_cnt = 0; dd->cur_rx_transfer = t; } } } } static inline bool msm_spi_is_valid_state(struct msm_spi *dd) { u32 spi_op = readl_relaxed(dd->base + SPI_STATE); return spi_op & SPI_OP_STATE_VALID; } static inline void msm_spi_udelay(unsigned int delay_usecs) { /* * For smaller values of delay, context switch time * would negate the usage of usleep */ if
(delay_usecs > 20) usleep_range(delay_usecs, delay_usecs); else if (delay_usecs) udelay(delay_usecs); } static inline int msm_spi_wait_valid(struct msm_spi *dd) { unsigned int delay = 0; unsigned long timeout = 0; if (dd->clock_speed == 0) return -EINVAL; /* * Based on the SPI clock speed, sufficient time * should be given for the SPI state transition * to occur */ delay = (10 * USEC_PER_SEC) / dd->clock_speed; /* * For small delay values, the default timeout would * be one jiffy */ if (delay < SPI_DELAY_THRESHOLD) delay = SPI_DELAY_THRESHOLD; /* Adding one to round off to the nearest jiffy */ timeout = jiffies + msecs_to_jiffies(delay * SPI_DEFAULT_TIMEOUT) + 1; while (!msm_spi_is_valid_state(dd)) { if (time_after(jiffies, timeout)) { if (!msm_spi_is_valid_state(dd)) { if (dd->cur_msg) dd->cur_msg->status = -EIO; dev_err(dd->dev, "%s: SPI operational state not valid\n", __func__); return -ETIMEDOUT; } else return 0; } msm_spi_udelay(delay); } return 0; } static inline int msm_spi_set_state(struct msm_spi *dd, enum msm_spi_state state) { enum msm_spi_state cur_state; if (msm_spi_wait_valid(dd)) return -EIO; cur_state = readl_relaxed(dd->base + SPI_STATE); /* Per spec: For PAUSE_STATE to RESET_STATE, two writes of (10) are required */ if (((cur_state & SPI_OP_STATE) == SPI_OP_STATE_PAUSE) && (state == SPI_OP_STATE_RESET)) { writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE); writel_relaxed(SPI_OP_STATE_CLEAR_BITS, dd->base + SPI_STATE); } else { writel_relaxed((cur_state & ~SPI_OP_STATE) | state, dd->base + SPI_STATE); } if (msm_spi_wait_valid(dd)) return -EIO; return 0; } /** * msm_spi_set_bpw_and_no_io_flags: configure N, and no-input/no-output flags */ static inline void msm_spi_set_bpw_and_no_io_flags(struct msm_spi *dd, u32 *config, int n) { *config &= ~(SPI_NO_INPUT|SPI_NO_OUTPUT); if (n != (*config & SPI_CFG_N)) *config = (*config & ~SPI_CFG_N) | n; if (dd->mode == SPI_BAM_MODE) { if (dd->read_buf == NULL) *config |= SPI_NO_INPUT; if (dd->write_buf == NULL) *config |= SPI_NO_OUTPUT; } } /** * msm_spi_calc_spi_config_loopback_and_input_first: Calculate the values that * should be updated into SPI_CONFIG's LOOPBACK and INPUT_FIRST flags * @return calculated value for SPI_CONFIG */ static u32 msm_spi_calc_spi_config_loopback_and_input_first(u32 spi_config, u8 mode) { if (mode & SPI_LOOP) spi_config |= SPI_CFG_LOOPBACK; else spi_config &= ~SPI_CFG_LOOPBACK; if (mode & SPI_CPHA) spi_config &= ~SPI_CFG_INPUT_FIRST; else spi_config |= SPI_CFG_INPUT_FIRST; return spi_config; } /** * msm_spi_set_spi_config: prepares register SPI_CONFIG to process the * next transfer */ static void msm_spi_set_spi_config(struct msm_spi *dd, int bpw) { u32 spi_config = readl_relaxed(dd->base + SPI_CONFIG); spi_config = msm_spi_calc_spi_config_loopback_and_input_first( spi_config, dd->cur_msg->spi->mode); if (dd->qup_ver == SPI_QUP_VERSION_NONE) /* flags removed from SPI_CONFIG in QUP version-2 */ msm_spi_set_bpw_and_no_io_flags(dd, &spi_config, bpw-1); /* * HS_MODE improves signal stability for spi-clk high rates * but is invalid in LOOPBACK mode. */ if ((dd->clock_speed >= SPI_HS_MIN_RATE) && !(dd->cur_msg->spi->mode & SPI_LOOP)) spi_config |= SPI_CFG_HS_MODE; else spi_config &= ~SPI_CFG_HS_MODE; writel_relaxed(spi_config, dd->base + SPI_CONFIG); } /** * msm_spi_set_mx_counts: set SPI_MX_READ_COUNT and the write count * for FIFO-mode. Set SPI_MX_INPUT_COUNT and SPI_MX_OUTPUT_COUNT for * BAM and DMOV modes. * @n_words The number of reads/writes of size N.
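* In FIFO mode the counts are programmed only when n_words fits in the input * FIFO; larger transfers leave them at zero. In BAM/DMOV modes only the QUP * version-2 input count is set, and only for RX-only transfers.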
*/ static void msm_spi_set_mx_counts(struct msm_spi *dd, u32 n_words) { /* * n_words cannot exceed fifo_size, and only one READ COUNT * interrupt is generated per transaction, so for transactions * larger than fifo size READ COUNT must be disabled. * For those transactions we usually move to Data Mover mode. */ if (dd->mode == SPI_FIFO_MODE) { if (n_words <= dd->input_fifo_size) { writel_relaxed(n_words, dd->base + SPI_MX_READ_COUNT); msm_spi_set_write_count(dd, n_words); } else { writel_relaxed(0, dd->base + SPI_MX_READ_COUNT); msm_spi_set_write_count(dd, 0); } if (dd->qup_ver == SPI_QUP_VERSION_BFAM) { /* must be zero for FIFO */ writel_relaxed(0, dd->base + SPI_MX_INPUT_COUNT); writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT); } } else { /* must be zero for BAM and DMOV */ writel_relaxed(0, dd->base + SPI_MX_READ_COUNT); msm_spi_set_write_count(dd, 0); /* * for DMA transfers, both QUP_MX_INPUT_COUNT and * QUP_MX_OUTPUT_COUNT must be zero to all cases but one. * That case is a non-balanced transfer when there is * only a read_buf. */ if (dd->qup_ver == SPI_QUP_VERSION_BFAM) { if (dd->write_buf) writel_relaxed(0, dd->base + SPI_MX_INPUT_COUNT); else writel_relaxed(n_words, dd->base + SPI_MX_INPUT_COUNT); writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT); } } } static int msm_spi_bam_pipe_disconnect(struct msm_spi *dd, struct msm_spi_bam_pipe *pipe) { int ret = sps_disconnect(pipe->handle); if (ret) { dev_dbg(dd->dev, "%s disconnect bam %s pipe failed\n", __func__, pipe->name); return ret; } return 0; } static int msm_spi_bam_pipe_connect(struct msm_spi *dd, struct msm_spi_bam_pipe *pipe, struct sps_connect *config) { int ret; struct sps_register_event event = { .mode = SPS_TRIGGER_WAIT, .options = SPS_O_EOT, .xfer_done = &dd->transfer_complete, }; ret = sps_connect(pipe->handle, config); if (ret) { dev_err(dd->dev, "%s: sps_connect(%s:0x%p):%d", __func__, pipe->name, pipe->handle, ret); return ret; } ret = sps_register_event(pipe->handle, &event); if (ret) { dev_err(dd->dev, "%s sps_register_event(hndl:0x%p %s):%d", __func__, pipe->handle, pipe->name, ret); msm_spi_bam_pipe_disconnect(dd, pipe); return ret; } pipe->teardown_required = true; return 0; } static void msm_spi_bam_pipe_flush(struct msm_spi *dd, enum msm_spi_pipe_direction pipe_dir) { struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ? (&dd->bam.prod) : (&dd->bam.cons); struct sps_connect config = pipe->config; int ret; ret = msm_spi_bam_pipe_disconnect(dd, pipe); if (ret) return; ret = msm_spi_bam_pipe_connect(dd, pipe, &config); if (ret) return; } static void msm_spi_bam_flush(struct msm_spi *dd) { dev_dbg(dd->dev, "%s flushing bam for recovery\n" , __func__); msm_spi_bam_pipe_flush(dd, SPI_BAM_CONSUMER_PIPE); msm_spi_bam_pipe_flush(dd, SPI_BAM_PRODUCER_PIPE); } static int msm_spi_bam_process_rx(struct msm_spi *dd, u32 *bytes_to_send, u32 desc_cnt) { int ret = 0; u32 data_xfr_size = 0, rem_bc = 0; u32 prod_flags = 0; rem_bc = dd->cur_rx_transfer->len - dd->bam.curr_rx_bytes_recvd; data_xfr_size = (rem_bc < *bytes_to_send) ? rem_bc : *bytes_to_send; /* * set flags for last descriptor only */ if ((desc_cnt == 1) || (*bytes_to_send == data_xfr_size)) prod_flags = (dd->write_buf) ? 
0 : (SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD); /* * enqueue read buffer in BAM */ ret = sps_transfer_one(dd->bam.prod.handle, dd->cur_rx_transfer->rx_dma + dd->bam.curr_rx_bytes_recvd, data_xfr_size, dd, prod_flags); if (ret < 0) { dev_err(dd->dev, "%s: Failed to queue producer BAM transfer", __func__); return ret; } dd->bam.curr_rx_bytes_recvd += data_xfr_size; *bytes_to_send -= data_xfr_size; dd->bam.bam_rx_len -= data_xfr_size; return data_xfr_size; } static int msm_spi_bam_process_tx(struct msm_spi *dd, u32 *bytes_to_send, u32 desc_cnt) { int ret = 0; u32 data_xfr_size = 0, rem_bc = 0; u32 cons_flags = 0; rem_bc = dd->cur_tx_transfer->len - dd->bam.curr_tx_bytes_sent; data_xfr_size = (rem_bc < *bytes_to_send) ? rem_bc : *bytes_to_send; /* * set flags for last descriptor only */ if ((desc_cnt == 1) || (*bytes_to_send == data_xfr_size)) cons_flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD; /* * enqueue write buffer in BAM */ ret = sps_transfer_one(dd->bam.cons.handle, dd->cur_tx_transfer->tx_dma + dd->bam.curr_tx_bytes_sent, data_xfr_size, dd, cons_flags); if (ret < 0) { dev_err(dd->dev, "%s: Failed to queue consumer BAM transfer", __func__); return ret; } dd->bam.curr_tx_bytes_sent += data_xfr_size; *bytes_to_send -= data_xfr_size; dd->bam.bam_tx_len -= data_xfr_size; return data_xfr_size; } /** * msm_spi_bam_begin_transfer: transfer dd->tx_bytes_remaining bytes * using BAM. * @brief BAM can transfer SPI_MAX_TRFR_BTWN_RESETS byte at a single * transfer. Between transfer QUP must change to reset state. A loop is * issuing a single BAM transfer at a time. * @return zero on success */ static int msm_spi_bam_begin_transfer(struct msm_spi *dd) { u32 tx_bytes_to_send = 0, rx_bytes_to_recv = 0; u32 n_words_xfr; s32 ret = 0; u32 prod_desc_cnt = SPI_BAM_MAX_DESC_NUM - 1; u32 cons_desc_cnt = SPI_BAM_MAX_DESC_NUM - 1; u32 byte_count = 0; rx_bytes_to_recv = min_t(u32, dd->bam.bam_rx_len, SPI_MAX_TRFR_BTWN_RESETS); tx_bytes_to_send = min_t(u32, dd->bam.bam_tx_len, SPI_MAX_TRFR_BTWN_RESETS); n_words_xfr = DIV_ROUND_UP(rx_bytes_to_recv, dd->bytes_per_word); msm_spi_set_mx_counts(dd, n_words_xfr); ret = msm_spi_set_state(dd, SPI_OP_STATE_RUN); if (ret < 0) { dev_err(dd->dev, "%s: Failed to set QUP state to run", __func__); goto xfr_err; } while ((rx_bytes_to_recv + tx_bytes_to_send) && ((cons_desc_cnt + prod_desc_cnt) > 0)) { struct spi_transfer *t = NULL, *next; if (dd->read_buf && (prod_desc_cnt > 0)) { ret = msm_spi_bam_process_rx(dd, &rx_bytes_to_recv, prod_desc_cnt); if (ret < 0) goto xfr_err; if (!(dd->cur_rx_transfer->len - dd->bam.curr_rx_bytes_recvd)) t = dd->cur_rx_transfer; prod_desc_cnt--; } if (dd->write_buf && (cons_desc_cnt > 0)) { ret = msm_spi_bam_process_tx(dd, &tx_bytes_to_send, cons_desc_cnt); if (ret < 0) goto xfr_err; if (!(dd->cur_tx_transfer->len - dd->bam.curr_tx_bytes_sent)) t = dd->cur_tx_transfer; cons_desc_cnt--; } if (t && (t->transfer_list.next != &dd->cur_msg->transfers)) { next = list_entry(t->transfer_list.next, struct spi_transfer, transfer_list); dd->read_buf = next->rx_buf; dd->write_buf = next->tx_buf; dd->cur_rx_transfer = next; dd->cur_tx_transfer = next; dd->bam.curr_rx_bytes_recvd = 0; dd->bam.curr_tx_bytes_sent = 0; } byte_count += ret; } dd->tx_bytes_remaining -= min_t(u32, byte_count, SPI_MAX_TRFR_BTWN_RESETS); return 0; xfr_err: return ret; } static int msm_spi_bam_next_transfer(struct msm_spi *dd) { if (dd->mode != SPI_BAM_MODE) return 0; if (dd->tx_bytes_remaining > 0) { init_completion(&dd->transfer_complete); if (msm_spi_set_state(dd, 
SPI_OP_STATE_RESET)) return 0; if ((msm_spi_bam_begin_transfer(dd)) < 0) { dev_err(dd->dev, "%s: BAM transfer setup failed\n", __func__); return 0; } return 1; } return 0; } static int msm_spi_dma_send_next(struct msm_spi *dd) { int ret = 0; if (dd->mode == SPI_BAM_MODE) ret = msm_spi_bam_next_transfer(dd); return ret; } static inline void msm_spi_ack_transfer(struct msm_spi *dd) { writel_relaxed(SPI_OP_MAX_INPUT_DONE_FLAG | SPI_OP_MAX_OUTPUT_DONE_FLAG, dd->base + SPI_OPERATIONAL); /* Ensure done flag was cleared before proceeding further */ mb(); } /* Figure which irq occured and call the relevant functions */ static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id) { u32 op, ret = IRQ_NONE; struct msm_spi *dd = dev_id; if (pm_runtime_suspended(dd->dev)) { dev_warn(dd->dev, "QUP: pm runtime suspend, irq:%d\n", irq); return ret; } if (readl_relaxed(dd->base + SPI_ERROR_FLAGS) || readl_relaxed(dd->base + QUP_ERROR_FLAGS)) { struct spi_master *master = dev_get_drvdata(dd->dev); ret |= msm_spi_error_irq(irq, master); } op = readl_relaxed(dd->base + SPI_OPERATIONAL); if (op & SPI_OP_INPUT_SERVICE_FLAG) { writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG, dd->base + SPI_OPERATIONAL); /* * Ensure service flag was cleared before further * processing of interrupt. */ mb(); ret |= msm_spi_input_irq(irq, dev_id); } if (op & SPI_OP_OUTPUT_SERVICE_FLAG) { writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG, dd->base + SPI_OPERATIONAL); /* * Ensure service flag was cleared before further * processing of interrupt. */ mb(); ret |= msm_spi_output_irq(irq, dev_id); } if (dd->done) { complete(&dd->transfer_complete); dd->done = 0; } return ret; } static irqreturn_t msm_spi_input_irq(int irq, void *dev_id) { struct msm_spi *dd = dev_id; dd->stat_rx++; if (dd->mode == SPI_MODE_NONE) return IRQ_HANDLED; if (dd->mode == SPI_FIFO_MODE) { while ((readl_relaxed(dd->base + SPI_OPERATIONAL) & SPI_OP_IP_FIFO_NOT_EMPTY) && (dd->rx_bytes_remaining > 0)) { msm_spi_read_word_from_fifo(dd); } if (dd->rx_bytes_remaining == 0) msm_spi_complete(dd); } return IRQ_HANDLED; } static void msm_spi_write_word_to_fifo(struct msm_spi *dd) { u32 word; u8 byte; int i; word = 0; if (dd->write_buf) { for (i = 0; (i < dd->bytes_per_word) && dd->tx_bytes_remaining; i++) { dd->tx_bytes_remaining--; byte = *dd->write_buf++; word |= (byte << (BITS_PER_BYTE * (3 - i))); } } else if (dd->tx_bytes_remaining > dd->bytes_per_word) dd->tx_bytes_remaining -= dd->bytes_per_word; else dd->tx_bytes_remaining = 0; dd->write_xfr_cnt++; if (dd->multi_xfr) { if (!dd->tx_bytes_remaining) dd->write_xfr_cnt = 0; else if ((dd->write_xfr_cnt * dd->bytes_per_word) == dd->write_len) { struct spi_transfer *t = dd->cur_tx_transfer; if (t->transfer_list.next != &dd->cur_msg->transfers) { t = list_entry(t->transfer_list.next, struct spi_transfer, transfer_list); dd->write_buf = t->tx_buf; dd->write_len = t->len; dd->write_xfr_cnt = 0; dd->cur_tx_transfer = t; } } } writel_relaxed(word, dd->base + SPI_OUTPUT_FIFO); } static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd) { int count = 0; while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) && !(readl_relaxed(dd->base + SPI_OPERATIONAL) & SPI_OP_OUTPUT_FIFO_FULL)) { msm_spi_write_word_to_fifo(dd); count++; } } static irqreturn_t msm_spi_output_irq(int irq, void *dev_id) { struct msm_spi *dd = dev_id; dd->stat_tx++; if (dd->mode == SPI_MODE_NONE) return IRQ_HANDLED; /* Output FIFO is empty. Transmit any outstanding write data. 
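Only FIFO mode is refilled from this interrupt handler; BAM transfers are serviced through the BAM pipes instead.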
*/ if (dd->mode == SPI_FIFO_MODE) msm_spi_write_rmn_to_fifo(dd); return IRQ_HANDLED; } static irqreturn_t msm_spi_error_irq(int irq, void *dev_id) { struct spi_master *master = dev_id; struct msm_spi *dd = spi_master_get_devdata(master); u32 spi_err; spi_err = readl_relaxed(dd->base + SPI_ERROR_FLAGS); if (spi_err & SPI_ERR_OUTPUT_OVER_RUN_ERR) dev_warn(master->dev.parent, "SPI output overrun error\n"); if (spi_err & SPI_ERR_INPUT_UNDER_RUN_ERR) dev_warn(master->dev.parent, "SPI input underrun error\n"); if (spi_err & SPI_ERR_OUTPUT_UNDER_RUN_ERR) dev_warn(master->dev.parent, "SPI output underrun error\n"); msm_spi_get_clk_err(dd, &spi_err); if (spi_err & SPI_ERR_CLK_OVER_RUN_ERR) dev_warn(master->dev.parent, "SPI clock overrun error\n"); if (spi_err & SPI_ERR_CLK_UNDER_RUN_ERR) dev_warn(master->dev.parent, "SPI clock underrun error\n"); msm_spi_clear_error_flags(dd); msm_spi_ack_clk_err(dd); /* Ensure clearing of QUP_ERROR_FLAGS was completed */ mb(); return IRQ_HANDLED; } static int msm_spi_bam_map_buffers(struct msm_spi *dd) { int ret = -EINVAL; struct device *dev; struct spi_transfer *first_xfr; struct spi_transfer *nxt_xfr; void *tx_buf, *rx_buf; u32 tx_len, rx_len; int num_xfrs_grped = dd->num_xfrs_grped; dev = dd->dev; first_xfr = dd->cur_transfer; do { tx_buf = (void *)first_xfr->tx_buf; rx_buf = first_xfr->rx_buf; tx_len = rx_len = first_xfr->len; if (tx_buf != NULL) { first_xfr->tx_dma = dma_map_single(dev, tx_buf, tx_len, DMA_TO_DEVICE); if (dma_mapping_error(dev, first_xfr->tx_dma)) { ret = -ENOMEM; goto error; } } if (rx_buf != NULL) { first_xfr->rx_dma = dma_map_single(dev, rx_buf, rx_len, DMA_FROM_DEVICE); if (dma_mapping_error(dev, first_xfr->rx_dma)) { if (tx_buf != NULL) dma_unmap_single(dev, first_xfr->tx_dma, tx_len, DMA_TO_DEVICE); ret = -ENOMEM; goto error; } } nxt_xfr = list_entry(first_xfr->transfer_list.next, struct spi_transfer, transfer_list); if (nxt_xfr == NULL) break; num_xfrs_grped--; first_xfr = nxt_xfr; } while (num_xfrs_grped > 0); return 0; error: msm_spi_dma_unmap_buffers(dd); return ret; } static int msm_spi_dma_map_buffers(struct msm_spi *dd) { int ret = 0; if (dd->mode == SPI_BAM_MODE) ret = msm_spi_bam_map_buffers(dd); return ret; } static void msm_spi_bam_unmap_buffers(struct msm_spi *dd) { struct device *dev; int num_xfrs_grped = dd->num_xfrs_grped; struct spi_transfer *first_xfr; struct spi_transfer *nxt_xfr; void *tx_buf, *rx_buf; u32 tx_len, rx_len; dev = &dd->cur_msg->spi->dev; first_xfr = dd->cur_transfer; /* mapped by client */ if (dd->cur_msg->is_dma_mapped) return; do { tx_buf = (void *)first_xfr->tx_buf; rx_buf = first_xfr->rx_buf; tx_len = rx_len = first_xfr->len; if (tx_buf != NULL) dma_unmap_single(dev, first_xfr->tx_dma, tx_len, DMA_TO_DEVICE); if (rx_buf != NULL) dma_unmap_single(dev, first_xfr->rx_dma, rx_len, DMA_FROM_DEVICE); nxt_xfr = list_entry(first_xfr->transfer_list.next, struct spi_transfer, transfer_list); if (nxt_xfr == NULL) break; num_xfrs_grped--; first_xfr = nxt_xfr; } while (num_xfrs_grped > 0); } static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd) { if (dd->mode == SPI_BAM_MODE) msm_spi_bam_unmap_buffers(dd); } /** * msm_spi_use_dma - decides whether to use Data-Mover or BAM for * the given transfer * @dd: device * @tr: transfer * * Start using DMA if: * 1. Is supported by HW * 2. Is not disabled by platform data * 3. Transfer size is greater than 3*block size. * 4. Buffers are aligned to cache line. * 5. Bytes-per-word is 8,16 or 32.
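* Returns true when all of the applicable checks pass; otherwise the caller * falls back to FIFO mode.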
*/ static inline bool msm_spi_use_dma(struct msm_spi *dd, struct spi_transfer *tr, u8 bpw) { if (!dd->use_dma) return false; /* check constraints from platform data */ if ((dd->qup_ver == SPI_QUP_VERSION_BFAM) && !dd->pdata->use_bam) return false; if (dd->cur_msg_len < 3*dd->input_block_size) return false; if ((dd->qup_ver != SPI_QUP_VERSION_BFAM) && dd->multi_xfr && !dd->read_len && !dd->write_len) return false; if (dd->qup_ver == SPI_QUP_VERSION_NONE) { u32 cache_line = dma_get_cache_alignment(); if (tr->tx_buf) { if (!IS_ALIGNED((size_t)tr->tx_buf, cache_line)) return 0; } if (tr->rx_buf) { if (!IS_ALIGNED((size_t)tr->rx_buf, cache_line)) return false; } if (tr->cs_change && ((bpw != 8) && (bpw != 16) && (bpw != 32))) return false; } return true; } /** * msm_spi_set_transfer_mode: Chooses optimal transfer mode. Sets dd->mode and * prepares to process a transfer. */ static void msm_spi_set_transfer_mode(struct msm_spi *dd, u8 bpw, u32 read_count) { if (msm_spi_use_dma(dd, dd->cur_transfer, bpw)) { dd->mode = SPI_BAM_MODE; } else { dd->mode = SPI_FIFO_MODE; if (dd->multi_xfr) { dd->read_len = dd->cur_transfer->len; dd->write_len = dd->cur_transfer->len; } } } /** * msm_spi_set_qup_io_modes: prepares register QUP_IO_MODES to process a * transfer */ static void msm_spi_set_qup_io_modes(struct msm_spi *dd) { u32 spi_iom; spi_iom = readl_relaxed(dd->base + SPI_IO_MODES); /* Set input and output transfer mode: FIFO, DMOV, or BAM */ spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE); spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT)); spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT)); /* Turn on packing for data mover */ if (dd->mode == SPI_BAM_MODE) spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN; else spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN); /*if (dd->mode == SPI_BAM_MODE) { spi_iom |= SPI_IO_C_NO_TRI_STATE; spi_iom &= ~(SPI_IO_C_CS_SELECT | SPI_IO_C_CS_N_POLARITY); }*/ writel_relaxed(spi_iom, dd->base + SPI_IO_MODES); } static u32 msm_spi_calc_spi_ioc_clk_polarity(u32 spi_ioc, u8 mode) { if (mode & SPI_CPOL) spi_ioc |= SPI_IO_C_CLK_IDLE_HIGH; else spi_ioc &= ~SPI_IO_C_CLK_IDLE_HIGH; return spi_ioc; } /** * msm_spi_set_spi_io_control: prepares register SPI_IO_CONTROL to process the * next transfer * @return the new set value of SPI_IO_CONTROL */ static u32 msm_spi_set_spi_io_control(struct msm_spi *dd) { u32 spi_ioc, spi_ioc_orig, chip_select; spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL); spi_ioc_orig = spi_ioc; spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc , dd->cur_msg->spi->mode); /* Set chip-select */ chip_select = dd->cur_msg->spi->chip_select << 2; if ((spi_ioc & SPI_IO_C_CS_SELECT) != chip_select) spi_ioc = (spi_ioc & ~SPI_IO_C_CS_SELECT) | chip_select; if (!dd->cur_transfer->cs_change) spi_ioc |= SPI_IO_C_MX_CS_MODE; if (spi_ioc != spi_ioc_orig) writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL); /* * Ensure that the IO control mode register gets written * before proceeding with the transfer. */ mb(); return spi_ioc; } /** * msm_spi_set_qup_op_mask: prepares register QUP_OPERATIONAL_MASK to process * the next transfer */ static void msm_spi_set_qup_op_mask(struct msm_spi *dd) { /* mask INPUT and OUTPUT service flags in to prevent IRQs on FIFO status * change in BAM mode */ u32 mask = (dd->mode == SPI_BAM_MODE) ? 
QUP_OP_MASK_OUTPUT_SERVICE_FLAG | QUP_OP_MASK_INPUT_SERVICE_FLAG : 0; writel_relaxed(mask, dd->base + QUP_OPERATIONAL_MASK); } static void msm_spi_process_transfer(struct msm_spi *dd) { u8 bpw; u32 max_speed; u32 read_count; u32 timeout; u32 spi_ioc; u32 int_loopback = 0; int ret; dd->tx_bytes_remaining = dd->cur_msg_len; dd->rx_bytes_remaining = dd->cur_msg_len; dd->read_buf = dd->cur_transfer->rx_buf; dd->write_buf = dd->cur_transfer->tx_buf; init_completion(&dd->transfer_complete); if (dd->cur_transfer->bits_per_word) bpw = dd->cur_transfer->bits_per_word; else if (dd->cur_msg->spi->bits_per_word) bpw = dd->cur_msg->spi->bits_per_word; else bpw = 8; dd->bytes_per_word = (bpw + 7) / 8; if (dd->cur_transfer->speed_hz) max_speed = dd->cur_transfer->speed_hz; else max_speed = dd->cur_msg->spi->max_speed_hz; if (!dd->clock_speed || max_speed != dd->clock_speed) msm_spi_clock_set(dd, max_speed); timeout = 100 * msecs_to_jiffies( DIV_ROUND_UP(dd->cur_msg_len * 8, DIV_ROUND_UP(max_speed, MSEC_PER_SEC))); read_count = DIV_ROUND_UP(dd->cur_msg_len, dd->bytes_per_word); if (dd->cur_msg->spi->mode & SPI_LOOP) int_loopback = 1; if (msm_spi_set_state(dd, SPI_OP_STATE_RESET)) dev_err(dd->dev, "%s: Error setting QUP to reset-state", __func__); msm_spi_set_transfer_mode(dd, bpw, read_count); msm_spi_set_mx_counts(dd, read_count); if (dd->mode == SPI_BAM_MODE) { ret = msm_spi_dma_map_buffers(dd); if (ret < 0) { pr_err("Mapping DMA buffers\n"); dd->cur_msg->status = ret; return; } } msm_spi_set_qup_io_modes(dd); msm_spi_set_spi_config(dd, bpw); msm_spi_set_qup_config(dd, bpw); spi_ioc = msm_spi_set_spi_io_control(dd); msm_spi_set_qup_op_mask(dd); /* The output fifo interrupt handler will handle all writes after the first. Restricting this to one write avoids contention issues and race conditions between this thread and the int handler */ if (dd->mode == SPI_FIFO_MODE) { if (msm_spi_prepare_for_write(dd)) goto transfer_end; msm_spi_start_write(dd, read_count); } else if (dd->mode == SPI_BAM_MODE) { if ((msm_spi_bam_begin_transfer(dd)) < 0) { dev_err(dd->dev, "%s: BAM transfer setup failed\n", __func__); dd->cur_msg->status = -EIO; goto transfer_end; } } /* * On BAM mode, current state here is run. * Only enter the RUN state after the first word is written into * the output FIFO. Otherwise, the output FIFO EMPTY interrupt * might fire before the first word is written resulting in a * possible race condition. */ if (dd->mode != SPI_BAM_MODE) if (msm_spi_set_state(dd, SPI_OP_STATE_RUN)) { dev_warn(dd->dev, "%s: Failed to set QUP to run-state. 
Mode:%d", __func__, dd->mode); goto transfer_end; } /* Assume success, this might change later upon transaction result */ dd->cur_msg->status = 0; do { if (!wait_for_completion_timeout(&dd->transfer_complete, timeout)) { dev_err(dd->dev, "%s: SPI transaction timeout\n", __func__); dd->cur_msg->status = -EIO; break; } } while (msm_spi_dma_send_next(dd)); msm_spi_udelay(dd->xfrs_delay_usec); transfer_end: if (dd->mode == SPI_BAM_MODE) msm_spi_bam_flush(dd); msm_spi_dma_unmap_buffers(dd); dd->mode = SPI_MODE_NONE; msm_spi_set_state(dd, SPI_OP_STATE_RESET); writel_relaxed(spi_ioc & ~SPI_IO_C_MX_CS_MODE, dd->base + SPI_IO_CONTROL); } static void get_transfer_length(struct msm_spi *dd) { struct spi_transfer *tr; int num_xfrs = 0; int readlen = 0; int writelen = 0; dd->cur_msg_len = 0; dd->multi_xfr = 0; dd->read_len = dd->write_len = 0; list_for_each_entry(tr, &dd->cur_msg->transfers, transfer_list) { if (tr->tx_buf) writelen += tr->len; if (tr->rx_buf) readlen += tr->len; dd->cur_msg_len += tr->len; num_xfrs++; } if (num_xfrs == 2) { struct spi_transfer *first_xfr = dd->cur_transfer; dd->multi_xfr = 1; tr = list_entry(first_xfr->transfer_list.next, struct spi_transfer, transfer_list); /* * We update dd->read_len and dd->write_len only * for WR-WR and WR-RD transfers. */ if ((first_xfr->tx_buf) && (!first_xfr->rx_buf)) { if (((tr->tx_buf) && (!tr->rx_buf)) || ((!tr->tx_buf) && (tr->rx_buf))) { dd->read_len = readlen; dd->write_len = writelen; } } } else if (num_xfrs > 1) dd->multi_xfr = 1; } static inline void write_force_cs(struct msm_spi *dd, bool set_flag) { u32 spi_ioc; u32 spi_ioc_orig; spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL); spi_ioc_orig = spi_ioc; if (set_flag) spi_ioc |= SPI_IO_C_FORCE_CS; else spi_ioc &= ~SPI_IO_C_FORCE_CS; if (spi_ioc != spi_ioc_orig) writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL); } static inline int combine_transfers(struct msm_spi *dd) { int xfrs_grped = 1; dd->xfrs_delay_usec = 0; dd->bam.bam_rx_len = dd->bam.bam_tx_len = 0; dd->cur_msg_len = dd->cur_transfer->len; if (dd->cur_transfer->tx_buf) dd->bam.bam_tx_len += dd->cur_transfer->len; if (dd->cur_transfer->rx_buf) dd->bam.bam_rx_len += dd->cur_transfer->len; dd->xfrs_delay_usec = dd->cur_transfer->delay_usecs; return xfrs_grped; } static void msm_spi_process_message(struct msm_spi *dd) { int xfrs_grped = 0; int rc; u32 spi_ioc; dd->num_xfrs_grped = 0; dd->bam.curr_rx_bytes_recvd = dd->bam.curr_tx_bytes_sent = 0; dd->write_xfr_cnt = dd->read_xfr_cnt = 0; rc = msm_spi_request_cs_gpio(dd); if (rc) return; dd->cur_transfer = list_first_entry(&dd->cur_msg->transfers, struct spi_transfer, transfer_list); get_transfer_length(dd); spi_ioc = msm_spi_set_spi_io_control(dd); if (dd->qup_ver || (dd->multi_xfr && !dd->read_len && !dd->write_len)) { if (dd->qup_ver) write_force_cs(dd, 0); /* * Handling of multi-transfers. 
* FIFO mode is used by default */ list_for_each_entry(dd->cur_transfer, &dd->cur_msg->transfers, transfer_list) { if (!dd->cur_transfer->len) goto error; if (xfrs_grped) { xfrs_grped--; continue; } else { dd->read_len = dd->write_len = 0; xfrs_grped = combine_transfers(dd); dd->num_xfrs_grped = xfrs_grped; if (dd->qup_ver) write_force_cs(dd, 1); } dd->cur_tx_transfer = dd->cur_transfer; dd->cur_rx_transfer = dd->cur_transfer; msm_spi_process_transfer(dd); if (dd->qup_ver && dd->cur_transfer->cs_change) write_force_cs(dd, 0); xfrs_grped--; } } else { /* Handling of a single transfer or * WR-WR or WR-RD transfers */ if ((!dd->cur_msg->is_dma_mapped) && (msm_spi_use_dma(dd, dd->cur_transfer, dd->cur_transfer->bits_per_word))) { /* Mapping of DMA buffers */ int ret = msm_spi_dma_map_buffers(dd); if (ret < 0) { dd->cur_msg->status = ret; goto error; } } dd->cur_tx_transfer = dd->cur_transfer; dd->cur_rx_transfer = dd->cur_transfer; dd->num_xfrs_grped = 1; msm_spi_process_transfer(dd); } if (dd->qup_ver) write_force_cs(dd, 0); return; error: msm_spi_free_cs_gpio(dd); } static void reset_core(struct msm_spi *dd) { u32 spi_ioc; msm_spi_register_init(dd); /* * The SPI core generates a bogus input overrun error on some targets, * when a transition from run to reset state occurs and if the FIFO has * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN * bit. */ msm_spi_enable_error_flags(dd); spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL); spi_ioc |= SPI_IO_C_NO_TRI_STATE; writel_relaxed(spi_ioc , dd->base + SPI_IO_CONTROL); /* * Ensure that the IO control is written to before returning. */ mb(); msm_spi_set_state(dd, SPI_OP_STATE_RESET); } static void put_local_resources(struct msm_spi *dd) { if (IS_ERR_OR_NULL(dd->clk) || IS_ERR_OR_NULL(dd->pclk)) { dev_err(dd->dev, "%s: error clk put\n", __func__); return; } msm_spi_disable_irqs(dd); clk_disable_unprepare(dd->clk); clk_disable_unprepare(dd->pclk); /* Free the spi clk, miso, mosi, cs gpio */ if (dd->pdata && dd->pdata->gpio_release) dd->pdata->gpio_release(); msm_spi_free_gpios(dd); } static int get_local_resources(struct msm_spi *dd) { int ret = -EINVAL; if (IS_ERR_OR_NULL(dd->clk) || IS_ERR_OR_NULL(dd->pclk)) { dev_err(dd->dev, "%s: error clk put\n", __func__); return ret; } /* Configure the spi clk, miso, mosi and cs gpio */ if (dd->pdata->gpio_config) { ret = dd->pdata->gpio_config(); if (ret) { dev_err(dd->dev, "%s: error configuring GPIOs\n", __func__); return ret; } } ret = msm_spi_request_gpios(dd); if (ret) return ret; ret = clk_prepare_enable(dd->clk); if (ret) goto clk0_err; ret = clk_prepare_enable(dd->pclk); if (ret) goto clk1_err; msm_spi_enable_irqs(dd); return 0; clk1_err: clk_disable_unprepare(dd->clk); clk0_err: msm_spi_free_gpios(dd); return ret; } /** * msm_spi_transfer_one_message: To process one spi message at a time * @master: spi master controller reference * @msg: one multi-segment SPI transaction * @return zero on success or negative error value * */ static int msm_spi_transfer_one_message(struct spi_master *master, struct spi_message *msg) { struct msm_spi *dd; struct spi_transfer *tr; unsigned long flags; u32 status_error = 0; dd = spi_master_get_devdata(master); if (list_empty(&msg->transfers) || !msg->complete) return -EINVAL; list_for_each_entry(tr, &msg->transfers, transfer_list) { /* Check message parameters */ if (tr->speed_hz > dd->pdata->max_clock_speed || (tr->bits_per_word && (tr->bits_per_word < 4 || tr->bits_per_word > 32)) || (tr->tx_buf == NULL && tr->rx_buf == NULL)) { dev_err(dd->dev, 
"Invalid transfer: %d Hz, %d bpw tx=%p, rx=%p\n", tr->speed_hz, tr->bits_per_word, tr->tx_buf, tr->rx_buf); status_error = -EINVAL; msg->status = status_error; spi_finalize_current_message(master); return 0; } } mutex_lock(&dd->core_lock); spin_lock_irqsave(&dd->queue_lock, flags); dd->transfer_pending = 1; dd->cur_msg = msg; spin_unlock_irqrestore(&dd->queue_lock, flags); /* * get local resources for each transfer to ensure we're in a good * state and not interfering with other EE's using this device */ if (dd->pdata->is_shared) { if (get_local_resources(dd)) { mutex_unlock(&dd->core_lock); spi_finalize_current_message(master); return -EINVAL; } reset_core(dd); if (dd->use_dma) { msm_spi_bam_pipe_connect(dd, &dd->bam.prod, &dd->bam.prod.config); msm_spi_bam_pipe_connect(dd, &dd->bam.cons, &dd->bam.cons.config); } } if (dd->suspended || !msm_spi_is_valid_state(dd)) { dev_err(dd->dev, "%s: SPI operational state not valid\n", __func__); status_error = 1; } if (status_error) dd->cur_msg->status = -EIO; else msm_spi_process_message(dd); spin_lock_irqsave(&dd->queue_lock, flags); dd->transfer_pending = 0; spin_unlock_irqrestore(&dd->queue_lock, flags); /* * Put local resources prior to calling finalize to ensure the hw * is in a known state before notifying the calling thread (which is a * different context since we're running in the spi kthread here) to * prevent race conditions between us and any other EE's using this hw. */ if (dd->pdata->is_shared) { if (dd->use_dma) { msm_spi_bam_pipe_disconnect(dd, &dd->bam.prod); msm_spi_bam_pipe_disconnect(dd, &dd->bam.cons); } put_local_resources(dd); } mutex_unlock(&dd->core_lock); if (dd->suspended) wake_up_interruptible(&dd->continue_suspend); status_error = dd->cur_msg->status; spi_finalize_current_message(master); return status_error; } static int msm_spi_prepare_transfer_hardware(struct spi_master *master) { struct msm_spi *dd = spi_master_get_devdata(master); int resume_state = 0; resume_state = pm_runtime_get_sync(dd->dev); if (resume_state < 0) goto spi_finalize; /* * Counter-part of system-suspend when runtime-pm is not enabled. * This way, resume can be left empty and device will be put in * active mode only if client requests anything on the bus */ if (!pm_runtime_enabled(dd->dev)) resume_state = msm_spi_pm_resume_runtime(dd->dev); if (resume_state < 0) goto spi_finalize; if (dd->suspended) { resume_state = -EBUSY; goto spi_finalize; } return 0; spi_finalize: spi_finalize_current_message(master); return resume_state; } static int msm_spi_unprepare_transfer_hardware(struct spi_master *master) { struct msm_spi *dd = spi_master_get_devdata(master); pm_runtime_mark_last_busy(dd->dev); pm_runtime_put_autosuspend(dd->dev); return 0; } static int msm_spi_setup(struct spi_device *spi) { struct msm_spi *dd; int rc = 0; u32 spi_ioc; u32 spi_config; u32 mask; if (spi->bits_per_word < 4 || spi->bits_per_word > 32) { dev_err(&spi->dev, "%s: invalid bits_per_word %d\n", __func__, spi->bits_per_word); return -EINVAL; } if (spi->chip_select > SPI_NUM_CHIPSELECTS-1) { dev_err(&spi->dev, "%s, chip select %d exceeds max value %d\n", __func__, spi->chip_select, SPI_NUM_CHIPSELECTS - 1); return -EINVAL; } dd = spi_master_get_devdata(spi->master); rc = pm_runtime_get_sync(dd->dev); if (rc < 0 && !dd->is_init_complete && pm_runtime_enabled(dd->dev)) { pm_runtime_set_suspended(dd->dev); pm_runtime_put_sync(dd->dev); rc = 0; goto err_setup_exit; } else rc = 0; mutex_lock(&dd->core_lock); /* Counter-part of system-suspend when runtime-pm is not enabled. 
*/ if (!pm_runtime_enabled(dd->dev)) { rc = msm_spi_pm_resume_runtime(dd->dev); if (rc < 0 && !dd->is_init_complete) { rc = 0; mutex_unlock(&dd->core_lock); goto err_setup_exit; } } if (dd->suspended) { rc = -EBUSY; mutex_unlock(&dd->core_lock); goto err_setup_exit; } if (dd->pdata->is_shared) { rc = get_local_resources(dd); if (rc) goto no_resources; } spi_ioc = readl_relaxed(dd->base + SPI_IO_CONTROL); mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select; if (spi->mode & SPI_CS_HIGH) spi_ioc |= mask; else spi_ioc &= ~mask; spi_ioc = msm_spi_calc_spi_ioc_clk_polarity(spi_ioc, spi->mode); writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL); spi_config = readl_relaxed(dd->base + SPI_CONFIG); spi_config = msm_spi_calc_spi_config_loopback_and_input_first( spi_config, spi->mode); writel_relaxed(spi_config, dd->base + SPI_CONFIG); /* Ensure previous write completed before disabling the clocks */ mb(); if (dd->pdata->is_shared) put_local_resources(dd); /* Counter-part of system-resume when runtime-pm is not enabled. */ if (!pm_runtime_enabled(dd->dev)) msm_spi_pm_suspend_runtime(dd->dev); no_resources: mutex_unlock(&dd->core_lock); pm_runtime_mark_last_busy(dd->dev); pm_runtime_put_autosuspend(dd->dev); err_setup_exit: return rc; } #ifdef CONFIG_DEBUG_FS static int debugfs_iomem_x32_set(void *data, u64 val) { struct msm_spi_regs *debugfs_spi_regs = (struct msm_spi_regs *)data; struct msm_spi *dd = debugfs_spi_regs->dd; int ret; ret = pm_runtime_get_sync(dd->dev); if (ret < 0) return ret; writel_relaxed(val, (dd->base + debugfs_spi_regs->offset)); /* Ensure the previous write completed. */ mb(); pm_runtime_mark_last_busy(dd->dev); pm_runtime_put_autosuspend(dd->dev); return 0; } static int debugfs_iomem_x32_get(void *data, u64 *val) { struct msm_spi_regs *debugfs_spi_regs = (struct msm_spi_regs *)data; struct msm_spi *dd = debugfs_spi_regs->dd; int ret; ret = pm_runtime_get_sync(dd->dev); if (ret < 0) return ret; *val = readl_relaxed(dd->base + debugfs_spi_regs->offset); /* Ensure the previous read completed. */ mb(); pm_runtime_mark_last_busy(dd->dev); pm_runtime_put_autosuspend(dd->dev); return 0; } DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get, debugfs_iomem_x32_set, "0x%08llx\n"); static void spi_debugfs_init(struct msm_spi *dd) { dd->dent_spi = debugfs_create_dir(dev_name(dd->dev), NULL); if (dd->dent_spi) { int i; for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) { debugfs_spi_regs[i].dd = dd; dd->debugfs_spi_regs[i] = debugfs_create_file( debugfs_spi_regs[i].name, debugfs_spi_regs[i].mode, dd->dent_spi, debugfs_spi_regs+i, &fops_iomem_x32); } } } static void spi_debugfs_exit(struct msm_spi *dd) { if (dd->dent_spi) { int i; debugfs_remove_recursive(dd->dent_spi); dd->dent_spi = NULL; for (i = 0; i < ARRAY_SIZE(debugfs_spi_regs); i++) dd->debugfs_spi_regs[i] = NULL; } } #else static void spi_debugfs_init(struct msm_spi *dd) {} static void spi_debugfs_exit(struct msm_spi *dd) {} #endif /* ===Device attributes begin=== */ static ssize_t show_stats(struct device *dev, struct device_attribute *attr, char *buf) { struct spi_master *master = dev_get_drvdata(dev); struct msm_spi *dd = spi_master_get_devdata(master); return snprintf(buf, PAGE_SIZE, "Device %s\n" "rx fifo_size = %d spi words\n" "tx fifo_size = %d spi words\n" "use_dma ? 
%s\n" "rx block size = %d bytes\n" "tx block size = %d bytes\n" "input burst size = %d bytes\n" "output burst size = %d bytes\n" "DMA configuration:\n" "tx_ch=%d, rx_ch=%d, tx_crci= %d, rx_crci=%d\n" "--statistics--\n" "Rx isrs = %d\n" "Tx isrs = %d\n" "--debug--\n" "NA yet\n", dev_name(dev), dd->input_fifo_size, dd->output_fifo_size, dd->use_dma ? "yes" : "no", dd->input_block_size, dd->output_block_size, dd->input_burst_size, dd->output_burst_size, dd->tx_dma_chan, dd->rx_dma_chan, dd->tx_dma_crci, dd->rx_dma_crci, dd->stat_rx, dd->stat_tx ); } /* Reset statistics on write */ static ssize_t set_stats(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct msm_spi *dd = dev_get_drvdata(dev); dd->stat_rx = 0; dd->stat_tx = 0; return count; } static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats); static struct attribute *dev_attrs[] = { &dev_attr_stats.attr, NULL, }; static struct attribute_group dev_attr_grp = { .attrs = dev_attrs, }; /* ===Device attributes end=== */ static void msm_spi_bam_pipe_teardown(struct msm_spi *dd, enum msm_spi_pipe_direction pipe_dir) { struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ? (&dd->bam.prod) : (&dd->bam.cons); if (!pipe->teardown_required) return; msm_spi_bam_pipe_disconnect(dd, pipe); dma_free_coherent(dd->dev, pipe->config.desc.size, pipe->config.desc.base, pipe->config.desc.phys_base); sps_free_endpoint(pipe->handle); pipe->handle = 0; pipe->teardown_required = false; } static int msm_spi_bam_pipe_init(struct msm_spi *dd, enum msm_spi_pipe_direction pipe_dir) { int rc = 0; struct sps_pipe *pipe_handle; struct msm_spi_bam_pipe *pipe = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ? (&dd->bam.prod) : (&dd->bam.cons); struct sps_connect *pipe_conf = &pipe->config; pipe->name = (pipe_dir == SPI_BAM_CONSUMER_PIPE) ? 
"cons" : "prod"; pipe->handle = 0; pipe_handle = sps_alloc_endpoint(); if (!pipe_handle) { dev_err(dd->dev, "%s: Failed to allocate BAM endpoint\n" , __func__); return -ENOMEM; } memset(pipe_conf, 0, sizeof(*pipe_conf)); rc = sps_get_config(pipe_handle, pipe_conf); if (rc) { dev_err(dd->dev, "%s: Failed to get BAM pipe config\n" , __func__); goto config_err; } if (pipe_dir == SPI_BAM_CONSUMER_PIPE) { pipe_conf->source = dd->bam.handle; pipe_conf->destination = SPS_DEV_HANDLE_MEM; pipe_conf->mode = SPS_MODE_SRC; pipe_conf->src_pipe_index = dd->pdata->bam_producer_pipe_index; pipe_conf->dest_pipe_index = 0; } else { pipe_conf->source = SPS_DEV_HANDLE_MEM; pipe_conf->destination = dd->bam.handle; pipe_conf->mode = SPS_MODE_DEST; pipe_conf->src_pipe_index = 0; pipe_conf->dest_pipe_index = dd->pdata->bam_consumer_pipe_index; } pipe_conf->options = SPS_O_EOT | SPS_O_AUTO_ENABLE; pipe_conf->desc.size = SPI_BAM_MAX_DESC_NUM * sizeof(struct sps_iovec); pipe_conf->desc.base = dma_alloc_coherent(dd->dev, pipe_conf->desc.size, &pipe_conf->desc.phys_base, GFP_KERNEL); if (!pipe_conf->desc.base) { dev_err(dd->dev, "%s: Failed allocate BAM pipe memory" , __func__); rc = -ENOMEM; goto config_err; } /* zero descriptor FIFO for convenient debugging of first descs */ memset(pipe_conf->desc.base, 0x00, pipe_conf->desc.size); pipe->handle = pipe_handle; return 0; config_err: sps_free_endpoint(pipe_handle); return rc; } static void msm_spi_bam_teardown(struct msm_spi *dd) { msm_spi_bam_pipe_teardown(dd, SPI_BAM_PRODUCER_PIPE); msm_spi_bam_pipe_teardown(dd, SPI_BAM_CONSUMER_PIPE); if (dd->bam.deregister_required) { sps_deregister_bam_device(dd->bam.handle); dd->bam.deregister_required = false; } } static int msm_spi_bam_init(struct msm_spi *dd) { struct sps_bam_props bam_props = {0}; uintptr_t bam_handle; int rc = 0; rc = sps_phy2h(dd->bam.phys_addr, &bam_handle); if (rc || !bam_handle) { bam_props.phys_addr = dd->bam.phys_addr; bam_props.virt_addr = dd->bam.base; bam_props.irq = dd->bam.irq; bam_props.manage = SPS_BAM_MGR_DEVICE_REMOTE; bam_props.summing_threshold = 0x10; rc = sps_register_bam_device(&bam_props, &bam_handle); if (rc) { dev_err(dd->dev, "%s: Failed to register BAM device", __func__); return rc; } dd->bam.deregister_required = true; } dd->bam.handle = bam_handle; rc = msm_spi_bam_pipe_init(dd, SPI_BAM_PRODUCER_PIPE); if (rc) { dev_err(dd->dev, "%s: Failed to init producer BAM-pipe", __func__); goto bam_init_error; } rc = msm_spi_bam_pipe_init(dd, SPI_BAM_CONSUMER_PIPE); if (rc) { dev_err(dd->dev, "%s: Failed to init consumer BAM-pipe", __func__); goto bam_init_error; } return 0; bam_init_error: msm_spi_bam_teardown(dd); return rc; } enum msm_spi_dt_entry_status { DT_REQ, /* Required: fail if missing */ DT_SGST, /* Suggested: warn if missing */ DT_OPT, /* Optional: don't warn if missing */ }; enum msm_spi_dt_entry_type { DT_U32, DT_GPIO, DT_BOOL, }; struct msm_spi_dt_to_pdata_map { const char *dt_name; void *ptr_data; enum msm_spi_dt_entry_status status; enum msm_spi_dt_entry_type type; int default_val; }; static int msm_spi_dt_to_pdata_populate(struct platform_device *pdev, struct msm_spi_platform_data *pdata, struct msm_spi_dt_to_pdata_map *itr) { int ret, err = 0; struct device_node *node = pdev->dev.of_node; for (; itr->dt_name; ++itr) { switch (itr->type) { case DT_GPIO: ret = of_get_named_gpio(node, itr->dt_name, 0); if (ret >= 0) { *((int *) itr->ptr_data) = ret; ret = 0; } break; case DT_U32: ret = of_property_read_u32(node, itr->dt_name, (u32 *) itr->ptr_data); break; case DT_BOOL: 
*((bool *) itr->ptr_data) = of_property_read_bool(node, itr->dt_name); ret = 0; break; default: dev_err(&pdev->dev, "%d is an unknown DT entry type\n", itr->type); ret = -EBADE; } dev_dbg(&pdev->dev, "DT entry ret:%d name:%s val:%d\n", ret, itr->dt_name, *((int *)itr->ptr_data)); if (ret) { *((int *)itr->ptr_data) = itr->default_val; if (itr->status < DT_OPT) { dev_err(&pdev->dev, "Missing '%s' DT entry\n", itr->dt_name); /* cont on err to dump all missing entries */ if (itr->status == DT_REQ && !err) err = ret; } } } return err; } /** * msm_spi_dt_to_pdata: create pdata and read gpio config from device tree */ struct msm_spi_platform_data *msm_spi_dt_to_pdata( struct platform_device *pdev, struct msm_spi *dd) { struct msm_spi_platform_data *pdata; pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) { pr_err("Unable to allocate platform data\n"); return NULL; } else { struct msm_spi_dt_to_pdata_map map[] = { {"spi-max-frequency", &pdata->max_clock_speed, DT_SGST, DT_U32, 0}, {"qcom,infinite-mode", &pdata->infinite_mode, DT_OPT, DT_U32, 0}, {"qcom,active-only", &pdata->active_only, DT_OPT, DT_BOOL, 0}, {"qcom,master-id", &pdata->master_id, DT_SGST, DT_U32, 0}, {"qcom,ver-reg-exists", &pdata->ver_reg_exists, DT_OPT, DT_BOOL, 0}, {"qcom,use-bam", &pdata->use_bam, DT_OPT, DT_BOOL, 0}, {"qcom,use-pinctrl", &pdata->use_pinctrl, DT_OPT, DT_BOOL, 0}, {"qcom,bam-consumer-pipe-index", &pdata->bam_consumer_pipe_index, DT_OPT, DT_U32, 0}, {"qcom,bam-producer-pipe-index", &pdata->bam_producer_pipe_index, DT_OPT, DT_U32, 0}, {"qcom,gpio-clk", &dd->spi_gpios[0], DT_OPT, DT_GPIO, -1}, {"qcom,gpio-miso", &dd->spi_gpios[1], DT_OPT, DT_GPIO, -1}, {"qcom,gpio-mosi", &dd->spi_gpios[2], DT_OPT, DT_GPIO, -1}, {"qcom,gpio-cs0", &dd->cs_gpios[0].gpio_num, DT_OPT, DT_GPIO, -1}, {"qcom,gpio-cs1", &dd->cs_gpios[1].gpio_num, DT_OPT, DT_GPIO, -1}, {"qcom,gpio-cs2", &dd->cs_gpios[2].gpio_num, DT_OPT, DT_GPIO, -1}, {"qcom,gpio-cs3", &dd->cs_gpios[3].gpio_num, DT_OPT, DT_GPIO, -1}, {"qcom,rt-priority", &pdata->rt_priority, DT_OPT, DT_BOOL, 0}, {"qcom,shared", &pdata->is_shared, DT_OPT, DT_BOOL, 0}, {NULL, NULL, 0, 0, 0}, }; if (msm_spi_dt_to_pdata_populate(pdev, pdata, map)) { devm_kfree(&pdev->dev, pdata); return NULL; } } if (pdata->use_bam) { if (!pdata->bam_consumer_pipe_index) { dev_warn(&pdev->dev, "missing qcom,bam-consumer-pipe-index entry in device-tree\n"); pdata->use_bam = false; } if (!pdata->bam_producer_pipe_index) { dev_warn(&pdev->dev, "missing qcom,bam-producer-pipe-index entry in device-tree\n"); pdata->use_bam = false; } } return pdata; } static int msm_spi_get_qup_hw_ver(struct device *dev, struct msm_spi *dd) { u32 data = readl_relaxed(dd->base + QUP_HARDWARE_VER); return (data >= QUP_HARDWARE_VER_2_1_1) ? 
SPI_QUP_VERSION_BFAM : SPI_QUP_VERSION_NONE; } static int msm_spi_bam_get_resources(struct msm_spi *dd, struct platform_device *pdev, struct spi_master *master) { struct resource *resource; size_t bam_mem_size; resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, "spi_bam_physical"); if (!resource) { dev_warn(&pdev->dev, "%s: Missing spi_bam_physical entry in DT", __func__); return -ENXIO; } dd->bam.phys_addr = resource->start; bam_mem_size = resource_size(resource); dd->bam.base = devm_ioremap(&pdev->dev, dd->bam.phys_addr, bam_mem_size); if (!dd->bam.base) { dev_warn(&pdev->dev, "%s: Failed to ioremap(spi_bam_physical)", __func__); return -ENXIO; } dd->bam.irq = platform_get_irq_byname(pdev, "spi_bam_irq"); if (dd->bam.irq < 0) { dev_warn(&pdev->dev, "%s: Missing spi_bam_irq entry in DT", __func__); return -EINVAL; } dd->dma_init = msm_spi_bam_init; dd->dma_teardown = msm_spi_bam_teardown; return 0; } static int init_resources(struct platform_device *pdev) { struct spi_master *master = platform_get_drvdata(pdev); struct msm_spi *dd; int rc = -ENXIO; int clk_enabled = 0; int pclk_enabled = 0; dd = spi_master_get_devdata(master); if (dd->pdata && dd->pdata->use_pinctrl) { rc = msm_spi_pinctrl_init(dd); if (rc) { dev_err(&pdev->dev, "%s: pinctrl init failed\n", __func__); return rc; } } mutex_lock(&dd->core_lock); dd->clk = clk_get(&pdev->dev, "core_clk"); if (IS_ERR(dd->clk)) { dev_err(&pdev->dev, "%s: unable to get core_clk\n", __func__); rc = PTR_ERR(dd->clk); goto err_clk_get; } dd->pclk = clk_get(&pdev->dev, "iface_clk"); if (IS_ERR(dd->pclk)) { dev_err(&pdev->dev, "%s: unable to get iface_clk\n", __func__); rc = PTR_ERR(dd->pclk); goto err_pclk_get; } if (dd->pdata && dd->pdata->max_clock_speed) msm_spi_clock_set(dd, dd->pdata->max_clock_speed); rc = clk_prepare_enable(dd->clk); if (rc) { dev_err(&pdev->dev, "%s: unable to enable core_clk\n", __func__); goto err_clk_enable; } clk_enabled = 1; rc = clk_prepare_enable(dd->pclk); if (rc) { dev_err(&pdev->dev, "%s: unable to enable iface_clk\n", __func__); goto err_pclk_enable; } pclk_enabled = 1; if (dd->pdata && dd->pdata->ver_reg_exists) { enum msm_spi_qup_version ver = msm_spi_get_qup_hw_ver(&pdev->dev, dd); if (dd->qup_ver != ver) dev_warn(&pdev->dev, "%s: HW version different than initially assumed by probe", __func__); } /* GSBI does not exist on B-family MSM chips */ if (dd->qup_ver != SPI_QUP_VERSION_BFAM) { rc = msm_spi_configure_gsbi(dd, pdev); if (rc) goto err_config_gsbi; } msm_spi_calculate_fifo_size(dd); if (dd->use_dma) { rc = dd->dma_init(dd); if (rc) { dev_err(&pdev->dev, "%s: failed to init DMA. Disabling DMA mode\n", __func__); dd->use_dma = 0; } } msm_spi_register_init(dd); /* * The SPI core generates a bogus input overrun error on some targets, * when a transition from run to reset state occurs and if the FIFO has * an odd number of entries. Hence we disable the INPUT_OVER_RUN_ERR_EN * bit.
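* (The same workaround is applied in reset_core() when the core is * reinitialized.)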
*/ msm_spi_enable_error_flags(dd); writel_relaxed(SPI_IO_C_NO_TRI_STATE, dd->base + SPI_IO_CONTROL); rc = msm_spi_set_state(dd, SPI_OP_STATE_RESET); if (rc) goto err_spi_state; clk_disable_unprepare(dd->clk); clk_disable_unprepare(dd->pclk); clk_enabled = 0; pclk_enabled = 0; dd->transfer_pending = 0; dd->multi_xfr = 0; dd->mode = SPI_MODE_NONE; rc = msm_spi_request_irq(dd, pdev, master); if (rc) goto err_irq; msm_spi_disable_irqs(dd); mutex_unlock(&dd->core_lock); return 0; err_irq: err_spi_state: if (dd->use_dma && dd->dma_teardown) dd->dma_teardown(dd); err_config_gsbi: if (pclk_enabled) clk_disable_unprepare(dd->pclk); err_pclk_enable: if (clk_enabled) clk_disable_unprepare(dd->clk); err_clk_enable: clk_put(dd->pclk); err_pclk_get: clk_put(dd->clk); err_clk_get: mutex_unlock(&dd->core_lock); return rc; } static int msm_spi_probe(struct platform_device *pdev) { struct spi_master *master; struct msm_spi *dd; struct resource *resource; int i = 0; int rc = -ENXIO; struct msm_spi_platform_data *pdata; master = spi_alloc_master(&pdev->dev, sizeof(struct msm_spi)); if (!master) { rc = -ENOMEM; dev_err(&pdev->dev, "master allocation failed\n"); goto err_probe_exit; } master->bus_num = pdev->id; master->mode_bits = SPI_SUPPORTED_MODES; master->num_chipselect = SPI_NUM_CHIPSELECTS; master->setup = msm_spi_setup; master->prepare_transfer_hardware = msm_spi_prepare_transfer_hardware; master->transfer_one_message = msm_spi_transfer_one_message; master->unprepare_transfer_hardware = msm_spi_unprepare_transfer_hardware; platform_set_drvdata(pdev, master); dd = spi_master_get_devdata(master); if (pdev->dev.of_node) { dd->qup_ver = SPI_QUP_VERSION_BFAM; master->dev.of_node = pdev->dev.of_node; pdata = msm_spi_dt_to_pdata(pdev, dd); if (!pdata) { rc = -ENOMEM; goto err_probe_exit; } rc = of_alias_get_id(pdev->dev.of_node, "spi"); if (rc < 0) dev_warn(&pdev->dev, "using default bus_num %d\n", pdev->id); else master->bus_num = pdev->id = rc; } else { pdata = pdev->dev.platform_data; dd->qup_ver = SPI_QUP_VERSION_NONE; for (i = 0; i < ARRAY_SIZE(spi_rsrcs); ++i) { resource = platform_get_resource(pdev, IORESOURCE_IO, i); dd->spi_gpios[i] = resource ? resource->start : -1; } for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) { resource = platform_get_resource(pdev, IORESOURCE_IO, i + ARRAY_SIZE(spi_rsrcs)); dd->cs_gpios[i].gpio_num = resource ? 
resource->start : -1; } } for (i = 0; i < ARRAY_SIZE(spi_cs_rsrcs); ++i) dd->cs_gpios[i].valid = 0; dd->pdata = pdata; resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!resource) { rc = -ENXIO; goto err_probe_res; } dd->mem_phys_addr = resource->start; dd->mem_size = resource_size(resource); dd->dev = &pdev->dev; if (pdata) { master->rt = pdata->rt_priority; if (pdata->dma_config) { rc = pdata->dma_config(); if (rc) { dev_warn(&pdev->dev, "%s: DM mode not supported\n", __func__); dd->use_dma = 0; goto skip_dma_resources; } } if (!dd->pdata->use_bam) goto skip_dma_resources; rc = msm_spi_bam_get_resources(dd, pdev, master); if (rc) { dev_warn(dd->dev, "%s: Faild to get BAM resources", __func__); goto skip_dma_resources; } dd->use_dma = 1; } skip_dma_resources: spin_lock_init(&dd->queue_lock); mutex_init(&dd->core_lock); init_waitqueue_head(&dd->continue_suspend); if (!devm_request_mem_region(&pdev->dev, dd->mem_phys_addr, dd->mem_size, SPI_DRV_NAME)) { rc = -ENXIO; goto err_probe_reqmem; } dd->base = devm_ioremap(&pdev->dev, dd->mem_phys_addr, dd->mem_size); if (!dd->base) { rc = -ENOMEM; goto err_probe_reqmem; } pm_runtime_set_autosuspend_delay(&pdev->dev, MSEC_PER_SEC); pm_runtime_use_autosuspend(&pdev->dev); pm_runtime_enable(&pdev->dev); dd->suspended = 1; rc = spi_register_master(master); if (rc) goto err_probe_reg_master; rc = sysfs_create_group(&(dd->dev->kobj), &dev_attr_grp); if (rc) { dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc); goto err_attrs; } spi_debugfs_init(dd); return 0; err_attrs: spi_unregister_master(master); err_probe_reg_master: pm_runtime_disable(&pdev->dev); err_probe_reqmem: err_probe_res: spi_master_put(master); err_probe_exit: return rc; } static int msm_spi_pm_suspend_runtime(struct device *device) { struct platform_device *pdev = to_platform_device(device); struct spi_master *master = platform_get_drvdata(pdev); struct msm_spi *dd; unsigned long flags; dev_dbg(device, "pm_runtime: suspending...\n"); if (!master) goto suspend_exit; dd = spi_master_get_devdata(master); if (!dd) goto suspend_exit; if (dd->suspended) return 0; /* * Make sure nothing is added to the queue while we're * suspending */ spin_lock_irqsave(&dd->queue_lock, flags); dd->suspended = 1; spin_unlock_irqrestore(&dd->queue_lock, flags); /* Wait for transactions to end, or time out */ wait_event_interruptible(dd->continue_suspend, !dd->transfer_pending); if (dd->pdata && !dd->pdata->is_shared && dd->use_dma) { msm_spi_bam_pipe_disconnect(dd, &dd->bam.prod); msm_spi_bam_pipe_disconnect(dd, &dd->bam.cons); } if (dd->pdata && !dd->pdata->is_shared) put_local_resources(dd); if (dd->pdata && !dd->pdata->active_only) msm_spi_clk_path_unvote(dd); suspend_exit: return 0; } static int msm_spi_pm_resume_runtime(struct device *device) { struct platform_device *pdev = to_platform_device(device); struct spi_master *master = platform_get_drvdata(pdev); struct msm_spi *dd; int ret = 0; dev_dbg(device, "pm_runtime: resuming...\n"); if (!master) goto resume_exit; dd = spi_master_get_devdata(master); if (!dd) goto resume_exit; if (!dd->suspended) return 0; if (!dd->is_init_complete) { ret = init_resources(pdev); if (ret != 0) return ret; else dd->is_init_complete = true; } msm_spi_clk_path_init(dd); if (!dd->pdata->active_only) msm_spi_clk_path_vote(dd); if (!dd->pdata->is_shared) { ret = get_local_resources(dd); if (ret) return ret; } if (!dd->pdata->is_shared && dd->use_dma) { msm_spi_bam_pipe_connect(dd, &dd->bam.prod, &dd->bam.prod.config); msm_spi_bam_pipe_connect(dd, 
&dd->bam.cons, &dd->bam.cons.config); } dd->suspended = 0; resume_exit: return 0; } #ifdef CONFIG_PM_SLEEP static int msm_spi_suspend(struct device *device) { if (!pm_runtime_enabled(device) || !pm_runtime_suspended(device)) { struct platform_device *pdev = to_platform_device(device); struct spi_master *master = platform_get_drvdata(pdev); struct msm_spi *dd; dev_dbg(device, "system suspend"); if (!master) goto suspend_exit; dd = spi_master_get_devdata(master); if (!dd) goto suspend_exit; msm_spi_pm_suspend_runtime(device); /* * set the device's runtime PM status to 'suspended' */ pm_runtime_disable(device); pm_runtime_set_suspended(device); pm_runtime_enable(device); } suspend_exit: return 0; } static int msm_spi_resume(struct device *device) { /* * Rely on runtime-PM to call resume in case it is enabled * Even if it's not enabled, rely on 1st client transaction to do * clock ON and gpio configuration */ dev_dbg(device, "system resume"); return 0; } #else #define msm_spi_suspend NULL #define msm_spi_resume NULL #endif static int msm_spi_remove(struct platform_device *pdev) { struct spi_master *master = platform_get_drvdata(pdev); struct msm_spi *dd = spi_master_get_devdata(master); spi_debugfs_exit(dd); sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp); if (dd->dma_teardown) dd->dma_teardown(dd); pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); clk_put(dd->clk); clk_put(dd->pclk); msm_spi_clk_path_teardown(dd); platform_set_drvdata(pdev, 0); spi_unregister_master(master); spi_master_put(master); return 0; } static struct of_device_id msm_spi_dt_match[] = { { .compatible = "qcom,spi-qup-v2", }, {} }; static const struct dev_pm_ops msm_spi_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(msm_spi_suspend, msm_spi_resume) SET_RUNTIME_PM_OPS(msm_spi_pm_suspend_runtime, msm_spi_pm_resume_runtime, NULL) }; static struct platform_driver msm_spi_driver = { .driver = { .name = SPI_DRV_NAME, .owner = THIS_MODULE, .pm = &msm_spi_dev_pm_ops, .of_match_table = msm_spi_dt_match, }, .remove = msm_spi_remove, .probe = msm_spi_probe, }; static int __init msm_spi_init(void) { return platform_driver_register(&msm_spi_driver); } module_init(msm_spi_init); static void __exit msm_spi_exit(void) { platform_driver_unregister(&msm_spi_driver); } module_exit(msm_spi_exit); MODULE_LICENSE("GPL v2"); MODULE_VERSION("0.4"); MODULE_ALIAS("platform:"SPI_DRV_NAME);
gpl-2.0
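The probe path above enables runtime PM with a one-second autosuspend delay (MSEC_PER_SEC) and marks the controller suspended until its first use. Below is a minimal, hypothetical sketch of the standard runtime-PM get/put pattern that ends up invoking the msm_spi_pm_suspend_runtime() and msm_spi_pm_resume_runtime() callbacks shown above; the function name and call site are illustrative only and are not taken from this driver.

/*
 * Illustrative sketch only; not part of the driver above.  It shows the
 * standard runtime-PM get/put pattern that triggers callbacks such as
 * msm_spi_pm_suspend_runtime()/msm_spi_pm_resume_runtime(), using the
 * autosuspend delay configured in msm_spi_probe().  The function below
 * is hypothetical.
 */
#include <linux/device.h>
#include <linux/pm_runtime.h>

static int example_spi_transfer_path(struct device *dev)
{
	int ret;

	/* Resumes the controller if it is currently runtime-suspended. */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... queue and complete the SPI transfer here ... */

	/*
	 * Re-arm the autosuspend timer set up in probe with
	 * pm_runtime_set_autosuspend_delay()/pm_runtime_use_autosuspend();
	 * the controller suspends again once the delay expires.
	 */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;
}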
wanghao-xznu/linux-2.6.32.9-hiveboard_130326
drivers/watchdog/rc32434_wdt.c
552
8763
/* * IDT Interprise 79RC32434 watchdog driver * * Copyright (C) 2006, Ondrej Zajicek <santiago@crfreenet.org> * Copyright (C) 2008, Florian Fainelli <florian@openwrt.org> * * based on * SoftDog 0.05: A Software Watchdog Device * * (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>, * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #include <linux/module.h> /* For module specific items */ #include <linux/moduleparam.h> /* For new moduleparam's */ #include <linux/types.h> /* For standard types (like size_t) */ #include <linux/errno.h> /* For the -ENODEV/... values */ #include <linux/kernel.h> /* For printk/panic/... */ #include <linux/fs.h> /* For file operations */ #include <linux/miscdevice.h> /* For MODULE_ALIAS_MISCDEV (WATCHDOG_MINOR) */ #include <linux/watchdog.h> /* For the watchdog specific items */ #include <linux/init.h> /* For __init/__exit/... */ #include <linux/platform_device.h> /* For platform_driver framework */ #include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */ #include <linux/uaccess.h> /* For copy_to_user/put_user/... */ #include <asm/mach-rc32434/integ.h> /* For the Watchdog registers */ #define PFX KBUILD_MODNAME ": " #define VERSION "1.0" static struct { unsigned long inuse; spinlock_t io_lock; } rc32434_wdt_device; static struct integ __iomem *wdt_reg; static int expect_close; /* Board internal clock speed in Hz, * the watchdog timer ticks at. */ extern unsigned int idt_cpu_freq; /* translate wtcompare value to seconds and vice versa */ #define WTCOMP2SEC(x) (x / idt_cpu_freq) #define SEC2WTCOMP(x) (x * idt_cpu_freq) /* Use a default timeout of 20s. This should be * safe for CPU clock speeds up to 400MHz, as * ((2 ^ 32) - 1) / (400MHz / 2) = 21s. 
*/ #define WATCHDOG_TIMEOUT 20 static int timeout = WATCHDOG_TIMEOUT; module_param(timeout, int, 0); MODULE_PARM_DESC(timeout, "Watchdog timeout value, in seconds (default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ")"); static int nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, int, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); /* apply or and nand masks to data read from addr and write back */ #define SET_BITS(addr, or, nand) \ writel((readl(&addr) | or) & ~nand, &addr) static int rc32434_wdt_set(int new_timeout) { int max_to = WTCOMP2SEC((u32)-1); if (new_timeout < 0 || new_timeout > max_to) { printk(KERN_ERR PFX "timeout value must be between 0 and %d", max_to); return -EINVAL; } timeout = new_timeout; spin_lock(&rc32434_wdt_device.io_lock); writel(SEC2WTCOMP(timeout), &wdt_reg->wtcompare); spin_unlock(&rc32434_wdt_device.io_lock); return 0; } static void rc32434_wdt_start(void) { u32 or, nand; spin_lock(&rc32434_wdt_device.io_lock); /* zero the counter before enabling */ writel(0, &wdt_reg->wtcount); /* don't generate a non-maskable interrupt, * do a warm reset instead */ nand = 1 << RC32434_ERR_WNE; or = 1 << RC32434_ERR_WRE; /* reset the ERRCS timeout bit in case it's set */ nand |= 1 << RC32434_ERR_WTO; SET_BITS(wdt_reg->errcs, or, nand); /* set the timeout (either default or based on module param) */ rc32434_wdt_set(timeout); /* reset WTC timeout bit and enable WDT */ nand = 1 << RC32434_WTC_TO; or = 1 << RC32434_WTC_EN; SET_BITS(wdt_reg->wtc, or, nand); spin_unlock(&rc32434_wdt_device.io_lock); printk(KERN_INFO PFX "Started watchdog timer.\n"); } static void rc32434_wdt_stop(void) { spin_lock(&rc32434_wdt_device.io_lock); /* Disable WDT */ SET_BITS(wdt_reg->wtc, 0, 1 << RC32434_WTC_EN); spin_unlock(&rc32434_wdt_device.io_lock); printk(KERN_INFO PFX "Stopped watchdog timer.\n"); } static void rc32434_wdt_ping(void) { spin_lock(&rc32434_wdt_device.io_lock); writel(0, &wdt_reg->wtcount); spin_unlock(&rc32434_wdt_device.io_lock); } static int rc32434_wdt_open(struct inode *inode, struct file *file) { if (test_and_set_bit(0, &rc32434_wdt_device.inuse)) return -EBUSY; if (nowayout) __module_get(THIS_MODULE); rc32434_wdt_start(); rc32434_wdt_ping(); return nonseekable_open(inode, file); } static int rc32434_wdt_release(struct inode *inode, struct file *file) { if (expect_close == 42) { rc32434_wdt_stop(); module_put(THIS_MODULE); } else { printk(KERN_CRIT PFX "device closed unexpectedly. 
WDT will not stop!\n"); rc32434_wdt_ping(); } clear_bit(0, &rc32434_wdt_device.inuse); return 0; } static ssize_t rc32434_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos) { if (len) { if (!nowayout) { size_t i; /* In case it was set long ago */ expect_close = 0; for (i = 0; i != len; i++) { char c; if (get_user(c, data + i)) return -EFAULT; if (c == 'V') expect_close = 42; } } rc32434_wdt_ping(); return len; } return 0; } static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; int new_timeout; unsigned int value; static struct watchdog_info ident = { .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, .identity = "RC32434_WDT Watchdog", }; switch (cmd) { case WDIOC_GETSUPPORT: if (copy_to_user(argp, &ident, sizeof(ident))) return -EFAULT; break; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: value = 0; if (copy_to_user(argp, &value, sizeof(int))) return -EFAULT; break; case WDIOC_SETOPTIONS: if (copy_from_user(&value, argp, sizeof(int))) return -EFAULT; switch (value) { case WDIOS_ENABLECARD: rc32434_wdt_start(); break; case WDIOS_DISABLECARD: rc32434_wdt_stop(); break; default: return -EINVAL; } break; case WDIOC_KEEPALIVE: rc32434_wdt_ping(); break; case WDIOC_SETTIMEOUT: if (copy_from_user(&new_timeout, argp, sizeof(int))) return -EFAULT; if (rc32434_wdt_set(new_timeout)) return -EINVAL; /* Fall through */ case WDIOC_GETTIMEOUT: return copy_to_user(argp, &timeout, sizeof(int)); default: return -ENOTTY; } return 0; } static const struct file_operations rc32434_wdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = rc32434_wdt_write, .unlocked_ioctl = rc32434_wdt_ioctl, .open = rc32434_wdt_open, .release = rc32434_wdt_release, }; static struct miscdevice rc32434_wdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &rc32434_wdt_fops, }; static char banner[] __devinitdata = KERN_INFO PFX "Watchdog Timer version " VERSION ", timer margin: %d sec\n"; static int __devinit rc32434_wdt_probe(struct platform_device *pdev) { int ret; struct resource *r; r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rb532_wdt_res"); if (!r) { printk(KERN_ERR PFX "failed to retrieve resources\n"); return -ENODEV; } wdt_reg = ioremap_nocache(r->start, resource_size(r)); if (!wdt_reg) { printk(KERN_ERR PFX "failed to remap I/O resources\n"); return -ENXIO; } spin_lock_init(&rc32434_wdt_device.io_lock); /* Make sure the watchdog is not running */ rc32434_wdt_stop(); /* Check that the heartbeat value is within it's range; * if not reset to the default */ if (rc32434_wdt_set(timeout)) { rc32434_wdt_set(WATCHDOG_TIMEOUT); printk(KERN_INFO PFX "timeout value must be between 0 and %d\n", WTCOMP2SEC((u32)-1)); } ret = misc_register(&rc32434_wdt_miscdev); if (ret < 0) { printk(KERN_ERR PFX "failed to register watchdog device\n"); goto unmap; } printk(banner, timeout); return 0; unmap: iounmap(wdt_reg); return ret; } static int __devexit rc32434_wdt_remove(struct platform_device *pdev) { misc_deregister(&rc32434_wdt_miscdev); iounmap(wdt_reg); return 0; } static void rc32434_wdt_shutdown(struct platform_device *pdev) { rc32434_wdt_stop(); } static struct platform_driver rc32434_wdt_driver = { .probe = rc32434_wdt_probe, .remove = __devexit_p(rc32434_wdt_remove), .shutdown = rc32434_wdt_shutdown, .driver = { .name = "rc32434_wdt", } }; static int __init rc32434_wdt_init(void) { return platform_driver_register(&rc32434_wdt_driver); } static void __exit rc32434_wdt_exit(void) { 
platform_driver_unregister(&rc32434_wdt_driver); } module_init(rc32434_wdt_init); module_exit(rc32434_wdt_exit); MODULE_AUTHOR("Ondrej Zajicek <santiago@crfreenet.org>," "Florian Fainelli <florian@openwrt.org>"); MODULE_DESCRIPTION("Driver for the IDT RC32434 SoC watchdog"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
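The rc32434_wdt.c driver above exposes the generic /dev/watchdog interface: the first open starts the timer, writes containing 'V' arm the magic close, and WDIOC_SETTIMEOUT and WDIOC_KEEPALIVE map to rc32434_wdt_set() and rc32434_wdt_ping(). A minimal, hypothetical user-space client for that interface (error handling omitted for brevity) could look like this:

/*
 * Hypothetical user-space example for the /dev/watchdog interface
 * implemented by rc32434_wdt.c above; it is not part of the driver.
 * Build with a normal user-space toolchain, not as kernel code.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 20;	/* seconds, mirrors WATCHDOG_TIMEOUT above */
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0)
		return 1;

	/* Handled by rc32434_wdt_set() via WDIOC_SETTIMEOUT. */
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);

	/* Each keepalive reaches rc32434_wdt_ping() and zeroes wtcount. */
	ioctl(fd, WDIOC_KEEPALIVE, 0);
	sleep(timeout / 2);
	ioctl(fd, WDIOC_KEEPALIVE, 0);

	/*
	 * Magic close: with the default nowayout=0, omitting the 'V' makes
	 * rc32434_wdt_release() warn and leave the watchdog running.
	 */
	write(fd, "V", 1);
	close(fd);
	return 0;
}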
blue236/linux-1
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv10.c
552
5835
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Ben Skeggs */ #include "nv04.h" #include <core/client.h> #include <core/engctx.h> #include <core/ramht.h> #include <subdev/instmem/nv04.h> #include <nvif/class.h> #include <nvif/unpack.h> static struct ramfc_desc nv10_ramfc[] = { { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT }, { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET }, { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT }, { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE }, { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT }, { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE }, { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH }, { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE }, { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 }, {} }; /******************************************************************************* * FIFO channel objects ******************************************************************************/ static int nv10_fifo_chan_ctor(struct nvkm_object *parent, struct nvkm_object *engine, struct nvkm_oclass *oclass, void *data, u32 size, struct nvkm_object **pobject) { union { struct nv03_channel_dma_v0 v0; } *args = data; struct nv04_fifo_priv *priv = (void *)engine; struct nv04_fifo_chan *chan; int ret; nv_ioctl(parent, "create channel dma size %d\n", size); if (nvif_unpack(args->v0, 0, 0, false)) { nv_ioctl(parent, "create channel dma vers %d pushbuf %08x " "offset %016llx\n", args->v0.version, args->v0.pushbuf, args->v0.offset); } else return ret; ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000, 0x10000, args->v0.pushbuf, (1ULL << NVDEV_ENGINE_DMAOBJ) | (1ULL << NVDEV_ENGINE_SW) | (1ULL << NVDEV_ENGINE_GR), &chan); *pobject = nv_object(chan); if (ret) return ret; args->v0.chid = chan->base.chid; nv_parent(chan)->object_attach = nv04_fifo_object_attach; nv_parent(chan)->object_detach = nv04_fifo_object_detach; nv_parent(chan)->context_attach = nv04_fifo_context_attach; chan->ramfc = chan->base.chid * 32; nv_wo32(priv->ramfc, chan->ramfc + 0x00, args->v0.offset); nv_wo32(priv->ramfc, chan->ramfc + 0x04, args->v0.offset); nv_wo32(priv->ramfc, chan->ramfc + 0x0c, chan->base.pushgpu->addr >> 4); nv_wo32(priv->ramfc, chan->ramfc + 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES | NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES | #ifdef __BIG_ENDIAN NV_PFIFO_CACHE1_BIG_ENDIAN | #endif NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8); return 0; } static struct nvkm_ofuncs nv10_fifo_ofuncs = { .ctor = nv10_fifo_chan_ctor, .dtor = nv04_fifo_chan_dtor, .init = 
nv04_fifo_chan_init, .fini = nv04_fifo_chan_fini, .map = _nvkm_fifo_channel_map, .rd32 = _nvkm_fifo_channel_rd32, .wr32 = _nvkm_fifo_channel_wr32, .ntfy = _nvkm_fifo_channel_ntfy }; static struct nvkm_oclass nv10_fifo_sclass[] = { { NV10_CHANNEL_DMA, &nv10_fifo_ofuncs }, {} }; /******************************************************************************* * FIFO context - basically just the instmem reserved for the channel ******************************************************************************/ static struct nvkm_oclass nv10_fifo_cclass = { .handle = NV_ENGCTX(FIFO, 0x10), .ofuncs = &(struct nvkm_ofuncs) { .ctor = nv04_fifo_context_ctor, .dtor = _nvkm_fifo_context_dtor, .init = _nvkm_fifo_context_init, .fini = _nvkm_fifo_context_fini, .rd32 = _nvkm_fifo_context_rd32, .wr32 = _nvkm_fifo_context_wr32, }, }; /******************************************************************************* * PFIFO engine ******************************************************************************/ static int nv10_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine, struct nvkm_oclass *oclass, void *data, u32 size, struct nvkm_object **pobject) { struct nv04_instmem_priv *imem = nv04_instmem(parent); struct nv04_fifo_priv *priv; int ret; ret = nvkm_fifo_create(parent, engine, oclass, 0, 31, &priv); *pobject = nv_object(priv); if (ret) return ret; nvkm_ramht_ref(imem->ramht, &priv->ramht); nvkm_gpuobj_ref(imem->ramro, &priv->ramro); nvkm_gpuobj_ref(imem->ramfc, &priv->ramfc); nv_subdev(priv)->unit = 0x00000100; nv_subdev(priv)->intr = nv04_fifo_intr; nv_engine(priv)->cclass = &nv10_fifo_cclass; nv_engine(priv)->sclass = nv10_fifo_sclass; priv->base.pause = nv04_fifo_pause; priv->base.start = nv04_fifo_start; priv->ramfc_desc = nv10_ramfc; return 0; } struct nvkm_oclass * nv10_fifo_oclass = &(struct nvkm_oclass) { .handle = NV_ENGINE(FIFO, 0x10), .ofuncs = &(struct nvkm_ofuncs) { .ctor = nv10_fifo_ctor, .dtor = nv04_fifo_dtor, .init = nv04_fifo_init, .fini = _nvkm_fifo_fini, }, };
gpl-2.0
gauravds/linux
drivers/gpu/drm/nouveau/nvkm/engine/cipher/g84.c
552
5148
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Ben Skeggs */ #include <engine/cipher.h> #include <engine/fifo.h> #include <core/client.h> #include <core/engctx.h> #include <core/enum.h> struct g84_cipher_priv { struct nvkm_engine base; }; /******************************************************************************* * Crypt object classes ******************************************************************************/ static int g84_cipher_object_ctor(struct nvkm_object *parent, struct nvkm_object *engine, struct nvkm_oclass *oclass, void *data, u32 size, struct nvkm_object **pobject) { struct nvkm_gpuobj *obj; int ret; ret = nvkm_gpuobj_create(parent, engine, oclass, 0, parent, 16, 16, 0, &obj); *pobject = nv_object(obj); if (ret) return ret; nv_wo32(obj, 0x00, nv_mclass(obj)); nv_wo32(obj, 0x04, 0x00000000); nv_wo32(obj, 0x08, 0x00000000); nv_wo32(obj, 0x0c, 0x00000000); return 0; } static struct nvkm_ofuncs g84_cipher_ofuncs = { .ctor = g84_cipher_object_ctor, .dtor = _nvkm_gpuobj_dtor, .init = _nvkm_gpuobj_init, .fini = _nvkm_gpuobj_fini, .rd32 = _nvkm_gpuobj_rd32, .wr32 = _nvkm_gpuobj_wr32, }; static struct nvkm_oclass g84_cipher_sclass[] = { { 0x74c1, &g84_cipher_ofuncs }, {} }; /******************************************************************************* * PCIPHER context ******************************************************************************/ static struct nvkm_oclass g84_cipher_cclass = { .handle = NV_ENGCTX(CIPHER, 0x84), .ofuncs = &(struct nvkm_ofuncs) { .ctor = _nvkm_engctx_ctor, .dtor = _nvkm_engctx_dtor, .init = _nvkm_engctx_init, .fini = _nvkm_engctx_fini, .rd32 = _nvkm_engctx_rd32, .wr32 = _nvkm_engctx_wr32, }, }; /******************************************************************************* * PCIPHER engine/subdev functions ******************************************************************************/ static const struct nvkm_bitfield g84_cipher_intr_mask[] = { { 0x00000001, "INVALID_STATE" }, { 0x00000002, "ILLEGAL_MTHD" }, { 0x00000004, "ILLEGAL_CLASS" }, { 0x00000080, "QUERY" }, { 0x00000100, "FAULT" }, {} }; static void g84_cipher_intr(struct nvkm_subdev *subdev) { struct nvkm_fifo *pfifo = nvkm_fifo(subdev); struct nvkm_engine *engine = nv_engine(subdev); struct nvkm_object *engctx; struct g84_cipher_priv *priv = (void *)subdev; u32 stat = nv_rd32(priv, 0x102130); u32 mthd = nv_rd32(priv, 0x102190); u32 data = nv_rd32(priv, 0x102194); u32 inst = nv_rd32(priv, 0x102188) & 0x7fffffff; int chid; engctx = 
nvkm_engctx_get(engine, inst); chid = pfifo->chid(pfifo, engctx); if (stat) { nv_error(priv, "%s", ""); nvkm_bitfield_print(g84_cipher_intr_mask, stat); pr_cont(" ch %d [0x%010llx %s] mthd 0x%04x data 0x%08x\n", chid, (u64)inst << 12, nvkm_client_name(engctx), mthd, data); } nv_wr32(priv, 0x102130, stat); nv_wr32(priv, 0x10200c, 0x10); nvkm_engctx_put(engctx); } static int g84_cipher_ctor(struct nvkm_object *parent, struct nvkm_object *engine, struct nvkm_oclass *oclass, void *data, u32 size, struct nvkm_object **pobject) { struct g84_cipher_priv *priv; int ret; ret = nvkm_engine_create(parent, engine, oclass, true, "PCIPHER", "cipher", &priv); *pobject = nv_object(priv); if (ret) return ret; nv_subdev(priv)->unit = 0x00004000; nv_subdev(priv)->intr = g84_cipher_intr; nv_engine(priv)->cclass = &g84_cipher_cclass; nv_engine(priv)->sclass = g84_cipher_sclass; return 0; } static int g84_cipher_init(struct nvkm_object *object) { struct g84_cipher_priv *priv = (void *)object; int ret; ret = nvkm_engine_init(&priv->base); if (ret) return ret; nv_wr32(priv, 0x102130, 0xffffffff); nv_wr32(priv, 0x102140, 0xffffffbf); nv_wr32(priv, 0x10200c, 0x00000010); return 0; } struct nvkm_oclass g84_cipher_oclass = { .handle = NV_ENGINE(CIPHER, 0x84), .ofuncs = &(struct nvkm_ofuncs) { .ctor = g84_cipher_ctor, .dtor = _nvkm_engine_dtor, .init = g84_cipher_init, .fini = _nvkm_engine_fini, }, };
gpl-2.0
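g84_cipher_intr() above reads the PCIPHER status word at 0x102130 and decodes it against the g84_cipher_intr_mask table. The following stand-alone sketch shows the same mask-table decode in plain C; it is an illustration only, not the nvkm_bitfield_print() implementation:

/*
 * Hypothetical stand-alone illustration of decoding a status word with a
 * { mask, name } table like g84_cipher_intr_mask above.  This is not the
 * nvkm_bitfield_print() implementation, just the same idea in plain C.
 */
#include <stdio.h>

struct bitfield {
	unsigned int mask;
	const char *name;
};

static const struct bitfield cipher_intr[] = {
	{ 0x00000001, "INVALID_STATE" },
	{ 0x00000002, "ILLEGAL_MTHD" },
	{ 0x00000004, "ILLEGAL_CLASS" },
	{ 0x00000080, "QUERY" },
	{ 0x00000100, "FAULT" },
	{ 0, NULL }
};

static void print_bitfield(const struct bitfield *tbl, unsigned int stat)
{
	for (; tbl->mask; tbl++)
		if (stat & tbl->mask)
			printf(" %s", tbl->name);
	printf("\n");
}

int main(void)
{
	print_bitfield(cipher_intr, 0x00000084);	/* prints " ILLEGAL_CLASS QUERY" */
	return 0;
}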
XtheOne/enrc2b-3.1.10-42105bd
drivers/block/cciss.c
552
150376
/* * Disk Array driver for HP Smart Array controllers. * (C) Copyright 2000, 2007 Hewlett-Packard Development Company, L.P. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA * 02111-1307, USA. * * Questions/Comments/Bugfixes to iss_storagedev@hp.com * */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/major.h> #include <linux/fs.h> #include <linux/bio.h> #include <linux/blkpg.h> #include <linux/timer.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/init.h> #include <linux/jiffies.h> #include <linux/hdreg.h> #include <linux/spinlock.h> #include <linux/compat.h> #include <linux/mutex.h> #include <asm/uaccess.h> #include <asm/io.h> #include <linux/dma-mapping.h> #include <linux/blkdev.h> #include <linux/genhd.h> #include <linux/completion.h> #include <scsi/scsi.h> #include <scsi/sg.h> #include <scsi/scsi_ioctl.h> #include <linux/cdrom.h> #include <linux/scatterlist.h> #include <linux/kthread.h> #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin)) #define DRIVER_NAME "HP CISS Driver (v 3.6.26)" #define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 26) /* Embedded module documentation macros - see modules.h */ MODULE_AUTHOR("Hewlett-Packard Company"); MODULE_DESCRIPTION("Driver for HP Smart Array Controllers"); MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers"); MODULE_VERSION("3.6.26"); MODULE_LICENSE("GPL"); static int cciss_tape_cmds = 6; module_param(cciss_tape_cmds, int, 0644); MODULE_PARM_DESC(cciss_tape_cmds, "number of commands to allocate for tape devices (default: 6)"); static DEFINE_MUTEX(cciss_mutex); static struct proc_dir_entry *proc_cciss; #include "cciss_cmd.h" #include "cciss.h" #include <linux/cciss_ioctl.h> /* define the PCI info for the cards we can control */ static const struct pci_device_id cciss_pci_device_id[] = { {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070}, {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080}, {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082}, {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083}, {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091}, {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A}, {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B}, {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C}, {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212}, {PCI_VENDOR_ID_HP, 
PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D}, {0,} }; MODULE_DEVICE_TABLE(pci, cciss_pci_device_id); /* board_id = Subsystem Device ID & Vendor ID * product = Marketing Name for the board * access = Address of the struct of function pointers */ static struct board_type products[] = { {0x40700E11, "Smart Array 5300", &SA5_access}, {0x40800E11, "Smart Array 5i", &SA5B_access}, {0x40820E11, "Smart Array 532", &SA5B_access}, {0x40830E11, "Smart Array 5312", &SA5B_access}, {0x409A0E11, "Smart Array 641", &SA5_access}, {0x409B0E11, "Smart Array 642", &SA5_access}, {0x409C0E11, "Smart Array 6400", &SA5_access}, {0x409D0E11, "Smart Array 6400 EM", &SA5_access}, {0x40910E11, "Smart Array 6i", &SA5_access}, {0x3225103C, "Smart Array P600", &SA5_access}, {0x3223103C, "Smart Array P800", &SA5_access}, {0x3234103C, "Smart Array P400", &SA5_access}, {0x3235103C, "Smart Array P400i", &SA5_access}, {0x3211103C, "Smart Array E200i", &SA5_access}, {0x3212103C, "Smart Array E200", &SA5_access}, {0x3213103C, "Smart Array E200i", &SA5_access}, {0x3214103C, "Smart Array E200i", &SA5_access}, {0x3215103C, "Smart Array E200i", &SA5_access}, {0x3237103C, "Smart Array E500", &SA5_access}, {0x3223103C, "Smart Array P800", &SA5_access}, {0x3234103C, "Smart Array P400", &SA5_access}, {0x323D103C, "Smart Array P700m", &SA5_access}, }; /* How long to wait (in milliseconds) for board to go into simple mode */ #define MAX_CONFIG_WAIT 30000 #define MAX_IOCTL_CONFIG_WAIT 1000 /*define how many times we will try a command because of bus resets */ #define MAX_CMD_RETRIES 3 #define MAX_CTLR 32 /* Originally cciss driver only supports 8 major numbers */ #define MAX_CTLR_ORIG 8 static ctlr_info_t *hba[MAX_CTLR]; static struct task_struct *cciss_scan_thread; static DEFINE_MUTEX(scan_mutex); static LIST_HEAD(scan_q); static void do_cciss_request(struct request_queue *q); static irqreturn_t do_cciss_intx(int irq, void *dev_id); static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id); static int cciss_open(struct block_device *bdev, fmode_t mode); static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode); static int cciss_release(struct gendisk *disk, fmode_t mode); static int do_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg); static int cciss_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg); static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo); static int cciss_revalidate(struct gendisk *disk); static int rebuild_lun_table(ctlr_info_t *h, int first_time, int via_ioctl); static int deregister_disk(ctlr_info_t *h, int drv_index, int clear_all, int via_ioctl); static void cciss_read_capacity(ctlr_info_t *h, int logvol, sector_t *total_size, unsigned int *block_size); static void cciss_read_capacity_16(ctlr_info_t *h, int logvol, sector_t *total_size, unsigned int *block_size); static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol, sector_t total_size, unsigned int block_size, InquiryData_struct *inq_buff, drive_info_struct *drv); static void __devinit cciss_interrupt_mode(ctlr_info_t *); static void start_io(ctlr_info_t *h); static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size, __u8 page_code, unsigned char scsi3addr[], int cmd_type); static int 
sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c, int attempt_retry); static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c); static int add_to_scan_list(struct ctlr_info *h); static int scan_thread(void *data); static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c); static void cciss_hba_release(struct device *dev); static void cciss_device_release(struct device *dev); static void cciss_free_gendisk(ctlr_info_t *h, int drv_index); static void cciss_free_drive_info(ctlr_info_t *h, int drv_index); static inline u32 next_command(ctlr_info_t *h); static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index, u64 *cfg_offset); static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev, unsigned long *memory_bar); static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag); static __devinit int write_driver_ver_to_cfgtable( CfgTable_struct __iomem *cfgtable); /* performant mode helper functions */ static void calc_bucket_map(int *bucket, int num_buckets, int nsgs, int *bucket_map); static void cciss_put_controller_into_performant_mode(ctlr_info_t *h); #ifdef CONFIG_PROC_FS static void cciss_procinit(ctlr_info_t *h); #else static void cciss_procinit(ctlr_info_t *h) { } #endif /* CONFIG_PROC_FS */ #ifdef CONFIG_COMPAT static int cciss_compat_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); #endif static const struct block_device_operations cciss_fops = { .owner = THIS_MODULE, .open = cciss_unlocked_open, .release = cciss_release, .ioctl = do_ioctl, .getgeo = cciss_getgeo, #ifdef CONFIG_COMPAT .compat_ioctl = cciss_compat_ioctl, #endif .revalidate_disk = cciss_revalidate, }; /* set_performant_mode: Modify the tag for cciss performant * set bit 0 for pull model, bits 3-1 for block fetch * register number */ static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c) { if (likely(h->transMethod & CFGTBL_Trans_Performant)) c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); } /* * Enqueuing and dequeuing functions for cmdlists. */ static inline void addQ(struct list_head *list, CommandList_struct *c) { list_add_tail(&c->list, list); } static inline void removeQ(CommandList_struct *c) { /* * After kexec/dump some commands might still * be in flight, which the firmware will try * to complete. Resetting the firmware doesn't work * with old fw revisions, so we have to mark * them off as 'stale' to prevent the driver from * falling over. 
*/ if (WARN_ON(list_empty(&c->list))) { c->cmd_type = CMD_MSG_STALE; return; } list_del_init(&c->list); } static void enqueue_cmd_and_start_io(ctlr_info_t *h, CommandList_struct *c) { unsigned long flags; set_performant_mode(h, c); spin_lock_irqsave(&h->lock, flags); addQ(&h->reqQ, c); h->Qdepth++; if (h->Qdepth > h->maxQsinceinit) h->maxQsinceinit = h->Qdepth; start_io(h); spin_unlock_irqrestore(&h->lock, flags); } static void cciss_free_sg_chain_blocks(SGDescriptor_struct **cmd_sg_list, int nr_cmds) { int i; if (!cmd_sg_list) return; for (i = 0; i < nr_cmds; i++) { kfree(cmd_sg_list[i]); cmd_sg_list[i] = NULL; } kfree(cmd_sg_list); } static SGDescriptor_struct **cciss_allocate_sg_chain_blocks( ctlr_info_t *h, int chainsize, int nr_cmds) { int j; SGDescriptor_struct **cmd_sg_list; if (chainsize <= 0) return NULL; cmd_sg_list = kmalloc(sizeof(*cmd_sg_list) * nr_cmds, GFP_KERNEL); if (!cmd_sg_list) return NULL; /* Build up chain blocks for each command */ for (j = 0; j < nr_cmds; j++) { /* Need a block of chainsized s/g elements. */ cmd_sg_list[j] = kmalloc((chainsize * sizeof(*cmd_sg_list[j])), GFP_KERNEL); if (!cmd_sg_list[j]) { dev_err(&h->pdev->dev, "Cannot get memory " "for s/g chains.\n"); goto clean; } } return cmd_sg_list; clean: cciss_free_sg_chain_blocks(cmd_sg_list, nr_cmds); return NULL; } static void cciss_unmap_sg_chain_block(ctlr_info_t *h, CommandList_struct *c) { SGDescriptor_struct *chain_sg; u64bit temp64; if (c->Header.SGTotal <= h->max_cmd_sgentries) return; chain_sg = &c->SG[h->max_cmd_sgentries - 1]; temp64.val32.lower = chain_sg->Addr.lower; temp64.val32.upper = chain_sg->Addr.upper; pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE); } static void cciss_map_sg_chain_block(ctlr_info_t *h, CommandList_struct *c, SGDescriptor_struct *chain_block, int len) { SGDescriptor_struct *chain_sg; u64bit temp64; chain_sg = &c->SG[h->max_cmd_sgentries - 1]; chain_sg->Ext = CCISS_SG_CHAIN; chain_sg->Len = len; temp64.val = pci_map_single(h->pdev, chain_block, len, PCI_DMA_TODEVICE); chain_sg->Addr.lower = temp64.val32.lower; chain_sg->Addr.upper = temp64.val32.upper; } #include "cciss_scsi.c" /* For SCSI tape support */ static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", "UNKNOWN" }; #define RAID_UNKNOWN (ARRAY_SIZE(raid_label)-1) #ifdef CONFIG_PROC_FS /* * Report information about this controller. 
*/ #define ENG_GIG 1000000000 #define ENG_GIG_FACTOR (ENG_GIG/512) #define ENGAGE_SCSI "engage scsi" static void cciss_seq_show_header(struct seq_file *seq) { ctlr_info_t *h = seq->private; seq_printf(seq, "%s: HP %s Controller\n" "Board ID: 0x%08lx\n" "Firmware Version: %c%c%c%c\n" "IRQ: %d\n" "Logical drives: %d\n" "Current Q depth: %d\n" "Current # commands on controller: %d\n" "Max Q depth since init: %d\n" "Max # commands on controller since init: %d\n" "Max SG entries since init: %d\n", h->devname, h->product_name, (unsigned long)h->board_id, h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], h->firm_ver[3], (unsigned int)h->intr[PERF_MODE_INT], h->num_luns, h->Qdepth, h->commands_outstanding, h->maxQsinceinit, h->max_outstanding, h->maxSG); #ifdef CONFIG_CISS_SCSI_TAPE cciss_seq_tape_report(seq, h); #endif /* CONFIG_CISS_SCSI_TAPE */ } static void *cciss_seq_start(struct seq_file *seq, loff_t *pos) { ctlr_info_t *h = seq->private; unsigned long flags; /* prevent displaying bogus info during configuration * or deconfiguration of a logical volume */ spin_lock_irqsave(&h->lock, flags); if (h->busy_configuring) { spin_unlock_irqrestore(&h->lock, flags); return ERR_PTR(-EBUSY); } h->busy_configuring = 1; spin_unlock_irqrestore(&h->lock, flags); if (*pos == 0) cciss_seq_show_header(seq); return pos; } static int cciss_seq_show(struct seq_file *seq, void *v) { sector_t vol_sz, vol_sz_frac; ctlr_info_t *h = seq->private; unsigned ctlr = h->ctlr; loff_t *pos = v; drive_info_struct *drv = h->drv[*pos]; if (*pos > h->highest_lun) return 0; if (drv == NULL) /* it's possible for h->drv[] to have holes. */ return 0; if (drv->heads == 0) return 0; vol_sz = drv->nr_blocks; vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR); vol_sz_frac *= 100; sector_div(vol_sz_frac, ENG_GIG_FACTOR); if (drv->raid_level < 0 || drv->raid_level > RAID_UNKNOWN) drv->raid_level = RAID_UNKNOWN; seq_printf(seq, "cciss/c%dd%d:" "\t%4u.%02uGB\tRAID %s\n", ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac, raid_label[drv->raid_level]); return 0; } static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ctlr_info_t *h = seq->private; if (*pos > h->highest_lun) return NULL; *pos += 1; return pos; } static void cciss_seq_stop(struct seq_file *seq, void *v) { ctlr_info_t *h = seq->private; /* Only reset h->busy_configuring if we succeeded in setting * it during cciss_seq_start. 
*/ if (v == ERR_PTR(-EBUSY)) return; h->busy_configuring = 0; } static const struct seq_operations cciss_seq_ops = { .start = cciss_seq_start, .show = cciss_seq_show, .next = cciss_seq_next, .stop = cciss_seq_stop, }; static int cciss_seq_open(struct inode *inode, struct file *file) { int ret = seq_open(file, &cciss_seq_ops); struct seq_file *seq = file->private_data; if (!ret) seq->private = PDE(inode)->data; return ret; } static ssize_t cciss_proc_write(struct file *file, const char __user *buf, size_t length, loff_t *ppos) { int err; char *buffer; #ifndef CONFIG_CISS_SCSI_TAPE return -EINVAL; #endif if (!buf || length > PAGE_SIZE - 1) return -EINVAL; buffer = (char *)__get_free_page(GFP_KERNEL); if (!buffer) return -ENOMEM; err = -EFAULT; if (copy_from_user(buffer, buf, length)) goto out; buffer[length] = '\0'; #ifdef CONFIG_CISS_SCSI_TAPE if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) { struct seq_file *seq = file->private_data; ctlr_info_t *h = seq->private; err = cciss_engage_scsi(h); if (err == 0) err = length; } else #endif /* CONFIG_CISS_SCSI_TAPE */ err = -EINVAL; /* might be nice to have "disengage" too, but it's not safely possible. (only 1 module use count, lock issues.) */ out: free_page((unsigned long)buffer); return err; } static const struct file_operations cciss_proc_fops = { .owner = THIS_MODULE, .open = cciss_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, .write = cciss_proc_write, }; static void __devinit cciss_procinit(ctlr_info_t *h) { struct proc_dir_entry *pde; if (proc_cciss == NULL) proc_cciss = proc_mkdir("driver/cciss", NULL); if (!proc_cciss) return; pde = proc_create_data(h->devname, S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH, proc_cciss, &cciss_proc_fops, h); } #endif /* CONFIG_PROC_FS */ #define MAX_PRODUCT_NAME_LEN 19 #define to_hba(n) container_of(n, struct ctlr_info, dev) #define to_drv(n) container_of(n, drive_info_struct, dev) /* List of controllers which cannot be hard reset on kexec with reset_devices */ static u32 unresettable_controller[] = { 0x324a103C, /* Smart Array P712m */ 0x324b103C, /* SmartArray P711m */ 0x3223103C, /* Smart Array P800 */ 0x3234103C, /* Smart Array P400 */ 0x3235103C, /* Smart Array P400i */ 0x3211103C, /* Smart Array E200i */ 0x3212103C, /* Smart Array E200 */ 0x3213103C, /* Smart Array E200i */ 0x3214103C, /* Smart Array E200i */ 0x3215103C, /* Smart Array E200i */ 0x3237103C, /* Smart Array E500 */ 0x323D103C, /* Smart Array P700m */ 0x409C0E11, /* Smart Array 6400 */ 0x409D0E11, /* Smart Array 6400 EM */ }; /* List of controllers which cannot even be soft reset */ static u32 soft_unresettable_controller[] = { 0x409C0E11, /* Smart Array 6400 */ 0x409D0E11, /* Smart Array 6400 EM */ }; static int ctlr_is_hard_resettable(u32 board_id) { int i; for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++) if (unresettable_controller[i] == board_id) return 0; return 1; } static int ctlr_is_soft_resettable(u32 board_id) { int i; for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++) if (soft_unresettable_controller[i] == board_id) return 0; return 1; } static int ctlr_is_resettable(u32 board_id) { return ctlr_is_hard_resettable(board_id) || ctlr_is_soft_resettable(board_id); } static ssize_t host_show_resettable(struct device *dev, struct device_attribute *attr, char *buf) { struct ctlr_info *h = to_hba(dev); return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id)); } static DEVICE_ATTR(resettable, S_IRUGO, host_show_resettable, NULL); static ssize_t host_store_rescan(struct 
device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ctlr_info *h = to_hba(dev); add_to_scan_list(h); wake_up_process(cciss_scan_thread); wait_for_completion_interruptible(&h->scan_wait); return count; } static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); static ssize_t dev_show_unique_id(struct device *dev, struct device_attribute *attr, char *buf) { drive_info_struct *drv = to_drv(dev); struct ctlr_info *h = to_hba(drv->dev.parent); __u8 sn[16]; unsigned long flags; int ret = 0; spin_lock_irqsave(&h->lock, flags); if (h->busy_configuring) ret = -EBUSY; else memcpy(sn, drv->serial_no, sizeof(sn)); spin_unlock_irqrestore(&h->lock, flags); if (ret) return ret; else return snprintf(buf, 16 * 2 + 2, "%02X%02X%02X%02X%02X%02X%02X%02X" "%02X%02X%02X%02X%02X%02X%02X%02X\n", sn[0], sn[1], sn[2], sn[3], sn[4], sn[5], sn[6], sn[7], sn[8], sn[9], sn[10], sn[11], sn[12], sn[13], sn[14], sn[15]); } static DEVICE_ATTR(unique_id, S_IRUGO, dev_show_unique_id, NULL); static ssize_t dev_show_vendor(struct device *dev, struct device_attribute *attr, char *buf) { drive_info_struct *drv = to_drv(dev); struct ctlr_info *h = to_hba(drv->dev.parent); char vendor[VENDOR_LEN + 1]; unsigned long flags; int ret = 0; spin_lock_irqsave(&h->lock, flags); if (h->busy_configuring) ret = -EBUSY; else memcpy(vendor, drv->vendor, VENDOR_LEN + 1); spin_unlock_irqrestore(&h->lock, flags); if (ret) return ret; else return snprintf(buf, sizeof(vendor) + 1, "%s\n", drv->vendor); } static DEVICE_ATTR(vendor, S_IRUGO, dev_show_vendor, NULL); static ssize_t dev_show_model(struct device *dev, struct device_attribute *attr, char *buf) { drive_info_struct *drv = to_drv(dev); struct ctlr_info *h = to_hba(drv->dev.parent); char model[MODEL_LEN + 1]; unsigned long flags; int ret = 0; spin_lock_irqsave(&h->lock, flags); if (h->busy_configuring) ret = -EBUSY; else memcpy(model, drv->model, MODEL_LEN + 1); spin_unlock_irqrestore(&h->lock, flags); if (ret) return ret; else return snprintf(buf, sizeof(model) + 1, "%s\n", drv->model); } static DEVICE_ATTR(model, S_IRUGO, dev_show_model, NULL); static ssize_t dev_show_rev(struct device *dev, struct device_attribute *attr, char *buf) { drive_info_struct *drv = to_drv(dev); struct ctlr_info *h = to_hba(drv->dev.parent); char rev[REV_LEN + 1]; unsigned long flags; int ret = 0; spin_lock_irqsave(&h->lock, flags); if (h->busy_configuring) ret = -EBUSY; else memcpy(rev, drv->rev, REV_LEN + 1); spin_unlock_irqrestore(&h->lock, flags); if (ret) return ret; else return snprintf(buf, sizeof(rev) + 1, "%s\n", drv->rev); } static DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL); static ssize_t cciss_show_lunid(struct device *dev, struct device_attribute *attr, char *buf) { drive_info_struct *drv = to_drv(dev); struct ctlr_info *h = to_hba(drv->dev.parent); unsigned long flags; unsigned char lunid[8]; spin_lock_irqsave(&h->lock, flags); if (h->busy_configuring) { spin_unlock_irqrestore(&h->lock, flags); return -EBUSY; } if (!drv->heads) { spin_unlock_irqrestore(&h->lock, flags); return -ENOTTY; } memcpy(lunid, drv->LunID, sizeof(lunid)); spin_unlock_irqrestore(&h->lock, flags); return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", lunid[0], lunid[1], lunid[2], lunid[3], lunid[4], lunid[5], lunid[6], lunid[7]); } static DEVICE_ATTR(lunid, S_IRUGO, cciss_show_lunid, NULL); static ssize_t cciss_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf) { drive_info_struct *drv = to_drv(dev); struct ctlr_info *h = to_hba(drv->dev.parent); int 
raid; unsigned long flags; spin_lock_irqsave(&h->lock, flags); if (h->busy_configuring) { spin_unlock_irqrestore(&h->lock, flags); return -EBUSY; } raid = drv->raid_level; spin_unlock_irqrestore(&h->lock, flags); if (raid < 0 || raid > RAID_UNKNOWN) raid = RAID_UNKNOWN; return snprintf(buf, strlen(raid_label[raid]) + 7, "RAID %s\n", raid_label[raid]); } static DEVICE_ATTR(raid_level, S_IRUGO, cciss_show_raid_level, NULL); static ssize_t cciss_show_usage_count(struct device *dev, struct device_attribute *attr, char *buf) { drive_info_struct *drv = to_drv(dev); struct ctlr_info *h = to_hba(drv->dev.parent); unsigned long flags; int count; spin_lock_irqsave(&h->lock, flags); if (h->busy_configuring) { spin_unlock_irqrestore(&h->lock, flags); return -EBUSY; } count = drv->usage_count; spin_unlock_irqrestore(&h->lock, flags); return snprintf(buf, 20, "%d\n", count); } static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL); static struct attribute *cciss_host_attrs[] = { &dev_attr_rescan.attr, &dev_attr_resettable.attr, NULL }; static struct attribute_group cciss_host_attr_group = { .attrs = cciss_host_attrs, }; static const struct attribute_group *cciss_host_attr_groups[] = { &cciss_host_attr_group, NULL }; static struct device_type cciss_host_type = { .name = "cciss_host", .groups = cciss_host_attr_groups, .release = cciss_hba_release, }; static struct attribute *cciss_dev_attrs[] = { &dev_attr_unique_id.attr, &dev_attr_model.attr, &dev_attr_vendor.attr, &dev_attr_rev.attr, &dev_attr_lunid.attr, &dev_attr_raid_level.attr, &dev_attr_usage_count.attr, NULL }; static struct attribute_group cciss_dev_attr_group = { .attrs = cciss_dev_attrs, }; static const struct attribute_group *cciss_dev_attr_groups[] = { &cciss_dev_attr_group, NULL }; static struct device_type cciss_dev_type = { .name = "cciss_device", .groups = cciss_dev_attr_groups, .release = cciss_device_release, }; static struct bus_type cciss_bus_type = { .name = "cciss", }; /* * cciss_hba_release is called when the reference count * of h->dev goes to zero. */ static void cciss_hba_release(struct device *dev) { /* * nothing to do, but need this to avoid a warning * about not having a release handler from lib/kref.c. */ } /* * Initialize sysfs entry for each controller. This sets up and registers * the 'cciss#' directory for each individual controller under * /sys/bus/pci/devices/<dev>/. */ static int cciss_create_hba_sysfs_entry(struct ctlr_info *h) { device_initialize(&h->dev); h->dev.type = &cciss_host_type; h->dev.bus = &cciss_bus_type; dev_set_name(&h->dev, "%s", h->devname); h->dev.parent = &h->pdev->dev; return device_add(&h->dev); } /* * Remove sysfs entries for an hba. */ static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h) { device_del(&h->dev); put_device(&h->dev); /* final put. */ } /* cciss_device_release is called when the reference count * of h->drv[x]dev goes to zero. */ static void cciss_device_release(struct device *dev) { drive_info_struct *drv = to_drv(dev); kfree(drv); } /* * Initialize sysfs for each logical drive. This sets up and registers * the 'c#d#' directory for each individual logical drive under * /sys/bus/pci/devices/<dev/ccis#/. We also create a link from * /sys/block/cciss!c#d# to this entry. 
*/ static long cciss_create_ld_sysfs_entry(struct ctlr_info *h, int drv_index) { struct device *dev; if (h->drv[drv_index]->device_initialized) return 0; dev = &h->drv[drv_index]->dev; device_initialize(dev); dev->type = &cciss_dev_type; dev->bus = &cciss_bus_type; dev_set_name(dev, "c%dd%d", h->ctlr, drv_index); dev->parent = &h->dev; h->drv[drv_index]->device_initialized = 1; return device_add(dev); } /* * Remove sysfs entries for a logical drive. */ static void cciss_destroy_ld_sysfs_entry(struct ctlr_info *h, int drv_index, int ctlr_exiting) { struct device *dev = &h->drv[drv_index]->dev; /* special case for c*d0, we only destroy it on controller exit */ if (drv_index == 0 && !ctlr_exiting) return; device_del(dev); put_device(dev); /* the "final" put. */ h->drv[drv_index] = NULL; } /* * For operations that cannot sleep, a command block is allocated at init, * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track * which ones are free or in use. */ static CommandList_struct *cmd_alloc(ctlr_info_t *h) { CommandList_struct *c; int i; u64bit temp64; dma_addr_t cmd_dma_handle, err_dma_handle; do { i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds); if (i == h->nr_cmds) return NULL; } while (test_and_set_bit(i & (BITS_PER_LONG - 1), h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0); c = h->cmd_pool + i; memset(c, 0, sizeof(CommandList_struct)); cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(CommandList_struct); c->err_info = h->errinfo_pool + i; memset(c->err_info, 0, sizeof(ErrorInfo_struct)); err_dma_handle = h->errinfo_pool_dhandle + i * sizeof(ErrorInfo_struct); h->nr_allocs++; c->cmdindex = i; INIT_LIST_HEAD(&c->list); c->busaddr = (__u32) cmd_dma_handle; temp64.val = (__u64) err_dma_handle; c->ErrDesc.Addr.lower = temp64.val32.lower; c->ErrDesc.Addr.upper = temp64.val32.upper; c->ErrDesc.Len = sizeof(ErrorInfo_struct); c->ctlr = h->ctlr; return c; } /* allocate a command using pci_alloc_consistent, used for ioctls, * etc., not for the main i/o path. 
*/ static CommandList_struct *cmd_special_alloc(ctlr_info_t *h) { CommandList_struct *c; u64bit temp64; dma_addr_t cmd_dma_handle, err_dma_handle; c = (CommandList_struct *) pci_alloc_consistent(h->pdev, sizeof(CommandList_struct), &cmd_dma_handle); if (c == NULL) return NULL; memset(c, 0, sizeof(CommandList_struct)); c->cmdindex = -1; c->err_info = (ErrorInfo_struct *) pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct), &err_dma_handle); if (c->err_info == NULL) { pci_free_consistent(h->pdev, sizeof(CommandList_struct), c, cmd_dma_handle); return NULL; } memset(c->err_info, 0, sizeof(ErrorInfo_struct)); INIT_LIST_HEAD(&c->list); c->busaddr = (__u32) cmd_dma_handle; temp64.val = (__u64) err_dma_handle; c->ErrDesc.Addr.lower = temp64.val32.lower; c->ErrDesc.Addr.upper = temp64.val32.upper; c->ErrDesc.Len = sizeof(ErrorInfo_struct); c->ctlr = h->ctlr; return c; } static void cmd_free(ctlr_info_t *h, CommandList_struct *c) { int i; i = c - h->cmd_pool; clear_bit(i & (BITS_PER_LONG - 1), h->cmd_pool_bits + (i / BITS_PER_LONG)); h->nr_frees++; } static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c) { u64bit temp64; temp64.val32.lower = c->ErrDesc.Addr.lower; temp64.val32.upper = c->ErrDesc.Addr.upper; pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct), c->err_info, (dma_addr_t) temp64.val); pci_free_consistent(h->pdev, sizeof(CommandList_struct), c, (dma_addr_t) cciss_tag_discard_error_bits(h, (u32) c->busaddr)); } static inline ctlr_info_t *get_host(struct gendisk *disk) { return disk->queue->queuedata; } static inline drive_info_struct *get_drv(struct gendisk *disk) { return disk->private_data; } /* * Open. Make sure the device is really there. */ static int cciss_open(struct block_device *bdev, fmode_t mode) { ctlr_info_t *h = get_host(bdev->bd_disk); drive_info_struct *drv = get_drv(bdev->bd_disk); dev_dbg(&h->pdev->dev, "cciss_open %s\n", bdev->bd_disk->disk_name); if (drv->busy_configuring) return -EBUSY; /* * Root is allowed to open raw volume zero even if it's not configured * so array config can still work. Root is also allowed to open any * volume that has a LUN ID, so it can issue IOCTL to reread the * disk information. I don't think I really like this * but I'm already using way to many device nodes to claim another one * for "raw controller". */ if (drv->heads == 0) { if (MINOR(bdev->bd_dev) != 0) { /* not node 0? */ /* if not node 0 make sure it is a partition = 0 */ if (MINOR(bdev->bd_dev) & 0x0f) { return -ENXIO; /* if it is, make sure we have a LUN ID */ } else if (memcmp(drv->LunID, CTLR_LUNID, sizeof(drv->LunID))) { return -ENXIO; } } if (!capable(CAP_SYS_ADMIN)) return -EPERM; } drv->usage_count++; h->usage_count++; return 0; } static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode) { int ret; mutex_lock(&cciss_mutex); ret = cciss_open(bdev, mode); mutex_unlock(&cciss_mutex); return ret; } /* * Close. Sync first. 
*/ static int cciss_release(struct gendisk *disk, fmode_t mode) { ctlr_info_t *h; drive_info_struct *drv; mutex_lock(&cciss_mutex); h = get_host(disk); drv = get_drv(disk); dev_dbg(&h->pdev->dev, "cciss_release %s\n", disk->disk_name); drv->usage_count--; h->usage_count--; mutex_unlock(&cciss_mutex); return 0; } static int do_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, unsigned long arg) { int ret; mutex_lock(&cciss_mutex); ret = cciss_ioctl(bdev, mode, cmd, arg); mutex_unlock(&cciss_mutex); return ret; } #ifdef CONFIG_COMPAT static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, unsigned cmd, unsigned long arg); static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode, unsigned cmd, unsigned long arg); static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, unsigned long arg) { switch (cmd) { case CCISS_GETPCIINFO: case CCISS_GETINTINFO: case CCISS_SETINTINFO: case CCISS_GETNODENAME: case CCISS_SETNODENAME: case CCISS_GETHEARTBEAT: case CCISS_GETBUSTYPES: case CCISS_GETFIRMVER: case CCISS_GETDRIVVER: case CCISS_REVALIDVOLS: case CCISS_DEREGDISK: case CCISS_REGNEWDISK: case CCISS_REGNEWD: case CCISS_RESCANDISK: case CCISS_GETLUNINFO: return do_ioctl(bdev, mode, cmd, arg); case CCISS_PASSTHRU32: return cciss_ioctl32_passthru(bdev, mode, cmd, arg); case CCISS_BIG_PASSTHRU32: return cciss_ioctl32_big_passthru(bdev, mode, cmd, arg); default: return -ENOIOCTLCMD; } } static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, unsigned cmd, unsigned long arg) { IOCTL32_Command_struct __user *arg32 = (IOCTL32_Command_struct __user *) arg; IOCTL_Command_struct arg64; IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); int err; u32 cp; err = 0; err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info)); err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request)); err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info)); err |= get_user(arg64.buf_size, &arg32->buf_size); err |= get_user(cp, &arg32->buf); arg64.buf = compat_ptr(cp); err |= copy_to_user(p, &arg64, sizeof(arg64)); if (err) return -EFAULT; err = do_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p); if (err) return err; err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info)); if (err) return -EFAULT; return err; } static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode, unsigned cmd, unsigned long arg) { BIG_IOCTL32_Command_struct __user *arg32 = (BIG_IOCTL32_Command_struct __user *) arg; BIG_IOCTL_Command_struct arg64; BIG_IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64)); int err; u32 cp; memset(&arg64, 0, sizeof(arg64)); err = 0; err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info)); err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request)); err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info)); err |= get_user(arg64.buf_size, &arg32->buf_size); err |= get_user(arg64.malloc_size, &arg32->malloc_size); err |= get_user(cp, &arg32->buf); arg64.buf = compat_ptr(cp); err |= copy_to_user(p, &arg64, sizeof(arg64)); if (err) return -EFAULT; err = do_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p); if (err) return err; err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info)); if (err) return -EFAULT; return err; } #endif static int cciss_getgeo(struct block_device *bdev, 
struct hd_geometry *geo) { drive_info_struct *drv = get_drv(bdev->bd_disk); if (!drv->cylinders) return -ENXIO; geo->heads = drv->heads; geo->sectors = drv->sectors; geo->cylinders = drv->cylinders; return 0; } static void check_ioctl_unit_attention(ctlr_info_t *h, CommandList_struct *c) { if (c->err_info->CommandStatus == CMD_TARGET_STATUS && c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) (void)check_for_unit_attention(h, c); } static int cciss_getpciinfo(ctlr_info_t *h, void __user *argp) { cciss_pci_info_struct pciinfo; if (!argp) return -EINVAL; pciinfo.domain = pci_domain_nr(h->pdev->bus); pciinfo.bus = h->pdev->bus->number; pciinfo.dev_fn = h->pdev->devfn; pciinfo.board_id = h->board_id; if (copy_to_user(argp, &pciinfo, sizeof(cciss_pci_info_struct))) return -EFAULT; return 0; } static int cciss_getintinfo(ctlr_info_t *h, void __user *argp) { cciss_coalint_struct intinfo; if (!argp) return -EINVAL; intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay); intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount); if (copy_to_user (argp, &intinfo, sizeof(cciss_coalint_struct))) return -EFAULT; return 0; } static int cciss_setintinfo(ctlr_info_t *h, void __user *argp) { cciss_coalint_struct intinfo; unsigned long flags; int i; if (!argp) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (copy_from_user(&intinfo, argp, sizeof(intinfo))) return -EFAULT; if ((intinfo.delay == 0) && (intinfo.count == 0)) return -EINVAL; spin_lock_irqsave(&h->lock, flags); /* Update the field, and then ring the doorbell */ writel(intinfo.delay, &(h->cfgtable->HostWrite.CoalIntDelay)); writel(intinfo.count, &(h->cfgtable->HostWrite.CoalIntCount)); writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) { if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) break; udelay(1000); /* delay and try again */ } spin_unlock_irqrestore(&h->lock, flags); if (i >= MAX_IOCTL_CONFIG_WAIT) return -EAGAIN; return 0; } static int cciss_getnodename(ctlr_info_t *h, void __user *argp) { NodeName_type NodeName; int i; if (!argp) return -EINVAL; for (i = 0; i < 16; i++) NodeName[i] = readb(&h->cfgtable->ServerName[i]); if (copy_to_user(argp, NodeName, sizeof(NodeName_type))) return -EFAULT; return 0; } static int cciss_setnodename(ctlr_info_t *h, void __user *argp) { NodeName_type NodeName; unsigned long flags; int i; if (!argp) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (copy_from_user(NodeName, argp, sizeof(NodeName_type))) return -EFAULT; spin_lock_irqsave(&h->lock, flags); /* Update the field, and then ring the doorbell */ for (i = 0; i < 16; i++) writeb(NodeName[i], &h->cfgtable->ServerName[i]); writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) { if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) break; udelay(1000); /* delay and try again */ } spin_unlock_irqrestore(&h->lock, flags); if (i >= MAX_IOCTL_CONFIG_WAIT) return -EAGAIN; return 0; } static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp) { Heartbeat_type heartbeat; if (!argp) return -EINVAL; heartbeat = readl(&h->cfgtable->HeartBeat); if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type))) return -EFAULT; return 0; } static int cciss_getbustypes(ctlr_info_t *h, void __user *argp) { BusTypes_type BusTypes; if (!argp) return -EINVAL; BusTypes = readl(&h->cfgtable->BusTypes); if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type))) return -EFAULT; return 0; } static int cciss_getfirmver(ctlr_info_t *h, void __user 
*argp) { FirmwareVer_type firmware; if (!argp) return -EINVAL; memcpy(firmware, h->firm_ver, 4); if (copy_to_user (argp, firmware, sizeof(FirmwareVer_type))) return -EFAULT; return 0; } static int cciss_getdrivver(ctlr_info_t *h, void __user *argp) { DriverVer_type DriverVer = DRIVER_VERSION; if (!argp) return -EINVAL; if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type))) return -EFAULT; return 0; } static int cciss_getluninfo(ctlr_info_t *h, struct gendisk *disk, void __user *argp) { LogvolInfo_struct luninfo; drive_info_struct *drv = get_drv(disk); if (!argp) return -EINVAL; memcpy(&luninfo.LunID, drv->LunID, sizeof(luninfo.LunID)); luninfo.num_opens = drv->usage_count; luninfo.num_parts = 0; if (copy_to_user(argp, &luninfo, sizeof(LogvolInfo_struct))) return -EFAULT; return 0; } static int cciss_passthru(ctlr_info_t *h, void __user *argp) { IOCTL_Command_struct iocommand; CommandList_struct *c; char *buff = NULL; u64bit temp64; DECLARE_COMPLETION_ONSTACK(wait); if (!argp) return -EINVAL; if (!capable(CAP_SYS_RAWIO)) return -EPERM; if (copy_from_user (&iocommand, argp, sizeof(IOCTL_Command_struct))) return -EFAULT; if ((iocommand.buf_size < 1) && (iocommand.Request.Type.Direction != XFER_NONE)) { return -EINVAL; } if (iocommand.buf_size > 0) { buff = kmalloc(iocommand.buf_size, GFP_KERNEL); if (buff == NULL) return -EFAULT; } if (iocommand.Request.Type.Direction == XFER_WRITE) { /* Copy the data into the buffer we created */ if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) { kfree(buff); return -EFAULT; } } else { memset(buff, 0, iocommand.buf_size); } c = cmd_special_alloc(h); if (!c) { kfree(buff); return -ENOMEM; } /* Fill in the command type */ c->cmd_type = CMD_IOCTL_PEND; /* Fill in Command Header */ c->Header.ReplyQueue = 0; /* unused in simple mode */ if (iocommand.buf_size > 0) { /* buffer to fill */ c->Header.SGList = 1; c->Header.SGTotal = 1; } else { /* no buffers to fill */ c->Header.SGList = 0; c->Header.SGTotal = 0; } c->Header.LUN = iocommand.LUN_info; /* use the kernel address the cmd block for tag */ c->Header.Tag.lower = c->busaddr; /* Fill in Request block */ c->Request = iocommand.Request; /* Fill in the scatter gather information */ if (iocommand.buf_size > 0) { temp64.val = pci_map_single(h->pdev, buff, iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); c->SG[0].Addr.lower = temp64.val32.lower; c->SG[0].Addr.upper = temp64.val32.upper; c->SG[0].Len = iocommand.buf_size; c->SG[0].Ext = 0; /* we are not chaining */ } c->waiting = &wait; enqueue_cmd_and_start_io(h, c); wait_for_completion(&wait); /* unlock the buffers from DMA */ temp64.val32.lower = c->SG[0].Addr.lower; temp64.val32.upper = c->SG[0].Addr.upper; pci_unmap_single(h->pdev, (dma_addr_t) temp64.val, iocommand.buf_size, PCI_DMA_BIDIRECTIONAL); check_ioctl_unit_attention(h, c); /* Copy the error information out */ iocommand.error_info = *(c->err_info); if (copy_to_user(argp, &iocommand, sizeof(IOCTL_Command_struct))) { kfree(buff); cmd_special_free(h, c); return -EFAULT; } if (iocommand.Request.Type.Direction == XFER_READ) { /* Copy the data out of the buffer we created */ if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { kfree(buff); cmd_special_free(h, c); return -EFAULT; } } kfree(buff); cmd_special_free(h, c); return 0; } static int cciss_bigpassthru(ctlr_info_t *h, void __user *argp) { BIG_IOCTL_Command_struct *ioc; CommandList_struct *c; unsigned char **buff = NULL; int *buff_size = NULL; u64bit temp64; BYTE sg_used = 0; int status = 0; int i; DECLARE_COMPLETION_ONSTACK(wait); 
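	/*
	 * BIG_IOCTL passthrough: the user buffer is split into up to
	 * MAXSGENTRIES kernel buffers of at most ioc->malloc_size bytes
	 * each; buff[] and buff_size[] below hold those per-SG chunks.
	 */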
__u32 left; __u32 sz; BYTE __user *data_ptr; if (!argp) return -EINVAL; if (!capable(CAP_SYS_RAWIO)) return -EPERM; ioc = kmalloc(sizeof(*ioc), GFP_KERNEL); if (!ioc) { status = -ENOMEM; goto cleanup1; } if (copy_from_user(ioc, argp, sizeof(*ioc))) { status = -EFAULT; goto cleanup1; } if ((ioc->buf_size < 1) && (ioc->Request.Type.Direction != XFER_NONE)) { status = -EINVAL; goto cleanup1; } /* Check kmalloc limits using all SGs */ if (ioc->malloc_size > MAX_KMALLOC_SIZE) { status = -EINVAL; goto cleanup1; } if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) { status = -EINVAL; goto cleanup1; } buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL); if (!buff) { status = -ENOMEM; goto cleanup1; } buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL); if (!buff_size) { status = -ENOMEM; goto cleanup1; } left = ioc->buf_size; data_ptr = ioc->buf; while (left) { sz = (left > ioc->malloc_size) ? ioc->malloc_size : left; buff_size[sg_used] = sz; buff[sg_used] = kmalloc(sz, GFP_KERNEL); if (buff[sg_used] == NULL) { status = -ENOMEM; goto cleanup1; } if (ioc->Request.Type.Direction == XFER_WRITE) { if (copy_from_user(buff[sg_used], data_ptr, sz)) { status = -EFAULT; goto cleanup1; } } else { memset(buff[sg_used], 0, sz); } left -= sz; data_ptr += sz; sg_used++; } c = cmd_special_alloc(h); if (!c) { status = -ENOMEM; goto cleanup1; } c->cmd_type = CMD_IOCTL_PEND; c->Header.ReplyQueue = 0; c->Header.SGList = sg_used; c->Header.SGTotal = sg_used; c->Header.LUN = ioc->LUN_info; c->Header.Tag.lower = c->busaddr; c->Request = ioc->Request; for (i = 0; i < sg_used; i++) { temp64.val = pci_map_single(h->pdev, buff[i], buff_size[i], PCI_DMA_BIDIRECTIONAL); c->SG[i].Addr.lower = temp64.val32.lower; c->SG[i].Addr.upper = temp64.val32.upper; c->SG[i].Len = buff_size[i]; c->SG[i].Ext = 0; /* we are not chaining */ } c->waiting = &wait; enqueue_cmd_and_start_io(h, c); wait_for_completion(&wait); /* unlock the buffers from DMA */ for (i = 0; i < sg_used; i++) { temp64.val32.lower = c->SG[i].Addr.lower; temp64.val32.upper = c->SG[i].Addr.upper; pci_unmap_single(h->pdev, (dma_addr_t) temp64.val, buff_size[i], PCI_DMA_BIDIRECTIONAL); } check_ioctl_unit_attention(h, c); /* Copy the error information out */ ioc->error_info = *(c->err_info); if (copy_to_user(argp, ioc, sizeof(*ioc))) { cmd_special_free(h, c); status = -EFAULT; goto cleanup1; } if (ioc->Request.Type.Direction == XFER_READ) { /* Copy the data out of the buffer we created */ BYTE __user *ptr = ioc->buf; for (i = 0; i < sg_used; i++) { if (copy_to_user(ptr, buff[i], buff_size[i])) { cmd_special_free(h, c); status = -EFAULT; goto cleanup1; } ptr += buff_size[i]; } } cmd_special_free(h, c); status = 0; cleanup1: if (buff) { for (i = 0; i < sg_used; i++) kfree(buff[i]); kfree(buff); } kfree(buff_size); kfree(ioc); return status; } static int cciss_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct gendisk *disk = bdev->bd_disk; ctlr_info_t *h = get_host(disk); void __user *argp = (void __user *)arg; dev_dbg(&h->pdev->dev, "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg); switch (cmd) { case CCISS_GETPCIINFO: return cciss_getpciinfo(h, argp); case CCISS_GETINTINFO: return cciss_getintinfo(h, argp); case CCISS_SETINTINFO: return cciss_setintinfo(h, argp); case CCISS_GETNODENAME: return cciss_getnodename(h, argp); case CCISS_SETNODENAME: return cciss_setnodename(h, argp); case CCISS_GETHEARTBEAT: return cciss_getheartbeat(h, argp); case CCISS_GETBUSTYPES: return cciss_getbustypes(h, argp); case 
CCISS_GETFIRMVER: return cciss_getfirmver(h, argp); case CCISS_GETDRIVVER: return cciss_getdrivver(h, argp); case CCISS_DEREGDISK: case CCISS_REGNEWD: case CCISS_REVALIDVOLS: return rebuild_lun_table(h, 0, 1); case CCISS_GETLUNINFO: return cciss_getluninfo(h, disk, argp); case CCISS_PASSTHRU: return cciss_passthru(h, argp); case CCISS_BIG_PASSTHRU: return cciss_bigpassthru(h, argp); /* scsi_cmd_ioctl handles these, below, though some are not */ /* very meaningful for cciss. SG_IO is the main one people want. */ case SG_GET_VERSION_NUM: case SG_SET_TIMEOUT: case SG_GET_TIMEOUT: case SG_GET_RESERVED_SIZE: case SG_SET_RESERVED_SIZE: case SG_EMULATED_HOST: case SG_IO: case SCSI_IOCTL_SEND_COMMAND: return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp); /* scsi_cmd_ioctl would normally handle these, below, but */ /* they aren't a good fit for cciss, as CD-ROMs are */ /* not supported, and we don't have any bus/target/lun */ /* which we present to the kernel. */ case CDROM_SEND_PACKET: case CDROMCLOSETRAY: case CDROMEJECT: case SCSI_IOCTL_GET_IDLUN: case SCSI_IOCTL_GET_BUS_NUMBER: default: return -ENOTTY; } } static void cciss_check_queues(ctlr_info_t *h) { int start_queue = h->next_to_run; int i; /* check to see if we have maxed out the number of commands that can * be placed on the queue. If so then exit. We do this check here * in case the interrupt we serviced was from an ioctl and did not * free any new commands. */ if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) return; /* We have room on the queue for more commands. Now we need to queue * them up. We will also keep track of the next queue to run so * that every queue gets a chance to be started first. */ for (i = 0; i < h->highest_lun + 1; i++) { int curr_queue = (start_queue + i) % (h->highest_lun + 1); /* make sure the disk has been added and the drive is real * because this can be called from the middle of init_one. */ if (!h->drv[curr_queue]) continue; if (!(h->drv[curr_queue]->queue) || !(h->drv[curr_queue]->heads)) continue; blk_start_queue(h->gendisk[curr_queue]->queue); /* check to see if we have maxed out the number of commands * that can be placed on the queue. */ if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) { if (curr_queue == start_queue) { h->next_to_run = (start_queue + 1) % (h->highest_lun + 1); break; } else { h->next_to_run = curr_queue; break; } } } } static void cciss_softirq_done(struct request *rq) { CommandList_struct *c = rq->completion_data; ctlr_info_t *h = hba[c->ctlr]; SGDescriptor_struct *curr_sg = c->SG; u64bit temp64; unsigned long flags; int i, ddir; int sg_index = 0; if (c->Request.Type.Direction == XFER_READ) ddir = PCI_DMA_FROMDEVICE; else ddir = PCI_DMA_TODEVICE; /* command did not need to be retried */ /* unmap the DMA mapping for all the scatter gather elements */ for (i = 0; i < c->Header.SGList; i++) { if (curr_sg[sg_index].Ext == CCISS_SG_CHAIN) { cciss_unmap_sg_chain_block(h, c); /* Point to the next block */ curr_sg = h->cmd_sg_list[c->cmdindex]; sg_index = 0; } temp64.val32.lower = curr_sg[sg_index].Addr.lower; temp64.val32.upper = curr_sg[sg_index].Addr.upper; pci_unmap_page(h->pdev, temp64.val, curr_sg[sg_index].Len, ddir); ++sg_index; } dev_dbg(&h->pdev->dev, "Done with %p\n", rq); /* set the residual count for pc requests */ if (rq->cmd_type == REQ_TYPE_BLOCK_PC) rq->resid_len = c->err_info->ResidualCnt; blk_end_request_all(rq, (rq->errors == 0) ? 
0 : -EIO); spin_lock_irqsave(&h->lock, flags); cmd_free(h, c); cciss_check_queues(h); spin_unlock_irqrestore(&h->lock, flags); } static inline void log_unit_to_scsi3addr(ctlr_info_t *h, unsigned char scsi3addr[], uint32_t log_unit) { memcpy(scsi3addr, h->drv[log_unit]->LunID, sizeof(h->drv[log_unit]->LunID)); } /* This function gets the SCSI vendor, model, and revision of a logical drive * via the inquiry page 0. Model, vendor, and rev are set to empty strings if * they cannot be read. */ static void cciss_get_device_descr(ctlr_info_t *h, int logvol, char *vendor, char *model, char *rev) { int rc; InquiryData_struct *inq_buf; unsigned char scsi3addr[8]; *vendor = '\0'; *model = '\0'; *rev = '\0'; inq_buf = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL); if (!inq_buf) return; log_unit_to_scsi3addr(h, scsi3addr, logvol); rc = sendcmd_withirq(h, CISS_INQUIRY, inq_buf, sizeof(*inq_buf), 0, scsi3addr, TYPE_CMD); if (rc == IO_OK) { memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN); vendor[VENDOR_LEN] = '\0'; memcpy(model, &inq_buf->data_byte[16], MODEL_LEN); model[MODEL_LEN] = '\0'; memcpy(rev, &inq_buf->data_byte[32], REV_LEN); rev[REV_LEN] = '\0'; } kfree(inq_buf); return; } /* This function gets the serial number of a logical drive via * inquiry page 0x83. Serial no. is 16 bytes. If the serial * number cannot be had, for whatever reason, 16 bytes of 0xff * are returned instead. */ static void cciss_get_serial_no(ctlr_info_t *h, int logvol, unsigned char *serial_no, int buflen) { #define PAGE_83_INQ_BYTES 64 int rc; unsigned char *buf; unsigned char scsi3addr[8]; if (buflen > 16) buflen = 16; memset(serial_no, 0xff, buflen); buf = kzalloc(PAGE_83_INQ_BYTES, GFP_KERNEL); if (!buf) return; memset(serial_no, 0, buflen); log_unit_to_scsi3addr(h, scsi3addr, logvol); rc = sendcmd_withirq(h, CISS_INQUIRY, buf, PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD); if (rc == IO_OK) memcpy(serial_no, &buf[8], buflen); kfree(buf); return; } /* * cciss_add_disk sets up the block device queue for a logical drive */ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, int drv_index) { disk->queue = blk_init_queue(do_cciss_request, &h->lock); if (!disk->queue) goto init_queue_failure; sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index); disk->major = h->major; disk->first_minor = drv_index << NWD_SHIFT; disk->fops = &cciss_fops; if (cciss_create_ld_sysfs_entry(h, drv_index)) goto cleanup_queue; disk->private_data = h->drv[drv_index]; disk->driverfs_dev = &h->drv[drv_index]->dev; /* Set up queue information */ blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask); /* This is a hardware imposed limit. */ blk_queue_max_segments(disk->queue, h->maxsgentries); blk_queue_max_hw_sectors(disk->queue, h->cciss_max_sectors); blk_queue_softirq_done(disk->queue, cciss_softirq_done); disk->queue->queuedata = h; blk_queue_logical_block_size(disk->queue, h->drv[drv_index]->block_size); /* Make sure all queue data is written out before */ /* setting h->drv[drv_index]->queue, as setting this */ /* allows the interrupt handler to start the queue */ wmb(); h->drv[drv_index]->queue = disk->queue; add_disk(disk); return 0; cleanup_queue: blk_cleanup_queue(disk->queue); disk->queue = NULL; init_queue_failure: return -1; } /* This function will check the usage_count of the drive to be updated/added. 
* If the usage_count is zero and it is a heretofore unknown drive, or, * the drive's capacity, geometry, or serial number has changed, * then the drive information will be updated and the disk will be * re-registered with the kernel. If these conditions don't hold, * then it will be left alone for the next reboot. The exception to this * is disk 0 which will always be left registered with the kernel since it * is also the controller node. Any changes to disk 0 will show up on * the next reboot. */ static void cciss_update_drive_info(ctlr_info_t *h, int drv_index, int first_time, int via_ioctl) { struct gendisk *disk; InquiryData_struct *inq_buff = NULL; unsigned int block_size; sector_t total_size; unsigned long flags = 0; int ret = 0; drive_info_struct *drvinfo; /* Get information about the disk and modify the driver structure */ inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL); drvinfo = kzalloc(sizeof(*drvinfo), GFP_KERNEL); if (inq_buff == NULL || drvinfo == NULL) goto mem_msg; /* testing to see if 16-byte CDBs are already being used */ if (h->cciss_read == CCISS_READ_16) { cciss_read_capacity_16(h, drv_index, &total_size, &block_size); } else { cciss_read_capacity(h, drv_index, &total_size, &block_size); /* if read_capacity returns all F's this volume is >2TB */ /* in size so we switch to 16-byte CDB's for all */ /* read/write ops */ if (total_size == 0xFFFFFFFFULL) { cciss_read_capacity_16(h, drv_index, &total_size, &block_size); h->cciss_read = CCISS_READ_16; h->cciss_write = CCISS_WRITE_16; } else { h->cciss_read = CCISS_READ_10; h->cciss_write = CCISS_WRITE_10; } } cciss_geometry_inquiry(h, drv_index, total_size, block_size, inq_buff, drvinfo); drvinfo->block_size = block_size; drvinfo->nr_blocks = total_size + 1; cciss_get_device_descr(h, drv_index, drvinfo->vendor, drvinfo->model, drvinfo->rev); cciss_get_serial_no(h, drv_index, drvinfo->serial_no, sizeof(drvinfo->serial_no)); /* Save the lunid in case we deregister the disk, below. */ memcpy(drvinfo->LunID, h->drv[drv_index]->LunID, sizeof(drvinfo->LunID)); /* Is it the same disk we already know, and nothing's changed? */ if (h->drv[drv_index]->raid_level != -1 && ((memcmp(drvinfo->serial_no, h->drv[drv_index]->serial_no, 16) == 0) && drvinfo->block_size == h->drv[drv_index]->block_size && drvinfo->nr_blocks == h->drv[drv_index]->nr_blocks && drvinfo->heads == h->drv[drv_index]->heads && drvinfo->sectors == h->drv[drv_index]->sectors && drvinfo->cylinders == h->drv[drv_index]->cylinders)) /* The disk is unchanged, nothing to update */ goto freeret; /* If we get here it's not the same disk, or something's changed, * so we need to * deregister it, and re-register it, if it's not * in use. * If the disk already exists then deregister it before proceeding * (unless it's the first disk (for the controller node). */ if (h->drv[drv_index]->raid_level != -1 && drv_index != 0) { dev_warn(&h->pdev->dev, "disk %d has changed.\n", drv_index); spin_lock_irqsave(&h->lock, flags); h->drv[drv_index]->busy_configuring = 1; spin_unlock_irqrestore(&h->lock, flags); /* deregister_disk sets h->drv[drv_index]->queue = NULL * which keeps the interrupt handler from starting * the queue. */ ret = deregister_disk(h, drv_index, 0, via_ioctl); } /* If the disk is in use return */ if (ret) goto freeret; /* Save the new information from cciss_geometry_inquiry * and serial number inquiry. If the disk was deregistered * above, then h->drv[drv_index] will be NULL. 
 */
	if (h->drv[drv_index] == NULL) {
		drvinfo->device_initialized = 0;
		h->drv[drv_index] = drvinfo;
		drvinfo = NULL; /* so it won't be freed below. */
	} else {
		/* special case for cxd0 */
		h->drv[drv_index]->block_size = drvinfo->block_size;
		h->drv[drv_index]->nr_blocks = drvinfo->nr_blocks;
		h->drv[drv_index]->heads = drvinfo->heads;
		h->drv[drv_index]->sectors = drvinfo->sectors;
		h->drv[drv_index]->cylinders = drvinfo->cylinders;
		h->drv[drv_index]->raid_level = drvinfo->raid_level;
		memcpy(h->drv[drv_index]->serial_no, drvinfo->serial_no, 16);
		memcpy(h->drv[drv_index]->vendor, drvinfo->vendor,
			VENDOR_LEN + 1);
		memcpy(h->drv[drv_index]->model, drvinfo->model,
			MODEL_LEN + 1);
		memcpy(h->drv[drv_index]->rev, drvinfo->rev, REV_LEN + 1);
	}

	++h->num_luns;
	disk = h->gendisk[drv_index];
	set_capacity(disk, h->drv[drv_index]->nr_blocks);

	/* If it's not disk 0 (drv_index != 0)
	 * or if it was disk 0, but there was previously
	 * no actual corresponding configured logical drive
	 * (raid_level == -1) then we want to update the
	 * logical drive's information.
	 */
	if (drv_index || first_time) {
		if (cciss_add_disk(h, disk, drv_index) != 0) {
			cciss_free_gendisk(h, drv_index);
			cciss_free_drive_info(h, drv_index);
			dev_warn(&h->pdev->dev, "could not update disk %d\n",
				drv_index);
			--h->num_luns;
		}
	}

freeret:
	kfree(inq_buff);
	kfree(drvinfo);
	return;
mem_msg:
	dev_err(&h->pdev->dev, "out of memory\n");
	goto freeret;
}

/* This function will find the first index of the controller's drive array
 * that has a null drv pointer and allocate the drive info struct and
 * will return that index.  This is where new drives will be added.
 * If the index to be returned is greater than the highest_lun index for
 * the controller then highest_lun is set to this new index.
 * If there are no available indexes or if the allocation fails, then -1
 * is returned.
 * "controller_node" is used to know if this is a real
 * logical drive, or just the controller node, which determines if this
 * counts towards highest_lun.
 */
static int cciss_alloc_drive_info(ctlr_info_t *h, int controller_node)
{
	int i;
	drive_info_struct *drv;

	/* Search for an empty slot for our drive info */
	for (i = 0; i < CISS_MAX_LUN; i++) {

		/* if not cxd0 case, and it's occupied, skip it. */
		if (h->drv[i] && i != 0)
			continue;
		/*
		 * If it's cxd0 case, and drv is alloc'ed already, and a
		 * disk is configured there, skip it.
		 */
		if (i == 0 && h->drv[i] && h->drv[i]->raid_level != -1)
			continue;

		/*
		 * We've found an empty slot.  Update highest_lun
		 * provided this isn't just the fake cxd0 controller node.
		 */
		if (i > h->highest_lun && !controller_node)
			h->highest_lun = i;

		/* If adding a real disk at cxd0, and it's already alloc'ed */
		if (i == 0 && h->drv[i] != NULL)
			return i;

		/*
		 * Found an empty slot, not already alloc'ed.  Allocate it.
		 * Mark it with raid_level == -1, so we know it's new later on.
		 */
		drv = kzalloc(sizeof(*drv), GFP_KERNEL);
		if (!drv)
			return -1;
		drv->raid_level = -1; /* so we know it's new */
		h->drv[i] = drv;
		return i;
	}
	return -1;
}

static void cciss_free_drive_info(ctlr_info_t *h, int drv_index)
{
	kfree(h->drv[drv_index]);
	h->drv[drv_index] = NULL;
}

static void cciss_free_gendisk(ctlr_info_t *h, int drv_index)
{
	put_disk(h->gendisk[drv_index]);
	h->gendisk[drv_index] = NULL;
}

/* cciss_add_gendisk finds a free hba[]->drv structure
 * and allocates a gendisk if needed, and sets the lunid
 * in the drvinfo structure.  It returns the index into
 * the ->drv[] array, or -1 if none are free.
* is_controller_node indicates whether highest_lun should * count this disk, or if it's only being added to provide * a means to talk to the controller in case no logical * drives have yet been configured. */ static int cciss_add_gendisk(ctlr_info_t *h, unsigned char lunid[], int controller_node) { int drv_index; drv_index = cciss_alloc_drive_info(h, controller_node); if (drv_index == -1) return -1; /*Check if the gendisk needs to be allocated */ if (!h->gendisk[drv_index]) { h->gendisk[drv_index] = alloc_disk(1 << NWD_SHIFT); if (!h->gendisk[drv_index]) { dev_err(&h->pdev->dev, "could not allocate a new disk %d\n", drv_index); goto err_free_drive_info; } } memcpy(h->drv[drv_index]->LunID, lunid, sizeof(h->drv[drv_index]->LunID)); if (cciss_create_ld_sysfs_entry(h, drv_index)) goto err_free_disk; /* Don't need to mark this busy because nobody */ /* else knows about this disk yet to contend */ /* for access to it. */ h->drv[drv_index]->busy_configuring = 0; wmb(); return drv_index; err_free_disk: cciss_free_gendisk(h, drv_index); err_free_drive_info: cciss_free_drive_info(h, drv_index); return -1; } /* This is for the special case of a controller which * has no logical drives. In this case, we still need * to register a disk so the controller can be accessed * by the Array Config Utility. */ static void cciss_add_controller_node(ctlr_info_t *h) { struct gendisk *disk; int drv_index; if (h->gendisk[0] != NULL) /* already did this? Then bail. */ return; drv_index = cciss_add_gendisk(h, CTLR_LUNID, 1); if (drv_index == -1) goto error; h->drv[drv_index]->block_size = 512; h->drv[drv_index]->nr_blocks = 0; h->drv[drv_index]->heads = 0; h->drv[drv_index]->sectors = 0; h->drv[drv_index]->cylinders = 0; h->drv[drv_index]->raid_level = -1; memset(h->drv[drv_index]->serial_no, 0, 16); disk = h->gendisk[drv_index]; if (cciss_add_disk(h, disk, drv_index) == 0) return; cciss_free_gendisk(h, drv_index); cciss_free_drive_info(h, drv_index); error: dev_warn(&h->pdev->dev, "could not add disk 0.\n"); return; } /* This function will add and remove logical drives from the Logical * drive array of the controller and maintain persistency of ordering * so that mount points are preserved until the next reboot. This allows * for the removal of logical drives in the middle of the drive array * without a re-ordering of those drives. 
* INPUT * h = The controller to perform the operations on */ static int rebuild_lun_table(ctlr_info_t *h, int first_time, int via_ioctl) { int num_luns; ReportLunData_struct *ld_buff = NULL; int return_code; int listlength = 0; int i; int drv_found; int drv_index = 0; unsigned char lunid[8] = CTLR_LUNID; unsigned long flags; if (!capable(CAP_SYS_RAWIO)) return -EPERM; /* Set busy_configuring flag for this operation */ spin_lock_irqsave(&h->lock, flags); if (h->busy_configuring) { spin_unlock_irqrestore(&h->lock, flags); return -EBUSY; } h->busy_configuring = 1; spin_unlock_irqrestore(&h->lock, flags); ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL); if (ld_buff == NULL) goto mem_msg; return_code = sendcmd_withirq(h, CISS_REPORT_LOG, ld_buff, sizeof(ReportLunData_struct), 0, CTLR_LUNID, TYPE_CMD); if (return_code == IO_OK) listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength); else { /* reading number of logical volumes failed */ dev_warn(&h->pdev->dev, "report logical volume command failed\n"); listlength = 0; goto freeret; } num_luns = listlength / 8; /* 8 bytes per entry */ if (num_luns > CISS_MAX_LUN) { num_luns = CISS_MAX_LUN; dev_warn(&h->pdev->dev, "more luns configured" " on controller than can be handled by" " this driver.\n"); } if (num_luns == 0) cciss_add_controller_node(h); /* Compare controller drive array to driver's drive array * to see if any drives are missing on the controller due * to action of Array Config Utility (user deletes drive) * and deregister logical drives which have disappeared. */ for (i = 0; i <= h->highest_lun; i++) { int j; drv_found = 0; /* skip holes in the array from already deleted drives */ if (h->drv[i] == NULL) continue; for (j = 0; j < num_luns; j++) { memcpy(lunid, &ld_buff->LUN[j][0], sizeof(lunid)); if (memcmp(h->drv[i]->LunID, lunid, sizeof(lunid)) == 0) { drv_found = 1; break; } } if (!drv_found) { /* Deregister it from the OS, it's gone. */ spin_lock_irqsave(&h->lock, flags); h->drv[i]->busy_configuring = 1; spin_unlock_irqrestore(&h->lock, flags); return_code = deregister_disk(h, i, 1, via_ioctl); if (h->drv[i] != NULL) h->drv[i]->busy_configuring = 0; } } /* Compare controller drive array to driver's drive array. * Check for updates in the drive information and any new drives * on the controller due to ACU adding logical drives, or changing * a logical drive's size, etc. Reregister any new/changed drives */ for (i = 0; i < num_luns; i++) { int j; drv_found = 0; memcpy(lunid, &ld_buff->LUN[i][0], sizeof(lunid)); /* Find if the LUN is already in the drive array * of the driver. If so then update its info * if not in use. If it does not exist then find * the first free index and add it. */ for (j = 0; j <= h->highest_lun; j++) { if (h->drv[j] != NULL && memcmp(h->drv[j]->LunID, lunid, sizeof(h->drv[j]->LunID)) == 0) { drv_index = j; drv_found = 1; break; } } /* check if the drive was found already in the array */ if (!drv_found) { drv_index = cciss_add_gendisk(h, lunid, 0); if (drv_index == -1) goto freeret; } cciss_update_drive_info(h, drv_index, first_time, via_ioctl); } /* end for */ freeret: kfree(ld_buff); h->busy_configuring = 0; /* We return -1 here to tell the ACU that we have registered/updated * all of the drives that we can and to keep it from calling us * additional times. 
 */
	return -1;
mem_msg:
	dev_err(&h->pdev->dev, "out of memory\n");
	h->busy_configuring = 0;
	goto freeret;
}

static void cciss_clear_drive_info(drive_info_struct *drive_info)
{
	/* zero out the disk size info */
	drive_info->nr_blocks = 0;
	drive_info->block_size = 0;
	drive_info->heads = 0;
	drive_info->sectors = 0;
	drive_info->cylinders = 0;
	drive_info->raid_level = -1;
	memset(drive_info->serial_no, 0, sizeof(drive_info->serial_no));
	memset(drive_info->model, 0, sizeof(drive_info->model));
	memset(drive_info->rev, 0, sizeof(drive_info->rev));
	memset(drive_info->vendor, 0, sizeof(drive_info->vendor));
	/*
	 * don't clear the LUNID though, we need to remember which
	 * one this one is.
	 */
}

/* This function will deregister the disk and its queue from the
 * kernel.  It must be called with the controller lock held and the
 * drv structure's busy_configuring flag set.  Its parameters are:
 *
 * disk = This is the disk to be deregistered
 * drv = This is the drive_info_struct associated with the disk to be
 *      deregistered.  It contains information about the disk used
 *      by the driver.
 * clear_all = This flag determines whether or not the disk information
 *      is going to be completely cleared out and the highest_lun
 *      reset.  Sometimes we want to clear out information about
 *      the disk in preparation for re-adding it.  In this case
 *      the highest_lun should be left unchanged and the LunID
 *      should not be cleared.
 * via_ioctl
 *    This indicates whether we've reached this path via ioctl.
 *    This affects the maximum usage count allowed for c0d0 to be messed with.
 *    If this path is reached via ioctl(), then the max_usage_count will
 *    be 1, as the process calling ioctl() has got to have the device open.
 *    If we get here via sysfs, then the max usage count will be zero.
 */
static int deregister_disk(ctlr_info_t *h, int drv_index,
			   int clear_all, int via_ioctl)
{
	int i;
	struct gendisk *disk;
	drive_info_struct *drv;
	int recalculate_highest_lun;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	drv = h->drv[drv_index];
	disk = h->gendisk[drv_index];

	/* make sure logical volume is NOT in use */
	if (clear_all || (h->gendisk[0] == disk)) {
		if (drv->usage_count > via_ioctl)
			return -EBUSY;
	} else if (drv->usage_count > 0)
		return -EBUSY;

	recalculate_highest_lun = (drv == h->drv[h->highest_lun]);

	/* invalidate the devices and deregister the disk.  If it is disk
	 * zero do not deregister it but just zero out its values.  This
	 * allows us to delete disk zero but keep the controller registered.
	 */
	if (h->gendisk[0] != disk) {
		struct request_queue *q = disk->queue;
		if (disk->flags & GENHD_FL_UP) {
			cciss_destroy_ld_sysfs_entry(h, drv_index, 0);
			del_gendisk(disk);
		}
		if (q)
			blk_cleanup_queue(q);
		/* If clear_all is set then we are deleting the logical
		 * drive, not just refreshing its info.  For drives
		 * other than disk 0 we will call put_disk.  We do not
		 * do this for disk 0 as we need it to be able to
		 * configure the controller.
		 */
		if (clear_all) {
			/* This isn't pretty, but we need to find the
			 * disk in our array and NULL out the pointer.
			 * This is so that we will call alloc_disk if
			 * this index is used again later.
*/ for (i=0; i < CISS_MAX_LUN; i++){ if (h->gendisk[i] == disk) { h->gendisk[i] = NULL; break; } } put_disk(disk); } } else { set_capacity(disk, 0); cciss_clear_drive_info(drv); } --h->num_luns; /* if it was the last disk, find the new hightest lun */ if (clear_all && recalculate_highest_lun) { int newhighest = -1; for (i = 0; i <= h->highest_lun; i++) { /* if the disk has size > 0, it is available */ if (h->drv[i] && h->drv[i]->heads) newhighest = i; } h->highest_lun = newhighest; } return 0; } static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr, int cmd_type) { u64bit buff_dma_handle; int status = IO_OK; c->cmd_type = CMD_IOCTL_PEND; c->Header.ReplyQueue = 0; if (buff != NULL) { c->Header.SGList = 1; c->Header.SGTotal = 1; } else { c->Header.SGList = 0; c->Header.SGTotal = 0; } c->Header.Tag.lower = c->busaddr; memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); c->Request.Type.Type = cmd_type; if (cmd_type == TYPE_CMD) { switch (cmd) { case CISS_INQUIRY: /* are we trying to read a vital product page */ if (page_code != 0) { c->Request.CDB[1] = 0x01; c->Request.CDB[2] = page_code; } c->Request.CDBLen = 6; c->Request.Type.Attribute = ATTR_SIMPLE; c->Request.Type.Direction = XFER_READ; c->Request.Timeout = 0; c->Request.CDB[0] = CISS_INQUIRY; c->Request.CDB[4] = size & 0xFF; break; case CISS_REPORT_LOG: case CISS_REPORT_PHYS: /* Talking to controller so It's a physical command mode = 00 target = 0. Nothing to write. */ c->Request.CDBLen = 12; c->Request.Type.Attribute = ATTR_SIMPLE; c->Request.Type.Direction = XFER_READ; c->Request.Timeout = 0; c->Request.CDB[0] = cmd; c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ c->Request.CDB[7] = (size >> 16) & 0xFF; c->Request.CDB[8] = (size >> 8) & 0xFF; c->Request.CDB[9] = size & 0xFF; break; case CCISS_READ_CAPACITY: c->Request.CDBLen = 10; c->Request.Type.Attribute = ATTR_SIMPLE; c->Request.Type.Direction = XFER_READ; c->Request.Timeout = 0; c->Request.CDB[0] = cmd; break; case CCISS_READ_CAPACITY_16: c->Request.CDBLen = 16; c->Request.Type.Attribute = ATTR_SIMPLE; c->Request.Type.Direction = XFER_READ; c->Request.Timeout = 0; c->Request.CDB[0] = cmd; c->Request.CDB[1] = 0x10; c->Request.CDB[10] = (size >> 24) & 0xFF; c->Request.CDB[11] = (size >> 16) & 0xFF; c->Request.CDB[12] = (size >> 8) & 0xFF; c->Request.CDB[13] = size & 0xFF; c->Request.Timeout = 0; c->Request.CDB[0] = cmd; break; case CCISS_CACHE_FLUSH: c->Request.CDBLen = 12; c->Request.Type.Attribute = ATTR_SIMPLE; c->Request.Type.Direction = XFER_WRITE; c->Request.Timeout = 0; c->Request.CDB[0] = BMIC_WRITE; c->Request.CDB[6] = BMIC_CACHE_FLUSH; break; case TEST_UNIT_READY: c->Request.CDBLen = 6; c->Request.Type.Attribute = ATTR_SIMPLE; c->Request.Type.Direction = XFER_NONE; c->Request.Timeout = 0; break; default: dev_warn(&h->pdev->dev, "Unknown Command 0x%c\n", cmd); return IO_ERROR; } } else if (cmd_type == TYPE_MSG) { switch (cmd) { case CCISS_ABORT_MSG: c->Request.CDBLen = 12; c->Request.Type.Attribute = ATTR_SIMPLE; c->Request.Type.Direction = XFER_WRITE; c->Request.Timeout = 0; c->Request.CDB[0] = cmd; /* abort */ c->Request.CDB[1] = 0; /* abort a command */ /* buff contains the tag of the command to abort */ memcpy(&c->Request.CDB[4], buff, 8); break; case CCISS_RESET_MSG: c->Request.CDBLen = 16; c->Request.Type.Attribute = ATTR_SIMPLE; c->Request.Type.Direction = XFER_NONE; c->Request.Timeout = 0; memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); c->Request.CDB[0] = cmd; /* reset */ 
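		/* CDB[1] selects the reset scope; fill_cmd() defaults to a
		 * target reset and cciss_send_reset() overwrites it with the
		 * caller's reset_type after this returns. */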
c->Request.CDB[1] = CCISS_RESET_TYPE_TARGET; break; case CCISS_NOOP_MSG: c->Request.CDBLen = 1; c->Request.Type.Attribute = ATTR_SIMPLE; c->Request.Type.Direction = XFER_WRITE; c->Request.Timeout = 0; c->Request.CDB[0] = cmd; break; default: dev_warn(&h->pdev->dev, "unknown message type %d\n", cmd); return IO_ERROR; } } else { dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type); return IO_ERROR; } /* Fill in the scatter gather information */ if (size > 0) { buff_dma_handle.val = (__u64) pci_map_single(h->pdev, buff, size, PCI_DMA_BIDIRECTIONAL); c->SG[0].Addr.lower = buff_dma_handle.val32.lower; c->SG[0].Addr.upper = buff_dma_handle.val32.upper; c->SG[0].Len = size; c->SG[0].Ext = 0; /* we are not chaining */ } return status; } static int __devinit cciss_send_reset(ctlr_info_t *h, unsigned char *scsi3addr, u8 reset_type) { CommandList_struct *c; int return_status; c = cmd_alloc(h); if (!c) return -ENOMEM; return_status = fill_cmd(h, c, CCISS_RESET_MSG, NULL, 0, 0, CTLR_LUNID, TYPE_MSG); c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */ if (return_status != IO_OK) { cmd_special_free(h, c); return return_status; } c->waiting = NULL; enqueue_cmd_and_start_io(h, c); /* Don't wait for completion, the reset won't complete. Don't free * the command either. This is the last command we will send before * re-initializing everything, so it doesn't matter and won't leak. */ return 0; } static int check_target_status(ctlr_info_t *h, CommandList_struct *c) { switch (c->err_info->ScsiStatus) { case SAM_STAT_GOOD: return IO_OK; case SAM_STAT_CHECK_CONDITION: switch (0xf & c->err_info->SenseInfo[2]) { case 0: return IO_OK; /* no sense */ case 1: return IO_OK; /* recovered error */ default: if (check_for_unit_attention(h, c)) return IO_NEEDS_RETRY; dev_warn(&h->pdev->dev, "cmd 0x%02x " "check condition, sense key = 0x%02x\n", c->Request.CDB[0], c->err_info->SenseInfo[2]); } break; default: dev_warn(&h->pdev->dev, "cmd 0x%02x" "scsi status = 0x%02x\n", c->Request.CDB[0], c->err_info->ScsiStatus); break; } return IO_ERROR; } static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c) { int return_status = IO_OK; if (c->err_info->CommandStatus == CMD_SUCCESS) return IO_OK; switch (c->err_info->CommandStatus) { case CMD_TARGET_STATUS: return_status = check_target_status(h, c); break; case CMD_DATA_UNDERRUN: case CMD_DATA_OVERRUN: /* expected for inquiry and report lun commands */ break; case CMD_INVALID: dev_warn(&h->pdev->dev, "cmd 0x%02x is " "reported invalid\n", c->Request.CDB[0]); return_status = IO_ERROR; break; case CMD_PROTOCOL_ERR: dev_warn(&h->pdev->dev, "cmd 0x%02x has " "protocol error\n", c->Request.CDB[0]); return_status = IO_ERROR; break; case CMD_HARDWARE_ERR: dev_warn(&h->pdev->dev, "cmd 0x%02x had " " hardware error\n", c->Request.CDB[0]); return_status = IO_ERROR; break; case CMD_CONNECTION_LOST: dev_warn(&h->pdev->dev, "cmd 0x%02x had " "connection lost\n", c->Request.CDB[0]); return_status = IO_ERROR; break; case CMD_ABORTED: dev_warn(&h->pdev->dev, "cmd 0x%02x was " "aborted\n", c->Request.CDB[0]); return_status = IO_ERROR; break; case CMD_ABORT_FAILED: dev_warn(&h->pdev->dev, "cmd 0x%02x reports " "abort failed\n", c->Request.CDB[0]); return_status = IO_ERROR; break; case CMD_UNSOLICITED_ABORT: dev_warn(&h->pdev->dev, "unsolicited abort 0x%02x\n", c->Request.CDB[0]); return_status = IO_NEEDS_RETRY; break; case CMD_UNABORTABLE: dev_warn(&h->pdev->dev, "cmd unabortable\n"); return_status = IO_ERROR; break; default: dev_warn(&h->pdev->dev, "cmd 
0x%02x returned " "unknown status %x\n", c->Request.CDB[0], c->err_info->CommandStatus); return_status = IO_ERROR; } return return_status; } static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c, int attempt_retry) { DECLARE_COMPLETION_ONSTACK(wait); u64bit buff_dma_handle; int return_status = IO_OK; resend_cmd2: c->waiting = &wait; enqueue_cmd_and_start_io(h, c); wait_for_completion(&wait); if (c->err_info->CommandStatus == 0 || !attempt_retry) goto command_done; return_status = process_sendcmd_error(h, c); if (return_status == IO_NEEDS_RETRY && c->retry_count < MAX_CMD_RETRIES) { dev_warn(&h->pdev->dev, "retrying 0x%02x\n", c->Request.CDB[0]); c->retry_count++; /* erase the old error information */ memset(c->err_info, 0, sizeof(ErrorInfo_struct)); return_status = IO_OK; INIT_COMPLETION(wait); goto resend_cmd2; } command_done: /* unlock the buffers from DMA */ buff_dma_handle.val32.lower = c->SG[0].Addr.lower; buff_dma_handle.val32.upper = c->SG[0].Addr.upper; pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val, c->SG[0].Len, PCI_DMA_BIDIRECTIONAL); return return_status; } static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size, __u8 page_code, unsigned char scsi3addr[], int cmd_type) { CommandList_struct *c; int return_status; c = cmd_special_alloc(h); if (!c) return -ENOMEM; return_status = fill_cmd(h, c, cmd, buff, size, page_code, scsi3addr, cmd_type); if (return_status == IO_OK) return_status = sendcmd_withirq_core(h, c, 1); cmd_special_free(h, c); return return_status; } static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol, sector_t total_size, unsigned int block_size, InquiryData_struct *inq_buff, drive_info_struct *drv) { int return_code; unsigned long t; unsigned char scsi3addr[8]; memset(inq_buff, 0, sizeof(InquiryData_struct)); log_unit_to_scsi3addr(h, scsi3addr, logvol); return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff, sizeof(*inq_buff), 0xC1, scsi3addr, TYPE_CMD); if (return_code == IO_OK) { if (inq_buff->data_byte[8] == 0xFF) { dev_warn(&h->pdev->dev, "reading geometry failed, volume " "does not support reading geometry\n"); drv->heads = 255; drv->sectors = 32; /* Sectors per track */ drv->cylinders = total_size + 1; drv->raid_level = RAID_UNKNOWN; } else { drv->heads = inq_buff->data_byte[6]; drv->sectors = inq_buff->data_byte[7]; drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8; drv->cylinders += inq_buff->data_byte[5]; drv->raid_level = inq_buff->data_byte[8]; } drv->block_size = block_size; drv->nr_blocks = total_size + 1; t = drv->heads * drv->sectors; if (t > 1) { sector_t real_size = total_size + 1; unsigned long rem = sector_div(real_size, t); if (rem) real_size++; drv->cylinders = real_size; } } else { /* Get geometry failed */ dev_warn(&h->pdev->dev, "reading geometry failed\n"); } } static void cciss_read_capacity(ctlr_info_t *h, int logvol, sector_t *total_size, unsigned int *block_size) { ReadCapdata_struct *buf; int return_code; unsigned char scsi3addr[8]; buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL); if (!buf) { dev_warn(&h->pdev->dev, "out of memory\n"); return; } log_unit_to_scsi3addr(h, scsi3addr, logvol); return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY, buf, sizeof(ReadCapdata_struct), 0, scsi3addr, TYPE_CMD); if (return_code == IO_OK) { *total_size = be32_to_cpu(*(__be32 *) buf->total_size); *block_size = be32_to_cpu(*(__be32 *) buf->block_size); } else { /* read capacity command failed */ dev_warn(&h->pdev->dev, "read capacity failed\n"); *total_size = 0; *block_size = 
BLOCK_SIZE; } kfree(buf); } static void cciss_read_capacity_16(ctlr_info_t *h, int logvol, sector_t *total_size, unsigned int *block_size) { ReadCapdata_struct_16 *buf; int return_code; unsigned char scsi3addr[8]; buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL); if (!buf) { dev_warn(&h->pdev->dev, "out of memory\n"); return; } log_unit_to_scsi3addr(h, scsi3addr, logvol); return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY_16, buf, sizeof(ReadCapdata_struct_16), 0, scsi3addr, TYPE_CMD); if (return_code == IO_OK) { *total_size = be64_to_cpu(*(__be64 *) buf->total_size); *block_size = be32_to_cpu(*(__be32 *) buf->block_size); } else { /* read capacity command failed */ dev_warn(&h->pdev->dev, "read capacity failed\n"); *total_size = 0; *block_size = BLOCK_SIZE; } dev_info(&h->pdev->dev, " blocks= %llu block_size= %d\n", (unsigned long long)*total_size+1, *block_size); kfree(buf); } static int cciss_revalidate(struct gendisk *disk) { ctlr_info_t *h = get_host(disk); drive_info_struct *drv = get_drv(disk); int logvol; int FOUND = 0; unsigned int block_size; sector_t total_size; InquiryData_struct *inq_buff = NULL; for (logvol = 0; logvol <= h->highest_lun; logvol++) { if (!h->drv[logvol]) continue; if (memcmp(h->drv[logvol]->LunID, drv->LunID, sizeof(drv->LunID)) == 0) { FOUND = 1; break; } } if (!FOUND) return 1; inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL); if (inq_buff == NULL) { dev_warn(&h->pdev->dev, "out of memory\n"); return 1; } if (h->cciss_read == CCISS_READ_10) { cciss_read_capacity(h, logvol, &total_size, &block_size); } else { cciss_read_capacity_16(h, logvol, &total_size, &block_size); } cciss_geometry_inquiry(h, logvol, total_size, block_size, inq_buff, drv); blk_queue_logical_block_size(drv->queue, drv->block_size); set_capacity(disk, drv->nr_blocks); kfree(inq_buff); return 0; } /* * Map (physical) PCI mem into (virtual) kernel space */ static void __iomem *remap_pci_mem(ulong base, ulong size) { ulong page_base = ((ulong) base) & PAGE_MASK; ulong page_offs = ((ulong) base) - page_base; void __iomem *page_remapped = ioremap(page_base, page_offs + size); return page_remapped ? (page_remapped + page_offs) : NULL; } /* * Takes jobs of the Q and sends them to the hardware, then puts it on * the Q to wait for completion. */ static void start_io(ctlr_info_t *h) { CommandList_struct *c; while (!list_empty(&h->reqQ)) { c = list_entry(h->reqQ.next, CommandList_struct, list); /* can't do anything if fifo is full */ if ((h->access.fifo_full(h))) { dev_warn(&h->pdev->dev, "fifo full\n"); break; } /* Get the first entry from the Request Q */ removeQ(c); h->Qdepth--; /* Tell the controller execute command */ h->access.submit_command(h, c); /* Put job onto the completed Q */ addQ(&h->cmpQ, c); } } /* Assumes that h->lock is held. 
*/ /* Zeros out the error record and then resends the command back */ /* to the controller */ static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c) { /* erase the old error information */ memset(c->err_info, 0, sizeof(ErrorInfo_struct)); /* add it to software queue and then send it to the controller */ addQ(&h->reqQ, c); h->Qdepth++; if (h->Qdepth > h->maxQsinceinit) h->maxQsinceinit = h->Qdepth; start_io(h); } static inline unsigned int make_status_bytes(unsigned int scsi_status_byte, unsigned int msg_byte, unsigned int host_byte, unsigned int driver_byte) { /* inverse of macros in scsi.h */ return (scsi_status_byte & 0xff) | ((msg_byte & 0xff) << 8) | ((host_byte & 0xff) << 16) | ((driver_byte & 0xff) << 24); } static inline int evaluate_target_status(ctlr_info_t *h, CommandList_struct *cmd, int *retry_cmd) { unsigned char sense_key; unsigned char status_byte, msg_byte, host_byte, driver_byte; int error_value; *retry_cmd = 0; /* If we get in here, it means we got "target status", that is, scsi status */ status_byte = cmd->err_info->ScsiStatus; driver_byte = DRIVER_OK; msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */ if (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) host_byte = DID_PASSTHROUGH; else host_byte = DID_OK; error_value = make_status_bytes(status_byte, msg_byte, host_byte, driver_byte); if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) { if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) dev_warn(&h->pdev->dev, "cmd %p " "has SCSI Status 0x%x\n", cmd, cmd->err_info->ScsiStatus); return error_value; } /* check the sense key */ sense_key = 0xf & cmd->err_info->SenseInfo[2]; /* no status or recovered error */ if (((sense_key == 0x0) || (sense_key == 0x1)) && (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC)) error_value = 0; if (check_for_unit_attention(h, cmd)) { *retry_cmd = !(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC); return 0; } /* Not SG_IO or similar? */ if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) { if (error_value != 0) dev_warn(&h->pdev->dev, "cmd %p has CHECK CONDITION" " sense key = 0x%x\n", cmd, sense_key); return error_value; } /* SG_IO or similar, copy sense data back */ if (cmd->rq->sense) { if (cmd->rq->sense_len > cmd->err_info->SenseLen) cmd->rq->sense_len = cmd->err_info->SenseLen; memcpy(cmd->rq->sense, cmd->err_info->SenseInfo, cmd->rq->sense_len); } else cmd->rq->sense_len = 0; return error_value; } /* checks the status of the job and calls complete buffers to mark all * buffers for the completed job. Note that this function does not need * to hold the hba/queue lock. 
*/ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd, int timeout) { int retry_cmd = 0; struct request *rq = cmd->rq; rq->errors = 0; if (timeout) rq->errors = make_status_bytes(0, 0, 0, DRIVER_TIMEOUT); if (cmd->err_info->CommandStatus == 0) /* no error has occurred */ goto after_error_processing; switch (cmd->err_info->CommandStatus) { case CMD_TARGET_STATUS: rq->errors = evaluate_target_status(h, cmd, &retry_cmd); break; case CMD_DATA_UNDERRUN: if (cmd->rq->cmd_type == REQ_TYPE_FS) { dev_warn(&h->pdev->dev, "cmd %p has" " completed with data underrun " "reported\n", cmd); cmd->rq->resid_len = cmd->err_info->ResidualCnt; } break; case CMD_DATA_OVERRUN: if (cmd->rq->cmd_type == REQ_TYPE_FS) dev_warn(&h->pdev->dev, "cciss: cmd %p has" " completed with data overrun " "reported\n", cmd); break; case CMD_INVALID: dev_warn(&h->pdev->dev, "cciss: cmd %p is " "reported invalid\n", cmd); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? DID_PASSTHROUGH : DID_ERROR); break; case CMD_PROTOCOL_ERR: dev_warn(&h->pdev->dev, "cciss: cmd %p has " "protocol error\n", cmd); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? DID_PASSTHROUGH : DID_ERROR); break; case CMD_HARDWARE_ERR: dev_warn(&h->pdev->dev, "cciss: cmd %p had " " hardware error\n", cmd); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? DID_PASSTHROUGH : DID_ERROR); break; case CMD_CONNECTION_LOST: dev_warn(&h->pdev->dev, "cciss: cmd %p had " "connection lost\n", cmd); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? DID_PASSTHROUGH : DID_ERROR); break; case CMD_ABORTED: dev_warn(&h->pdev->dev, "cciss: cmd %p was " "aborted\n", cmd); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? DID_PASSTHROUGH : DID_ABORT); break; case CMD_ABORT_FAILED: dev_warn(&h->pdev->dev, "cciss: cmd %p reports " "abort failed\n", cmd); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? DID_PASSTHROUGH : DID_ERROR); break; case CMD_UNSOLICITED_ABORT: dev_warn(&h->pdev->dev, "cciss%d: unsolicited " "abort %p\n", h->ctlr, cmd); if (cmd->retry_count < MAX_CMD_RETRIES) { retry_cmd = 1; dev_warn(&h->pdev->dev, "retrying %p\n", cmd); cmd->retry_count++; } else dev_warn(&h->pdev->dev, "%p retried too many times\n", cmd); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? DID_PASSTHROUGH : DID_ABORT); break; case CMD_TIMEOUT: dev_warn(&h->pdev->dev, "cmd %p timedout\n", cmd); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? DID_PASSTHROUGH : DID_ERROR); break; case CMD_UNABORTABLE: dev_warn(&h->pdev->dev, "cmd %p unabortable\n", cmd); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC ? 
DID_PASSTHROUGH : DID_ERROR); break; default: dev_warn(&h->pdev->dev, "cmd %p returned " "unknown status %x\n", cmd, cmd->err_info->CommandStatus); rq->errors = make_status_bytes(SAM_STAT_GOOD, cmd->err_info->CommandStatus, DRIVER_OK, (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? DID_PASSTHROUGH : DID_ERROR); } after_error_processing: /* We need to return this command */ if (retry_cmd) { resend_cciss_cmd(h, cmd); return; } cmd->rq->completion_data = cmd; blk_complete_request(cmd->rq); } static inline u32 cciss_tag_contains_index(u32 tag) { #define DIRECT_LOOKUP_BIT 0x10 return tag & DIRECT_LOOKUP_BIT; } static inline u32 cciss_tag_to_index(u32 tag) { #define DIRECT_LOOKUP_SHIFT 5 return tag >> DIRECT_LOOKUP_SHIFT; } static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag) { #define CCISS_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1) #define CCISS_SIMPLE_ERROR_BITS 0x03 if (likely(h->transMethod & CFGTBL_Trans_Performant)) return tag & ~CCISS_PERF_ERROR_BITS; return tag & ~CCISS_SIMPLE_ERROR_BITS; } static inline void cciss_mark_tag_indexed(u32 *tag) { *tag |= DIRECT_LOOKUP_BIT; } static inline void cciss_set_tag_index(u32 *tag, u32 index) { *tag |= (index << DIRECT_LOOKUP_SHIFT); } /* * Get a request and submit it to the controller. */ static void do_cciss_request(struct request_queue *q) { ctlr_info_t *h = q->queuedata; CommandList_struct *c; sector_t start_blk; int seg; struct request *creq; u64bit temp64; struct scatterlist *tmp_sg; SGDescriptor_struct *curr_sg; drive_info_struct *drv; int i, dir; int sg_index = 0; int chained = 0; queue: creq = blk_peek_request(q); if (!creq) goto startio; BUG_ON(creq->nr_phys_segments > h->maxsgentries); c = cmd_alloc(h); if (!c) goto full; blk_start_request(creq); tmp_sg = h->scatter_list[c->cmdindex]; spin_unlock_irq(q->queue_lock); c->cmd_type = CMD_RWREQ; c->rq = creq; /* fill in the request */ drv = creq->rq_disk->private_data; c->Header.ReplyQueue = 0; /* unused in simple mode */ /* got command from pool, so use the command block index instead */ /* for direct lookups. */ /* The first 2 bits are reserved for controller error reporting. */ cciss_set_tag_index(&c->Header.Tag.lower, c->cmdindex); cciss_mark_tag_indexed(&c->Header.Tag.lower); memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID)); c->Request.CDBLen = 10; /* 12 byte commands not in FW yet; */ c->Request.Type.Type = TYPE_CMD; /* It is a command. */ c->Request.Type.Attribute = ATTR_SIMPLE; c->Request.Type.Direction = (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE; c->Request.Timeout = 0; /* Don't time out */ c->Request.CDB[0] = (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write; start_blk = blk_rq_pos(creq); dev_dbg(&h->pdev->dev, "sector =%d nr_sectors=%d\n", (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq)); sg_init_table(tmp_sg, h->maxsgentries); seg = blk_rq_map_sg(q, creq, tmp_sg); /* get the DMA records for the setup */ if (c->Request.Type.Direction == XFER_READ) dir = PCI_DMA_FROMDEVICE; else dir = PCI_DMA_TODEVICE; curr_sg = c->SG; sg_index = 0; chained = 0; for (i = 0; i < seg; i++) { if (((sg_index+1) == (h->max_cmd_sgentries)) && !chained && ((seg - i) > 1)) { /* Point to next chain block. 
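		 * When a request needs more segments than fit in the
		 * command's embedded SG list, the remaining entries go into a
		 * separate chain block (marked with Ext == CCISS_SG_CHAIN and
		 * unmapped again in cciss_softirq_done()).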
*/ curr_sg = h->cmd_sg_list[c->cmdindex]; sg_index = 0; chained = 1; } curr_sg[sg_index].Len = tmp_sg[i].length; temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]), tmp_sg[i].offset, tmp_sg[i].length, dir); curr_sg[sg_index].Addr.lower = temp64.val32.lower; curr_sg[sg_index].Addr.upper = temp64.val32.upper; curr_sg[sg_index].Ext = 0; /* we are not chaining */ ++sg_index; } if (chained) cciss_map_sg_chain_block(h, c, h->cmd_sg_list[c->cmdindex], (seg - (h->max_cmd_sgentries - 1)) * sizeof(SGDescriptor_struct)); /* track how many SG entries we are using */ if (seg > h->maxSG) h->maxSG = seg; dev_dbg(&h->pdev->dev, "Submitting %u sectors in %d segments " "chained[%d]\n", blk_rq_sectors(creq), seg, chained); c->Header.SGTotal = seg + chained; if (seg <= h->max_cmd_sgentries) c->Header.SGList = c->Header.SGTotal; else c->Header.SGList = h->max_cmd_sgentries; set_performant_mode(h, c); if (likely(creq->cmd_type == REQ_TYPE_FS)) { if(h->cciss_read == CCISS_READ_10) { c->Request.CDB[1] = 0; c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */ c->Request.CDB[3] = (start_blk >> 16) & 0xff; c->Request.CDB[4] = (start_blk >> 8) & 0xff; c->Request.CDB[5] = start_blk & 0xff; c->Request.CDB[6] = 0; /* (sect >> 24) & 0xff; MSB */ c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff; c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff; c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0; } else { u32 upper32 = upper_32_bits(start_blk); c->Request.CDBLen = 16; c->Request.CDB[1]= 0; c->Request.CDB[2]= (upper32 >> 24) & 0xff; /* MSB */ c->Request.CDB[3]= (upper32 >> 16) & 0xff; c->Request.CDB[4]= (upper32 >> 8) & 0xff; c->Request.CDB[5]= upper32 & 0xff; c->Request.CDB[6]= (start_blk >> 24) & 0xff; c->Request.CDB[7]= (start_blk >> 16) & 0xff; c->Request.CDB[8]= (start_blk >> 8) & 0xff; c->Request.CDB[9]= start_blk & 0xff; c->Request.CDB[10]= (blk_rq_sectors(creq) >> 24) & 0xff; c->Request.CDB[11]= (blk_rq_sectors(creq) >> 16) & 0xff; c->Request.CDB[12]= (blk_rq_sectors(creq) >> 8) & 0xff; c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff; c->Request.CDB[14] = c->Request.CDB[15] = 0; } } else if (creq->cmd_type == REQ_TYPE_BLOCK_PC) { c->Request.CDBLen = creq->cmd_len; memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB); } else { dev_warn(&h->pdev->dev, "bad request type %d\n", creq->cmd_type); BUG(); } spin_lock_irq(q->queue_lock); addQ(&h->reqQ, c); h->Qdepth++; if (h->Qdepth > h->maxQsinceinit) h->maxQsinceinit = h->Qdepth; goto queue; full: blk_stop_queue(q); startio: /* We will already have the driver lock here so not need * to lock it. 
*/ start_io(h); } static inline unsigned long get_next_completion(ctlr_info_t *h) { return h->access.command_completed(h); } static inline int interrupt_pending(ctlr_info_t *h) { return h->access.intr_pending(h); } static inline long interrupt_not_for_us(ctlr_info_t *h) { return ((h->access.intr_pending(h) == 0) || (h->interrupts_enabled == 0)); } static inline int bad_tag(ctlr_info_t *h, u32 tag_index, u32 raw_tag) { if (unlikely(tag_index >= h->nr_cmds)) { dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); return 1; } return 0; } static inline void finish_cmd(ctlr_info_t *h, CommandList_struct *c, u32 raw_tag) { removeQ(c); if (likely(c->cmd_type == CMD_RWREQ)) complete_command(h, c, 0); else if (c->cmd_type == CMD_IOCTL_PEND) complete(c->waiting); #ifdef CONFIG_CISS_SCSI_TAPE else if (c->cmd_type == CMD_SCSI) complete_scsi_command(c, 0, raw_tag); #endif } static inline u32 next_command(ctlr_info_t *h) { u32 a; if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant))) return h->access.command_completed(h); if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { a = *(h->reply_pool_head); /* Next cmd in ring buffer */ (h->reply_pool_head)++; h->commands_outstanding--; } else { a = FIFO_EMPTY; } /* Check for wraparound */ if (h->reply_pool_head == (h->reply_pool + h->max_commands)) { h->reply_pool_head = h->reply_pool; h->reply_pool_wraparound ^= 1; } return a; } /* process completion of an indexed ("direct lookup") command */ static inline u32 process_indexed_cmd(ctlr_info_t *h, u32 raw_tag) { u32 tag_index; CommandList_struct *c; tag_index = cciss_tag_to_index(raw_tag); if (bad_tag(h, tag_index, raw_tag)) return next_command(h); c = h->cmd_pool + tag_index; finish_cmd(h, c, raw_tag); return next_command(h); } /* process completion of a non-indexed command */ static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag) { CommandList_struct *c = NULL; __u32 busaddr_masked, tag_masked; tag_masked = cciss_tag_discard_error_bits(h, raw_tag); list_for_each_entry(c, &h->cmpQ, list) { busaddr_masked = cciss_tag_discard_error_bits(h, c->busaddr); if (busaddr_masked == tag_masked) { finish_cmd(h, c, raw_tag); return next_command(h); } } bad_tag(h, h->nr_cmds + 1, raw_tag); return next_command(h); } /* Some controllers, like p400, will give us one interrupt * after a soft reset, even if we turned interrupts off. * Only need to check for this in the cciss_xxx_discard_completions * functions. */ static int ignore_bogus_interrupt(ctlr_info_t *h) { if (likely(!reset_devices)) return 0; if (likely(h->interrupts_enabled)) return 0; dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled " "(known firmware bug.) 
Ignoring.\n"); return 1; } static irqreturn_t cciss_intx_discard_completions(int irq, void *dev_id) { ctlr_info_t *h = dev_id; unsigned long flags; u32 raw_tag; if (ignore_bogus_interrupt(h)) return IRQ_NONE; if (interrupt_not_for_us(h)) return IRQ_NONE; spin_lock_irqsave(&h->lock, flags); while (interrupt_pending(h)) { raw_tag = get_next_completion(h); while (raw_tag != FIFO_EMPTY) raw_tag = next_command(h); } spin_unlock_irqrestore(&h->lock, flags); return IRQ_HANDLED; } static irqreturn_t cciss_msix_discard_completions(int irq, void *dev_id) { ctlr_info_t *h = dev_id; unsigned long flags; u32 raw_tag; if (ignore_bogus_interrupt(h)) return IRQ_NONE; spin_lock_irqsave(&h->lock, flags); raw_tag = get_next_completion(h); while (raw_tag != FIFO_EMPTY) raw_tag = next_command(h); spin_unlock_irqrestore(&h->lock, flags); return IRQ_HANDLED; } static irqreturn_t do_cciss_intx(int irq, void *dev_id) { ctlr_info_t *h = dev_id; unsigned long flags; u32 raw_tag; if (interrupt_not_for_us(h)) return IRQ_NONE; spin_lock_irqsave(&h->lock, flags); while (interrupt_pending(h)) { raw_tag = get_next_completion(h); while (raw_tag != FIFO_EMPTY) { if (cciss_tag_contains_index(raw_tag)) raw_tag = process_indexed_cmd(h, raw_tag); else raw_tag = process_nonindexed_cmd(h, raw_tag); } } spin_unlock_irqrestore(&h->lock, flags); return IRQ_HANDLED; } /* Add a second interrupt handler for MSI/MSI-X mode. In this mode we never * check the interrupt pending register because it is not set. */ static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id) { ctlr_info_t *h = dev_id; unsigned long flags; u32 raw_tag; spin_lock_irqsave(&h->lock, flags); raw_tag = get_next_completion(h); while (raw_tag != FIFO_EMPTY) { if (cciss_tag_contains_index(raw_tag)) raw_tag = process_indexed_cmd(h, raw_tag); else raw_tag = process_nonindexed_cmd(h, raw_tag); } spin_unlock_irqrestore(&h->lock, flags); return IRQ_HANDLED; } /** * add_to_scan_list() - add controller to rescan queue * @h: Pointer to the controller. * * Adds the controller to the rescan queue if not already on the queue. * * returns 1 if added to the queue, 0 if skipped (could be on the * queue already, or the controller could be initializing or shutting * down). **/ static int add_to_scan_list(struct ctlr_info *h) { struct ctlr_info *test_h; int found = 0; int ret = 0; if (h->busy_initializing) return 0; if (!mutex_trylock(&h->busy_shutting_down)) return 0; mutex_lock(&scan_mutex); list_for_each_entry(test_h, &scan_q, scan_list) { if (test_h == h) { found = 1; break; } } if (!found && !h->busy_scanning) { INIT_COMPLETION(h->scan_wait); list_add_tail(&h->scan_list, &scan_q); ret = 1; } mutex_unlock(&scan_mutex); mutex_unlock(&h->busy_shutting_down); return ret; } /** * remove_from_scan_list() - remove controller from rescan queue * @h: Pointer to the controller. * * Removes the controller from the rescan queue if present. Blocks if * the controller is currently conducting a rescan. The controller * can be in one of three states: * 1. Doesn't need a scan * 2. On the scan list, but not scanning yet (we remove it) * 3. Busy scanning (and not on the list). In this case we want to wait for * the scan to complete to make sure the scanning thread for this * controller is completely idle. **/ static void remove_from_scan_list(struct ctlr_info *h) { struct ctlr_info *test_h, *tmp_h; mutex_lock(&scan_mutex); list_for_each_entry_safe(test_h, tmp_h, &scan_q, scan_list) { if (test_h == h) { /* state 2. 
*/ list_del(&h->scan_list); complete_all(&h->scan_wait); mutex_unlock(&scan_mutex); return; } } if (h->busy_scanning) { /* state 3. */ mutex_unlock(&scan_mutex); wait_for_completion(&h->scan_wait); } else { /* state 1, nothing to do. */ mutex_unlock(&scan_mutex); } } /** * scan_thread() - kernel thread used to rescan controllers * @data: Ignored. * * A kernel thread used scan for drive topology changes on * controllers. The thread processes only one controller at a time * using a queue. Controllers are added to the queue using * add_to_scan_list() and removed from the queue either after done * processing or using remove_from_scan_list(). * * returns 0. **/ static int scan_thread(void *data) { struct ctlr_info *h; while (1) { set_current_state(TASK_INTERRUPTIBLE); schedule(); if (kthread_should_stop()) break; while (1) { mutex_lock(&scan_mutex); if (list_empty(&scan_q)) { mutex_unlock(&scan_mutex); break; } h = list_entry(scan_q.next, struct ctlr_info, scan_list); list_del(&h->scan_list); h->busy_scanning = 1; mutex_unlock(&scan_mutex); rebuild_lun_table(h, 0, 0); complete_all(&h->scan_wait); mutex_lock(&scan_mutex); h->busy_scanning = 0; mutex_unlock(&scan_mutex); } } return 0; } static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c) { if (c->err_info->SenseInfo[2] != UNIT_ATTENTION) return 0; switch (c->err_info->SenseInfo[12]) { case STATE_CHANGED: dev_warn(&h->pdev->dev, "a state change " "detected, command retried\n"); return 1; break; case LUN_FAILED: dev_warn(&h->pdev->dev, "LUN failure " "detected, action required\n"); return 1; break; case REPORT_LUNS_CHANGED: dev_warn(&h->pdev->dev, "report LUN data changed\n"); /* * Here, we could call add_to_scan_list and wake up the scan thread, * except that it's quite likely that we will get more than one * REPORT_LUNS_CHANGED condition in quick succession, which means * that those which occur after the first one will likely happen * *during* the scan_thread's rescan. And the rescan code is not * robust enough to restart in the middle, undoing what it has already * done, and it's not clear that it's even possible to do this, since * part of what it does is notify the block layer, which starts * doing it's own i/o to read partition tables and so on, and the * driver doesn't have visibility to know what might need undoing. * In any event, if possible, it is horribly complicated to get right * so we just don't do it for now. * * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012. */ return 1; break; case POWER_OR_RESET: dev_warn(&h->pdev->dev, "a power on or device reset detected\n"); return 1; break; case UNIT_ATTENTION_CLEARED: dev_warn(&h->pdev->dev, "unit attention cleared by another initiator\n"); return 1; break; default: dev_warn(&h->pdev->dev, "unknown unit attention detected\n"); return 1; } } /* * We cannot read the structure directly, for portability we must use * the io functions. * This is for debug only. 
*/ static void print_cfg_table(ctlr_info_t *h) { int i; char temp_name[17]; CfgTable_struct *tb = h->cfgtable; dev_dbg(&h->pdev->dev, "Controller Configuration information\n"); dev_dbg(&h->pdev->dev, "------------------------------------\n"); for (i = 0; i < 4; i++) temp_name[i] = readb(&(tb->Signature[i])); temp_name[4] = '\0'; dev_dbg(&h->pdev->dev, " Signature = %s\n", temp_name); dev_dbg(&h->pdev->dev, " Spec Number = %d\n", readl(&(tb->SpecValence))); dev_dbg(&h->pdev->dev, " Transport methods supported = 0x%x\n", readl(&(tb->TransportSupport))); dev_dbg(&h->pdev->dev, " Transport methods active = 0x%x\n", readl(&(tb->TransportActive))); dev_dbg(&h->pdev->dev, " Requested transport Method = 0x%x\n", readl(&(tb->HostWrite.TransportRequest))); dev_dbg(&h->pdev->dev, " Coalesce Interrupt Delay = 0x%x\n", readl(&(tb->HostWrite.CoalIntDelay))); dev_dbg(&h->pdev->dev, " Coalesce Interrupt Count = 0x%x\n", readl(&(tb->HostWrite.CoalIntCount))); dev_dbg(&h->pdev->dev, " Max outstanding commands = 0x%d\n", readl(&(tb->CmdsOutMax))); dev_dbg(&h->pdev->dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes))); for (i = 0; i < 16; i++) temp_name[i] = readb(&(tb->ServerName[i])); temp_name[16] = '\0'; dev_dbg(&h->pdev->dev, " Server Name = %s\n", temp_name); dev_dbg(&h->pdev->dev, " Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat))); } static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) { int i, offset, mem_type, bar_type; if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */ return 0; offset = 0; for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE; if (bar_type == PCI_BASE_ADDRESS_SPACE_IO) offset += 4; else { mem_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_MEM_TYPE_MASK; switch (mem_type) { case PCI_BASE_ADDRESS_MEM_TYPE_32: case PCI_BASE_ADDRESS_MEM_TYPE_1M: offset += 4; /* 32 bit */ break; case PCI_BASE_ADDRESS_MEM_TYPE_64: offset += 8; break; default: /* reserved in PCI 2.2 */ dev_warn(&pdev->dev, "Base address is invalid\n"); return -1; break; } } if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0) return i + 1; } return -1; } /* Fill in bucket_map[], given nsgs (the max number of * scatter gather elements supported) and bucket[], * which is an array of 8 integers. The bucket[] array * contains 8 different DMA transfer sizes (in 16 * byte increments) which the controller uses to fetch * commands. This function fills in bucket_map[], which * maps a given number of scatter gather elements to one of * the 8 DMA transfer sizes. The point of it is to allow the * controller to only do as much DMA as needed to fetch the * command, with the DMA transfer size encoded in the lower * bits of the command address. */ static void calc_bucket_map(int bucket[], int num_buckets, int nsgs, int *bucket_map) { int i, j, b, size; /* even a command with 0 SGs requires 4 blocks */ #define MINIMUM_TRANSFER_BLOCKS 4 #define NUM_BUCKETS 8 /* Note, bucket_map must have nsgs+1 entries. */ for (i = 0; i <= nsgs; i++) { /* Compute size of a command with i SG entries */ size = i + MINIMUM_TRANSFER_BLOCKS; b = num_buckets; /* Assume the biggest bucket */ /* Find the bucket that is just big enough */ for (j = 0; j < 8; j++) { if (bucket[j] >= size) { b = j; break; } } /* for a command with i SG entries, use bucket b. */ bucket_map[i] = b; } } static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h) { int i; /* under certain very rare conditions, this can take awhile. 
* (e.g.: hot replace a failed 144GB drive in a RAID 5 set right * as we enter this code.) */ for (i = 0; i < MAX_CONFIG_WAIT; i++) { if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) break; usleep_range(10000, 20000); } } static __devinit void cciss_enter_performant_mode(ctlr_info_t *h, u32 use_short_tags) { /* This is a bit complicated. There are 8 registers on * the controller which we write to to tell it 8 different * sizes of commands which there may be. It's a way of * reducing the DMA done to fetch each command. Encoded into * each command's tag are 3 bits which communicate to the controller * which of the eight sizes that command fits within. The size of * each command depends on how many scatter gather entries there are. * Each SG entry requires 16 bytes. The eight registers are programmed * with the number of 16-byte blocks a command of that size requires. * The smallest command possible requires 5 such 16 byte blocks. * the largest command possible requires MAXSGENTRIES + 4 16-byte * blocks. Note, this only extends to the SG entries contained * within the command block, and does not extend to chained blocks * of SG elements. bft[] contains the eight values we write to * the registers. They are not evenly distributed, but have more * sizes for small commands, and fewer sizes for larger commands. */ __u32 trans_offset; int bft[8] = { 5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4}; /* * 5 = 1 s/g entry or 4k * 6 = 2 s/g entry or 8k * 8 = 4 s/g entry or 16k * 10 = 6 s/g entry or 24k */ unsigned long register_value; BUILD_BUG_ON(28 > MAXSGENTRIES + 4); h->reply_pool_wraparound = 1; /* spec: init to 1 */ /* Controller spec: zero out this buffer. */ memset(h->reply_pool, 0, h->max_commands * sizeof(__u64)); h->reply_pool_head = h->reply_pool; trans_offset = readl(&(h->cfgtable->TransMethodOffset)); calc_bucket_map(bft, ARRAY_SIZE(bft), h->maxsgentries, h->blockFetchTable); writel(bft[0], &h->transtable->BlockFetch0); writel(bft[1], &h->transtable->BlockFetch1); writel(bft[2], &h->transtable->BlockFetch2); writel(bft[3], &h->transtable->BlockFetch3); writel(bft[4], &h->transtable->BlockFetch4); writel(bft[5], &h->transtable->BlockFetch5); writel(bft[6], &h->transtable->BlockFetch6); writel(bft[7], &h->transtable->BlockFetch7); /* size of controller ring buffer */ writel(h->max_commands, &h->transtable->RepQSize); writel(1, &h->transtable->RepQCount); writel(0, &h->transtable->RepQCtrAddrLow32); writel(0, &h->transtable->RepQCtrAddrHigh32); writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32); writel(0, &h->transtable->RepQAddr0High32); writel(CFGTBL_Trans_Performant | use_short_tags, &(h->cfgtable->HostWrite.TransportRequest)); writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); cciss_wait_for_mode_change_ack(h); register_value = readl(&(h->cfgtable->TransportActive)); if (!(register_value & CFGTBL_Trans_Performant)) dev_warn(&h->pdev->dev, "cciss: unable to get board into" " performant mode\n"); } static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h) { __u32 trans_support; dev_dbg(&h->pdev->dev, "Trying to put board into Performant mode\n"); /* Attempt to put controller into performant mode if supported */ /* Does board support performant mode? */ trans_support = readl(&(h->cfgtable->TransportSupport)); if (!(trans_support & PERFORMANT_MODE)) return; dev_dbg(&h->pdev->dev, "Placing controller into performant mode\n"); /* Performant mode demands commands on a 32 byte boundary * pci_alloc_consistent aligns on page boundarys already. 
* Just need to check if divisible by 32 */ if ((sizeof(CommandList_struct) % 32) != 0) { dev_warn(&h->pdev->dev, "%s %d %s\n", "cciss info: command size[", (int)sizeof(CommandList_struct), "] not divisible by 32, no performant mode..\n"); return; } /* Performant mode ring buffer and supporting data structures */ h->reply_pool = (__u64 *)pci_alloc_consistent( h->pdev, h->max_commands * sizeof(__u64), &(h->reply_pool_dhandle)); /* Need a block fetch table for performant mode */ h->blockFetchTable = kmalloc(((h->maxsgentries+1) * sizeof(__u32)), GFP_KERNEL); if ((h->reply_pool == NULL) || (h->blockFetchTable == NULL)) goto clean_up; cciss_enter_performant_mode(h, trans_support & CFGTBL_Trans_use_short_tags); /* Change the access methods to the performant access methods */ h->access = SA5_performant_access; h->transMethod = CFGTBL_Trans_Performant; return; clean_up: kfree(h->blockFetchTable); if (h->reply_pool) pci_free_consistent(h->pdev, h->max_commands * sizeof(__u64), h->reply_pool, h->reply_pool_dhandle); return; } /* cciss_put_controller_into_performant_mode */ /* If MSI/MSI-X is supported by the kernel we will try to enable it on * controllers that are capable. If not, we use IO-APIC mode. */ static void __devinit cciss_interrupt_mode(ctlr_info_t *h) { #ifdef CONFIG_PCI_MSI int err; struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1}, {0, 2}, {0, 3} }; /* Some boards advertise MSI but don't really support it */ if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) || (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) goto default_int_mode; if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { err = pci_enable_msix(h->pdev, cciss_msix_entries, 4); if (!err) { h->intr[0] = cciss_msix_entries[0].vector; h->intr[1] = cciss_msix_entries[1].vector; h->intr[2] = cciss_msix_entries[2].vector; h->intr[3] = cciss_msix_entries[3].vector; h->msix_vector = 1; return; } if (err > 0) { dev_warn(&h->pdev->dev, "only %d MSI-X vectors available\n", err); goto default_int_mode; } else { dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err); goto default_int_mode; } } if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { if (!pci_enable_msi(h->pdev)) h->msi_vector = 1; else dev_warn(&h->pdev->dev, "MSI init failed\n"); } default_int_mode: #endif /* CONFIG_PCI_MSI */ /* if we get here we're going to use the default interrupt mode */ h->intr[PERF_MODE_INT] = h->pdev->irq; return; } static int __devinit cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id) { int i; u32 subsystem_vendor_id, subsystem_device_id; subsystem_vendor_id = pdev->subsystem_vendor; subsystem_device_id = pdev->subsystem_device; *board_id = ((subsystem_device_id << 16) & 0xffff0000) | subsystem_vendor_id; for (i = 0; i < ARRAY_SIZE(products); i++) if (*board_id == products[i].board_id) return i; dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n", *board_id); return -ENODEV; } static inline bool cciss_board_disabled(ctlr_info_t *h) { u16 command; (void) pci_read_config_word(h->pdev, PCI_COMMAND, &command); return ((command & PCI_COMMAND_MEMORY) == 0); } static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev, unsigned long *memory_bar) { int i; for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { /* addressing mode bits already removed */ *memory_bar = pci_resource_start(pdev, i); dev_dbg(&pdev->dev, "memory BAR = %lx\n", *memory_bar); return 0; } dev_warn(&pdev->dev, "no memory BAR found\n"); return -ENODEV; } static int __devinit 
cciss_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr, int wait_for_ready) #define BOARD_READY 1 #define BOARD_NOT_READY 0 { int i, iterations; u32 scratchpad; if (wait_for_ready) iterations = CCISS_BOARD_READY_ITERATIONS; else iterations = CCISS_BOARD_NOT_READY_ITERATIONS; for (i = 0; i < iterations; i++) { scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET); if (wait_for_ready) { if (scratchpad == CCISS_FIRMWARE_READY) return 0; } else { if (scratchpad != CCISS_FIRMWARE_READY) return 0; } msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS); } dev_warn(&pdev->dev, "board not ready, timed out.\n"); return -ENODEV; } static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index, u64 *cfg_offset) { *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET); *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET); *cfg_base_addr &= (u32) 0x0000ffff; *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr); if (*cfg_base_addr_index == -1) { dev_warn(&pdev->dev, "cannot find cfg_base_addr_index, " "*cfg_base_addr = 0x%08x\n", *cfg_base_addr); return -ENODEV; } return 0; } static int __devinit cciss_find_cfgtables(ctlr_info_t *h) { u64 cfg_offset; u32 cfg_base_addr; u64 cfg_base_addr_index; u32 trans_offset; int rc; rc = cciss_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, &cfg_base_addr_index, &cfg_offset); if (rc) return rc; h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, cfg_base_addr_index) + cfg_offset, sizeof(h->cfgtable)); if (!h->cfgtable) return -ENOMEM; rc = write_driver_ver_to_cfgtable(h->cfgtable); if (rc) return rc; /* Find performant mode table. */ trans_offset = readl(&h->cfgtable->TransMethodOffset); h->transtable = remap_pci_mem(pci_resource_start(h->pdev, cfg_base_addr_index)+cfg_offset+trans_offset, sizeof(*h->transtable)); if (!h->transtable) return -ENOMEM; return 0; } static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h) { h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); /* Limit commands in memory limited kdump scenario. */ if (reset_devices && h->max_commands > 32) h->max_commands = 32; if (h->max_commands < 16) { dev_warn(&h->pdev->dev, "Controller reports " "max supported commands of %d, an obvious lie. " "Using 16. Ensure that firmware is up to date.\n", h->max_commands); h->max_commands = 16; } } /* Interrogate the hardware for some limits: * max commands, max SG elements without chaining, and with chaining, * SG chain block size, etc. */ static void __devinit cciss_find_board_params(ctlr_info_t *h) { cciss_get_max_perf_mode_cmds(h); h->nr_cmds = h->max_commands - 4 - cciss_tape_cmds; h->maxsgentries = readl(&(h->cfgtable->MaxSGElements)); /* * Limit in-command s/g elements to 32 save dma'able memory. 
* Howvever spec says if 0, use 31 */ h->max_cmd_sgentries = 31; if (h->maxsgentries > 512) { h->max_cmd_sgentries = 32; h->chainsize = h->maxsgentries - h->max_cmd_sgentries + 1; h->maxsgentries--; /* save one for chain pointer */ } else { h->maxsgentries = 31; /* default to traditional values */ h->chainsize = 0; } } static inline bool CISS_signature_present(ctlr_info_t *h) { if ((readb(&h->cfgtable->Signature[0]) != 'C') || (readb(&h->cfgtable->Signature[1]) != 'I') || (readb(&h->cfgtable->Signature[2]) != 'S') || (readb(&h->cfgtable->Signature[3]) != 'S')) { dev_warn(&h->pdev->dev, "not a valid CISS config table\n"); return false; } return true; } /* Need to enable prefetch in the SCSI core for 6400 in x86 */ static inline void cciss_enable_scsi_prefetch(ctlr_info_t *h) { #ifdef CONFIG_X86 u32 prefetch; prefetch = readl(&(h->cfgtable->SCSI_Prefetch)); prefetch |= 0x100; writel(prefetch, &(h->cfgtable->SCSI_Prefetch)); #endif } /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result * in a prefetch beyond physical memory. */ static inline void cciss_p600_dma_prefetch_quirk(ctlr_info_t *h) { u32 dma_prefetch; __u32 dma_refetch; if (h->board_id != 0x3225103C) return; dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); dma_prefetch |= 0x8000; writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); pci_read_config_dword(h->pdev, PCI_COMMAND_PARITY, &dma_refetch); dma_refetch |= 0x1; pci_write_config_dword(h->pdev, PCI_COMMAND_PARITY, dma_refetch); } static int __devinit cciss_pci_init(ctlr_info_t *h) { int prod_index, err; prod_index = cciss_lookup_board_id(h->pdev, &h->board_id); if (prod_index < 0) return -ENODEV; h->product_name = products[prod_index].product_name; h->access = *(products[prod_index].access); if (cciss_board_disabled(h)) { dev_warn(&h->pdev->dev, "controller appears to be disabled\n"); return -ENODEV; } err = pci_enable_device(h->pdev); if (err) { dev_warn(&h->pdev->dev, "Unable to Enable PCI device\n"); return err; } err = pci_request_regions(h->pdev, "cciss"); if (err) { dev_warn(&h->pdev->dev, "Cannot obtain PCI resources, aborting\n"); return err; } dev_dbg(&h->pdev->dev, "irq = %x\n", h->pdev->irq); dev_dbg(&h->pdev->dev, "board_id = %x\n", h->board_id); /* If the kernel supports MSI/MSI-X we will try to enable that functionality, * else we use the IO-APIC interrupt assigned to us by system ROM. */ cciss_interrupt_mode(h); err = cciss_pci_find_memory_BAR(h->pdev, &h->paddr); if (err) goto err_out_free_res; h->vaddr = remap_pci_mem(h->paddr, 0x250); if (!h->vaddr) { err = -ENOMEM; goto err_out_free_res; } err = cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY); if (err) goto err_out_free_res; err = cciss_find_cfgtables(h); if (err) goto err_out_free_res; print_cfg_table(h); cciss_find_board_params(h); if (!CISS_signature_present(h)) { err = -ENODEV; goto err_out_free_res; } cciss_enable_scsi_prefetch(h); cciss_p600_dma_prefetch_quirk(h); cciss_put_controller_into_performant_mode(h); return 0; err_out_free_res: /* * Deliberately omit pci_disable_device(): it does something nasty to * Smart Array controllers that pci_enable_device does not undo */ if (h->transtable) iounmap(h->transtable); if (h->cfgtable) iounmap(h->cfgtable); if (h->vaddr) iounmap(h->vaddr); pci_release_regions(h->pdev); return err; } /* Function to find the first free pointer into our hba[] array * Returns -1 if no free entries are left. 
*/ static int alloc_cciss_hba(struct pci_dev *pdev) { int i; for (i = 0; i < MAX_CTLR; i++) { if (!hba[i]) { ctlr_info_t *h; h = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL); if (!h) goto Enomem; hba[i] = h; return i; } } dev_warn(&pdev->dev, "This driver supports a maximum" " of %d controllers.\n", MAX_CTLR); return -1; Enomem: dev_warn(&pdev->dev, "out of memory.\n"); return -1; } static void free_hba(ctlr_info_t *h) { int i; hba[h->ctlr] = NULL; for (i = 0; i < h->highest_lun + 1; i++) if (h->gendisk[i] != NULL) put_disk(h->gendisk[i]); kfree(h); } /* Send a message CDB to the firmware. */ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, unsigned char type) { typedef struct { CommandListHeader_struct CommandHeader; RequestBlock_struct Request; ErrDescriptor_struct ErrorDescriptor; } Command; static const size_t cmd_sz = sizeof(Command) + sizeof(ErrorInfo_struct); Command *cmd; dma_addr_t paddr64; uint32_t paddr32, tag; void __iomem *vaddr; int i, err; vaddr = ioremap_nocache(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); if (vaddr == NULL) return -ENOMEM; /* The Inbound Post Queue only accepts 32-bit physical addresses for the CCISS commands, so they must be allocated from the lower 4GiB of memory. */ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { iounmap(vaddr); return -ENOMEM; } cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64); if (cmd == NULL) { iounmap(vaddr); return -ENOMEM; } /* This must fit, because of the 32-bit consistent DMA mask. Also, although there's no guarantee, we assume that the address is at least 4-byte aligned (most likely, it's page-aligned). */ paddr32 = paddr64; cmd->CommandHeader.ReplyQueue = 0; cmd->CommandHeader.SGList = 0; cmd->CommandHeader.SGTotal = 0; cmd->CommandHeader.Tag.lower = paddr32; cmd->CommandHeader.Tag.upper = 0; memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8); cmd->Request.CDBLen = 16; cmd->Request.Type.Type = TYPE_MSG; cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE; cmd->Request.Type.Direction = XFER_NONE; cmd->Request.Timeout = 0; /* Don't time out */ cmd->Request.CDB[0] = opcode; cmd->Request.CDB[1] = type; memset(&cmd->Request.CDB[2], 0, 14); /* the rest of the CDB is reserved */ cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(Command); cmd->ErrorDescriptor.Addr.upper = 0; cmd->ErrorDescriptor.Len = sizeof(ErrorInfo_struct); writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET); for (i = 0; i < 10; i++) { tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); if ((tag & ~3) == paddr32) break; msleep(CCISS_POST_RESET_NOOP_TIMEOUT_MSECS); } iounmap(vaddr); /* we leak the DMA buffer here ... no choice since the controller could still complete the command. */ if (i == 10) { dev_err(&pdev->dev, "controller message %02x:%02x timed out\n", opcode, type); return -ETIMEDOUT; } pci_free_consistent(pdev, cmd_sz, cmd, paddr64); if (tag & 2) { dev_err(&pdev->dev, "controller message %02x:%02x failed\n", opcode, type); return -EIO; } dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n", opcode, type); return 0; } #define cciss_noop(p) cciss_message(p, 3, 0) static int cciss_controller_hard_reset(struct pci_dev *pdev, void * __iomem vaddr, u32 use_doorbell) { u16 pmcsr; int pos; if (use_doorbell) { /* For everything after the P600, the PCI power state method * of resetting the controller doesn't work, so we have this * other way using the doorbell register. 
*/ dev_info(&pdev->dev, "using doorbell to reset controller\n"); writel(use_doorbell, vaddr + SA5_DOORBELL); } else { /* Try to do it the PCI power state way */ /* Quoting from the Open CISS Specification: "The Power * Management Control/Status Register (CSR) controls the power * state of the device. The normal operating state is D0, * CSR=00h. The software off state is D3, CSR=03h. To reset * the controller, place the interface device in D3 then to D0, * this causes a secondary PCI reset which will reset the * controller." */ pos = pci_find_capability(pdev, PCI_CAP_ID_PM); if (pos == 0) { dev_err(&pdev->dev, "cciss_controller_hard_reset: " "PCI PM not supported\n"); return -ENODEV; } dev_info(&pdev->dev, "using PCI PM to reset controller\n"); /* enter the D3hot power management state */ pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr); pmcsr &= ~PCI_PM_CTRL_STATE_MASK; pmcsr |= PCI_D3hot; pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); msleep(500); /* enter the D0 power management state */ pmcsr &= ~PCI_PM_CTRL_STATE_MASK; pmcsr |= PCI_D0; pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); /* * The P600 requires a small delay when changing states. * Otherwise we may think the board did not reset and we bail. * This for kdump only and is particular to the P600. */ msleep(500); } return 0; } static __devinit void init_driver_version(char *driver_version, int len) { memset(driver_version, 0, len); strncpy(driver_version, "cciss " DRIVER_NAME, len - 1); } static __devinit int write_driver_ver_to_cfgtable( CfgTable_struct __iomem *cfgtable) { char *driver_version; int i, size = sizeof(cfgtable->driver_version); driver_version = kmalloc(size, GFP_KERNEL); if (!driver_version) return -ENOMEM; init_driver_version(driver_version, size); for (i = 0; i < size; i++) writeb(driver_version[i], &cfgtable->driver_version[i]); kfree(driver_version); return 0; } static __devinit void read_driver_ver_from_cfgtable( CfgTable_struct __iomem *cfgtable, unsigned char *driver_ver) { int i; for (i = 0; i < sizeof(cfgtable->driver_version); i++) driver_ver[i] = readb(&cfgtable->driver_version[i]); } static __devinit int controller_reset_failed( CfgTable_struct __iomem *cfgtable) { char *driver_ver, *old_driver_ver; int rc, size = sizeof(cfgtable->driver_version); old_driver_ver = kmalloc(2 * size, GFP_KERNEL); if (!old_driver_ver) return -ENOMEM; driver_ver = old_driver_ver + size; /* After a reset, the 32 bytes of "driver version" in the cfgtable * should have been changed, otherwise we know the reset failed. */ init_driver_version(old_driver_ver, size); read_driver_ver_from_cfgtable(cfgtable, driver_ver); rc = !memcmp(driver_ver, old_driver_ver, size); kfree(old_driver_ver); return rc; } /* This does a hard reset of the controller using PCI power management * states or using the doorbell register. */ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) { u64 cfg_offset; u32 cfg_base_addr; u64 cfg_base_addr_index; void __iomem *vaddr; unsigned long paddr; u32 misc_fw_support; int rc; CfgTable_struct __iomem *cfgtable; u32 use_doorbell; u32 board_id; u16 command_register; /* For controllers as old a the p600, this is very nearly * the same thing as * * pci_save_state(pci_dev); * pci_set_power_state(pci_dev, PCI_D3hot); * pci_set_power_state(pci_dev, PCI_D0); * pci_restore_state(pci_dev); * * For controllers newer than the P600, the pci power state * method of resetting doesn't work so we have another way * using the doorbell register. */ /* Exclude 640x boards. 
These are two pci devices in one slot * which share a battery backed cache module. One controls the * cache, the other accesses the cache through the one that controls * it. If we reset the one controlling the cache, the other will * likely not be happy. Just forbid resetting this conjoined mess. */ cciss_lookup_board_id(pdev, &board_id); if (!ctlr_is_resettable(board_id)) { dev_warn(&pdev->dev, "Cannot reset Smart Array 640x " "due to shared cache module."); return -ENODEV; } /* if controller is soft- but not hard resettable... */ if (!ctlr_is_hard_resettable(board_id)) return -ENOTSUPP; /* try soft reset later. */ /* Save the PCI command register */ pci_read_config_word(pdev, 4, &command_register); /* Turn the board off. This is so that later pci_restore_state() * won't turn the board on before the rest of config space is ready. */ pci_disable_device(pdev); pci_save_state(pdev); /* find the first memory BAR, so we can find the cfg table */ rc = cciss_pci_find_memory_BAR(pdev, &paddr); if (rc) return rc; vaddr = remap_pci_mem(paddr, 0x250); if (!vaddr) return -ENOMEM; /* find cfgtable in order to check if reset via doorbell is supported */ rc = cciss_find_cfg_addrs(pdev, vaddr, &cfg_base_addr, &cfg_base_addr_index, &cfg_offset); if (rc) goto unmap_vaddr; cfgtable = remap_pci_mem(pci_resource_start(pdev, cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable)); if (!cfgtable) { rc = -ENOMEM; goto unmap_vaddr; } rc = write_driver_ver_to_cfgtable(cfgtable); if (rc) goto unmap_vaddr; /* If reset via doorbell register is supported, use that. * There are two such methods. Favor the newest method. */ misc_fw_support = readl(&cfgtable->misc_fw_support); use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2; if (use_doorbell) { use_doorbell = DOORBELL_CTLR_RESET2; } else { use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; if (use_doorbell) { dev_warn(&pdev->dev, "Controller claims that " "'Bit 2 doorbell reset' is " "supported, but not 'bit 5 doorbell reset'. " "Firmware update is recommended.\n"); rc = -ENOTSUPP; /* use the soft reset */ goto unmap_cfgtable; } } rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell); if (rc) goto unmap_cfgtable; pci_restore_state(pdev); rc = pci_enable_device(pdev); if (rc) { dev_warn(&pdev->dev, "failed to enable device.\n"); goto unmap_cfgtable; } pci_write_config_word(pdev, 4, command_register); /* Some devices (notably the HP Smart Array 5i Controller) need a little pause here */ msleep(CCISS_POST_RESET_PAUSE_MSECS); /* Wait for board to become not ready, then ready. */ dev_info(&pdev->dev, "Waiting for board to reset.\n"); rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY); if (rc) { dev_warn(&pdev->dev, "Failed waiting for board to hard reset." " Will try soft reset.\n"); rc = -ENOTSUPP; /* Not expected, but try soft reset later */ goto unmap_cfgtable; } rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY); if (rc) { dev_warn(&pdev->dev, "failed waiting for board to become ready " "after hard reset\n"); goto unmap_cfgtable; } rc = controller_reset_failed(vaddr); if (rc < 0) goto unmap_cfgtable; if (rc) { dev_warn(&pdev->dev, "Unable to successfully hard reset " "controller. 
Will try soft reset.\n"); rc = -ENOTSUPP; /* Not expected, but try soft reset later */ } else { dev_info(&pdev->dev, "Board ready after hard reset.\n"); } unmap_cfgtable: iounmap(cfgtable); unmap_vaddr: iounmap(vaddr); return rc; } static __devinit int cciss_init_reset_devices(struct pci_dev *pdev) { int rc, i; if (!reset_devices) return 0; /* Reset the controller with a PCI power-cycle or via doorbell */ rc = cciss_kdump_hard_reset_controller(pdev); /* -ENOTSUPP here means we cannot reset the controller * but it's already (and still) up and running in * "performant mode". Or, it might be 640x, which can't reset * due to concerns about shared bbwc between 6402/6404 pair. */ if (rc == -ENOTSUPP) return rc; /* just try to do the kdump anyhow. */ if (rc) return -ENODEV; /* Now try to get the controller to respond to a no-op */ dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n"); for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) { if (cciss_noop(pdev) == 0) break; else dev_warn(&pdev->dev, "no-op failed%s\n", (i < CCISS_POST_RESET_NOOP_RETRIES - 1 ? "; re-trying" : "")); msleep(CCISS_POST_RESET_NOOP_INTERVAL_MSECS); } return 0; } static __devinit int cciss_allocate_cmd_pool(ctlr_info_t *h) { h->cmd_pool_bits = kmalloc( DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL); h->cmd_pool = pci_alloc_consistent(h->pdev, h->nr_cmds * sizeof(CommandList_struct), &(h->cmd_pool_dhandle)); h->errinfo_pool = pci_alloc_consistent(h->pdev, h->nr_cmds * sizeof(ErrorInfo_struct), &(h->errinfo_pool_dhandle)); if ((h->cmd_pool_bits == NULL) || (h->cmd_pool == NULL) || (h->errinfo_pool == NULL)) { dev_err(&h->pdev->dev, "out of memory"); return -ENOMEM; } return 0; } static __devinit int cciss_allocate_scatterlists(ctlr_info_t *h) { int i; /* zero it, so that on free we need not know how many were alloc'ed */ h->scatter_list = kzalloc(h->max_commands * sizeof(struct scatterlist *), GFP_KERNEL); if (!h->scatter_list) return -ENOMEM; for (i = 0; i < h->nr_cmds; i++) { h->scatter_list[i] = kmalloc(sizeof(struct scatterlist) * h->maxsgentries, GFP_KERNEL); if (h->scatter_list[i] == NULL) { dev_err(&h->pdev->dev, "could not allocate " "s/g lists\n"); return -ENOMEM; } } return 0; } static void cciss_free_scatterlists(ctlr_info_t *h) { int i; if (h->scatter_list) { for (i = 0; i < h->nr_cmds; i++) kfree(h->scatter_list[i]); kfree(h->scatter_list); } } static void cciss_free_cmd_pool(ctlr_info_t *h) { kfree(h->cmd_pool_bits); if (h->cmd_pool) pci_free_consistent(h->pdev, h->nr_cmds * sizeof(CommandList_struct), h->cmd_pool, h->cmd_pool_dhandle); if (h->errinfo_pool) pci_free_consistent(h->pdev, h->nr_cmds * sizeof(ErrorInfo_struct), h->errinfo_pool, h->errinfo_pool_dhandle); } static int cciss_request_irq(ctlr_info_t *h, irqreturn_t (*msixhandler)(int, void *), irqreturn_t (*intxhandler)(int, void *)) { if (h->msix_vector || h->msi_vector) { if (!request_irq(h->intr[PERF_MODE_INT], msixhandler, IRQF_DISABLED, h->devname, h)) return 0; dev_err(&h->pdev->dev, "Unable to get msi irq %d" " for %s\n", h->intr[PERF_MODE_INT], h->devname); return -1; } if (!request_irq(h->intr[PERF_MODE_INT], intxhandler, IRQF_DISABLED, h->devname, h)) return 0; dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n", h->intr[PERF_MODE_INT], h->devname); return -1; } static int __devinit cciss_kdump_soft_reset(ctlr_info_t *h) { if (cciss_send_reset(h, CTLR_LUNID, CCISS_RESET_TYPE_CONTROLLER)) { dev_warn(&h->pdev->dev, "Resetting array controller failed.\n"); return -EIO; } dev_info(&h->pdev->dev, 
"Waiting for board to soft reset.\n"); if (cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) { dev_warn(&h->pdev->dev, "Soft reset had no effect.\n"); return -1; } dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n"); if (cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) { dev_warn(&h->pdev->dev, "Board failed to become ready " "after soft reset.\n"); return -1; } return 0; } static void cciss_undo_allocations_after_kdump_soft_reset(ctlr_info_t *h) { int ctlr = h->ctlr; free_irq(h->intr[PERF_MODE_INT], h); #ifdef CONFIG_PCI_MSI if (h->msix_vector) pci_disable_msix(h->pdev); else if (h->msi_vector) pci_disable_msi(h->pdev); #endif /* CONFIG_PCI_MSI */ cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); cciss_free_scatterlists(h); cciss_free_cmd_pool(h); kfree(h->blockFetchTable); if (h->reply_pool) pci_free_consistent(h->pdev, h->max_commands * sizeof(__u64), h->reply_pool, h->reply_pool_dhandle); if (h->transtable) iounmap(h->transtable); if (h->cfgtable) iounmap(h->cfgtable); if (h->vaddr) iounmap(h->vaddr); unregister_blkdev(h->major, h->devname); cciss_destroy_hba_sysfs_entry(h); pci_release_regions(h->pdev); kfree(h); hba[ctlr] = NULL; } /* * This is it. Find all the controllers and register them. I really hate * stealing all these major device numbers. * returns the number of block devices registered. */ static int __devinit cciss_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int i; int j = 0; int rc; int try_soft_reset = 0; int dac, return_code; InquiryData_struct *inq_buff; ctlr_info_t *h; unsigned long flags; rc = cciss_init_reset_devices(pdev); if (rc) { if (rc != -ENOTSUPP) return rc; /* If the reset fails in a particular way (it has no way to do * a proper hard reset, so returns -ENOTSUPP) we can try to do * a soft reset once we get the controller configured up to the * point that it can accept a command. */ try_soft_reset = 1; rc = 0; } reinit_after_soft_reset: i = alloc_cciss_hba(pdev); if (i < 0) return -1; h = hba[i]; h->pdev = pdev; h->busy_initializing = 1; INIT_LIST_HEAD(&h->cmpQ); INIT_LIST_HEAD(&h->reqQ); mutex_init(&h->busy_shutting_down); if (cciss_pci_init(h) != 0) goto clean_no_release_regions; sprintf(h->devname, "cciss%d", i); h->ctlr = i; if (cciss_tape_cmds < 2) cciss_tape_cmds = 2; if (cciss_tape_cmds > 16) cciss_tape_cmds = 16; init_completion(&h->scan_wait); if (cciss_create_hba_sysfs_entry(h)) goto clean0; /* configure PCI DMA stuff */ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) dac = 1; else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) dac = 0; else { dev_err(&h->pdev->dev, "no suitable DMA available\n"); goto clean1; } /* * register with the major number, or get a dynamic major number * by passing 0 as argument. This is done for greater than * 8 controller support. */ if (i < MAX_CTLR_ORIG) h->major = COMPAQ_CISS_MAJOR + i; rc = register_blkdev(h->major, h->devname); if (rc == -EBUSY || rc == -EINVAL) { dev_err(&h->pdev->dev, "Unable to get major number %d for %s " "on hba %d\n", h->major, h->devname, i); goto clean1; } else { if (i >= MAX_CTLR_ORIG) h->major = rc; } /* make sure the board interrupts are off */ h->access.set_intr_mask(h, CCISS_INTR_OFF); rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx); if (rc) goto clean2; dev_info(&h->pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n", h->devname, pdev->device, pci_name(pdev), h->intr[PERF_MODE_INT], dac ? 
"" : " not"); if (cciss_allocate_cmd_pool(h)) goto clean4; if (cciss_allocate_scatterlists(h)) goto clean4; h->cmd_sg_list = cciss_allocate_sg_chain_blocks(h, h->chainsize, h->nr_cmds); if (!h->cmd_sg_list && h->chainsize > 0) goto clean4; spin_lock_init(&h->lock); /* Initialize the pdev driver private data. have it point to h. */ pci_set_drvdata(pdev, h); /* command and error info recs zeroed out before they are used */ memset(h->cmd_pool_bits, 0, DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) * sizeof(unsigned long)); h->num_luns = 0; h->highest_lun = -1; for (j = 0; j < CISS_MAX_LUN; j++) { h->drv[j] = NULL; h->gendisk[j] = NULL; } /* At this point, the controller is ready to take commands. * Now, if reset_devices and the hard reset didn't work, try * the soft reset and see if that works. */ if (try_soft_reset) { /* This is kind of gross. We may or may not get a completion * from the soft reset command, and if we do, then the value * from the fifo may or may not be valid. So, we wait 10 secs * after the reset throwing away any completions we get during * that time. Unregister the interrupt handler and register * fake ones to scoop up any residual completions. */ spin_lock_irqsave(&h->lock, flags); h->access.set_intr_mask(h, CCISS_INTR_OFF); spin_unlock_irqrestore(&h->lock, flags); free_irq(h->intr[PERF_MODE_INT], h); rc = cciss_request_irq(h, cciss_msix_discard_completions, cciss_intx_discard_completions); if (rc) { dev_warn(&h->pdev->dev, "Failed to request_irq after " "soft reset.\n"); goto clean4; } rc = cciss_kdump_soft_reset(h); if (rc) { dev_warn(&h->pdev->dev, "Soft reset failed.\n"); goto clean4; } dev_info(&h->pdev->dev, "Board READY.\n"); dev_info(&h->pdev->dev, "Waiting for stale completions to drain.\n"); h->access.set_intr_mask(h, CCISS_INTR_ON); msleep(10000); h->access.set_intr_mask(h, CCISS_INTR_OFF); rc = controller_reset_failed(h->cfgtable); if (rc) dev_info(&h->pdev->dev, "Soft reset appears to have failed.\n"); /* since the controller's reset, we have to go back and re-init * everything. Easiest to just forget what we've done and do it * all over again. 
*/ cciss_undo_allocations_after_kdump_soft_reset(h); try_soft_reset = 0; if (rc) /* don't go to clean4, we already unallocated */ return -ENODEV; goto reinit_after_soft_reset; } cciss_scsi_setup(h); /* Turn the interrupts on so we can service requests */ h->access.set_intr_mask(h, CCISS_INTR_ON); /* Get the firmware version */ inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL); if (inq_buff == NULL) { dev_err(&h->pdev->dev, "out of memory\n"); goto clean4; } return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff, sizeof(InquiryData_struct), 0, CTLR_LUNID, TYPE_CMD); if (return_code == IO_OK) { h->firm_ver[0] = inq_buff->data_byte[32]; h->firm_ver[1] = inq_buff->data_byte[33]; h->firm_ver[2] = inq_buff->data_byte[34]; h->firm_ver[3] = inq_buff->data_byte[35]; } else { /* send command failed */ dev_warn(&h->pdev->dev, "unable to determine firmware" " version of controller\n"); } kfree(inq_buff); cciss_procinit(h); h->cciss_max_sectors = 8192; rebuild_lun_table(h, 1, 0); h->busy_initializing = 0; return 1; clean4: cciss_free_cmd_pool(h); cciss_free_scatterlists(h); cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); free_irq(h->intr[PERF_MODE_INT], h); clean2: unregister_blkdev(h->major, h->devname); clean1: cciss_destroy_hba_sysfs_entry(h); clean0: pci_release_regions(pdev); clean_no_release_regions: h->busy_initializing = 0; /* * Deliberately omit pci_disable_device(): it does something nasty to * Smart Array controllers that pci_enable_device does not undo */ pci_set_drvdata(pdev, NULL); free_hba(h); return -1; } static void cciss_shutdown(struct pci_dev *pdev) { ctlr_info_t *h; char *flush_buf; int return_code; h = pci_get_drvdata(pdev); flush_buf = kzalloc(4, GFP_KERNEL); if (!flush_buf) { dev_warn(&h->pdev->dev, "cache not flushed, out of memory.\n"); return; } /* write all data in the battery backed cache to disk */ memset(flush_buf, 0, 4); return_code = sendcmd_withirq(h, CCISS_CACHE_FLUSH, flush_buf, 4, 0, CTLR_LUNID, TYPE_CMD); kfree(flush_buf); if (return_code != IO_OK) dev_warn(&h->pdev->dev, "Error flushing cache\n"); h->access.set_intr_mask(h, CCISS_INTR_OFF); free_irq(h->intr[PERF_MODE_INT], h); } static void __devexit cciss_remove_one(struct pci_dev *pdev) { ctlr_info_t *h; int i, j; if (pci_get_drvdata(pdev) == NULL) { dev_err(&pdev->dev, "Unable to remove device\n"); return; } h = pci_get_drvdata(pdev); i = h->ctlr; if (hba[i] == NULL) { dev_err(&pdev->dev, "device appears to already be removed\n"); return; } mutex_lock(&h->busy_shutting_down); remove_from_scan_list(h); remove_proc_entry(h->devname, proc_cciss); unregister_blkdev(h->major, h->devname); /* remove it from the disk list */ for (j = 0; j < CISS_MAX_LUN; j++) { struct gendisk *disk = h->gendisk[j]; if (disk) { struct request_queue *q = disk->queue; if (disk->flags & GENHD_FL_UP) { cciss_destroy_ld_sysfs_entry(h, j, 1); del_gendisk(disk); } if (q) blk_cleanup_queue(q); } } #ifdef CONFIG_CISS_SCSI_TAPE cciss_unregister_scsi(h); /* unhook from SCSI subsystem */ #endif cciss_shutdown(pdev); #ifdef CONFIG_PCI_MSI if (h->msix_vector) pci_disable_msix(h->pdev); else if (h->msi_vector) pci_disable_msi(h->pdev); #endif /* CONFIG_PCI_MSI */ iounmap(h->transtable); iounmap(h->cfgtable); iounmap(h->vaddr); cciss_free_cmd_pool(h); /* Free up sg elements */ for (j = 0; j < h->nr_cmds; j++) kfree(h->scatter_list[j]); kfree(h->scatter_list); cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); kfree(h->blockFetchTable); if (h->reply_pool) pci_free_consistent(h->pdev, h->max_commands * sizeof(__u64), 
h->reply_pool, h->reply_pool_dhandle); /* * Deliberately omit pci_disable_device(): it does something nasty to * Smart Array controllers that pci_enable_device does not undo */ pci_release_regions(pdev); pci_set_drvdata(pdev, NULL); cciss_destroy_hba_sysfs_entry(h); mutex_unlock(&h->busy_shutting_down); free_hba(h); } static struct pci_driver cciss_pci_driver = { .name = "cciss", .probe = cciss_init_one, .remove = __devexit_p(cciss_remove_one), .id_table = cciss_pci_device_id, /* id_table */ .shutdown = cciss_shutdown, }; /* * This is it. Register the PCI driver information for the cards we control * the OS will call our registered routines when it finds one of our cards. */ static int __init cciss_init(void) { int err; /* * The hardware requires that commands are aligned on a 64-bit * boundary. Given that we use pci_alloc_consistent() to allocate an * array of them, the size must be a multiple of 8 bytes. */ BUILD_BUG_ON(sizeof(CommandList_struct) % COMMANDLIST_ALIGNMENT); printk(KERN_INFO DRIVER_NAME "\n"); err = bus_register(&cciss_bus_type); if (err) return err; /* Start the scan thread */ cciss_scan_thread = kthread_run(scan_thread, NULL, "cciss_scan"); if (IS_ERR(cciss_scan_thread)) { err = PTR_ERR(cciss_scan_thread); goto err_bus_unregister; } /* Register for our PCI devices */ err = pci_register_driver(&cciss_pci_driver); if (err) goto err_thread_stop; return err; err_thread_stop: kthread_stop(cciss_scan_thread); err_bus_unregister: bus_unregister(&cciss_bus_type); return err; } static void __exit cciss_cleanup(void) { int i; pci_unregister_driver(&cciss_pci_driver); /* double check that all controller entrys have been removed */ for (i = 0; i < MAX_CTLR; i++) { if (hba[i] != NULL) { dev_warn(&hba[i]->pdev->dev, "had to remove controller\n"); cciss_remove_one(hba[i]->pdev); } } kthread_stop(cciss_scan_thread); if (proc_cciss) remove_proc_entry("driver/cciss", NULL); bus_unregister(&cciss_bus_type); } module_init(cciss_init); module_exit(cciss_cleanup);
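/*
 * Illustrative worked example (not part of the original driver) for the
 * performant-mode block fetch table programmed in
 * cciss_enter_performant_mode() above: with
 * bft[] = { 5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4 }, a command using
 * 3 scatter-gather entries needs 3 + MINIMUM_TRANSFER_BLOCKS = 7
 * sixteen-byte blocks, so calc_bucket_map() assigns it bucket 2
 * (bft[2] == 8) and the controller fetches 8 * 16 = 128 bytes for that
 * command, with the bucket index carried in the low bits of its tag.
 */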
gpl-2.0
aksalj/kernel_rpi
tools/testing/selftests/size/get_size.c
1320
2453
/* * Copyright 2014 Sony Mobile Communications Inc. * * Licensed under the terms of the GNU GPL License version 2 * * Selftest for runtime system size * * Prints the amount of RAM that the currently running system is using. * * This program tries to be as small as possible itself, to * avoid perturbing the system memory utilization with its * own execution. It also attempts to have as few dependencies * on kernel features as possible. * * It should be statically linked, with startup libs avoided. * It uses no library calls, and only the following 3 syscalls: * sysinfo(), write(), and _exit() * * For output, it avoids printf (which in some C libraries * has large external dependencies) by implementing it's own * number output and print routines, and using __builtin_strlen() */ #include <sys/sysinfo.h> #include <unistd.h> #define STDOUT_FILENO 1 static int print(const char *s) { return write(STDOUT_FILENO, s, __builtin_strlen(s)); } static inline char *num_to_str(unsigned long num, char *buf, int len) { unsigned int digit; /* put digits in buffer from back to front */ buf += len - 1; *buf = 0; do { digit = num % 10; *(--buf) = digit + '0'; num /= 10; } while (num > 0); return buf; } static int print_num(unsigned long num) { char num_buf[30]; return print(num_to_str(num, num_buf, sizeof(num_buf))); } static int print_k_value(const char *s, unsigned long num, unsigned long units) { unsigned long long temp; int ccode; print(s); temp = num; temp = (temp * units)/1024; num = temp; ccode = print_num(num); print("\n"); return ccode; } /* this program has no main(), as startup libraries are not used */ void _start(void) { int ccode; struct sysinfo info; unsigned long used; print("Testing system size.\n"); print("1..1\n"); ccode = sysinfo(&info); if (ccode < 0) { print("not ok 1 get runtime memory use\n"); print("# could not get sysinfo\n"); _exit(ccode); } /* ignore cache complexities for now */ used = info.totalram - info.freeram - info.bufferram; print_k_value("ok 1 get runtime memory use # size = ", used, info.mem_unit); print("# System runtime memory report (units in Kilobytes):\n"); print_k_value("# Total: ", info.totalram, info.mem_unit); print_k_value("# Free: ", info.freeram, info.mem_unit); print_k_value("# Buffer: ", info.bufferram, info.mem_unit); print_k_value("# In use: ", used, info.mem_unit); _exit(0); }
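/*
 * Illustrative note (not part of the original selftest): num_to_str()
 * fills the buffer from the end, so num_to_str(2453, buf, sizeof(buf))
 * returns a pointer into buf at "2453"; print_k_value() then reports
 * (num * mem_unit) / 1024, i.e. the value converted to kilobytes.
 */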
gpl-2.0
KernelWorld/KW-Kenzo
drivers/ata/sata_mv.c
1576
124566
/* * sata_mv.c - Marvell SATA support * * Copyright 2008-2009: Marvell Corporation, all rights reserved. * Copyright 2005: EMC Corporation, all rights reserved. * Copyright 2005 Red Hat, Inc. All rights reserved. * * Originally written by Brett Russ. * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>. * * Please ALWAYS copy linux-ide@vger.kernel.org on emails. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /* * sata_mv TODO list: * * --> Develop a low-power-consumption strategy, and implement it. * * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds. * * --> [Experiment, Marvell value added] Is it possible to use target * mode to cross-connect two Linux boxes with Marvell cards? If so, * creating LibATA target mode support would be very interesting. * * Target mode, for those without docs, is the ability to directly * connect two SATA ports. */ /* * 80x1-B2 errata PCI#11: * * Users of the 6041/6081 Rev.B2 chips (current is C0) * should be careful to insert those cards only onto PCI-X bus #0, * and only in device slots 0..7, not higher. The chips may not * work correctly otherwise (note: this is a pretty rare condition). 
*/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/dmapool.h> #include <linux/dma-mapping.h> #include <linux/device.h> #include <linux/clk.h> #include <linux/platform_device.h> #include <linux/ata_platform.h> #include <linux/mbus.h> #include <linux/bitops.h> #include <linux/gfp.h> #include <linux/of.h> #include <linux/of_irq.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <linux/libata.h> #define DRV_NAME "sata_mv" #define DRV_VERSION "1.28" /* * module options */ #ifdef CONFIG_PCI static int msi; module_param(msi, int, S_IRUGO); MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)"); #endif static int irq_coalescing_io_count; module_param(irq_coalescing_io_count, int, S_IRUGO); MODULE_PARM_DESC(irq_coalescing_io_count, "IRQ coalescing I/O count threshold (0..255)"); static int irq_coalescing_usecs; module_param(irq_coalescing_usecs, int, S_IRUGO); MODULE_PARM_DESC(irq_coalescing_usecs, "IRQ coalescing time threshold in usecs"); enum { /* BAR's are enumerated in terms of pci_resource_start() terms */ MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */ MV_IO_BAR = 2, /* offset 0x18: IO space */ MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */ MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */ MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */ /* For use with both IRQ coalescing methods ("all ports" or "per-HC" */ COAL_CLOCKS_PER_USEC = 150, /* for calculating COAL_TIMEs */ MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1), /* internal clocks count */ MAX_COAL_IO_COUNT = 255, /* completed I/O count */ MV_PCI_REG_BASE = 0, /* * Per-chip ("all ports") interrupt coalescing feature. * This is only for GEN_II / GEN_IIE hardware. * * Coalescing defers the interrupt until either the IO_THRESHOLD * (count of completed I/Os) is met, or the TIME_THRESHOLD is met. */ COAL_REG_BASE = 0x18000, IRQ_COAL_CAUSE = (COAL_REG_BASE + 0x08), ALL_PORTS_COAL_IRQ = (1 << 4), /* all ports irq event */ IRQ_COAL_IO_THRESHOLD = (COAL_REG_BASE + 0xcc), IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0), /* * Registers for the (unused here) transaction coalescing feature: */ TRAN_COAL_CAUSE_LO = (COAL_REG_BASE + 0x88), TRAN_COAL_CAUSE_HI = (COAL_REG_BASE + 0x8c), SATAHC0_REG_BASE = 0x20000, FLASH_CTL = 0x1046c, GPIO_PORT_CTL = 0x104f0, RESET_CFG = 0x180d8, MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ, MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ, MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */ MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ, MV_MAX_Q_DEPTH = 32, MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1, /* CRQB needs alignment on a 1KB boundary. Size == 1KB * CRPB needs alignment on a 256B boundary. Size == 256B * ePRD (SG) entries need alignment on a 16B boundary. 
Size == 16B */ MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH), MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH), MV_MAX_SG_CT = 256, MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT), /* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */ MV_PORT_HC_SHIFT = 2, MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */ /* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */ MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */ /* Host Flags */ MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING, MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI, MV_GEN_II_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NCQ | ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA, MV_GEN_IIE_FLAGS = MV_GEN_II_FLAGS | ATA_FLAG_AN, CRQB_FLAG_READ = (1 << 0), CRQB_TAG_SHIFT = 1, CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */ CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */ CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */ CRQB_CMD_ADDR_SHIFT = 8, CRQB_CMD_CS = (0x2 << 11), CRQB_CMD_LAST = (1 << 15), CRPB_FLAG_STATUS_SHIFT = 8, CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */ CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */ EPRD_FLAG_END_OF_TBL = (1 << 31), /* PCI interface registers */ MV_PCI_COMMAND = 0xc00, MV_PCI_COMMAND_MWRCOM = (1 << 4), /* PCI Master Write Combining */ MV_PCI_COMMAND_MRDTRIG = (1 << 7), /* PCI Master Read Trigger */ PCI_MAIN_CMD_STS = 0xd30, STOP_PCI_MASTER = (1 << 2), PCI_MASTER_EMPTY = (1 << 3), GLOB_SFT_RST = (1 << 4), MV_PCI_MODE = 0xd00, MV_PCI_MODE_MASK = 0x30, MV_PCI_EXP_ROM_BAR_CTL = 0xd2c, MV_PCI_DISC_TIMER = 0xd04, MV_PCI_MSI_TRIGGER = 0xc38, MV_PCI_SERR_MASK = 0xc28, MV_PCI_XBAR_TMOUT = 0x1d04, MV_PCI_ERR_LOW_ADDRESS = 0x1d40, MV_PCI_ERR_HIGH_ADDRESS = 0x1d44, MV_PCI_ERR_ATTRIBUTE = 0x1d48, MV_PCI_ERR_COMMAND = 0x1d50, PCI_IRQ_CAUSE = 0x1d58, PCI_IRQ_MASK = 0x1d5c, PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */ PCIE_IRQ_CAUSE = 0x1900, PCIE_IRQ_MASK = 0x1910, PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */ /* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */ PCI_HC_MAIN_IRQ_CAUSE = 0x1d60, PCI_HC_MAIN_IRQ_MASK = 0x1d64, SOC_HC_MAIN_IRQ_CAUSE = 0x20020, SOC_HC_MAIN_IRQ_MASK = 0x20024, ERR_IRQ = (1 << 0), /* shift by (2 * port #) */ DONE_IRQ = (1 << 1), /* shift by (2 * port #) */ HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */ HC_SHIFT = 9, /* bits 9-17 = HC1's ports */ DONE_IRQ_0_3 = 0x000000aa, /* DONE_IRQ ports 0,1,2,3 */ DONE_IRQ_4_7 = (DONE_IRQ_0_3 << HC_SHIFT), /* 4,5,6,7 */ PCI_ERR = (1 << 18), TRAN_COAL_LO_DONE = (1 << 19), /* transaction coalescing */ TRAN_COAL_HI_DONE = (1 << 20), /* transaction coalescing */ PORTS_0_3_COAL_DONE = (1 << 8), /* HC0 IRQ coalescing */ PORTS_4_7_COAL_DONE = (1 << 17), /* HC1 IRQ coalescing */ ALL_PORTS_COAL_DONE = (1 << 21), /* GEN_II(E) IRQ coalescing */ GPIO_INT = (1 << 22), SELF_INT = (1 << 23), TWSI_INT = (1 << 24), HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */ HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */ HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */ /* SATAHC registers */ HC_CFG = 0x00, HC_IRQ_CAUSE = 0x14, DMA_IRQ = (1 << 0), /* shift by port # */ HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */ DEV_IRQ = (1 << 8), /* shift by port # */ /* * Per-HC (Host-Controller) interrupt coalescing feature. * This is present on all chip generations. * * Coalescing defers the interrupt until either the IO_THRESHOLD * (count of completed I/Os) is met, or the TIME_THRESHOLD is met. 
*/ HC_IRQ_COAL_IO_THRESHOLD = 0x000c, HC_IRQ_COAL_TIME_THRESHOLD = 0x0010, SOC_LED_CTRL = 0x2c, SOC_LED_CTRL_BLINK = (1 << 0), /* Active LED blink */ SOC_LED_CTRL_ACT_PRESENCE = (1 << 2), /* Multiplex dev presence */ /* with dev activity LED */ /* Shadow block registers */ SHD_BLK = 0x100, SHD_CTL_AST = 0x20, /* ofs from SHD_BLK */ /* SATA registers */ SATA_STATUS = 0x300, /* ctrl, err regs follow status */ SATA_ACTIVE = 0x350, FIS_IRQ_CAUSE = 0x364, FIS_IRQ_CAUSE_AN = (1 << 9), /* async notification */ LTMODE = 0x30c, /* requires read-after-write */ LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */ PHY_MODE2 = 0x330, PHY_MODE3 = 0x310, PHY_MODE4 = 0x314, /* requires read-after-write */ PHY_MODE4_CFG_MASK = 0x00000003, /* phy internal config field */ PHY_MODE4_CFG_VALUE = 0x00000001, /* phy internal config field */ PHY_MODE4_RSVD_ZEROS = 0x5de3fffa, /* Gen2e always write zeros */ PHY_MODE4_RSVD_ONES = 0x00000005, /* Gen2e always write ones */ SATA_IFCTL = 0x344, SATA_TESTCTL = 0x348, SATA_IFSTAT = 0x34c, VENDOR_UNIQUE_FIS = 0x35c, FISCFG = 0x360, FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */ FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */ PHY_MODE9_GEN2 = 0x398, PHY_MODE9_GEN1 = 0x39c, PHYCFG_OFS = 0x3a0, /* only in 65n devices */ MV5_PHY_MODE = 0x74, MV5_LTMODE = 0x30, MV5_PHY_CTL = 0x0C, SATA_IFCFG = 0x050, LP_PHY_CTL = 0x058, MV_M2_PREAMP_MASK = 0x7e0, /* Port registers */ EDMA_CFG = 0, EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */ EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */ EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */ EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */ EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */ EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */ EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */ EDMA_ERR_IRQ_CAUSE = 0x8, EDMA_ERR_IRQ_MASK = 0xc, EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */ EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */ EDMA_ERR_DEV = (1 << 2), /* device error */ EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */ EDMA_ERR_DEV_CON = (1 << 4), /* device connected */ EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */ EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */ EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */ EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */ EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */ EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */ EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */ EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */ EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */ EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */ EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */ EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */ EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */ EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */ EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */ EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */ EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */ EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */ EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */ EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */ EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */ EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */ EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */ 
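	/*
	 * The _5 suffixed bits below are the Gen-I (50xx) layout of this
	 * register: they reuse bit positions that mean something else on
	 * Gen-II/IIE (e.g. bit 5 is EDMA_ERR_SERR above, but an overrun
	 * indication on Gen-I), hence the separate EDMA_EH_FREEZE and
	 * EDMA_EH_FREEZE_5 masks further down.
	 */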
EDMA_ERR_OVERRUN_5 = (1 << 5), EDMA_ERR_UNDERRUN_5 = (1 << 6), EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 | EDMA_ERR_LNK_CTRL_RX_1 | EDMA_ERR_LNK_CTRL_RX_3 | EDMA_ERR_LNK_CTRL_TX, EDMA_EH_FREEZE = EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON | EDMA_ERR_SERR | EDMA_ERR_SELF_DIS | EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR | EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 | EDMA_ERR_LNK_DATA_RX | EDMA_ERR_LNK_DATA_TX | EDMA_ERR_TRANS_PROTO, EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON | EDMA_ERR_OVERRUN_5 | EDMA_ERR_UNDERRUN_5 | EDMA_ERR_SELF_DIS_5 | EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR | EDMA_ERR_IORDY, EDMA_REQ_Q_BASE_HI = 0x10, EDMA_REQ_Q_IN_PTR = 0x14, /* also contains BASE_LO */ EDMA_REQ_Q_OUT_PTR = 0x18, EDMA_REQ_Q_PTR_SHIFT = 5, EDMA_RSP_Q_BASE_HI = 0x1c, EDMA_RSP_Q_IN_PTR = 0x20, EDMA_RSP_Q_OUT_PTR = 0x24, /* also contains BASE_LO */ EDMA_RSP_Q_PTR_SHIFT = 3, EDMA_CMD = 0x28, /* EDMA command register */ EDMA_EN = (1 << 0), /* enable EDMA */ EDMA_DS = (1 << 1), /* disable EDMA; self-negated */ EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */ EDMA_STATUS = 0x30, /* EDMA engine status */ EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */ EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */ EDMA_IORDY_TMOUT = 0x34, EDMA_ARB_CFG = 0x38, EDMA_HALTCOND = 0x60, /* GenIIe halt conditions */ EDMA_UNKNOWN_RSVD = 0x6C, /* GenIIe unknown/reserved */ BMDMA_CMD = 0x224, /* bmdma command register */ BMDMA_STATUS = 0x228, /* bmdma status register */ BMDMA_PRD_LOW = 0x22c, /* bmdma PRD addr 31:0 */ BMDMA_PRD_HIGH = 0x230, /* bmdma PRD addr 63:32 */ /* Host private flags (hp_flags) */ MV_HP_FLAG_MSI = (1 << 0), MV_HP_ERRATA_50XXB0 = (1 << 1), MV_HP_ERRATA_50XXB2 = (1 << 2), MV_HP_ERRATA_60X1B2 = (1 << 3), MV_HP_ERRATA_60X1C0 = (1 << 4), MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */ MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */ MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */ MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */ MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */ MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */ MV_HP_QUIRK_LED_BLINK_EN = (1 << 12), /* is led blinking enabled? */ MV_HP_FIX_LP_PHY_CTL = (1 << 13), /* fix speed in LP_PHY_CTL ? */ /* Port private flags (pp_flags) */ MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */ MV_PP_FLAG_FBS_EN = (1 << 2), /* is EDMA set up for FBS? */ MV_PP_FLAG_DELAYED_EH = (1 << 3), /* delayed dev err handling */ MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4), /* ignore initial ATA_DRDY */ }; #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II) #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE) #define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE) #define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC) #define WINDOW_CTRL(i) (0x20030 + ((i) << 4)) #define WINDOW_BASE(i) (0x20034 + ((i) << 4)) enum { /* DMA boundary 0xffff is required by the s/g splitting * we need on /length/ in mv_fill-sg(). 
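 * (mv_fill_sg() splits any segment that would cross a 64KB boundary
 * into multiple ePRDs, which is also why .sg_tablesize is only
 * MV_MAX_SG_CT / 2 in the scsi_host_templates below.)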
*/ MV_DMA_BOUNDARY = 0xffffU, /* mask of register bits containing lower 32 bits * of EDMA request queue DMA address */ EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U, /* ditto, for response queue */ EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U, }; enum chip_type { chip_504x, chip_508x, chip_5080, chip_604x, chip_608x, chip_6042, chip_7042, chip_soc, }; /* Command ReQuest Block: 32B */ struct mv_crqb { __le32 sg_addr; __le32 sg_addr_hi; __le16 ctrl_flags; __le16 ata_cmd[11]; }; struct mv_crqb_iie { __le32 addr; __le32 addr_hi; __le32 flags; __le32 len; __le32 ata_cmd[4]; }; /* Command ResPonse Block: 8B */ struct mv_crpb { __le16 id; __le16 flags; __le32 tmstmp; }; /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */ struct mv_sg { __le32 addr; __le32 flags_size; __le32 addr_hi; __le32 reserved; }; /* * We keep a local cache of a few frequently accessed port * registers here, to avoid having to read them (very slow) * when switching between EDMA and non-EDMA modes. */ struct mv_cached_regs { u32 fiscfg; u32 ltmode; u32 haltcond; u32 unknown_rsvd; }; struct mv_port_priv { struct mv_crqb *crqb; dma_addr_t crqb_dma; struct mv_crpb *crpb; dma_addr_t crpb_dma; struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH]; dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH]; unsigned int req_idx; unsigned int resp_idx; u32 pp_flags; struct mv_cached_regs cached; unsigned int delayed_eh_pmp_map; }; struct mv_port_signal { u32 amps; u32 pre; }; struct mv_host_priv { u32 hp_flags; unsigned int board_idx; u32 main_irq_mask; struct mv_port_signal signal[8]; const struct mv_hw_ops *ops; int n_ports; void __iomem *base; void __iomem *main_irq_cause_addr; void __iomem *main_irq_mask_addr; u32 irq_cause_offset; u32 irq_mask_offset; u32 unmask_all_irqs; #if defined(CONFIG_HAVE_CLK) struct clk *clk; struct clk **port_clks; #endif /* * These consistent DMA memory pools give us guaranteed * alignment for hardware-accessed data structures, * and less memory waste in accomplishing the alignment. 
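 * Each port draws one CRQB queue (MV_CRQB_Q_SZ), one CRPB queue
 * (MV_CRPB_Q_SZ) and one MV_SG_TBL_SZ ePRD table per tag (Gen-I shares
 * a single table) from these pools; see mv_port_start() and
 * mv_port_free_dma_mem().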
*/ struct dma_pool *crqb_pool; struct dma_pool *crpb_pool; struct dma_pool *sg_tbl_pool; }; struct mv_hw_ops { void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port); void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio); void (*read_preamp)(struct mv_host_priv *hpriv, int idx, void __iomem *mmio); int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int n_hc); void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio); void (*reset_bus)(struct ata_host *host, void __iomem *mmio); }; static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val); static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val); static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val); static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val); static int mv_port_start(struct ata_port *ap); static void mv_port_stop(struct ata_port *ap); static int mv_qc_defer(struct ata_queued_cmd *qc); static void mv_qc_prep(struct ata_queued_cmd *qc); static void mv_qc_prep_iie(struct ata_queued_cmd *qc); static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); static int mv_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline); static void mv_eh_freeze(struct ata_port *ap); static void mv_eh_thaw(struct ata_port *ap); static void mv6_dev_config(struct ata_device *dev); static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port); static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio); static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, void __iomem *mmio); static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int n_hc); static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio); static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port); static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio); static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, void __iomem *mmio); static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int n_hc); static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); static void mv_soc_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio); static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx, void __iomem *mmio); static int mv_soc_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int n_hc); static void mv_soc_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio); static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio); static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port); static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio); static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port_no); static int mv_stop_edma(struct ata_port *ap); static int mv_stop_edma_engine(void __iomem *port_mmio); static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma); static void mv_pmp_select(struct ata_port *ap, int pmp); static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline); static int mv_softreset(struct ata_link *link, unsigned int *class, unsigned long deadline); static void mv_pmp_error_handler(struct ata_port *ap); static void 
mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp); static void mv_sff_irq_clear(struct ata_port *ap); static int mv_check_atapi_dma(struct ata_queued_cmd *qc); static void mv_bmdma_setup(struct ata_queued_cmd *qc); static void mv_bmdma_start(struct ata_queued_cmd *qc); static void mv_bmdma_stop(struct ata_queued_cmd *qc); static u8 mv_bmdma_status(struct ata_port *ap); static u8 mv_sff_check_status(struct ata_port *ap); /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below * because we have to allow room for worst case splitting of * PRDs for 64K boundaries in mv_fill_sg(). */ #ifdef CONFIG_PCI static struct scsi_host_template mv5_sht = { ATA_BASE_SHT(DRV_NAME), .sg_tablesize = MV_MAX_SG_CT / 2, .dma_boundary = MV_DMA_BOUNDARY, }; #endif static struct scsi_host_template mv6_sht = { ATA_NCQ_SHT(DRV_NAME), .can_queue = MV_MAX_Q_DEPTH - 1, .sg_tablesize = MV_MAX_SG_CT / 2, .dma_boundary = MV_DMA_BOUNDARY, }; static struct ata_port_operations mv5_ops = { .inherits = &ata_sff_port_ops, .lost_interrupt = ATA_OP_NULL, .qc_defer = mv_qc_defer, .qc_prep = mv_qc_prep, .qc_issue = mv_qc_issue, .freeze = mv_eh_freeze, .thaw = mv_eh_thaw, .hardreset = mv_hardreset, .scr_read = mv5_scr_read, .scr_write = mv5_scr_write, .port_start = mv_port_start, .port_stop = mv_port_stop, }; static struct ata_port_operations mv6_ops = { .inherits = &ata_bmdma_port_ops, .lost_interrupt = ATA_OP_NULL, .qc_defer = mv_qc_defer, .qc_prep = mv_qc_prep, .qc_issue = mv_qc_issue, .dev_config = mv6_dev_config, .freeze = mv_eh_freeze, .thaw = mv_eh_thaw, .hardreset = mv_hardreset, .softreset = mv_softreset, .pmp_hardreset = mv_pmp_hardreset, .pmp_softreset = mv_softreset, .error_handler = mv_pmp_error_handler, .scr_read = mv_scr_read, .scr_write = mv_scr_write, .sff_check_status = mv_sff_check_status, .sff_irq_clear = mv_sff_irq_clear, .check_atapi_dma = mv_check_atapi_dma, .bmdma_setup = mv_bmdma_setup, .bmdma_start = mv_bmdma_start, .bmdma_stop = mv_bmdma_stop, .bmdma_status = mv_bmdma_status, .port_start = mv_port_start, .port_stop = mv_port_stop, }; static struct ata_port_operations mv_iie_ops = { .inherits = &mv6_ops, .dev_config = ATA_OP_NULL, .qc_prep = mv_qc_prep_iie, }; static const struct ata_port_info mv_port_info[] = { { /* chip_504x */ .flags = MV_GEN_I_FLAGS, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &mv5_ops, }, { /* chip_508x */ .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &mv5_ops, }, { /* chip_5080 */ .flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &mv5_ops, }, { /* chip_604x */ .flags = MV_GEN_II_FLAGS, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &mv6_ops, }, { /* chip_608x */ .flags = MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &mv6_ops, }, { /* chip_6042 */ .flags = MV_GEN_IIE_FLAGS, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &mv_iie_ops, }, { /* chip_7042 */ .flags = MV_GEN_IIE_FLAGS, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &mv_iie_ops, }, { /* chip_soc */ .flags = MV_GEN_IIE_FLAGS, .pio_mask = ATA_PIO4, .udma_mask = ATA_UDMA6, .port_ops = &mv_iie_ops, }, }; static const struct pci_device_id mv_pci_tbl[] = { { PCI_VDEVICE(MARVELL, 0x5040), chip_504x }, { PCI_VDEVICE(MARVELL, 0x5041), chip_504x }, { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 }, { PCI_VDEVICE(MARVELL, 0x5081), chip_508x }, /* RocketRAID 1720/174x have different identifiers */ { PCI_VDEVICE(TTI, 
0x1720), chip_6042 }, { PCI_VDEVICE(TTI, 0x1740), chip_6042 }, { PCI_VDEVICE(TTI, 0x1742), chip_6042 }, { PCI_VDEVICE(MARVELL, 0x6040), chip_604x }, { PCI_VDEVICE(MARVELL, 0x6041), chip_604x }, { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 }, { PCI_VDEVICE(MARVELL, 0x6080), chip_608x }, { PCI_VDEVICE(MARVELL, 0x6081), chip_608x }, { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x }, /* Adaptec 1430SA */ { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 }, /* Marvell 7042 support */ { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 }, /* Highpoint RocketRAID PCIe series */ { PCI_VDEVICE(TTI, 0x2300), chip_7042 }, { PCI_VDEVICE(TTI, 0x2310), chip_7042 }, { } /* terminate list */ }; static const struct mv_hw_ops mv5xxx_ops = { .phy_errata = mv5_phy_errata, .enable_leds = mv5_enable_leds, .read_preamp = mv5_read_preamp, .reset_hc = mv5_reset_hc, .reset_flash = mv5_reset_flash, .reset_bus = mv5_reset_bus, }; static const struct mv_hw_ops mv6xxx_ops = { .phy_errata = mv6_phy_errata, .enable_leds = mv6_enable_leds, .read_preamp = mv6_read_preamp, .reset_hc = mv6_reset_hc, .reset_flash = mv6_reset_flash, .reset_bus = mv_reset_pci_bus, }; static const struct mv_hw_ops mv_soc_ops = { .phy_errata = mv6_phy_errata, .enable_leds = mv_soc_enable_leds, .read_preamp = mv_soc_read_preamp, .reset_hc = mv_soc_reset_hc, .reset_flash = mv_soc_reset_flash, .reset_bus = mv_soc_reset_bus, }; static const struct mv_hw_ops mv_soc_65n_ops = { .phy_errata = mv_soc_65n_phy_errata, .enable_leds = mv_soc_enable_leds, .reset_hc = mv_soc_reset_hc, .reset_flash = mv_soc_reset_flash, .reset_bus = mv_soc_reset_bus, }; /* * Functions */ static inline void writelfl(unsigned long data, void __iomem *addr) { writel(data, addr); (void) readl(addr); /* flush to avoid PCI posted write */ } static inline unsigned int mv_hc_from_port(unsigned int port) { return port >> MV_PORT_HC_SHIFT; } static inline unsigned int mv_hardport_from_port(unsigned int port) { return port & MV_PORT_MASK; } /* * Consolidate some rather tricky bit shift calculations. * This is hot-path stuff, so not a function. * Simple code, with two return values, so macro rather than inline. * * port is the sole input, in range 0..7. * shift is one output, for use with main_irq_cause / main_irq_mask registers. * hardport is the other output, in range 0..3. * * Note that port and hardport may be the same variable in some cases. 
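 *
 * Worked example: for port == 5 on a dual-HC chip,
 *	hardport = 5 & MV_PORT_MASK = 1
 *	shift    = (5 >> MV_PORT_HC_SHIFT) * HC_SHIFT + hardport * 2
 *	         = 1 * 9 + 1 * 2 = 11
 * so (DONE_IRQ | ERR_IRQ) << shift picks out port 5's bits in the
 * main interrupt cause/mask registers.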
*/ #define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport) \ { \ shift = mv_hc_from_port(port) * HC_SHIFT; \ hardport = mv_hardport_from_port(port); \ shift += hardport * 2; \ } static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc) { return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ)); } static inline void __iomem *mv_hc_base_from_port(void __iomem *base, unsigned int port) { return mv_hc_base(base, mv_hc_from_port(port)); } static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port) { return mv_hc_base_from_port(base, port) + MV_SATAHC_ARBTR_REG_SZ + (mv_hardport_from_port(port) * MV_PORT_REG_SZ); } static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port) { void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port); unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL; return hc_mmio + ofs; } static inline void __iomem *mv_host_base(struct ata_host *host) { struct mv_host_priv *hpriv = host->private_data; return hpriv->base; } static inline void __iomem *mv_ap_base(struct ata_port *ap) { return mv_port_base(mv_host_base(ap->host), ap->port_no); } static inline int mv_get_hc_count(unsigned long port_flags) { return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1); } /** * mv_save_cached_regs - (re-)initialize cached port registers * @ap: the port whose registers we are caching * * Initialize the local cache of port registers, * so that reading them over and over again can * be avoided on the hotter paths of this driver. * This saves a few microseconds each time we switch * to/from EDMA mode to perform (eg.) a drive cache flush. */ static void mv_save_cached_regs(struct ata_port *ap) { void __iomem *port_mmio = mv_ap_base(ap); struct mv_port_priv *pp = ap->private_data; pp->cached.fiscfg = readl(port_mmio + FISCFG); pp->cached.ltmode = readl(port_mmio + LTMODE); pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND); pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD); } /** * mv_write_cached_reg - write to a cached port register * @addr: hardware address of the register * @old: pointer to cached value of the register * @new: new value for the register * * Write a new value to a cached register, * but only if the value is different from before. */ static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new) { if (new != *old) { unsigned long laddr; *old = new; /* * Workaround for 88SX60x1-B2 FEr SATA#13: * Read-after-write is needed to prevent generating 64-bit * write cycles on the PCI bus for SATA interface registers * at offsets ending in 0x4 or 0xc. * * Looks like a lot of fuss, but it avoids an unnecessary * +1 usec read-after-write delay for unaffected registers. 
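 *
 * Concretely: only SATA interface registers at offsets 0x300-0x33c
 * whose low nibble is 0x4 or 0xc (e.g. 0x304, 0x30c) take the flushing
 * writelfl() path below; all other registers get a plain writel().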
*/ laddr = (long)addr & 0xffff; if (laddr >= 0x300 && laddr <= 0x33c) { laddr &= 0x000f; if (laddr == 0x4 || laddr == 0xc) { writelfl(new, addr); /* read after write */ return; } } writel(new, addr); /* unaffected by the errata */ } } static void mv_set_edma_ptrs(void __iomem *port_mmio, struct mv_host_priv *hpriv, struct mv_port_priv *pp) { u32 index; /* * initialize request queue */ pp->req_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */ index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT; WARN_ON(pp->crqb_dma & 0x3ff); writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI); writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index, port_mmio + EDMA_REQ_Q_IN_PTR); writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR); /* * initialize response queue */ pp->resp_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */ index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT; WARN_ON(pp->crpb_dma & 0xff); writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI); writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR); writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index, port_mmio + EDMA_RSP_Q_OUT_PTR); } static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv) { /* * When writing to the main_irq_mask in hardware, * we must ensure exclusivity between the interrupt coalescing bits * and the corresponding individual port DONE_IRQ bits. * * Note that this register is really an "IRQ enable" register, * not an "IRQ mask" register as Marvell's naming might suggest. */ if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE)) mask &= ~DONE_IRQ_0_3; if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE)) mask &= ~DONE_IRQ_4_7; writelfl(mask, hpriv->main_irq_mask_addr); } static void mv_set_main_irq_mask(struct ata_host *host, u32 disable_bits, u32 enable_bits) { struct mv_host_priv *hpriv = host->private_data; u32 old_mask, new_mask; old_mask = hpriv->main_irq_mask; new_mask = (old_mask & ~disable_bits) | enable_bits; if (new_mask != old_mask) { hpriv->main_irq_mask = new_mask; mv_write_main_irq_mask(new_mask, hpriv); } } static void mv_enable_port_irqs(struct ata_port *ap, unsigned int port_bits) { unsigned int shift, hardport, port = ap->port_no; u32 disable_bits, enable_bits; MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); disable_bits = (DONE_IRQ | ERR_IRQ) << shift; enable_bits = port_bits << shift; mv_set_main_irq_mask(ap->host, disable_bits, enable_bits); } static void mv_clear_and_enable_port_irqs(struct ata_port *ap, void __iomem *port_mmio, unsigned int port_irqs) { struct mv_host_priv *hpriv = ap->host->private_data; int hardport = mv_hardport_from_port(ap->port_no); void __iomem *hc_mmio = mv_hc_base_from_port( mv_host_base(ap->host), ap->port_no); u32 hc_irq_cause; /* clear EDMA event indicators, if any */ writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE); /* clear pending irq events */ hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport); writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE); /* clear FIS IRQ Cause */ if (IS_GEN_IIE(hpriv)) writelfl(0, port_mmio + FIS_IRQ_CAUSE); mv_enable_port_irqs(ap, port_irqs); } static void mv_set_irq_coalescing(struct ata_host *host, unsigned int count, unsigned int usecs) { struct mv_host_priv *hpriv = host->private_data; void __iomem *mmio = hpriv->base, *hc_mmio; u32 coal_enable = 0; unsigned long flags; unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC; const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE | ALL_PORTS_COAL_DONE; /* Disable IRQ coalescing if either threshold is zero */ if (!usecs || !count) { clks = count = 0; } else { 
/* Respect maximum limits of the hardware */ clks = usecs * COAL_CLOCKS_PER_USEC; if (clks > MAX_COAL_TIME_THRESHOLD) clks = MAX_COAL_TIME_THRESHOLD; if (count > MAX_COAL_IO_COUNT) count = MAX_COAL_IO_COUNT; } spin_lock_irqsave(&host->lock, flags); mv_set_main_irq_mask(host, coal_disable, 0); if (is_dual_hc && !IS_GEN_I(hpriv)) { /* * GEN_II/GEN_IIE with dual host controllers: * one set of global thresholds for the entire chip. */ writel(clks, mmio + IRQ_COAL_TIME_THRESHOLD); writel(count, mmio + IRQ_COAL_IO_THRESHOLD); /* clear leftover coal IRQ bit */ writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE); if (count) coal_enable = ALL_PORTS_COAL_DONE; clks = count = 0; /* force clearing of regular regs below */ } /* * All chips: independent thresholds for each HC on the chip. */ hc_mmio = mv_hc_base_from_port(mmio, 0); writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD); writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD); writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE); if (count) coal_enable |= PORTS_0_3_COAL_DONE; if (is_dual_hc) { hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC); writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD); writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD); writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE); if (count) coal_enable |= PORTS_4_7_COAL_DONE; } mv_set_main_irq_mask(host, 0, coal_enable); spin_unlock_irqrestore(&host->lock, flags); } /** * mv_start_edma - Enable eDMA engine * @base: port base address * @pp: port private data * * Verify the local cache of the eDMA state is accurate with a * WARN_ON. * * LOCKING: * Inherited from caller. */ static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio, struct mv_port_priv *pp, u8 protocol) { int want_ncq = (protocol == ATA_PROT_NCQ); if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0); if (want_ncq != using_ncq) mv_stop_edma(ap); } if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { struct mv_host_priv *hpriv = ap->host->private_data; mv_edma_cfg(ap, want_ncq, 1); mv_set_edma_ptrs(port_mmio, hpriv, pp); mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ); writelfl(EDMA_EN, port_mmio + EDMA_CMD); pp->pp_flags |= MV_PP_FLAG_EDMA_EN; } } static void mv_wait_for_edma_empty_idle(struct ata_port *ap) { void __iomem *port_mmio = mv_ap_base(ap); const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE); const int per_loop = 5, timeout = (15 * 1000 / per_loop); int i; /* * Wait for the EDMA engine to finish transactions in progress. * No idea what a good "timeout" value might be, but measurements * indicate that it often requires hundreds of microseconds * with two drives in-use. So we use the 15msec value above * as a rough guess at what even more drives might require. */ for (i = 0; i < timeout; ++i) { u32 edma_stat = readl(port_mmio + EDMA_STATUS); if ((edma_stat & empty_idle) == empty_idle) break; udelay(per_loop); } /* ata_port_info(ap, "%s: %u+ usecs\n", __func__, i); */ } /** * mv_stop_edma_engine - Disable eDMA engine * @port_mmio: io base address * * LOCKING: * Inherited from caller. */ static int mv_stop_edma_engine(void __iomem *port_mmio) { int i; /* Disable eDMA. The disable bit auto clears. */ writelfl(EDMA_DS, port_mmio + EDMA_CMD); /* Wait for the chip to confirm eDMA is off. 
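 * The loop below polls EDMA_CMD up to 10000 times with a udelay(10)
 * between reads (roughly 100 ms worst case) before giving up with -EIO.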
*/ for (i = 10000; i > 0; i--) { u32 reg = readl(port_mmio + EDMA_CMD); if (!(reg & EDMA_EN)) return 0; udelay(10); } return -EIO; } static int mv_stop_edma(struct ata_port *ap) { void __iomem *port_mmio = mv_ap_base(ap); struct mv_port_priv *pp = ap->private_data; int err = 0; if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) return 0; pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; mv_wait_for_edma_empty_idle(ap); if (mv_stop_edma_engine(port_mmio)) { ata_port_err(ap, "Unable to stop eDMA\n"); err = -EIO; } mv_edma_cfg(ap, 0, 0); return err; } #ifdef ATA_DEBUG static void mv_dump_mem(void __iomem *start, unsigned bytes) { int b, w; for (b = 0; b < bytes; ) { DPRINTK("%p: ", start + b); for (w = 0; b < bytes && w < 4; w++) { printk("%08x ", readl(start + b)); b += sizeof(u32); } printk("\n"); } } #endif #if defined(ATA_DEBUG) || defined(CONFIG_PCI) static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes) { #ifdef ATA_DEBUG int b, w; u32 dw; for (b = 0; b < bytes; ) { DPRINTK("%02x: ", b); for (w = 0; b < bytes && w < 4; w++) { (void) pci_read_config_dword(pdev, b, &dw); printk("%08x ", dw); b += sizeof(u32); } printk("\n"); } #endif } #endif static void mv_dump_all_regs(void __iomem *mmio_base, int port, struct pci_dev *pdev) { #ifdef ATA_DEBUG void __iomem *hc_base = mv_hc_base(mmio_base, port >> MV_PORT_HC_SHIFT); void __iomem *port_base; int start_port, num_ports, p, start_hc, num_hcs, hc; if (0 > port) { start_hc = start_port = 0; num_ports = 8; /* shld be benign for 4 port devs */ num_hcs = 2; } else { start_hc = port >> MV_PORT_HC_SHIFT; start_port = port; num_ports = num_hcs = 1; } DPRINTK("All registers for port(s) %u-%u:\n", start_port, num_ports > 1 ? num_ports - 1 : start_port); if (NULL != pdev) { DPRINTK("PCI config space regs:\n"); mv_dump_pci_cfg(pdev, 0x68); } DPRINTK("PCI regs:\n"); mv_dump_mem(mmio_base+0xc00, 0x3c); mv_dump_mem(mmio_base+0xd00, 0x34); mv_dump_mem(mmio_base+0xf00, 0x4); mv_dump_mem(mmio_base+0x1d00, 0x6c); for (hc = start_hc; hc < start_hc + num_hcs; hc++) { hc_base = mv_hc_base(mmio_base, hc); DPRINTK("HC regs (HC %i):\n", hc); mv_dump_mem(hc_base, 0x1c); } for (p = start_port; p < start_port + num_ports; p++) { port_base = mv_port_base(mmio_base, p); DPRINTK("EDMA regs (port %i):\n", p); mv_dump_mem(port_base, 0x54); DPRINTK("SATA regs (port %i):\n", p); mv_dump_mem(port_base+0x300, 0x60); } #endif } static unsigned int mv_scr_offset(unsigned int sc_reg_in) { unsigned int ofs; switch (sc_reg_in) { case SCR_STATUS: case SCR_CONTROL: case SCR_ERROR: ofs = SATA_STATUS + (sc_reg_in * sizeof(u32)); break; case SCR_ACTIVE: ofs = SATA_ACTIVE; /* active is not with the others */ break; default: ofs = 0xffffffffU; break; } return ofs; } static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val) { unsigned int ofs = mv_scr_offset(sc_reg_in); if (ofs != 0xffffffffU) { *val = readl(mv_ap_base(link->ap) + ofs); return 0; } else return -EINVAL; } static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val) { unsigned int ofs = mv_scr_offset(sc_reg_in); if (ofs != 0xffffffffU) { void __iomem *addr = mv_ap_base(link->ap) + ofs; struct mv_host_priv *hpriv = link->ap->host->private_data; if (sc_reg_in == SCR_CONTROL) { /* * Workaround for 88SX60x1 FEr SATA#26: * * COMRESETs have to take care not to accidentally * put the drive to sleep when writing SCR_CONTROL. * Setting bits 12..15 prevents this problem. * * So if we see an outbound COMMRESET, set those bits. * Ditto for the followup write that clears the reset. 
* * The proprietary driver does this for * all chip versions, and so do we. */ if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1) val |= 0xf000; if (hpriv->hp_flags & MV_HP_FIX_LP_PHY_CTL) { void __iomem *lp_phy_addr = mv_ap_base(link->ap) + LP_PHY_CTL; /* * Set PHY speed according to SControl speed. */ if ((val & 0xf0) == 0x10) writelfl(0x7, lp_phy_addr); else writelfl(0x227, lp_phy_addr); } } writelfl(val, addr); return 0; } else return -EINVAL; } static void mv6_dev_config(struct ata_device *adev) { /* * Deal with Gen-II ("mv6") hardware quirks/restrictions: * * Gen-II does not support NCQ over a port multiplier * (no FIS-based switching). */ if (adev->flags & ATA_DFLAG_NCQ) { if (sata_pmp_attached(adev->link->ap)) { adev->flags &= ~ATA_DFLAG_NCQ; ata_dev_info(adev, "NCQ disabled for command-based switching\n"); } } } static int mv_qc_defer(struct ata_queued_cmd *qc) { struct ata_link *link = qc->dev->link; struct ata_port *ap = link->ap; struct mv_port_priv *pp = ap->private_data; /* * Don't allow new commands if we're in a delayed EH state * for NCQ and/or FIS-based switching. */ if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) return ATA_DEFER_PORT; /* PIO commands need exclusive link: no other commands [DMA or PIO] * can run concurrently. * set excl_link when we want to send a PIO command in DMA mode * or a non-NCQ command in NCQ mode. * When we receive a command from that link, and there are no * outstanding commands, mark a flag to clear excl_link and let * the command go through. */ if (unlikely(ap->excl_link)) { if (link == ap->excl_link) { if (ap->nr_active_links) return ATA_DEFER_PORT; qc->flags |= ATA_QCFLAG_CLEAR_EXCL; return 0; } else return ATA_DEFER_PORT; } /* * If the port is completely idle, then allow the new qc. */ if (ap->nr_active_links == 0) return 0; /* * The port is operating in host queuing mode (EDMA) with NCQ * enabled, allow multiple NCQ commands. EDMA also allows * queueing multiple DMA commands but libata core currently * doesn't allow it. 
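 * In short: while NCQ is active on the port, further NCQ commands are
 * accepted here; any other command claims excl_link instead and is
 * deferred until the outstanding queue drains (see the excl_link
 * handling above).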
*/ if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) && (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) { if (ata_is_ncq(qc->tf.protocol)) return 0; else { ap->excl_link = link; return ATA_DEFER_PORT; } } return ATA_DEFER_PORT; } static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs) { struct mv_port_priv *pp = ap->private_data; void __iomem *port_mmio; u32 fiscfg, *old_fiscfg = &pp->cached.fiscfg; u32 ltmode, *old_ltmode = &pp->cached.ltmode; u32 haltcond, *old_haltcond = &pp->cached.haltcond; ltmode = *old_ltmode & ~LTMODE_BIT8; haltcond = *old_haltcond | EDMA_ERR_DEV; if (want_fbs) { fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC; ltmode = *old_ltmode | LTMODE_BIT8; if (want_ncq) haltcond &= ~EDMA_ERR_DEV; else fiscfg |= FISCFG_WAIT_DEV_ERR; } else { fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR); } port_mmio = mv_ap_base(ap); mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg); mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode); mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond); } static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq) { struct mv_host_priv *hpriv = ap->host->private_data; u32 old, new; /* workaround for 88SX60x1 FEr SATA#25 (part 1) */ old = readl(hpriv->base + GPIO_PORT_CTL); if (want_ncq) new = old | (1 << 22); else new = old & ~(1 << 22); if (new != old) writel(new, hpriv->base + GPIO_PORT_CTL); } /** * mv_bmdma_enable - set a magic bit on GEN_IIE to allow bmdma * @ap: Port being initialized * * There are two DMA modes on these chips: basic DMA, and EDMA. * * Bit-0 of the "EDMA RESERVED" register enables/disables use * of basic DMA on the GEN_IIE versions of the chips. * * This bit survives EDMA resets, and must be set for basic DMA * to function, and should be cleared when EDMA is active. */ static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma) { struct mv_port_priv *pp = ap->private_data; u32 new, *old = &pp->cached.unknown_rsvd; if (enable_bmdma) new = *old | 1; else new = *old & ~1; mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new); } /* * SOC chips have an issue whereby the HDD LEDs don't always blink * during I/O when NCQ is enabled. Enabling a special "LED blink" mode * of the SOC takes care of it, generating a steady blink rate when * any drive on the chip is active. * * Unfortunately, the blink mode is a global hardware setting for the SOC, * so we must use it whenever at least one port on the SOC has NCQ enabled. * * We turn "LED blink" off when NCQ is not in use anywhere, because the normal * LED operation works then, and provides better (more accurate) feedback. * * Note that this code assumes that an SOC never has more than one HC onboard. 
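 * The MV_HP_QUIRK_LED_BLINK_EN host flag tracks whether blink mode is
 * currently on, so the enable/disable helpers below return early when
 * the hardware is already in the requested state.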
*/ static void mv_soc_led_blink_enable(struct ata_port *ap) { struct ata_host *host = ap->host; struct mv_host_priv *hpriv = host->private_data; void __iomem *hc_mmio; u32 led_ctrl; if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN) return; hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN; hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no); led_ctrl = readl(hc_mmio + SOC_LED_CTRL); writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL); } static void mv_soc_led_blink_disable(struct ata_port *ap) { struct ata_host *host = ap->host; struct mv_host_priv *hpriv = host->private_data; void __iomem *hc_mmio; u32 led_ctrl; unsigned int port; if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)) return; /* disable led-blink only if no ports are using NCQ */ for (port = 0; port < hpriv->n_ports; port++) { struct ata_port *this_ap = host->ports[port]; struct mv_port_priv *pp = this_ap->private_data; if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) return; } hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN; hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no); led_ctrl = readl(hc_mmio + SOC_LED_CTRL); writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL); } static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma) { u32 cfg; struct mv_port_priv *pp = ap->private_data; struct mv_host_priv *hpriv = ap->host->private_data; void __iomem *port_mmio = mv_ap_base(ap); /* set up non-NCQ EDMA configuration */ cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */ pp->pp_flags &= ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY); if (IS_GEN_I(hpriv)) cfg |= (1 << 8); /* enab config burst size mask */ else if (IS_GEN_II(hpriv)) { cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN; mv_60x1_errata_sata25(ap, want_ncq); } else if (IS_GEN_IIE(hpriv)) { int want_fbs = sata_pmp_attached(ap); /* * Possible future enhancement: * * The chip can use FBS with non-NCQ, if we allow it, * But first we need to have the error handling in place * for this mode (datasheet section 7.3.15.4.2.3). * So disallow non-NCQ FBS for now. */ want_fbs &= want_ncq; mv_config_fbs(ap, want_ncq, want_fbs); if (want_fbs) { pp->pp_flags |= MV_PP_FLAG_FBS_EN; cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */ } cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ if (want_edma) { cfg |= (1 << 22); /* enab 4-entry host queue cache */ if (!IS_SOC(hpriv)) cfg |= (1 << 18); /* enab early completion */ } if (hpriv->hp_flags & MV_HP_CUT_THROUGH) cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */ mv_bmdma_enable_iie(ap, !want_edma); if (IS_SOC(hpriv)) { if (want_ncq) mv_soc_led_blink_enable(ap); else mv_soc_led_blink_disable(ap); } } if (want_ncq) { cfg |= EDMA_CFG_NCQ; pp->pp_flags |= MV_PP_FLAG_NCQ_EN; } writelfl(cfg, port_mmio + EDMA_CFG); } static void mv_port_free_dma_mem(struct ata_port *ap) { struct mv_host_priv *hpriv = ap->host->private_data; struct mv_port_priv *pp = ap->private_data; int tag; if (pp->crqb) { dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma); pp->crqb = NULL; } if (pp->crpb) { dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma); pp->crpb = NULL; } /* * For GEN_I, there's no NCQ, so we have only a single sg_tbl. * For later hardware, we have one unique sg_tbl per NCQ tag. */ for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { if (pp->sg_tbl[tag]) { if (tag == 0 || !IS_GEN_I(hpriv)) dma_pool_free(hpriv->sg_tbl_pool, pp->sg_tbl[tag], pp->sg_tbl_dma[tag]); pp->sg_tbl[tag] = NULL; } } } /** * mv_port_start - Port specific init/start routine. 
* @ap: ATA channel to manipulate * * Allocate and point to DMA memory, init port private memory, * zero indices. * * LOCKING: * Inherited from caller. */ static int mv_port_start(struct ata_port *ap) { struct device *dev = ap->host->dev; struct mv_host_priv *hpriv = ap->host->private_data; struct mv_port_priv *pp; unsigned long flags; int tag; pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); if (!pp) return -ENOMEM; ap->private_data = pp; pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma); if (!pp->crqb) return -ENOMEM; memset(pp->crqb, 0, MV_CRQB_Q_SZ); pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma); if (!pp->crpb) goto out_port_free_dma_mem; memset(pp->crpb, 0, MV_CRPB_Q_SZ); /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */ if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0) ap->flags |= ATA_FLAG_AN; /* * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl. * For later hardware, we need one unique sg_tbl per NCQ tag. */ for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) { if (tag == 0 || !IS_GEN_I(hpriv)) { pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool, GFP_KERNEL, &pp->sg_tbl_dma[tag]); if (!pp->sg_tbl[tag]) goto out_port_free_dma_mem; } else { pp->sg_tbl[tag] = pp->sg_tbl[0]; pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0]; } } spin_lock_irqsave(ap->lock, flags); mv_save_cached_regs(ap); mv_edma_cfg(ap, 0, 0); spin_unlock_irqrestore(ap->lock, flags); return 0; out_port_free_dma_mem: mv_port_free_dma_mem(ap); return -ENOMEM; } /** * mv_port_stop - Port specific cleanup/stop routine. * @ap: ATA channel to manipulate * * Stop DMA, cleanup port memory. * * LOCKING: * This routine uses the host lock to protect the DMA stop. */ static void mv_port_stop(struct ata_port *ap) { unsigned long flags; spin_lock_irqsave(ap->lock, flags); mv_stop_edma(ap); mv_enable_port_irqs(ap, 0); spin_unlock_irqrestore(ap->lock, flags); mv_port_free_dma_mem(ap); } /** * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries * @qc: queued command whose SG list to source from * * Populate the SG list and mark the last entry. * * LOCKING: * Inherited from caller. */ static void mv_fill_sg(struct ata_queued_cmd *qc) { struct mv_port_priv *pp = qc->ap->private_data; struct scatterlist *sg; struct mv_sg *mv_sg, *last_sg = NULL; unsigned int si; mv_sg = pp->sg_tbl[qc->tag]; for_each_sg(qc->sg, sg, qc->n_elem, si) { dma_addr_t addr = sg_dma_address(sg); u32 sg_len = sg_dma_len(sg); while (sg_len) { u32 offset = addr & 0xffff; u32 len = sg_len; if (offset + len > 0x10000) len = 0x10000 - offset; mv_sg->addr = cpu_to_le32(addr & 0xffffffff); mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16); mv_sg->flags_size = cpu_to_le32(len & 0xffff); mv_sg->reserved = 0; sg_len -= len; addr += len; last_sg = mv_sg; mv_sg++; } } if (likely(last_sg)) last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL); mb(); /* ensure data structure is visible to the chipset */ } static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last) { u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | (last ? CRQB_CMD_LAST : 0); *cmdw = cpu_to_le16(tmp); } /** * mv_sff_irq_clear - Clear hardware interrupt after DMA. * @ap: Port associated with this ATA transaction. * * We need this only for ATAPI bmdma transactions, * as otherwise we experience spurious interrupts * after libata-sff handles the bmdma interrupts. 
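 * (Implementation-wise this just acks the port's pending event bits
 * and re-arms ERR_IRQ via mv_clear_and_enable_port_irqs().)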
*/ static void mv_sff_irq_clear(struct ata_port *ap) { mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ); } /** * mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA. * @qc: queued command to check for chipset/DMA compatibility. * * The bmdma engines cannot handle speculative data sizes * (bytecount under/over flow). So only allow DMA for * data transfer commands with known data sizes. * * LOCKING: * Inherited from caller. */ static int mv_check_atapi_dma(struct ata_queued_cmd *qc) { struct scsi_cmnd *scmd = qc->scsicmd; if (scmd) { switch (scmd->cmnd[0]) { case READ_6: case READ_10: case READ_12: case WRITE_6: case WRITE_10: case WRITE_12: case GPCMD_READ_CD: case GPCMD_SEND_DVD_STRUCTURE: case GPCMD_SEND_CUE_SHEET: return 0; /* DMA is safe */ } } return -EOPNOTSUPP; /* use PIO instead */ } /** * mv_bmdma_setup - Set up BMDMA transaction * @qc: queued command to prepare DMA for. * * LOCKING: * Inherited from caller. */ static void mv_bmdma_setup(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; void __iomem *port_mmio = mv_ap_base(ap); struct mv_port_priv *pp = ap->private_data; mv_fill_sg(qc); /* clear all DMA cmd bits */ writel(0, port_mmio + BMDMA_CMD); /* load PRD table addr. */ writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16, port_mmio + BMDMA_PRD_HIGH); writelfl(pp->sg_tbl_dma[qc->tag], port_mmio + BMDMA_PRD_LOW); /* issue r/w command */ ap->ops->sff_exec_command(ap, &qc->tf); } /** * mv_bmdma_start - Start a BMDMA transaction * @qc: queued command to start DMA on. * * LOCKING: * Inherited from caller. */ static void mv_bmdma_start(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; void __iomem *port_mmio = mv_ap_base(ap); unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START; /* start host DMA transaction */ writelfl(cmd, port_mmio + BMDMA_CMD); } /** * mv_bmdma_stop - Stop BMDMA transfer * @qc: queued command to stop DMA on. * * Clears the ATA_DMA_START flag in the bmdma control register * * LOCKING: * Inherited from caller. */ static void mv_bmdma_stop_ap(struct ata_port *ap) { void __iomem *port_mmio = mv_ap_base(ap); u32 cmd; /* clear start/stop bit */ cmd = readl(port_mmio + BMDMA_CMD); if (cmd & ATA_DMA_START) { cmd &= ~ATA_DMA_START; writelfl(cmd, port_mmio + BMDMA_CMD); /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ ata_sff_dma_pause(ap); } } static void mv_bmdma_stop(struct ata_queued_cmd *qc) { mv_bmdma_stop_ap(qc->ap); } /** * mv_bmdma_status - Read BMDMA status * @ap: port for which to retrieve DMA status. * * Read and return equivalent of the sff BMDMA status register. * * LOCKING: * Inherited from caller. */ static u8 mv_bmdma_status(struct ata_port *ap) { void __iomem *port_mmio = mv_ap_base(ap); u32 reg, status; /* * Other bits are valid only if ATA_DMA_ACTIVE==0, * and the ATA_DMA_INTR bit doesn't exist. */ reg = readl(port_mmio + BMDMA_STATUS); if (reg & ATA_DMA_ACTIVE) status = ATA_DMA_ACTIVE; else if (reg & ATA_DMA_ERR) status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR; else { /* * Just because DMA_ACTIVE is 0 (DMA completed), * this does _not_ mean the device is "done". * So we should not yet be signalling ATA_DMA_INTR * in some cases. Eg. DSM/TRIM, and perhaps others. */ mv_bmdma_stop_ap(ap); if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY) status = 0; else status = ATA_DMA_INTR; } return status; } static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc) { struct ata_taskfile *tf = &qc->tf; /* * Workaround for 88SX60x1 FEr SATA#24. 
* * Chip may corrupt WRITEs if multi_count >= 4kB. * Note that READs are unaffected. * * It's not clear if this errata really means "4K bytes", * or if it always happens for multi_count > 7 * regardless of device sector_size. * * So, for safety, any write with multi_count > 7 * gets converted here into a regular PIO write instead: */ if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) { if (qc->dev->multi_count > 7) { switch (tf->command) { case ATA_CMD_WRITE_MULTI: tf->command = ATA_CMD_PIO_WRITE; break; case ATA_CMD_WRITE_MULTI_FUA_EXT: tf->flags &= ~ATA_TFLAG_FUA; /* ugh */ /* fall through */ case ATA_CMD_WRITE_MULTI_EXT: tf->command = ATA_CMD_PIO_WRITE_EXT; break; } } } } /** * mv_qc_prep - Host specific command preparation. * @qc: queued command to prepare * * This routine simply redirects to the general purpose routine * if command is not DMA. Else, it handles prep of the CRQB * (command request block), does some sanity checking, and calls * the SG load routine. * * LOCKING: * Inherited from caller. */ static void mv_qc_prep(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct mv_port_priv *pp = ap->private_data; __le16 *cw; struct ata_taskfile *tf = &qc->tf; u16 flags = 0; unsigned in_index; switch (tf->protocol) { case ATA_PROT_DMA: if (tf->command == ATA_CMD_DSM) return; /* fall-thru */ case ATA_PROT_NCQ: break; /* continue below */ case ATA_PROT_PIO: mv_rw_multi_errata_sata24(qc); return; default: return; } /* Fill in command request block */ if (!(tf->flags & ATA_TFLAG_WRITE)) flags |= CRQB_FLAG_READ; WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); flags |= qc->tag << CRQB_TAG_SHIFT; flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT; /* get current queue index from software */ in_index = pp->req_idx; pp->crqb[in_index].sg_addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff); pp->crqb[in_index].sg_addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags); cw = &pp->crqb[in_index].ata_cmd[0]; /* Sadly, the CRQB cannot accommodate all registers--there are * only 11 bytes...so we must pick and choose required * registers based on the command. So, we drop feature and * hob_feature for [RW] DMA commands, but they are needed for * NCQ. NCQ will drop hob_nsect, which is not needed there * (nsect is used only for the tag; feat/hob_feat hold true nsect). */ switch (tf->command) { case ATA_CMD_READ: case ATA_CMD_READ_EXT: case ATA_CMD_WRITE: case ATA_CMD_WRITE_EXT: case ATA_CMD_WRITE_FUA_EXT: mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0); break; case ATA_CMD_FPDMA_READ: case ATA_CMD_FPDMA_WRITE: mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0); mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0); break; default: /* The only other commands EDMA supports in non-queued and * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none * of which are defined/used by Linux. If we get here, this * driver needs work. * * FIXME: modify libata to give qc_prep a return value and * return error here. 
*/ BUG_ON(tf->command); break; } mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0); mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0); mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0); mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0); mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0); mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0); mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0); mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0); mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ if (!(qc->flags & ATA_QCFLAG_DMAMAP)) return; mv_fill_sg(qc); } /** * mv_qc_prep_iie - Host specific command preparation. * @qc: queued command to prepare * * This routine simply redirects to the general purpose routine * if command is not DMA. Else, it handles prep of the CRQB * (command request block), does some sanity checking, and calls * the SG load routine. * * LOCKING: * Inherited from caller. */ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct mv_port_priv *pp = ap->private_data; struct mv_crqb_iie *crqb; struct ata_taskfile *tf = &qc->tf; unsigned in_index; u32 flags = 0; if ((tf->protocol != ATA_PROT_DMA) && (tf->protocol != ATA_PROT_NCQ)) return; if (tf->command == ATA_CMD_DSM) return; /* use bmdma for this */ /* Fill in Gen IIE command request block */ if (!(tf->flags & ATA_TFLAG_WRITE)) flags |= CRQB_FLAG_READ; WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); flags |= qc->tag << CRQB_TAG_SHIFT; flags |= qc->tag << CRQB_HOSTQ_SHIFT; flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT; /* get current queue index from software */ in_index = pp->req_idx; crqb = (struct mv_crqb_iie *) &pp->crqb[in_index]; crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff); crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16); crqb->flags = cpu_to_le32(flags); crqb->ata_cmd[0] = cpu_to_le32( (tf->command << 16) | (tf->feature << 24) ); crqb->ata_cmd[1] = cpu_to_le32( (tf->lbal << 0) | (tf->lbam << 8) | (tf->lbah << 16) | (tf->device << 24) ); crqb->ata_cmd[2] = cpu_to_le32( (tf->hob_lbal << 0) | (tf->hob_lbam << 8) | (tf->hob_lbah << 16) | (tf->hob_feature << 24) ); crqb->ata_cmd[3] = cpu_to_le32( (tf->nsect << 0) | (tf->hob_nsect << 8) ); if (!(qc->flags & ATA_QCFLAG_DMAMAP)) return; mv_fill_sg(qc); } /** * mv_sff_check_status - fetch device status, if valid * @ap: ATA port to fetch status from * * When using command issue via mv_qc_issue_fis(), * the initial ATA_BUSY state does not show up in the * ATA status (shadow) register. This can confuse libata! * * So we have a hook here to fake ATA_BUSY for that situation, * until the first time a BUSY, DRQ, or ERR bit is seen. * * The rest of the time, it simply returns the ATA status register. 
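 * (MV_PP_FLAG_FAKE_ATA_BUSY is set by mv_qc_issue_fis() and cleared
 * here as soon as a real BUSY, DRQ or ERR bit is seen.)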
*/ static u8 mv_sff_check_status(struct ata_port *ap) { u8 stat = ioread8(ap->ioaddr.status_addr); struct mv_port_priv *pp = ap->private_data; if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) { if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR)) pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; else stat = ATA_BUSY; } return stat; } /** * mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register * @fis: fis to be sent * @nwords: number of 32-bit words in the fis */ static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords) { void __iomem *port_mmio = mv_ap_base(ap); u32 ifctl, old_ifctl, ifstat; int i, timeout = 200, final_word = nwords - 1; /* Initiate FIS transmission mode */ old_ifctl = readl(port_mmio + SATA_IFCTL); ifctl = 0x100 | (old_ifctl & 0xf); writelfl(ifctl, port_mmio + SATA_IFCTL); /* Send all words of the FIS except for the final word */ for (i = 0; i < final_word; ++i) writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS); /* Flag end-of-transmission, and then send the final word */ writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL); writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS); /* * Wait for FIS transmission to complete. * This typically takes just a single iteration. */ do { ifstat = readl(port_mmio + SATA_IFSTAT); } while (!(ifstat & 0x1000) && --timeout); /* Restore original port configuration */ writelfl(old_ifctl, port_mmio + SATA_IFCTL); /* See if it worked */ if ((ifstat & 0x3000) != 0x1000) { ata_port_warn(ap, "%s transmission error, ifstat=%08x\n", __func__, ifstat); return AC_ERR_OTHER; } return 0; } /** * mv_qc_issue_fis - Issue a command directly as a FIS * @qc: queued command to start * * Note that the ATA shadow registers are not updated * after command issue, so the device will appear "READY" * if polled, even while it is BUSY processing the command. * * So we use a status hook to fake ATA_BUSY until the drive changes state. * * Note: we don't get updated shadow regs on *completion* * of non-data commands. So avoid sending them via this function, * as they will appear to have completed immediately. * * GEN_IIE has special registers that we could get the result tf from, * but earlier chipsets do not. For now, we ignore those registers. */ static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct mv_port_priv *pp = ap->private_data; struct ata_link *link = qc->dev->link; u32 fis[5]; int err = 0; ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis); err = mv_send_fis(ap, fis, ARRAY_SIZE(fis)); if (err) return err; switch (qc->tf.protocol) { case ATAPI_PROT_PIO: pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY; /* fall through */ case ATAPI_PROT_NODATA: ap->hsm_task_state = HSM_ST_FIRST; break; case ATA_PROT_PIO: pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY; if (qc->tf.flags & ATA_TFLAG_WRITE) ap->hsm_task_state = HSM_ST_FIRST; else ap->hsm_task_state = HSM_ST; break; default: ap->hsm_task_state = HSM_ST_LAST; break; } if (qc->tf.flags & ATA_TFLAG_POLLING) ata_sff_queue_pio_task(link, 0); return 0; } /** * mv_qc_issue - Initiate a command to the host * @qc: queued command to start * * This routine simply redirects to the general purpose routine * if command is not DMA. Else, it sanity checks our local * caches of the request producer/consumer indices then enables * DMA and bumps the request producer index. * * LOCKING: * Inherited from caller. 
*/ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) { static int limit_warnings = 10; struct ata_port *ap = qc->ap; void __iomem *port_mmio = mv_ap_base(ap); struct mv_port_priv *pp = ap->private_data; u32 in_index; unsigned int port_irqs; pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */ switch (qc->tf.protocol) { case ATA_PROT_DMA: if (qc->tf.command == ATA_CMD_DSM) { if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */ return AC_ERR_OTHER; break; /* use bmdma for this */ } /* fall thru */ case ATA_PROT_NCQ: mv_start_edma(ap, port_mmio, pp, qc->tf.protocol); pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK; in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT; /* Write the request in pointer to kick the EDMA to life */ writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index, port_mmio + EDMA_REQ_Q_IN_PTR); return 0; case ATA_PROT_PIO: /* * Errata SATA#16, SATA#24: warn if multiple DRQs expected. * * Someday, we might implement special polling workarounds * for these, but it all seems rather unnecessary since we * normally use only DMA for commands which transfer more * than a single block of data. * * Much of the time, this could just work regardless. * So for now, just log the incident, and allow the attempt. */ if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) { --limit_warnings; ata_link_warn(qc->dev->link, DRV_NAME ": attempting PIO w/multiple DRQ: " "this may fail due to h/w errata\n"); } /* drop through */ case ATA_PROT_NODATA: case ATAPI_PROT_PIO: case ATAPI_PROT_NODATA: if (ap->flags & ATA_FLAG_PIO_POLLING) qc->tf.flags |= ATA_TFLAG_POLLING; break; } if (qc->tf.flags & ATA_TFLAG_POLLING) port_irqs = ERR_IRQ; /* mask device interrupt when polling */ else port_irqs = ERR_IRQ | DONE_IRQ; /* unmask all interrupts */ /* * We're about to send a non-EDMA capable command to the * port. Turn off EDMA so there won't be problems accessing * shadow block, etc registers. */ mv_stop_edma(ap); mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs); mv_pmp_select(ap, qc->dev->link->pmp); if (qc->tf.command == ATA_CMD_READ_LOG_EXT) { struct mv_host_priv *hpriv = ap->host->private_data; /* * Workaround for 88SX60x1 FEr SATA#25 (part 2). * * After any NCQ error, the READ_LOG_EXT command * from libata-eh *must* use mv_qc_issue_fis(). * Otherwise it might fail, due to chip errata. * * Rather than special-case it, we'll just *always* * use this method here for READ_LOG_EXT, making for * easier testing. */ if (IS_GEN_II(hpriv)) return mv_qc_issue_fis(qc); } return ata_bmdma_qc_issue(qc); } static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap) { struct mv_port_priv *pp = ap->private_data; struct ata_queued_cmd *qc; if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) return NULL; qc = ata_qc_from_tag(ap, ap->link.active_tag); if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) return qc; return NULL; } static void mv_pmp_error_handler(struct ata_port *ap) { unsigned int pmp, pmp_map; struct mv_port_priv *pp = ap->private_data; if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) { /* * Perform NCQ error analysis on failed PMPs * before we freeze the port entirely. * * The failed PMPs are marked earlier by mv_pmp_eh_prep(). 
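 * The loop below walks delayed_eh_pmp_map bit by bit, running
 * ata_eh_analyze_ncq_error() on each failed PMP link, and then freezes
 * the port before handing off to sata_pmp_error_handler().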
*/ pmp_map = pp->delayed_eh_pmp_map; pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH; for (pmp = 0; pmp_map != 0; pmp++) { unsigned int this_pmp = (1 << pmp); if (pmp_map & this_pmp) { struct ata_link *link = &ap->pmp_link[pmp]; pmp_map &= ~this_pmp; ata_eh_analyze_ncq_error(link); } } ata_port_freeze(ap); } sata_pmp_error_handler(ap); } static unsigned int mv_get_err_pmp_map(struct ata_port *ap) { void __iomem *port_mmio = mv_ap_base(ap); return readl(port_mmio + SATA_TESTCTL) >> 16; } static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map) { struct ata_eh_info *ehi; unsigned int pmp; /* * Initialize EH info for PMPs which saw device errors */ ehi = &ap->link.eh_info; for (pmp = 0; pmp_map != 0; pmp++) { unsigned int this_pmp = (1 << pmp); if (pmp_map & this_pmp) { struct ata_link *link = &ap->pmp_link[pmp]; pmp_map &= ~this_pmp; ehi = &link->eh_info; ata_ehi_clear_desc(ehi); ata_ehi_push_desc(ehi, "dev err"); ehi->err_mask |= AC_ERR_DEV; ehi->action |= ATA_EH_RESET; ata_link_abort(link); } } } static int mv_req_q_empty(struct ata_port *ap) { void __iomem *port_mmio = mv_ap_base(ap); u32 in_ptr, out_ptr; in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR) >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR) >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; return (in_ptr == out_ptr); /* 1 == queue_is_empty */ } static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap) { struct mv_port_priv *pp = ap->private_data; int failed_links; unsigned int old_map, new_map; /* * Device error during FBS+NCQ operation: * * Set a port flag to prevent further I/O being enqueued. * Leave the EDMA running to drain outstanding commands from this port. * Perform the post-mortem/EH only when all responses are complete. * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2). */ if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) { pp->pp_flags |= MV_PP_FLAG_DELAYED_EH; pp->delayed_eh_pmp_map = 0; } old_map = pp->delayed_eh_pmp_map; new_map = old_map | mv_get_err_pmp_map(ap); if (old_map != new_map) { pp->delayed_eh_pmp_map = new_map; mv_pmp_eh_prep(ap, new_map & ~old_map); } failed_links = hweight16(new_map); ata_port_info(ap, "%s: pmp_map=%04x qc_map=%04x failed_links=%d nr_active_links=%d\n", __func__, pp->delayed_eh_pmp_map, ap->qc_active, failed_links, ap->nr_active_links); if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) { mv_process_crpb_entries(ap, pp); mv_stop_edma(ap); mv_eh_freeze(ap); ata_port_info(ap, "%s: done\n", __func__); return 1; /* handled */ } ata_port_info(ap, "%s: waiting\n", __func__); return 1; /* handled */ } static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap) { /* * Possible future enhancement: * * FBS+non-NCQ operation is not yet implemented. * See related notes in mv_edma_cfg(). * * Device error during FBS+non-NCQ operation: * * We need to snapshot the shadow registers for each failed command. * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3). 
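 *
 * Until then, report the error as not handled so that the caller falls
 * back to the generic error handling path.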
*/ return 0; /* not handled */ } static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause) { struct mv_port_priv *pp = ap->private_data; if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) return 0; /* EDMA was not active: not handled */ if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN)) return 0; /* FBS was not active: not handled */ if (!(edma_err_cause & EDMA_ERR_DEV)) return 0; /* non DEV error: not handled */ edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT; if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS)) return 0; /* other problems: not handled */ if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) { /* * EDMA should NOT have self-disabled for this case. * If it did, then something is wrong elsewhere, * and we cannot handle it here. */ if (edma_err_cause & EDMA_ERR_SELF_DIS) { ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n", __func__, edma_err_cause, pp->pp_flags); return 0; /* not handled */ } return mv_handle_fbs_ncq_dev_err(ap); } else { /* * EDMA should have self-disabled for this case. * If it did not, then something is wrong elsewhere, * and we cannot handle it here. */ if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) { ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n", __func__, edma_err_cause, pp->pp_flags); return 0; /* not handled */ } return mv_handle_fbs_non_ncq_dev_err(ap); } return 0; /* not handled */ } static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled) { struct ata_eh_info *ehi = &ap->link.eh_info; char *when = "idle"; ata_ehi_clear_desc(ehi); if (edma_was_enabled) { when = "EDMA enabled"; } else { struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag); if (qc && (qc->tf.flags & ATA_TFLAG_POLLING)) when = "polling"; } ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when); ehi->err_mask |= AC_ERR_OTHER; ehi->action |= ATA_EH_RESET; ata_port_freeze(ap); } /** * mv_err_intr - Handle error interrupts on the port * @ap: ATA channel to manipulate * * Most cases require a full reset of the chip's state machine, * which also performs a COMRESET. * Also, if the port disabled DMA, update our cached copy to match. * * LOCKING: * Inherited from caller. */ static void mv_err_intr(struct ata_port *ap) { void __iomem *port_mmio = mv_ap_base(ap); u32 edma_err_cause, eh_freeze_mask, serr = 0; u32 fis_cause = 0; struct mv_port_priv *pp = ap->private_data; struct mv_host_priv *hpriv = ap->host->private_data; unsigned int action = 0, err_mask = 0; struct ata_eh_info *ehi = &ap->link.eh_info; struct ata_queued_cmd *qc; int abort = 0; /* * Read and clear the SError and err_cause bits. * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear * the FIS_IRQ_CAUSE register before clearing edma_err_cause. */ sata_scr_read(&ap->link, SCR_ERROR, &serr); sata_scr_write_flush(&ap->link, SCR_ERROR, serr); edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE); if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) { fis_cause = readl(port_mmio + FIS_IRQ_CAUSE); writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE); } writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE); if (edma_err_cause & EDMA_ERR_DEV) { /* * Device errors during FIS-based switching operation * require special handling. 
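 * If mv_handle_dev_err() reports the error as fully handled, we return
 * right away and skip the generic error processing below.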
*/ if (mv_handle_dev_err(ap, edma_err_cause)) return; } qc = mv_get_active_qc(ap); ata_ehi_clear_desc(ehi); ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x", edma_err_cause, pp->pp_flags); if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) { ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause); if (fis_cause & FIS_IRQ_CAUSE_AN) { u32 ec = edma_err_cause & ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT); sata_async_notification(ap); if (!ec) return; /* Just an AN; no need for the nukes */ ata_ehi_push_desc(ehi, "SDB notify"); } } /* * All generations share these EDMA error cause bits: */ if (edma_err_cause & EDMA_ERR_DEV) { err_mask |= AC_ERR_DEV; action |= ATA_EH_RESET; ata_ehi_push_desc(ehi, "dev error"); } if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR | EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR)) { err_mask |= AC_ERR_ATA_BUS; action |= ATA_EH_RESET; ata_ehi_push_desc(ehi, "parity error"); } if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) { ata_ehi_hotplugged(ehi); ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ? "dev disconnect" : "dev connect"); action |= ATA_EH_RESET; } /* * Gen-I has a different SELF_DIS bit, * different FREEZE bits, and no SERR bit: */ if (IS_GEN_I(hpriv)) { eh_freeze_mask = EDMA_EH_FREEZE_5; if (edma_err_cause & EDMA_ERR_SELF_DIS_5) { pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; ata_ehi_push_desc(ehi, "EDMA self-disable"); } } else { eh_freeze_mask = EDMA_EH_FREEZE; if (edma_err_cause & EDMA_ERR_SELF_DIS) { pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; ata_ehi_push_desc(ehi, "EDMA self-disable"); } if (edma_err_cause & EDMA_ERR_SERR) { ata_ehi_push_desc(ehi, "SError=%08x", serr); err_mask |= AC_ERR_ATA_BUS; action |= ATA_EH_RESET; } } if (!err_mask) { err_mask = AC_ERR_OTHER; action |= ATA_EH_RESET; } ehi->serror |= serr; ehi->action |= action; if (qc) qc->err_mask |= err_mask; else ehi->err_mask |= err_mask; if (err_mask == AC_ERR_DEV) { /* * Cannot do ata_port_freeze() here, * because it would kill PIO access, * which is needed for further diagnosis. */ mv_eh_freeze(ap); abort = 1; } else if (edma_err_cause & eh_freeze_mask) { /* * Note to self: ata_port_freeze() calls ata_port_abort() */ ata_port_freeze(ap); } else { abort = 1; } if (abort) { if (qc) ata_link_abort(qc->dev->link); else ata_port_abort(ap); } } static bool mv_process_crpb_response(struct ata_port *ap, struct mv_crpb *response, unsigned int tag, int ncq_enabled) { u8 ata_status; u16 edma_status = le16_to_cpu(response->flags); /* * edma_status from a response queue entry: * LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only). * MSB is saved ATA status from command completion. */ if (!ncq_enabled) { u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV; if (err_cause) { /* * Error will be seen/handled by * mv_err_intr(). So do nothing at all here. 
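 * Returning false keeps this tag out of the caller's done_mask,
 * so the qc is not completed from the response-queue path.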
*/ return false; } } ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT; if (!ac_err_mask(ata_status)) return true; /* else: leave it for mv_err_intr() */ return false; } static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp) { void __iomem *port_mmio = mv_ap_base(ap); struct mv_host_priv *hpriv = ap->host->private_data; u32 in_index; bool work_done = false; u32 done_mask = 0; int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN); /* Get the hardware queue position index */ in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR) >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; /* Process new responses from since the last time we looked */ while (in_index != pp->resp_idx) { unsigned int tag; struct mv_crpb *response = &pp->crpb[pp->resp_idx]; pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK; if (IS_GEN_I(hpriv)) { /* 50xx: no NCQ, only one command active at a time */ tag = ap->link.active_tag; } else { /* Gen II/IIE: get command tag from CRPB entry */ tag = le16_to_cpu(response->id) & 0x1f; } if (mv_process_crpb_response(ap, response, tag, ncq_enabled)) done_mask |= 1 << tag; work_done = true; } if (work_done) { ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask); /* Update the software queue position index in hardware */ writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT), port_mmio + EDMA_RSP_Q_OUT_PTR); } } static void mv_port_intr(struct ata_port *ap, u32 port_cause) { struct mv_port_priv *pp; int edma_was_enabled; /* * Grab a snapshot of the EDMA_EN flag setting, * so that we have a consistent view for this port, * even if something we call of our routines changes it. */ pp = ap->private_data; edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN); /* * Process completed CRPB response(s) before other events. */ if (edma_was_enabled && (port_cause & DONE_IRQ)) { mv_process_crpb_entries(ap, pp); if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) mv_handle_fbs_ncq_dev_err(ap); } /* * Handle chip-reported errors, or continue on to handle PIO. */ if (unlikely(port_cause & ERR_IRQ)) { mv_err_intr(ap); } else if (!edma_was_enabled) { struct ata_queued_cmd *qc = mv_get_active_qc(ap); if (qc) ata_bmdma_port_intr(ap, qc); else mv_unexpected_intr(ap, edma_was_enabled); } } /** * mv_host_intr - Handle all interrupts on the given host controller * @host: host specific structure * @main_irq_cause: Main interrupt cause register for the chip. * * LOCKING: * Inherited from caller. */ static int mv_host_intr(struct ata_host *host, u32 main_irq_cause) { struct mv_host_priv *hpriv = host->private_data; void __iomem *mmio = hpriv->base, *hc_mmio; unsigned int handled = 0, port; /* If asserted, clear the "all ports" IRQ coalescing bit */ if (main_irq_cause & ALL_PORTS_COAL_DONE) writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE); for (port = 0; port < hpriv->n_ports; port++) { struct ata_port *ap = host->ports[port]; unsigned int p, shift, hardport, port_cause; MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport); /* * Each hc within the host has its own hc_irq_cause register, * where the interrupting ports bits get ack'd. */ if (hardport == 0) { /* first port on this hc ? */ u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND; u32 port_mask, ack_irqs; /* * Skip this entire hc if nothing pending for any ports */ if (!hc_cause) { port += MV_PORTS_PER_HC - 1; continue; } /* * We don't need/want to read the hc_irq_cause register, * because doing so hurts performance, and * main_irq_cause already gives us everything we need. 
* * But we do have to *write* to the hc_irq_cause to ack * the ports that we are handling this time through. * * This requires that we create a bitmap for those * ports which interrupted us, and use that bitmap * to ack (only) those ports via hc_irq_cause. */ ack_irqs = 0; if (hc_cause & PORTS_0_3_COAL_DONE) ack_irqs = HC_COAL_IRQ; for (p = 0; p < MV_PORTS_PER_HC; ++p) { if ((port + p) >= hpriv->n_ports) break; port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2); if (hc_cause & port_mask) ack_irqs |= (DMA_IRQ | DEV_IRQ) << p; } hc_mmio = mv_hc_base_from_port(mmio, port); writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE); handled = 1; } /* * Handle interrupts signalled for this port: */ port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ); if (port_cause) mv_port_intr(ap, port_cause); } return handled; } static int mv_pci_error(struct ata_host *host, void __iomem *mmio) { struct mv_host_priv *hpriv = host->private_data; struct ata_port *ap; struct ata_queued_cmd *qc; struct ata_eh_info *ehi; unsigned int i, err_mask, printed = 0; u32 err_cause; err_cause = readl(mmio + hpriv->irq_cause_offset); dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause); DPRINTK("All regs @ PCI error\n"); mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev)); writelfl(0, mmio + hpriv->irq_cause_offset); for (i = 0; i < host->n_ports; i++) { ap = host->ports[i]; if (!ata_link_offline(&ap->link)) { ehi = &ap->link.eh_info; ata_ehi_clear_desc(ehi); if (!printed++) ata_ehi_push_desc(ehi, "PCI err cause 0x%08x", err_cause); err_mask = AC_ERR_HOST_BUS; ehi->action = ATA_EH_RESET; qc = ata_qc_from_tag(ap, ap->link.active_tag); if (qc) qc->err_mask |= err_mask; else ehi->err_mask |= err_mask; ata_port_freeze(ap); } } return 1; /* handled */ } /** * mv_interrupt - Main interrupt event handler * @irq: unused * @dev_instance: private data; in this case the host structure * * Read the read only register to determine if any host * controllers have pending interrupts. If so, call lower level * routine to handle. Also check for PCI errors which are only * reported here. * * LOCKING: * This routine holds the host lock while processing pending * interrupts. */ static irqreturn_t mv_interrupt(int irq, void *dev_instance) { struct ata_host *host = dev_instance; struct mv_host_priv *hpriv = host->private_data; unsigned int handled = 0; int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI; u32 main_irq_cause, pending_irqs; spin_lock(&host->lock); /* for MSI: block new interrupts while in here */ if (using_msi) mv_write_main_irq_mask(0, hpriv); main_irq_cause = readl(hpriv->main_irq_cause_addr); pending_irqs = main_irq_cause & hpriv->main_irq_mask; /* * Deal with cases where we either have nothing pending, or have read * a bogus register value which can indicate HW removal or PCI fault. 
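 * A main_irq_cause value of all-ones is treated as such a fault and ignored.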
*/ if (pending_irqs && main_irq_cause != 0xffffffffU) { if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv))) handled = mv_pci_error(host, hpriv->base); else handled = mv_host_intr(host, pending_irqs); } /* for MSI: unmask; interrupt cause bits will retrigger now */ if (using_msi) mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv); spin_unlock(&host->lock); return IRQ_RETVAL(handled); } static unsigned int mv5_scr_offset(unsigned int sc_reg_in) { unsigned int ofs; switch (sc_reg_in) { case SCR_STATUS: case SCR_ERROR: case SCR_CONTROL: ofs = sc_reg_in * sizeof(u32); break; default: ofs = 0xffffffffU; break; } return ofs; } static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val) { struct mv_host_priv *hpriv = link->ap->host->private_data; void __iomem *mmio = hpriv->base; void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no); unsigned int ofs = mv5_scr_offset(sc_reg_in); if (ofs != 0xffffffffU) { *val = readl(addr + ofs); return 0; } else return -EINVAL; } static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val) { struct mv_host_priv *hpriv = link->ap->host->private_data; void __iomem *mmio = hpriv->base; void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no); unsigned int ofs = mv5_scr_offset(sc_reg_in); if (ofs != 0xffffffffU) { writelfl(val, addr + ofs); return 0; } else return -EINVAL; } static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio) { struct pci_dev *pdev = to_pci_dev(host->dev); int early_5080; early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0); if (!early_5080) { u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL); tmp |= (1 << 0); writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL); } mv_reset_pci_bus(host, mmio); } static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) { writel(0x0fcfffff, mmio + FLASH_CTL); } static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx, void __iomem *mmio) { void __iomem *phy_mmio = mv5_phy_base(mmio, idx); u32 tmp; tmp = readl(phy_mmio + MV5_PHY_MODE); hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */ hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */ } static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) { u32 tmp; writel(0, mmio + GPIO_PORT_CTL); /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */ tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL); tmp |= ~(1 << 0); writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL); } static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port) { void __iomem *phy_mmio = mv5_phy_base(mmio, port); const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5); u32 tmp; int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0); if (fix_apm_sq) { tmp = readl(phy_mmio + MV5_LTMODE); tmp |= (1 << 19); writel(tmp, phy_mmio + MV5_LTMODE); tmp = readl(phy_mmio + MV5_PHY_CTL); tmp &= ~0x3; tmp |= 0x1; writel(tmp, phy_mmio + MV5_PHY_CTL); } tmp = readl(phy_mmio + MV5_PHY_MODE); tmp &= ~mask; tmp |= hpriv->signal[port].pre; tmp |= hpriv->signal[port].amps; writel(tmp, phy_mmio + MV5_PHY_MODE); } #undef ZERO #define ZERO(reg) writel(0, port_mmio + (reg)) static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port) { void __iomem *port_mmio = mv_port_base(mmio, port); mv_reset_channel(hpriv, mmio, port); ZERO(0x028); /* command */ writel(0x11f, port_mmio + EDMA_CFG); ZERO(0x004); /* timer */ ZERO(0x008); /* irq err cause */ ZERO(0x00c); /* irq err mask */ ZERO(0x010); /* rq bah */ ZERO(0x014); /* rq inp */ ZERO(0x018); /* rq outp */ 
ZERO(0x01c); /* respq bah */ ZERO(0x024); /* respq outp */ ZERO(0x020); /* respq inp */ ZERO(0x02c); /* test control */ writel(0xbc, port_mmio + EDMA_IORDY_TMOUT); } #undef ZERO #define ZERO(reg) writel(0, hc_mmio + (reg)) static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int hc) { void __iomem *hc_mmio = mv_hc_base(mmio, hc); u32 tmp; ZERO(0x00c); ZERO(0x010); ZERO(0x014); ZERO(0x018); tmp = readl(hc_mmio + 0x20); tmp &= 0x1c1c1c1c; tmp |= 0x03030303; writel(tmp, hc_mmio + 0x20); } #undef ZERO static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int n_hc) { unsigned int hc, port; for (hc = 0; hc < n_hc; hc++) { for (port = 0; port < MV_PORTS_PER_HC; port++) mv5_reset_hc_port(hpriv, mmio, (hc * MV_PORTS_PER_HC) + port); mv5_reset_one_hc(hpriv, mmio, hc); } return 0; } #undef ZERO #define ZERO(reg) writel(0, mmio + (reg)) static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio) { struct mv_host_priv *hpriv = host->private_data; u32 tmp; tmp = readl(mmio + MV_PCI_MODE); tmp &= 0xff00ffff; writel(tmp, mmio + MV_PCI_MODE); ZERO(MV_PCI_DISC_TIMER); ZERO(MV_PCI_MSI_TRIGGER); writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT); ZERO(MV_PCI_SERR_MASK); ZERO(hpriv->irq_cause_offset); ZERO(hpriv->irq_mask_offset); ZERO(MV_PCI_ERR_LOW_ADDRESS); ZERO(MV_PCI_ERR_HIGH_ADDRESS); ZERO(MV_PCI_ERR_ATTRIBUTE); ZERO(MV_PCI_ERR_COMMAND); } #undef ZERO static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) { u32 tmp; mv5_reset_flash(hpriv, mmio); tmp = readl(mmio + GPIO_PORT_CTL); tmp &= 0x3; tmp |= (1 << 5) | (1 << 6); writel(tmp, mmio + GPIO_PORT_CTL); } /** * mv6_reset_hc - Perform the 6xxx global soft reset * @mmio: base address of the HBA * * This routine only applies to 6xxx parts. * * LOCKING: * Inherited from caller. */ static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int n_hc) { void __iomem *reg = mmio + PCI_MAIN_CMD_STS; int i, rc = 0; u32 t; /* Following procedure defined in PCI "main command and status * register" table. 
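 *
 * We first stop the PCI master and poll (up to ~1 ms) for PCI_MASTER_EMPTY,
 * then assert GLOB_SFT_RST and finally clear it again, which also re-enables
 * the PCI master.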
*/ t = readl(reg); writel(t | STOP_PCI_MASTER, reg); for (i = 0; i < 1000; i++) { udelay(1); t = readl(reg); if (PCI_MASTER_EMPTY & t) break; } if (!(PCI_MASTER_EMPTY & t)) { printk(KERN_ERR DRV_NAME ": PCI master won't flush\n"); rc = 1; goto done; } /* set reset */ i = 5; do { writel(t | GLOB_SFT_RST, reg); t = readl(reg); udelay(1); } while (!(GLOB_SFT_RST & t) && (i-- > 0)); if (!(GLOB_SFT_RST & t)) { printk(KERN_ERR DRV_NAME ": can't set global reset\n"); rc = 1; goto done; } /* clear reset and *reenable the PCI master* (not mentioned in spec) */ i = 5; do { writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg); t = readl(reg); udelay(1); } while ((GLOB_SFT_RST & t) && (i-- > 0)); if (GLOB_SFT_RST & t) { printk(KERN_ERR DRV_NAME ": can't clear global reset\n"); rc = 1; } done: return rc; } static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx, void __iomem *mmio) { void __iomem *port_mmio; u32 tmp; tmp = readl(mmio + RESET_CFG); if ((tmp & (1 << 0)) == 0) { hpriv->signal[idx].amps = 0x7 << 8; hpriv->signal[idx].pre = 0x1 << 5; return; } port_mmio = mv_port_base(mmio, idx); tmp = readl(port_mmio + PHY_MODE2); hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */ hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */ } static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) { writel(0x00000060, mmio + GPIO_PORT_CTL); } static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port) { void __iomem *port_mmio = mv_port_base(mmio, port); u32 hp_flags = hpriv->hp_flags; int fix_phy_mode2 = hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); int fix_phy_mode4 = hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); u32 m2, m3; if (fix_phy_mode2) { m2 = readl(port_mmio + PHY_MODE2); m2 &= ~(1 << 16); m2 |= (1 << 31); writel(m2, port_mmio + PHY_MODE2); udelay(200); m2 = readl(port_mmio + PHY_MODE2); m2 &= ~((1 << 16) | (1 << 31)); writel(m2, port_mmio + PHY_MODE2); udelay(200); } /* * Gen-II/IIe PHY_MODE3 errata RM#2: * Achieves better receiver noise performance than the h/w default: */ m3 = readl(port_mmio + PHY_MODE3); m3 = (m3 & 0x1f) | (0x5555601 << 5); /* Guideline 88F5182 (GL# SATA-S11) */ if (IS_SOC(hpriv)) m3 &= ~0x1c; if (fix_phy_mode4) { u32 m4 = readl(port_mmio + PHY_MODE4); /* * Enforce reserved-bit restrictions on GenIIe devices only. * For earlier chipsets, force only the internal config field * (workaround for errata FEr SATA#10 part 1). */ if (IS_GEN_IIE(hpriv)) m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES; else m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE; writel(m4, port_mmio + PHY_MODE4); } /* * Workaround for 60x1-B2 errata SATA#13: * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3, * so we must always rewrite PHY_MODE3 after PHY_MODE4. * Or ensure we use writelfl() when writing PHY_MODE4. 
*/ writel(m3, port_mmio + PHY_MODE3); /* Revert values of pre-emphasis and signal amps to the saved ones */ m2 = readl(port_mmio + PHY_MODE2); m2 &= ~MV_M2_PREAMP_MASK; m2 |= hpriv->signal[port].amps; m2 |= hpriv->signal[port].pre; m2 &= ~(1 << 16); /* according to mvSata 3.6.1, some IIE values are fixed */ if (IS_GEN_IIE(hpriv)) { m2 &= ~0xC30FF01F; m2 |= 0x0000900F; } writel(m2, port_mmio + PHY_MODE2); } /* TODO: use the generic LED interface to configure the SATA Presence */ /* & Activity LEDs on the board */ static void mv_soc_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio) { return; } static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx, void __iomem *mmio) { void __iomem *port_mmio; u32 tmp; port_mmio = mv_port_base(mmio, idx); tmp = readl(port_mmio + PHY_MODE2); hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */ hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */ } #undef ZERO #define ZERO(reg) writel(0, port_mmio + (reg)) static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port) { void __iomem *port_mmio = mv_port_base(mmio, port); mv_reset_channel(hpriv, mmio, port); ZERO(0x028); /* command */ writel(0x101f, port_mmio + EDMA_CFG); ZERO(0x004); /* timer */ ZERO(0x008); /* irq err cause */ ZERO(0x00c); /* irq err mask */ ZERO(0x010); /* rq bah */ ZERO(0x014); /* rq inp */ ZERO(0x018); /* rq outp */ ZERO(0x01c); /* respq bah */ ZERO(0x024); /* respq outp */ ZERO(0x020); /* respq inp */ ZERO(0x02c); /* test control */ writel(0x800, port_mmio + EDMA_IORDY_TMOUT); } #undef ZERO #define ZERO(reg) writel(0, hc_mmio + (reg)) static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio) { void __iomem *hc_mmio = mv_hc_base(mmio, 0); ZERO(0x00c); ZERO(0x010); ZERO(0x014); } #undef ZERO static int mv_soc_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int n_hc) { unsigned int port; for (port = 0; port < hpriv->n_ports; port++) mv_soc_reset_hc_port(hpriv, mmio, port); mv_soc_reset_one_hc(hpriv, mmio); return 0; } static void mv_soc_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio) { return; } static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio) { return; } static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port) { void __iomem *port_mmio = mv_port_base(mmio, port); u32 reg; reg = readl(port_mmio + PHY_MODE3); reg &= ~(0x3 << 27); /* SELMUPF (bits 28:27) to 1 */ reg |= (0x1 << 27); reg &= ~(0x3 << 29); /* SELMUPI (bits 30:29) to 1 */ reg |= (0x1 << 29); writel(reg, port_mmio + PHY_MODE3); reg = readl(port_mmio + PHY_MODE4); reg &= ~0x1; /* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */ reg |= (0x1 << 16); writel(reg, port_mmio + PHY_MODE4); reg = readl(port_mmio + PHY_MODE9_GEN2); reg &= ~0xf; /* TXAMP[3:0] (bits 3:0) to 8 */ reg |= 0x8; reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */ writel(reg, port_mmio + PHY_MODE9_GEN2); reg = readl(port_mmio + PHY_MODE9_GEN1); reg &= ~0xf; /* TXAMP[3:0] (bits 3:0) to 8 */ reg |= 0x8; reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */ writel(reg, port_mmio + PHY_MODE9_GEN1); } /** * soc_is_65n - check if the SoC is a 65 nm device * * Detect the type of the SoC by reading the PHYCFG_OFS register. This * register holds a non-zero value and exists only in the 65 nm devices; * reading it from older devices returns 0.
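 *
 * Return: true when PHYCFG_OFS reads non-zero (65 nm device), false otherwise.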
*/ static bool soc_is_65n(struct mv_host_priv *hpriv) { void __iomem *port0_mmio = mv_port_base(hpriv->base, 0); if (readl(port0_mmio + PHYCFG_OFS)) return true; return false; } static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i) { u32 ifcfg = readl(port_mmio + SATA_IFCFG); ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */ if (want_gen2i) ifcfg |= (1 << 7); /* enable gen2i speed */ writelfl(ifcfg, port_mmio + SATA_IFCFG); } static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, unsigned int port_no) { void __iomem *port_mmio = mv_port_base(mmio, port_no); /* * The datasheet warns against setting EDMA_RESET when EDMA is active * (but doesn't say what the problem might be). So we first try * to disable the EDMA engine before doing the EDMA_RESET operation. */ mv_stop_edma_engine(port_mmio); writelfl(EDMA_RESET, port_mmio + EDMA_CMD); if (!IS_GEN_I(hpriv)) { /* Enable 3.0gb/s link speed: this survives EDMA_RESET */ mv_setup_ifcfg(port_mmio, 1); } /* * Strobing EDMA_RESET here causes a hard reset of the SATA transport, * link, and physical layers. It resets all SATA interface registers * (except for SATA_IFCFG), and issues a COMRESET to the dev. */ writelfl(EDMA_RESET, port_mmio + EDMA_CMD); udelay(25); /* allow reset propagation */ writelfl(0, port_mmio + EDMA_CMD); hpriv->ops->phy_errata(hpriv, mmio, port_no); if (IS_GEN_I(hpriv)) mdelay(1); } static void mv_pmp_select(struct ata_port *ap, int pmp) { if (sata_pmp_supported(ap)) { void __iomem *port_mmio = mv_ap_base(ap); u32 reg = readl(port_mmio + SATA_IFCTL); int old = reg & 0xf; if (old != pmp) { reg = (reg & ~0xf) | pmp; writelfl(reg, port_mmio + SATA_IFCTL); } } } static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline) { mv_pmp_select(link->ap, sata_srst_pmp(link)); return sata_std_hardreset(link, class, deadline); } static int mv_softreset(struct ata_link *link, unsigned int *class, unsigned long deadline) { mv_pmp_select(link->ap, sata_srst_pmp(link)); return ata_sff_softreset(link, class, deadline); } static int mv_hardreset(struct ata_link *link, unsigned int *class, unsigned long deadline) { struct ata_port *ap = link->ap; struct mv_host_priv *hpriv = ap->host->private_data; struct mv_port_priv *pp = ap->private_data; void __iomem *mmio = hpriv->base; int rc, attempts = 0, extra = 0; u32 sstatus; bool online; mv_reset_channel(hpriv, mmio, ap->port_no); pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; pp->pp_flags &= ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY); /* Workaround for errata FEr SATA#10 (part 2) */ do { const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); rc = sata_link_hardreset(link, timing, deadline + extra, &online, NULL); rc = online ? 
-EAGAIN : rc; if (rc) return rc; sata_scr_read(link, SCR_STATUS, &sstatus); if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) { /* Force 1.5gb/s link speed and try again */ mv_setup_ifcfg(mv_ap_base(ap), 0); if (time_after(jiffies + HZ, deadline)) extra = HZ; /* only extend it once, max */ } } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123); mv_save_cached_regs(ap); mv_edma_cfg(ap, 0, 0); return rc; } static void mv_eh_freeze(struct ata_port *ap) { mv_stop_edma(ap); mv_enable_port_irqs(ap, 0); } static void mv_eh_thaw(struct ata_port *ap) { struct mv_host_priv *hpriv = ap->host->private_data; unsigned int port = ap->port_no; unsigned int hardport = mv_hardport_from_port(port); void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port); void __iomem *port_mmio = mv_ap_base(ap); u32 hc_irq_cause; /* clear EDMA errors on this port */ writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE); /* clear pending irq events */ hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport); writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE); mv_enable_port_irqs(ap, ERR_IRQ); } /** * mv_port_init - Perform some early initialization on a single port. * @port: libata data structure storing shadow register addresses * @port_mmio: base address of the port * * Initialize shadow register mmio addresses, clear outstanding * interrupts on the port, and unmask interrupts for the future * start of the port. * * LOCKING: * Inherited from caller. */ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio) { void __iomem *serr, *shd_base = port_mmio + SHD_BLK; /* PIO related setup */ port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA); port->error_addr = port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR); port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT); port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL); port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM); port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH); port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE); port->status_addr = port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS); /* special case: control/altstatus doesn't have ATA_REG_ address */ port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST; /* Clear any currently outstanding port interrupt conditions */ serr = port_mmio + mv_scr_offset(SCR_ERROR); writelfl(readl(serr), serr); writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE); /* unmask all non-transient EDMA error interrupts */ writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK); VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n", readl(port_mmio + EDMA_CFG), readl(port_mmio + EDMA_ERR_IRQ_CAUSE), readl(port_mmio + EDMA_ERR_IRQ_MASK)); } static unsigned int mv_in_pcix_mode(struct ata_host *host) { struct mv_host_priv *hpriv = host->private_data; void __iomem *mmio = hpriv->base; u32 reg; if (IS_SOC(hpriv) || !IS_PCIE(hpriv)) return 0; /* not PCI-X capable */ reg = readl(mmio + MV_PCI_MODE); if ((reg & MV_PCI_MODE_MASK) == 0) return 0; /* conventional PCI mode */ return 1; /* chip is in PCI-X mode */ } static int mv_pci_cut_through_okay(struct ata_host *host) { struct mv_host_priv *hpriv = host->private_data; void __iomem *mmio = hpriv->base; u32 reg; if (!mv_in_pcix_mode(host)) { reg = readl(mmio + MV_PCI_COMMAND); if (reg & MV_PCI_COMMAND_MRDTRIG) return 0; /* not okay */ } return 1; /* okay */ } static void mv_60x1b2_errata_pci7(struct ata_host *host) { struct mv_host_priv *hpriv = host->private_data; void __iomem *mmio = hpriv->base; 
/* workaround for 60x1-B2 errata PCI#7 */ if (mv_in_pcix_mode(host)) { u32 reg = readl(mmio + MV_PCI_COMMAND); writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND); } } static int mv_chip_id(struct ata_host *host, unsigned int board_idx) { struct pci_dev *pdev = to_pci_dev(host->dev); struct mv_host_priv *hpriv = host->private_data; u32 hp_flags = hpriv->hp_flags; switch (board_idx) { case chip_5080: hpriv->ops = &mv5xxx_ops; hp_flags |= MV_HP_GEN_I; switch (pdev->revision) { case 0x1: hp_flags |= MV_HP_ERRATA_50XXB0; break; case 0x3: hp_flags |= MV_HP_ERRATA_50XXB2; break; default: dev_warn(&pdev->dev, "Applying 50XXB2 workarounds to unknown rev\n"); hp_flags |= MV_HP_ERRATA_50XXB2; break; } break; case chip_504x: case chip_508x: hpriv->ops = &mv5xxx_ops; hp_flags |= MV_HP_GEN_I; switch (pdev->revision) { case 0x0: hp_flags |= MV_HP_ERRATA_50XXB0; break; case 0x3: hp_flags |= MV_HP_ERRATA_50XXB2; break; default: dev_warn(&pdev->dev, "Applying B2 workarounds to unknown rev\n"); hp_flags |= MV_HP_ERRATA_50XXB2; break; } break; case chip_604x: case chip_608x: hpriv->ops = &mv6xxx_ops; hp_flags |= MV_HP_GEN_II; switch (pdev->revision) { case 0x7: mv_60x1b2_errata_pci7(host); hp_flags |= MV_HP_ERRATA_60X1B2; break; case 0x9: hp_flags |= MV_HP_ERRATA_60X1C0; break; default: dev_warn(&pdev->dev, "Applying B2 workarounds to unknown rev\n"); hp_flags |= MV_HP_ERRATA_60X1B2; break; } break; case chip_7042: hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH; if (pdev->vendor == PCI_VENDOR_ID_TTI && (pdev->device == 0x2300 || pdev->device == 0x2310)) { /* * Highpoint RocketRAID PCIe 23xx series cards: * * Unconfigured drives are treated as "Legacy" * by the BIOS, and it overwrites sector 8 with * a "Lgcy" metadata block prior to Linux boot. * * Configured drives (RAID or JBOD) leave sector 8 * alone, but instead overwrite a high numbered * sector for the RAID metadata. This sector can * be determined exactly, by truncating the physical * drive capacity to a nice even GB value. * * RAID metadata is at: (dev->n_sectors & ~0xfffff) * * Warn the user, lest they think we're just buggy. */ printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID" " BIOS CORRUPTS DATA on all attached drives," " regardless of if/how they are configured." " BEWARE!\n"); printk(KERN_WARNING DRV_NAME ": For data safety, do not" " use sectors 8-9 on \"Legacy\" drives," " and avoid the final two gigabytes on" " all RocketRAID BIOS initialized drives.\n"); } /* drop through */ case chip_6042: hpriv->ops = &mv6xxx_ops; hp_flags |= MV_HP_GEN_IIE; if (board_idx == chip_6042 && mv_pci_cut_through_okay(host)) hp_flags |= MV_HP_CUT_THROUGH; switch (pdev->revision) { case 0x2: /* Rev.B0: the first/only public release */ hp_flags |= MV_HP_ERRATA_60X1C0; break; default: dev_warn(&pdev->dev, "Applying 60X1C0 workarounds to unknown rev\n"); hp_flags |= MV_HP_ERRATA_60X1C0; break; } break; case chip_soc: if (soc_is_65n(hpriv)) hpriv->ops = &mv_soc_65n_ops; else hpriv->ops = &mv_soc_ops; hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE | MV_HP_ERRATA_60X1C0; break; default: dev_err(host->dev, "BUG: invalid board index %u\n", board_idx); return 1; } hpriv->hp_flags = hp_flags; if (hp_flags & MV_HP_PCIE) { hpriv->irq_cause_offset = PCIE_IRQ_CAUSE; hpriv->irq_mask_offset = PCIE_IRQ_MASK; hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS; } else { hpriv->irq_cause_offset = PCI_IRQ_CAUSE; hpriv->irq_mask_offset = PCI_IRQ_MASK; hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS; } return 0; } /** * mv_init_host - Perform some early initialization of the host. 
* @host: ATA host to initialize * * If possible, do an early global reset of the host. Then do * our port init and clear/unmask all/relevant host interrupts. * * LOCKING: * Inherited from caller. */ static int mv_init_host(struct ata_host *host) { int rc = 0, n_hc, port, hc; struct mv_host_priv *hpriv = host->private_data; void __iomem *mmio = hpriv->base; rc = mv_chip_id(host, hpriv->board_idx); if (rc) goto done; if (IS_SOC(hpriv)) { hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE; hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK; } else { hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE; hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK; } /* initialize shadow irq mask with register's value */ hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr); /* global interrupt mask: 0 == mask everything */ mv_set_main_irq_mask(host, ~0, 0); n_hc = mv_get_hc_count(host->ports[0]->flags); for (port = 0; port < host->n_ports; port++) if (hpriv->ops->read_preamp) hpriv->ops->read_preamp(hpriv, port, mmio); rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc); if (rc) goto done; hpriv->ops->reset_flash(hpriv, mmio); hpriv->ops->reset_bus(host, mmio); hpriv->ops->enable_leds(hpriv, mmio); for (port = 0; port < host->n_ports; port++) { struct ata_port *ap = host->ports[port]; void __iomem *port_mmio = mv_port_base(mmio, port); mv_port_init(&ap->ioaddr, port_mmio); } for (hc = 0; hc < n_hc; hc++) { void __iomem *hc_mmio = mv_hc_base(mmio, hc); VPRINTK("HC%i: HC config=0x%08x HC IRQ cause " "(before clear)=0x%08x\n", hc, readl(hc_mmio + HC_CFG), readl(hc_mmio + HC_IRQ_CAUSE)); /* Clear any currently outstanding hc interrupt conditions */ writelfl(0, hc_mmio + HC_IRQ_CAUSE); } if (!IS_SOC(hpriv)) { /* Clear any currently outstanding host interrupt conditions */ writelfl(0, mmio + hpriv->irq_cause_offset); /* and unmask interrupt generation for host regs */ writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset); } /* * enable only global host interrupts for now. * The per-port interrupts get done later as ports are set up. */ mv_set_main_irq_mask(host, 0, PCI_ERR); mv_set_irq_coalescing(host, irq_coalescing_io_count, irq_coalescing_usecs); done: return rc; } static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev) { hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ, MV_CRQB_Q_SZ, 0); if (!hpriv->crqb_pool) return -ENOMEM; hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ, MV_CRPB_Q_SZ, 0); if (!hpriv->crpb_pool) return -ENOMEM; hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ, MV_SG_TBL_SZ, 0); if (!hpriv->sg_tbl_pool) return -ENOMEM; return 0; } static void mv_conf_mbus_windows(struct mv_host_priv *hpriv, const struct mbus_dram_target_info *dram) { int i; for (i = 0; i < 4; i++) { writel(0, hpriv->base + WINDOW_CTRL(i)); writel(0, hpriv->base + WINDOW_BASE(i)); } for (i = 0; i < dram->num_cs; i++) { const struct mbus_dram_window *cs = dram->cs + i; writel(((cs->size - 1) & 0xffff0000) | (cs->mbus_attr << 8) | (dram->mbus_dram_target_id << 4) | 1, hpriv->base + WINDOW_CTRL(i)); writel(cs->base, hpriv->base + WINDOW_BASE(i)); } } /** * mv_platform_probe - handle a positive probe of an soc Marvell * host * @pdev: platform device found * * LOCKING: * Inherited from caller. 
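 *
 * Returns 0 on success or a negative errno on failure.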
*/ static int mv_platform_probe(struct platform_device *pdev) { const struct mv_sata_platform_data *mv_platform_data; const struct mbus_dram_target_info *dram; const struct ata_port_info *ppi[] = { &mv_port_info[chip_soc], NULL }; struct ata_host *host; struct mv_host_priv *hpriv; struct resource *res; int n_ports = 0, irq = 0; int rc; #if defined(CONFIG_HAVE_CLK) int port; #endif ata_print_version_once(&pdev->dev, DRV_VERSION); /* * Simple resource validation .. */ if (unlikely(pdev->num_resources != 2)) { dev_err(&pdev->dev, "invalid number of resources\n"); return -EINVAL; } /* * Get the register base first */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) return -EINVAL; /* allocate host */ if (pdev->dev.of_node) { of_property_read_u32(pdev->dev.of_node, "nr-ports", &n_ports); irq = irq_of_parse_and_map(pdev->dev.of_node, 0); } else { mv_platform_data = pdev->dev.platform_data; n_ports = mv_platform_data->n_ports; irq = platform_get_irq(pdev, 0); } host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); if (!host || !hpriv) return -ENOMEM; #if defined(CONFIG_HAVE_CLK) hpriv->port_clks = devm_kzalloc(&pdev->dev, sizeof(struct clk *) * n_ports, GFP_KERNEL); if (!hpriv->port_clks) return -ENOMEM; #endif host->private_data = hpriv; hpriv->n_ports = n_ports; hpriv->board_idx = chip_soc; host->iomap = NULL; hpriv->base = devm_ioremap(&pdev->dev, res->start, resource_size(res)); hpriv->base -= SATAHC0_REG_BASE; #if defined(CONFIG_HAVE_CLK) hpriv->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(hpriv->clk)) dev_notice(&pdev->dev, "cannot get optional clkdev\n"); else clk_prepare_enable(hpriv->clk); for (port = 0; port < n_ports; port++) { char port_number[16]; sprintf(port_number, "%d", port); hpriv->port_clks[port] = clk_get(&pdev->dev, port_number); if (!IS_ERR(hpriv->port_clks[port])) clk_prepare_enable(hpriv->port_clks[port]); } #endif /* * (Re-)program MBUS remapping windows if we are asked to. */ dram = mv_mbus_dram_info(); if (dram) mv_conf_mbus_windows(hpriv, dram); rc = mv_create_dma_pools(hpriv, &pdev->dev); if (rc) goto err; /* * To allow disk hotplug on Armada 370/XP SoCs, the PHY speed must be * updated in the LP_PHY_CTL register. */ if (pdev->dev.of_node && of_device_is_compatible(pdev->dev.of_node, "marvell,armada-370-sata")) hpriv->hp_flags |= MV_HP_FIX_LP_PHY_CTL; /* initialize adapter */ rc = mv_init_host(host); if (rc) goto err; dev_info(&pdev->dev, "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH, host->n_ports); rc = ata_host_activate(host, irq, mv_interrupt, IRQF_SHARED, &mv6_sht); if (!rc) return 0; err: #if defined(CONFIG_HAVE_CLK) if (!IS_ERR(hpriv->clk)) { clk_disable_unprepare(hpriv->clk); clk_put(hpriv->clk); } for (port = 0; port < n_ports; port++) { if (!IS_ERR(hpriv->port_clks[port])) { clk_disable_unprepare(hpriv->port_clks[port]); clk_put(hpriv->port_clks[port]); } } #endif return rc; } /* * * mv_platform_remove - unplug a platform interface * @pdev: platform device * * A platform bus SATA device has been unplugged. Perform the needed * cleanup. Also called on module unload for any active devices. 
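 * Detaches the ATA host and releases any clocks taken at probe time.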
*/ static int mv_platform_remove(struct platform_device *pdev) { struct ata_host *host = platform_get_drvdata(pdev); #if defined(CONFIG_HAVE_CLK) struct mv_host_priv *hpriv = host->private_data; int port; #endif ata_host_detach(host); #if defined(CONFIG_HAVE_CLK) if (!IS_ERR(hpriv->clk)) { clk_disable_unprepare(hpriv->clk); clk_put(hpriv->clk); } for (port = 0; port < host->n_ports; port++) { if (!IS_ERR(hpriv->port_clks[port])) { clk_disable_unprepare(hpriv->port_clks[port]); clk_put(hpriv->port_clks[port]); } } #endif return 0; } #ifdef CONFIG_PM static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state) { struct ata_host *host = platform_get_drvdata(pdev); if (host) return ata_host_suspend(host, state); else return 0; } static int mv_platform_resume(struct platform_device *pdev) { struct ata_host *host = platform_get_drvdata(pdev); const struct mbus_dram_target_info *dram; int ret; if (host) { struct mv_host_priv *hpriv = host->private_data; /* * (Re-)program MBUS remapping windows if we are asked to. */ dram = mv_mbus_dram_info(); if (dram) mv_conf_mbus_windows(hpriv, dram); /* initialize adapter */ ret = mv_init_host(host); if (ret) { printk(KERN_ERR DRV_NAME ": Error during HW init\n"); return ret; } ata_host_resume(host); } return 0; } #else #define mv_platform_suspend NULL #define mv_platform_resume NULL #endif #ifdef CONFIG_OF static struct of_device_id mv_sata_dt_ids[] = { { .compatible = "marvell,armada-370-sata", }, { .compatible = "marvell,orion-sata", }, {}, }; MODULE_DEVICE_TABLE(of, mv_sata_dt_ids); #endif static struct platform_driver mv_platform_driver = { .probe = mv_platform_probe, .remove = mv_platform_remove, .suspend = mv_platform_suspend, .resume = mv_platform_resume, .driver = { .name = DRV_NAME, .owner = THIS_MODULE, .of_match_table = of_match_ptr(mv_sata_dt_ids), }, }; #ifdef CONFIG_PCI static int mv_pci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); #ifdef CONFIG_PM static int mv_pci_device_resume(struct pci_dev *pdev); #endif static struct pci_driver mv_pci_driver = { .name = DRV_NAME, .id_table = mv_pci_tbl, .probe = mv_pci_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM .suspend = ata_pci_device_suspend, .resume = mv_pci_device_resume, #endif }; /* move to PCI layer or libata core? */ static int pci_go_64(struct pci_dev *pdev) { int rc; if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (rc) { rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { dev_err(&pdev->dev, "64-bit DMA enable failed\n"); return rc; } } } else { rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { dev_err(&pdev->dev, "32-bit DMA enable failed\n"); return rc; } rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) { dev_err(&pdev->dev, "32-bit consistent DMA enable failed\n"); return rc; } } return rc; } /** * mv_print_info - Dump key info to kernel log for perusal. * @host: ATA host to print info about * * FIXME: complete this. * * LOCKING: * Inherited from caller. 
*/ static void mv_print_info(struct ata_host *host) { struct pci_dev *pdev = to_pci_dev(host->dev); struct mv_host_priv *hpriv = host->private_data; u8 scc; const char *scc_s, *gen; /* Use this to determine the HW stepping of the chip so we know * what errata to workaround */ pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc); if (scc == 0) scc_s = "SCSI"; else if (scc == 0x01) scc_s = "RAID"; else scc_s = "?"; if (IS_GEN_I(hpriv)) gen = "I"; else if (IS_GEN_II(hpriv)) gen = "II"; else if (IS_GEN_IIE(hpriv)) gen = "IIE"; else gen = "?"; dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n", gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports, scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx"); } /** * mv_pci_init_one - handle a positive probe of a PCI Marvell host * @pdev: PCI device found * @ent: PCI device ID entry for the matched host * * LOCKING: * Inherited from caller. */ static int mv_pci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { unsigned int board_idx = (unsigned int)ent->driver_data; const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL }; struct ata_host *host; struct mv_host_priv *hpriv; int n_ports, port, rc; ata_print_version_once(&pdev->dev, DRV_VERSION); /* allocate host */ n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC; host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); if (!host || !hpriv) return -ENOMEM; host->private_data = hpriv; hpriv->n_ports = n_ports; hpriv->board_idx = board_idx; /* acquire resources */ rc = pcim_enable_device(pdev); if (rc) return rc; rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME); if (rc == -EBUSY) pcim_pin_device(pdev); if (rc) return rc; host->iomap = pcim_iomap_table(pdev); hpriv->base = host->iomap[MV_PRIMARY_BAR]; rc = pci_go_64(pdev); if (rc) return rc; rc = mv_create_dma_pools(hpriv, &pdev->dev); if (rc) return rc; for (port = 0; port < host->n_ports; port++) { struct ata_port *ap = host->ports[port]; void __iomem *port_mmio = mv_port_base(hpriv->base, port); unsigned int offset = port_mmio - hpriv->base; ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio"); ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port"); } /* initialize adapter */ rc = mv_init_host(host); if (rc) return rc; /* Enable message-switched interrupts, if requested */ if (msi && pci_enable_msi(pdev) == 0) hpriv->hp_flags |= MV_HP_FLAG_MSI; mv_dump_pci_cfg(pdev, 0x68); mv_print_info(host); pci_set_master(pdev); pci_try_set_mwi(pdev); return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED, IS_GEN_I(hpriv) ? 
&mv5_sht : &mv6_sht); } #ifdef CONFIG_PM static int mv_pci_device_resume(struct pci_dev *pdev) { struct ata_host *host = pci_get_drvdata(pdev); int rc; rc = ata_pci_device_do_resume(pdev); if (rc) return rc; /* initialize adapter */ rc = mv_init_host(host); if (rc) return rc; ata_host_resume(host); return 0; } #endif #endif static int mv_platform_probe(struct platform_device *pdev); static int mv_platform_remove(struct platform_device *pdev); static int __init mv_init(void) { int rc = -ENODEV; #ifdef CONFIG_PCI rc = pci_register_driver(&mv_pci_driver); if (rc < 0) return rc; #endif rc = platform_driver_register(&mv_platform_driver); #ifdef CONFIG_PCI if (rc < 0) pci_unregister_driver(&mv_pci_driver); #endif return rc; } static void __exit mv_exit(void) { #ifdef CONFIG_PCI pci_unregister_driver(&mv_pci_driver); #endif platform_driver_unregister(&mv_platform_driver); } MODULE_AUTHOR("Brett Russ"); MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, mv_pci_tbl); MODULE_VERSION(DRV_VERSION); MODULE_ALIAS("platform:" DRV_NAME); module_init(mv_init); module_exit(mv_exit);
gpl-2.0
TEAM-RAZOR-DEVICES/kernel_cyanogen_msm8916
drivers/staging/tidspbridge/core/dsp-clock.c
1576
8878
/* * clk.c * * DSP-BIOS Bridge driver support functions for TI OMAP processors. * * Clock and Timer services. * * Copyright (C) 2005-2006 Texas Instruments, Inc. * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ #define L4_34XX_BASE 0x48000000 #include <linux/types.h> /* ----------------------------------- Host OS */ #include <dspbridge/host_os.h> #include <plat/dmtimer.h> #include <linux/platform_data/asoc-ti-mcbsp.h> /* ----------------------------------- DSP/BIOS Bridge */ #include <dspbridge/dbdefs.h> #include <dspbridge/drv.h> #include <dspbridge/dev.h> #include "_tiomap.h" /* ----------------------------------- This */ #include <dspbridge/clk.h> /* ----------------------------------- Defines, Data Structures, Typedefs */ #define OMAP_SSI_OFFSET 0x58000 #define OMAP_SSI_SIZE 0x1000 #define OMAP_SSI_SYSCONFIG_OFFSET 0x10 #define SSI_AUTOIDLE (1 << 0) #define SSI_SIDLE_SMARTIDLE (2 << 3) #define SSI_MIDLE_NOIDLE (1 << 12) /* Clk types requested by the dsp */ #define IVA2_CLK 0 #define GPT_CLK 1 #define WDT_CLK 2 #define MCBSP_CLK 3 #define SSI_CLK 4 /* Bridge GPT id (1 - 4), DM Timer id (5 - 8) */ #define DMT_ID(id) ((id) + 4) #define DM_TIMER_CLOCKS 4 /* Bridge MCBSP id (6 - 10), OMAP Mcbsp id (0 - 4) */ #define MCBSP_ID(id) ((id) - 6) static struct omap_dm_timer *timer[4]; struct clk *iva2_clk; struct dsp_ssi { struct clk *sst_fck; struct clk *ssr_fck; struct clk *ick; }; static struct dsp_ssi ssi; static u32 dsp_clocks; static inline u32 is_dsp_clk_active(u32 clk, u8 id) { return clk & (1 << id); } static inline void set_dsp_clk_active(u32 *clk, u8 id) { *clk |= (1 << id); } static inline void set_dsp_clk_inactive(u32 *clk, u8 id) { *clk &= ~(1 << id); } static s8 get_clk_type(u8 id) { s8 type; if (id == DSP_CLK_IVA2) type = IVA2_CLK; else if (id <= DSP_CLK_GPT8) type = GPT_CLK; else if (id == DSP_CLK_WDT3) type = WDT_CLK; else if (id <= DSP_CLK_MCBSP5) type = MCBSP_CLK; else if (id == DSP_CLK_SSI) type = SSI_CLK; else type = -1; return type; } /* * ======== dsp_clk_exit ======== * Purpose: * Cleanup CLK module. */ void dsp_clk_exit(void) { int i; dsp_clock_disable_all(dsp_clocks); for (i = 0; i < DM_TIMER_CLOCKS; i++) omap_dm_timer_free(timer[i]); clk_unprepare(iva2_clk); clk_put(iva2_clk); clk_unprepare(ssi.sst_fck); clk_put(ssi.sst_fck); clk_unprepare(ssi.ssr_fck); clk_put(ssi.ssr_fck); clk_unprepare(ssi.ick); clk_put(ssi.ick); } /* * ======== dsp_clk_init ======== * Purpose: * Initialize CLK module. 
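 *
 * Requests the four DM timers (ids 5 - 8), the iva2_ck clock and the three
 * SSI clocks; failures are only reported via dev_err().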
*/ void dsp_clk_init(void) { static struct platform_device dspbridge_device; int i, id; dspbridge_device.dev.bus = &platform_bus_type; for (i = 0, id = 5; i < DM_TIMER_CLOCKS; i++, id++) timer[i] = omap_dm_timer_request_specific(id); iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck"); if (IS_ERR(iva2_clk)) dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk); else clk_prepare(iva2_clk); ssi.sst_fck = clk_get(&dspbridge_device.dev, "ssi_sst_fck"); ssi.ssr_fck = clk_get(&dspbridge_device.dev, "ssi_ssr_fck"); ssi.ick = clk_get(&dspbridge_device.dev, "ssi_ick"); if (IS_ERR(ssi.sst_fck) || IS_ERR(ssi.ssr_fck) || IS_ERR(ssi.ick)) { dev_err(bridge, "failed to get ssi: sst %p, ssr %p, ick %p\n", ssi.sst_fck, ssi.ssr_fck, ssi.ick); } else { clk_prepare(ssi.sst_fck); clk_prepare(ssi.ssr_fck); clk_prepare(ssi.ick); } } /** * dsp_gpt_wait_overflow - set gpt overflow and wait for fixed timeout * @clk_id: GP Timer clock id. * @load: Overflow value. * * Sets an overflow interrupt for the desired GPT waiting for a timeout * of 5 msecs for the interrupt to occur. */ void dsp_gpt_wait_overflow(short int clk_id, unsigned int load) { struct omap_dm_timer *gpt = timer[clk_id - 1]; unsigned long timeout; if (!gpt) return; /* Enable overflow interrupt */ omap_dm_timer_set_int_enable(gpt, OMAP_TIMER_INT_OVERFLOW); /* * Set counter value to overflow counter after * one tick and start timer. */ omap_dm_timer_set_load_start(gpt, 0, load); /* Wait 80us for timer to overflow */ udelay(80); timeout = msecs_to_jiffies(5); /* Check interrupt status and wait for interrupt */ while (!(omap_dm_timer_read_status(gpt) & OMAP_TIMER_INT_OVERFLOW)) { if (time_is_after_jiffies(timeout)) { pr_err("%s: GPTimer interrupt failed\n", __func__); break; } } } /* * ======== dsp_clk_enable ======== * Purpose: * Enable Clock. * */ int dsp_clk_enable(enum dsp_clk_id clk_id) { int status = 0; if (is_dsp_clk_active(dsp_clocks, clk_id)) { dev_err(bridge, "WARN: clock id %d already enabled\n", clk_id); goto out; } switch (get_clk_type(clk_id)) { case IVA2_CLK: clk_enable(iva2_clk); break; case GPT_CLK: status = omap_dm_timer_start(timer[clk_id - 1]); break; #ifdef CONFIG_SND_OMAP_SOC_MCBSP case MCBSP_CLK: omap_mcbsp_request(MCBSP_ID(clk_id)); omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PAD_SRC); break; #endif case WDT_CLK: dev_err(bridge, "ERROR: DSP requested to enable WDT3 clk\n"); break; case SSI_CLK: clk_enable(ssi.sst_fck); clk_enable(ssi.ssr_fck); clk_enable(ssi.ick); /* * The SSI module needs to be configured not to have the forced * idle for the master interface. If it is set to forced idle, * the SSI module transitions to standby, thereby causing the * client on the DSP to hang waiting for the SSI module to * be active after enabling the clocks */ ssi_clk_prepare(true); break; default: dev_err(bridge, "Invalid clock id for enable\n"); status = -EPERM; } if (!status) set_dsp_clk_active(&dsp_clocks, clk_id); out: return status; } /** * dsp_clock_enable_all - Enable clocks used by the DSP * @dev_context Driver's device context structure * * This function enables all the peripheral clocks that were requested by DSP. */ u32 dsp_clock_enable_all(u32 dsp_per_clocks) { u32 clk_id; u32 status = -EPERM; for (clk_id = 0; clk_id < DSP_CLK_NOT_DEFINED; clk_id++) { if (is_dsp_clk_active(dsp_per_clocks, clk_id)) status = dsp_clk_enable(clk_id); } return status; } /* * ======== dsp_clk_disable ======== * Purpose: * Disable the clock.
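 * Disabling a clock that is not currently active only logs an error
 * and returns 0 without changing any state.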
* */ int dsp_clk_disable(enum dsp_clk_id clk_id) { int status = 0; if (!is_dsp_clk_active(dsp_clocks, clk_id)) { dev_err(bridge, "ERR: clock id %d already disabled\n", clk_id); goto out; } switch (get_clk_type(clk_id)) { case IVA2_CLK: clk_disable(iva2_clk); break; case GPT_CLK: status = omap_dm_timer_stop(timer[clk_id - 1]); break; #ifdef CONFIG_SND_OMAP_SOC_MCBSP case MCBSP_CLK: omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PRCM_SRC); omap_mcbsp_free(MCBSP_ID(clk_id)); break; #endif case WDT_CLK: dev_err(bridge, "ERROR: DSP requested to disable WDT3 clk\n"); break; case SSI_CLK: ssi_clk_prepare(false); ssi_clk_prepare(false); clk_disable(ssi.sst_fck); clk_disable(ssi.ssr_fck); clk_disable(ssi.ick); break; default: dev_err(bridge, "Invalid clock id for disable\n"); status = -EPERM; } if (!status) set_dsp_clk_inactive(&dsp_clocks, clk_id); out: return status; } /** * dsp_clock_disable_all - Disable all active clocks * @dev_context Driver's device context structure * * This function disables all the peripheral clocks that were enabled by DSP. * It is meant to be called only when DSP is entering hibernation or when DSP * is in error state. */ u32 dsp_clock_disable_all(u32 dsp_per_clocks) { u32 clk_id; u32 status = -EPERM; for (clk_id = 0; clk_id < DSP_CLK_NOT_DEFINED; clk_id++) { if (is_dsp_clk_active(dsp_per_clocks, clk_id)) status = dsp_clk_disable(clk_id); } return status; } u32 dsp_clk_get_iva2_rate(void) { u32 clk_speed_khz; clk_speed_khz = clk_get_rate(iva2_clk); clk_speed_khz /= 1000; dev_dbg(bridge, "%s: clk speed Khz = %d\n", __func__, clk_speed_khz); return clk_speed_khz; } void ssi_clk_prepare(bool FLAG) { void __iomem *ssi_base; unsigned int value; ssi_base = ioremap(L4_34XX_BASE + OMAP_SSI_OFFSET, OMAP_SSI_SIZE); if (!ssi_base) { pr_err("%s: error, SSI not configured\n", __func__); return; } if (FLAG) { /* Set Autoidle, SIDLEMode to smart idle, and MIDLEmode to * no idle */ value = SSI_AUTOIDLE | SSI_SIDLE_SMARTIDLE | SSI_MIDLE_NOIDLE; } else { /* Set Autoidle, SIDLEMode to forced idle, and MIDLEmode to * forced idle */ value = SSI_AUTOIDLE; } __raw_writel(value, ssi_base + OMAP_SSI_SYSCONFIG_OFFSET); iounmap(ssi_base); }
gpl-2.0
jamison904/kernel_m919_aokp
drivers/gpio/gpiolib.c
1576
45843
#include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/spinlock.h> #include <linux/device.h> #include <linux/err.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/gpio.h> #include <linux/of_gpio.h> #include <linux/idr.h> #include <linux/slab.h> #define CREATE_TRACE_POINTS #include <trace/events/gpio.h> /* Optional implementation infrastructure for GPIO interfaces. * * Platforms may want to use this if they tend to use very many GPIOs * that aren't part of a System-On-Chip core; or across I2C/SPI/etc. * * When kernel footprint or instruction count is an issue, simpler * implementations may be preferred. The GPIO programming interface * allows for inlining speed-critical get/set operations for common * cases, so that access to SOC-integrated GPIOs can sometimes cost * only an instruction or two per bit. */ /* When debugging, extend minimal trust to callers and platform code. * Also emit diagnostic messages that may help initial bringup, when * board setup or driver bugs are most common. * * Otherwise, minimize overhead in what may be bitbanging codepaths. */ #ifdef DEBUG #define extra_checks 1 #else #define extra_checks 0 #endif /* gpio_lock prevents conflicts during gpio_desc[] table updates. * While any GPIO is requested, its gpio_chip is not removable; * each GPIO's "requested" flag serves as a lock and refcount. */ static DEFINE_SPINLOCK(gpio_lock); struct gpio_desc { struct gpio_chip *chip; unsigned long flags; /* flag symbols are bit numbers */ #define FLAG_REQUESTED 0 #define FLAG_IS_OUT 1 #define FLAG_RESERVED 2 #define FLAG_EXPORT 3 /* protected by sysfs_lock */ #define FLAG_SYSFS 4 /* exported via /sys/class/gpio/control */ #define FLAG_TRIG_FALL 5 /* trigger on falling edge */ #define FLAG_TRIG_RISE 6 /* trigger on rising edge */ #define FLAG_ACTIVE_LOW 7 /* sysfs value has active low */ #define FLAG_OPEN_DRAIN 8 /* Gpio is open drain type */ #define FLAG_OPEN_SOURCE 9 /* Gpio is open source type */ #define ID_SHIFT 16 /* add new flags before this one */ #define GPIO_FLAGS_MASK ((1 << ID_SHIFT) - 1) #define GPIO_TRIGGER_MASK (BIT(FLAG_TRIG_FALL) | BIT(FLAG_TRIG_RISE)) #ifdef CONFIG_DEBUG_FS const char *label; #endif }; static struct gpio_desc gpio_desc[ARCH_NR_GPIOS]; #ifdef CONFIG_GPIO_SYSFS static DEFINE_IDR(dirent_idr); #endif static inline void desc_set_label(struct gpio_desc *d, const char *label) { #ifdef CONFIG_DEBUG_FS d->label = label; #endif } /* Warn when drivers omit gpio_request() calls -- legal but ill-advised * when setting direction, and otherwise illegal. Until board setup code * and drivers use explicit requests everywhere (which won't happen when * those calls have no teeth) we can't avoid autorequesting. This nag * message should motivate switching to explicit requests... so should * the weaker cleanup after faults, compared to gpio_request(). * * NOTE: the autorequest mechanism is going away; at this point it's * only "legal" in the sense that (old) code using it won't break yet, * but instead only triggers a WARN() stack dump. 
*/ static int gpio_ensure_requested(struct gpio_desc *desc, unsigned offset) { const struct gpio_chip *chip = desc->chip; const int gpio = chip->base + offset; if (WARN(test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0, "autorequest GPIO-%d\n", gpio)) { if (!try_module_get(chip->owner)) { pr_err("GPIO-%d: module can't be gotten \n", gpio); clear_bit(FLAG_REQUESTED, &desc->flags); /* lose */ return -EIO; } desc_set_label(desc, "[auto]"); /* caller must chip->request() w/o spinlock */ if (chip->request) return 1; } return 0; } /* caller holds gpio_lock *OR* gpio is marked as requested */ struct gpio_chip *gpio_to_chip(unsigned gpio) { return gpio_desc[gpio].chip; } /* dynamic allocation of GPIOs, e.g. on a hotplugged device */ static int gpiochip_find_base(int ngpio) { int i; int spare = 0; int base = -ENOSPC; for (i = ARCH_NR_GPIOS - 1; i >= 0 ; i--) { struct gpio_desc *desc = &gpio_desc[i]; struct gpio_chip *chip = desc->chip; if (!chip && !test_bit(FLAG_RESERVED, &desc->flags)) { spare++; if (spare == ngpio) { base = i; break; } } else { spare = 0; if (chip) i -= chip->ngpio - 1; } } if (gpio_is_valid(base)) pr_debug("%s: found new base at %d\n", __func__, base); return base; } /** * gpiochip_reserve() - reserve range of gpios to use with platform code only * @start: starting gpio number * @ngpio: number of gpios to reserve * Context: platform init, potentially before irqs or kmalloc will work * * Returns a negative errno if any gpio within the range is already reserved * or registered, else returns zero as a success code. Use this function * to mark a range of gpios as unavailable for dynamic gpio number allocation, * for example because its driver support is not yet loaded. */ int __init gpiochip_reserve(int start, int ngpio) { int ret = 0; unsigned long flags; int i; if (!gpio_is_valid(start) || !gpio_is_valid(start + ngpio - 1)) return -EINVAL; spin_lock_irqsave(&gpio_lock, flags); for (i = start; i < start + ngpio; i++) { struct gpio_desc *desc = &gpio_desc[i]; if (desc->chip || test_bit(FLAG_RESERVED, &desc->flags)) { ret = -EBUSY; goto err; } set_bit(FLAG_RESERVED, &desc->flags); } pr_debug("%s: reserved gpios from %d to %d\n", __func__, start, start + ngpio - 1); err: spin_unlock_irqrestore(&gpio_lock, flags); return ret; } #ifdef CONFIG_GPIO_SYSFS /* lock protects against unexport_gpio() being called while * sysfs files are active. */ static DEFINE_MUTEX(sysfs_lock); /* * /sys/class/gpio/gpioN... only for GPIOs that are exported * /direction * * MAY BE OMITTED if kernel won't allow direction changes * * is read/write as "in" or "out" * * may also be written as "high" or "low", initializing * output value as specified ("out" implies "low") * /value * * always readable, subject to hardware behavior * * may be writable, as zero/nonzero * /edge * * configures behavior of poll(2) on /value * * available only if pin can generate IRQs on input * * is read/write as "none", "falling", "rising", or "both" * /active_low * * configures polarity of /value * * is read/write as zero/nonzero * * also affects existing and subsequent "falling" and "rising" * /edge configuration */ static ssize_t gpio_direction_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_desc *desc = dev_get_drvdata(dev); ssize_t status; mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) status = -EIO; else status = sprintf(buf, "%s\n", test_bit(FLAG_IS_OUT, &desc->flags) ? 
"out" : "in"); mutex_unlock(&sysfs_lock); return status; } static ssize_t gpio_direction_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { const struct gpio_desc *desc = dev_get_drvdata(dev); unsigned gpio = desc - gpio_desc; ssize_t status; mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) status = -EIO; else if (sysfs_streq(buf, "high")) status = gpio_direction_output(gpio, 1); else if (sysfs_streq(buf, "out") || sysfs_streq(buf, "low")) status = gpio_direction_output(gpio, 0); else if (sysfs_streq(buf, "in")) status = gpio_direction_input(gpio); else status = -EINVAL; mutex_unlock(&sysfs_lock); return status ? : size; } static /* const */ DEVICE_ATTR(direction, 0644, gpio_direction_show, gpio_direction_store); static ssize_t gpio_value_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_desc *desc = dev_get_drvdata(dev); unsigned gpio = desc - gpio_desc; ssize_t status; mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) { status = -EIO; } else { int value; value = !!gpio_get_value_cansleep(gpio); if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) value = !value; status = sprintf(buf, "%d\n", value); } mutex_unlock(&sysfs_lock); return status; } static ssize_t gpio_value_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { const struct gpio_desc *desc = dev_get_drvdata(dev); unsigned gpio = desc - gpio_desc; ssize_t status; mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) status = -EIO; else if (!test_bit(FLAG_IS_OUT, &desc->flags)) status = -EPERM; else { long value; status = strict_strtol(buf, 0, &value); if (status == 0) { if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) value = !value; gpio_set_value_cansleep(gpio, value != 0); status = size; } } mutex_unlock(&sysfs_lock); return status; } static const DEVICE_ATTR(value, 0644, gpio_value_show, gpio_value_store); static irqreturn_t gpio_sysfs_irq(int irq, void *priv) { struct sysfs_dirent *value_sd = priv; sysfs_notify_dirent(value_sd); return IRQ_HANDLED; } static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev, unsigned long gpio_flags) { struct sysfs_dirent *value_sd; unsigned long irq_flags; int ret, irq, id; if ((desc->flags & GPIO_TRIGGER_MASK) == gpio_flags) return 0; irq = gpio_to_irq(desc - gpio_desc); if (irq < 0) return -EIO; id = desc->flags >> ID_SHIFT; value_sd = idr_find(&dirent_idr, id); if (value_sd) free_irq(irq, value_sd); desc->flags &= ~GPIO_TRIGGER_MASK; if (!gpio_flags) { ret = 0; goto free_id; } irq_flags = IRQF_SHARED; if (test_bit(FLAG_TRIG_FALL, &gpio_flags)) irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING; if (test_bit(FLAG_TRIG_RISE, &gpio_flags)) irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? 
IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING; if (!value_sd) { value_sd = sysfs_get_dirent(dev->kobj.sd, NULL, "value"); if (!value_sd) { ret = -ENODEV; goto err_out; } do { ret = -ENOMEM; if (idr_pre_get(&dirent_idr, GFP_KERNEL)) ret = idr_get_new_above(&dirent_idr, value_sd, 1, &id); } while (ret == -EAGAIN); if (ret) goto free_sd; desc->flags &= GPIO_FLAGS_MASK; desc->flags |= (unsigned long)id << ID_SHIFT; if (desc->flags >> ID_SHIFT != id) { ret = -ERANGE; goto free_id; } } ret = request_any_context_irq(irq, gpio_sysfs_irq, irq_flags, "gpiolib", value_sd); if (ret < 0) goto free_id; desc->flags |= gpio_flags; return 0; free_id: idr_remove(&dirent_idr, id); desc->flags &= GPIO_FLAGS_MASK; free_sd: if (value_sd) sysfs_put(value_sd); err_out: return ret; } static const struct { const char *name; unsigned long flags; } trigger_types[] = { { "none", 0 }, { "falling", BIT(FLAG_TRIG_FALL) }, { "rising", BIT(FLAG_TRIG_RISE) }, { "both", BIT(FLAG_TRIG_FALL) | BIT(FLAG_TRIG_RISE) }, }; static ssize_t gpio_edge_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_desc *desc = dev_get_drvdata(dev); ssize_t status; mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) status = -EIO; else { int i; status = 0; for (i = 0; i < ARRAY_SIZE(trigger_types); i++) if ((desc->flags & GPIO_TRIGGER_MASK) == trigger_types[i].flags) { status = sprintf(buf, "%s\n", trigger_types[i].name); break; } } mutex_unlock(&sysfs_lock); return status; } static ssize_t gpio_edge_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct gpio_desc *desc = dev_get_drvdata(dev); ssize_t status; int i; for (i = 0; i < ARRAY_SIZE(trigger_types); i++) if (sysfs_streq(trigger_types[i].name, buf)) goto found; return -EINVAL; found: mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) status = -EIO; else { status = gpio_setup_irq(desc, dev, trigger_types[i].flags); if (!status) status = size; } mutex_unlock(&sysfs_lock); return status; } static DEVICE_ATTR(edge, 0644, gpio_edge_show, gpio_edge_store); static int sysfs_set_active_low(struct gpio_desc *desc, struct device *dev, int value) { int status = 0; if (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) == !!value) return 0; if (value) set_bit(FLAG_ACTIVE_LOW, &desc->flags); else clear_bit(FLAG_ACTIVE_LOW, &desc->flags); /* reconfigure poll(2) support if enabled on one edge only */ if (dev != NULL && (!!test_bit(FLAG_TRIG_RISE, &desc->flags) ^ !!test_bit(FLAG_TRIG_FALL, &desc->flags))) { unsigned long trigger_flags = desc->flags & GPIO_TRIGGER_MASK; gpio_setup_irq(desc, dev, 0); status = gpio_setup_irq(desc, dev, trigger_flags); } return status; } static ssize_t gpio_active_low_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_desc *desc = dev_get_drvdata(dev); ssize_t status; mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) status = -EIO; else status = sprintf(buf, "%d\n", !!test_bit(FLAG_ACTIVE_LOW, &desc->flags)); mutex_unlock(&sysfs_lock); return status; } static ssize_t gpio_active_low_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct gpio_desc *desc = dev_get_drvdata(dev); ssize_t status; mutex_lock(&sysfs_lock); if (!test_bit(FLAG_EXPORT, &desc->flags)) { status = -EIO; } else { long value; status = strict_strtol(buf, 0, &value); if (status == 0) status = sysfs_set_active_low(desc, dev, value != 0); } mutex_unlock(&sysfs_lock); return status ? 
: size; } static const DEVICE_ATTR(active_low, 0644, gpio_active_low_show, gpio_active_low_store); static const struct attribute *gpio_attrs[] = { &dev_attr_value.attr, &dev_attr_active_low.attr, NULL, }; static const struct attribute_group gpio_attr_group = { .attrs = (struct attribute **) gpio_attrs, }; /* * /sys/class/gpio/gpiochipN/ * /base ... matching gpio_chip.base (N) * /label ... matching gpio_chip.label * /ngpio ... matching gpio_chip.ngpio */ static ssize_t chip_base_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_chip *chip = dev_get_drvdata(dev); return sprintf(buf, "%d\n", chip->base); } static DEVICE_ATTR(base, 0444, chip_base_show, NULL); static ssize_t chip_label_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_chip *chip = dev_get_drvdata(dev); return sprintf(buf, "%s\n", chip->label ? : ""); } static DEVICE_ATTR(label, 0444, chip_label_show, NULL); static ssize_t chip_ngpio_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct gpio_chip *chip = dev_get_drvdata(dev); return sprintf(buf, "%u\n", chip->ngpio); } static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL); static const struct attribute *gpiochip_attrs[] = { &dev_attr_base.attr, &dev_attr_label.attr, &dev_attr_ngpio.attr, NULL, }; static const struct attribute_group gpiochip_attr_group = { .attrs = (struct attribute **) gpiochip_attrs, }; /* * /sys/class/gpio/export ... write-only * integer N ... number of GPIO to export (full access) * /sys/class/gpio/unexport ... write-only * integer N ... number of GPIO to unexport */ static ssize_t export_store(struct class *class, struct class_attribute *attr, const char *buf, size_t len) { long gpio; int status; status = strict_strtol(buf, 0, &gpio); if (status < 0) goto done; /* No extra locking here; FLAG_SYSFS just signifies that the * request and export were done by on behalf of userspace, so * they may be undone on its behalf too. */ status = gpio_request(gpio, "sysfs"); if (status < 0) goto done; status = gpio_export(gpio, true); if (status < 0) gpio_free(gpio); else set_bit(FLAG_SYSFS, &gpio_desc[gpio].flags); done: if (status) pr_debug("%s: status %d\n", __func__, status); return status ? : len; } static ssize_t unexport_store(struct class *class, struct class_attribute *attr, const char *buf, size_t len) { long gpio; int status; status = strict_strtol(buf, 0, &gpio); if (status < 0) goto done; status = -EINVAL; /* reject bogus commands (gpio_unexport ignores them) */ if (!gpio_is_valid(gpio)) goto done; /* No extra locking here; FLAG_SYSFS just signifies that the * request and export were done by on behalf of userspace, so * they may be undone on its behalf too. */ if (test_and_clear_bit(FLAG_SYSFS, &gpio_desc[gpio].flags)) { status = 0; gpio_free(gpio); } done: if (status) pr_debug("%s: status %d\n", __func__, status); return status ? 
: len; } static struct class_attribute gpio_class_attrs[] = { __ATTR(export, 0200, NULL, export_store), __ATTR(unexport, 0200, NULL, unexport_store), __ATTR_NULL, }; static struct class gpio_class = { .name = "gpio", .owner = THIS_MODULE, .class_attrs = gpio_class_attrs, }; /** * gpio_export - export a GPIO through sysfs * @gpio: gpio to make available, already requested * @direction_may_change: true if userspace may change gpio direction * Context: arch_initcall or later * * When drivers want to make a GPIO accessible to userspace after they * have requested it -- perhaps while debugging, or as part of their * public interface -- they may use this routine. If the GPIO can * change direction (some can't) and the caller allows it, userspace * will see "direction" sysfs attribute which may be used to change * the gpio's direction. A "value" attribute will always be provided. * * Returns zero on success, else an error. */ int gpio_export(unsigned gpio, bool direction_may_change) { unsigned long flags; struct gpio_desc *desc; int status = -EINVAL; const char *ioname = NULL; /* can't export until sysfs is available ... */ if (!gpio_class.p) { pr_debug("%s: called too early!\n", __func__); return -ENOENT; } if (!gpio_is_valid(gpio)) goto done; mutex_lock(&sysfs_lock); spin_lock_irqsave(&gpio_lock, flags); desc = &gpio_desc[gpio]; if (test_bit(FLAG_REQUESTED, &desc->flags) && !test_bit(FLAG_EXPORT, &desc->flags)) { status = 0; if (!desc->chip->direction_input || !desc->chip->direction_output) direction_may_change = false; } spin_unlock_irqrestore(&gpio_lock, flags); if (desc->chip->names && desc->chip->names[gpio - desc->chip->base]) ioname = desc->chip->names[gpio - desc->chip->base]; if (status == 0) { struct device *dev; dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0), desc, ioname ? ioname : "gpio%u", gpio); if (!IS_ERR(dev)) { status = sysfs_create_group(&dev->kobj, &gpio_attr_group); if (!status && direction_may_change) status = device_create_file(dev, &dev_attr_direction); if (!status && gpio_to_irq(gpio) >= 0 && (direction_may_change || !test_bit(FLAG_IS_OUT, &desc->flags))) status = device_create_file(dev, &dev_attr_edge); if (status != 0) device_unregister(dev); } else status = PTR_ERR(dev); if (status == 0) set_bit(FLAG_EXPORT, &desc->flags); } mutex_unlock(&sysfs_lock); done: if (status) pr_debug("%s: gpio%d status %d\n", __func__, gpio, status); return status; } EXPORT_SYMBOL_GPL(gpio_export); static int match_export(struct device *dev, void *data) { return dev_get_drvdata(dev) == data; } /** * gpio_export_link - create a sysfs link to an exported GPIO node * @dev: device under which to create symlink * @name: name of the symlink * @gpio: gpio to create symlink to, already exported * * Set up a symlink from /sys/.../dev/name to /sys/class/gpio/gpioN * node. Caller is responsible for unlinking. * * Returns zero on success, else an error. 
*/ int gpio_export_link(struct device *dev, const char *name, unsigned gpio) { struct gpio_desc *desc; int status = -EINVAL; if (!gpio_is_valid(gpio)) goto done; mutex_lock(&sysfs_lock); desc = &gpio_desc[gpio]; if (test_bit(FLAG_EXPORT, &desc->flags)) { struct device *tdev; tdev = class_find_device(&gpio_class, NULL, desc, match_export); if (tdev != NULL) { status = sysfs_create_link(&dev->kobj, &tdev->kobj, name); } else { status = -ENODEV; } } mutex_unlock(&sysfs_lock); done: if (status) pr_debug("%s: gpio%d status %d\n", __func__, gpio, status); return status; } EXPORT_SYMBOL_GPL(gpio_export_link); /** * gpio_sysfs_set_active_low - set the polarity of gpio sysfs value * @gpio: gpio to change * @value: non-zero to use active low, i.e. inverted values * * Set the polarity of /sys/class/gpio/gpioN/value sysfs attribute. * The GPIO does not have to be exported yet. If poll(2) support has * been enabled for either rising or falling edge, it will be * reconfigured to follow the new polarity. * * Returns zero on success, else an error. */ int gpio_sysfs_set_active_low(unsigned gpio, int value) { struct gpio_desc *desc; struct device *dev = NULL; int status = -EINVAL; if (!gpio_is_valid(gpio)) goto done; mutex_lock(&sysfs_lock); desc = &gpio_desc[gpio]; if (test_bit(FLAG_EXPORT, &desc->flags)) { dev = class_find_device(&gpio_class, NULL, desc, match_export); if (dev == NULL) { status = -ENODEV; goto unlock; } } status = sysfs_set_active_low(desc, dev, value); unlock: mutex_unlock(&sysfs_lock); done: if (status) pr_debug("%s: gpio%d status %d\n", __func__, gpio, status); return status; } EXPORT_SYMBOL_GPL(gpio_sysfs_set_active_low); /** * gpio_unexport - reverse effect of gpio_export() * @gpio: gpio to make unavailable * * This is implicit on gpio_free(). */ void gpio_unexport(unsigned gpio) { struct gpio_desc *desc; int status = 0; struct device *dev = NULL; if (!gpio_is_valid(gpio)) { status = -EINVAL; goto done; } mutex_lock(&sysfs_lock); desc = &gpio_desc[gpio]; if (test_bit(FLAG_EXPORT, &desc->flags)) { dev = class_find_device(&gpio_class, NULL, desc, match_export); if (dev) { gpio_setup_irq(desc, dev, 0); clear_bit(FLAG_EXPORT, &desc->flags); } else status = -ENODEV; } mutex_unlock(&sysfs_lock); if (dev) { device_unregister(dev); put_device(dev); } done: if (status) pr_debug("%s: gpio%d status %d\n", __func__, gpio, status); } EXPORT_SYMBOL_GPL(gpio_unexport); static int gpiochip_export(struct gpio_chip *chip) { int status; struct device *dev; /* Many systems register gpio chips for SOC support very early, * before driver model support is available. In those cases we * export this later, in gpiolib_sysfs_init() ... here we just * verify that _some_ field of gpio_class got initialized. 
*/ if (!gpio_class.p) return 0; /* use chip->base for the ID; it's already known to be unique */ mutex_lock(&sysfs_lock); dev = device_create(&gpio_class, chip->dev, MKDEV(0, 0), chip, "gpiochip%d", chip->base); if (!IS_ERR(dev)) { status = sysfs_create_group(&dev->kobj, &gpiochip_attr_group); } else status = PTR_ERR(dev); chip->exported = (status == 0); mutex_unlock(&sysfs_lock); if (status) { unsigned long flags; unsigned gpio; spin_lock_irqsave(&gpio_lock, flags); gpio = chip->base; while (gpio_desc[gpio].chip == chip) gpio_desc[gpio++].chip = NULL; spin_unlock_irqrestore(&gpio_lock, flags); pr_debug("%s: chip %s status %d\n", __func__, chip->label, status); } return status; } static void gpiochip_unexport(struct gpio_chip *chip) { int status; struct device *dev; mutex_lock(&sysfs_lock); dev = class_find_device(&gpio_class, NULL, chip, match_export); if (dev) { put_device(dev); device_unregister(dev); chip->exported = 0; status = 0; } else status = -ENODEV; mutex_unlock(&sysfs_lock); if (status) pr_debug("%s: chip %s status %d\n", __func__, chip->label, status); } static int __init gpiolib_sysfs_init(void) { int status; unsigned long flags; unsigned gpio; status = class_register(&gpio_class); if (status < 0) return status; /* Scan and register the gpio_chips which registered very * early (e.g. before the class_register above was called). * * We run before arch_initcall() so chip->dev nodes can have * registered, and so arch_initcall() can always gpio_export(). */ spin_lock_irqsave(&gpio_lock, flags); for (gpio = 0; gpio < ARCH_NR_GPIOS; gpio++) { struct gpio_chip *chip; chip = gpio_desc[gpio].chip; if (!chip || chip->exported) continue; spin_unlock_irqrestore(&gpio_lock, flags); status = gpiochip_export(chip); spin_lock_irqsave(&gpio_lock, flags); } spin_unlock_irqrestore(&gpio_lock, flags); return status; } postcore_initcall(gpiolib_sysfs_init); #else static inline int gpiochip_export(struct gpio_chip *chip) { return 0; } static inline void gpiochip_unexport(struct gpio_chip *chip) { } #endif /* CONFIG_GPIO_SYSFS */ /** * gpiochip_add() - register a gpio_chip * @chip: the chip to register, with chip->base initialized * Context: potentially before irqs or kmalloc will work * * Returns a negative errno if the chip can't be registered, such as * because the chip->base is invalid or already associated with a * different chip. Otherwise it returns zero as a success code. * * When gpiochip_add() is called very early during boot, so that GPIOs * can be freely used, the chip->dev device must be registered before * the gpio framework's arch_initcall(). Otherwise sysfs initialization * for GPIOs will fail rudely. * * If chip->base is negative, this requests dynamic assignment of * a range of valid GPIOs. */ int gpiochip_add(struct gpio_chip *chip) { unsigned long flags; int status = 0; unsigned id; int base = chip->base; if ((!gpio_is_valid(base) || !gpio_is_valid(base + chip->ngpio - 1)) && base >= 0) { status = -EINVAL; goto fail; } spin_lock_irqsave(&gpio_lock, flags); if (base < 0) { base = gpiochip_find_base(chip->ngpio); if (base < 0) { status = base; goto unlock; } chip->base = base; } /* these GPIO numbers must not be managed by another gpio_chip */ for (id = base; id < base + chip->ngpio; id++) { if (gpio_desc[id].chip != NULL) { status = -EBUSY; break; } } if (status == 0) { for (id = base; id < base + chip->ngpio; id++) { gpio_desc[id].chip = chip; /* REVISIT: most hardware initializes GPIOs as * inputs (often with pullups enabled) so power * usage is minimized. 
Linux code should set the * gpio direction first thing; but until it does, * we may expose the wrong direction in sysfs. */ gpio_desc[id].flags = !chip->direction_input ? (1 << FLAG_IS_OUT) : 0; } } of_gpiochip_add(chip); unlock: spin_unlock_irqrestore(&gpio_lock, flags); if (status) goto fail; status = gpiochip_export(chip); if (status) goto fail; pr_info("gpiochip_add: registered GPIOs %d to %d on device: %s\n", chip->base, chip->base + chip->ngpio - 1, chip->label ? : "generic"); return 0; fail: /* failures here can mean systems won't boot... */ pr_err("gpiochip_add: gpios %d..%d (%s) failed to register\n", chip->base, chip->base + chip->ngpio - 1, chip->label ? : "generic"); return status; } EXPORT_SYMBOL_GPL(gpiochip_add); /** * gpiochip_remove() - unregister a gpio_chip * @chip: the chip to unregister * * A gpio_chip with any GPIOs still requested may not be removed. */ int gpiochip_remove(struct gpio_chip *chip) { unsigned long flags; int status = 0; unsigned id; spin_lock_irqsave(&gpio_lock, flags); of_gpiochip_remove(chip); for (id = chip->base; id < chip->base + chip->ngpio; id++) { if (test_bit(FLAG_REQUESTED, &gpio_desc[id].flags)) { status = -EBUSY; break; } } if (status == 0) { for (id = chip->base; id < chip->base + chip->ngpio; id++) gpio_desc[id].chip = NULL; } spin_unlock_irqrestore(&gpio_lock, flags); if (status == 0) gpiochip_unexport(chip); return status; } EXPORT_SYMBOL_GPL(gpiochip_remove); /** * gpiochip_find() - iterator for locating a specific gpio_chip * @data: data to pass to match function * @callback: Callback function to check gpio_chip * * Similar to bus_find_device. It returns a reference to a gpio_chip as * determined by a user supplied @match callback. The callback should return * 0 if the device doesn't match and non-zero if it does. If the callback is * non-zero, this function will return to the caller and not iterate over any * more gpio_chips. */ struct gpio_chip *gpiochip_find(const void *data, int (*match)(struct gpio_chip *chip, const void *data)) { struct gpio_chip *chip = NULL; unsigned long flags; int i; spin_lock_irqsave(&gpio_lock, flags); for (i = 0; i < ARCH_NR_GPIOS; i++) { if (!gpio_desc[i].chip) continue; if (match(gpio_desc[i].chip, data)) { chip = gpio_desc[i].chip; break; } } spin_unlock_irqrestore(&gpio_lock, flags); return chip; } EXPORT_SYMBOL_GPL(gpiochip_find); /* These "optional" allocation calls help prevent drivers from stomping * on each other, and help provide better diagnostics in debugfs. * They're called even less than the "set direction" calls. */ int gpio_request(unsigned gpio, const char *label) { struct gpio_desc *desc; struct gpio_chip *chip; int status = -EINVAL; unsigned long flags; spin_lock_irqsave(&gpio_lock, flags); if (!gpio_is_valid(gpio)) goto done; desc = &gpio_desc[gpio]; chip = desc->chip; if (chip == NULL) goto done; if (!try_module_get(chip->owner)) goto done; /* NOTE: gpio_request() can be called in early boot, * before IRQs are enabled, for non-sleeping (SOC) GPIOs. */ if (test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0) { desc_set_label(desc, label ? 
: "?"); status = 0; } else { status = -EBUSY; module_put(chip->owner); goto done; } if (chip->request) { /* chip->request may sleep */ spin_unlock_irqrestore(&gpio_lock, flags); status = chip->request(chip, gpio - chip->base); spin_lock_irqsave(&gpio_lock, flags); if (status < 0) { desc_set_label(desc, NULL); module_put(chip->owner); clear_bit(FLAG_REQUESTED, &desc->flags); } } done: if (status) pr_debug("gpio_request: gpio-%d (%s) status %d\n", gpio, label ? : "?", status); spin_unlock_irqrestore(&gpio_lock, flags); return status; } EXPORT_SYMBOL_GPL(gpio_request); void gpio_free(unsigned gpio) { unsigned long flags; struct gpio_desc *desc; struct gpio_chip *chip; might_sleep(); if (!gpio_is_valid(gpio)) { WARN_ON(extra_checks); return; } gpio_unexport(gpio); spin_lock_irqsave(&gpio_lock, flags); desc = &gpio_desc[gpio]; chip = desc->chip; if (chip && test_bit(FLAG_REQUESTED, &desc->flags)) { if (chip->free) { spin_unlock_irqrestore(&gpio_lock, flags); might_sleep_if(chip->can_sleep); chip->free(chip, gpio - chip->base); spin_lock_irqsave(&gpio_lock, flags); } desc_set_label(desc, NULL); module_put(desc->chip->owner); clear_bit(FLAG_ACTIVE_LOW, &desc->flags); clear_bit(FLAG_REQUESTED, &desc->flags); clear_bit(FLAG_OPEN_DRAIN, &desc->flags); clear_bit(FLAG_OPEN_SOURCE, &desc->flags); } else WARN_ON(extra_checks); spin_unlock_irqrestore(&gpio_lock, flags); } EXPORT_SYMBOL_GPL(gpio_free); /** * gpio_request_one - request a single GPIO with initial configuration * @gpio: the GPIO number * @flags: GPIO configuration as specified by GPIOF_* * @label: a literal description string of this GPIO */ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label) { int err; err = gpio_request(gpio, label); if (err) return err; if (flags & GPIOF_OPEN_DRAIN) set_bit(FLAG_OPEN_DRAIN, &gpio_desc[gpio].flags); if (flags & GPIOF_OPEN_SOURCE) set_bit(FLAG_OPEN_SOURCE, &gpio_desc[gpio].flags); if (flags & GPIOF_DIR_IN) err = gpio_direction_input(gpio); else err = gpio_direction_output(gpio, (flags & GPIOF_INIT_HIGH) ? 1 : 0); if (err) gpio_free(gpio); return err; } EXPORT_SYMBOL_GPL(gpio_request_one); /** * gpio_request_array - request multiple GPIOs in a single call * @array: array of the 'struct gpio' * @num: how many GPIOs in the array */ int gpio_request_array(const struct gpio *array, size_t num) { int i, err; for (i = 0; i < num; i++, array++) { err = gpio_request_one(array->gpio, array->flags, array->label); if (err) goto err_free; } return 0; err_free: while (i--) gpio_free((--array)->gpio); return err; } EXPORT_SYMBOL_GPL(gpio_request_array); /** * gpio_free_array - release multiple GPIOs in a single call * @array: array of the 'struct gpio' * @num: how many GPIOs in the array */ void gpio_free_array(const struct gpio *array, size_t num) { while (num--) gpio_free((array++)->gpio); } EXPORT_SYMBOL_GPL(gpio_free_array); /** * gpiochip_is_requested - return string iff signal was requested * @chip: controller managing the signal * @offset: of signal within controller's 0..(ngpio - 1) range * * Returns NULL if the GPIO is not currently requested, else a string. * If debugfs support is enabled, the string returned is the label passed * to gpio_request(); otherwise it is a meaningless constant. * * This function is for use by GPIO controller drivers. The label can * help with diagnostics, and knowing that the signal is used as a GPIO * can help avoid accidentally multiplexing it to another controller. 
*/ const char *gpiochip_is_requested(struct gpio_chip *chip, unsigned offset) { unsigned gpio = chip->base + offset; if (!gpio_is_valid(gpio) || gpio_desc[gpio].chip != chip) return NULL; if (test_bit(FLAG_REQUESTED, &gpio_desc[gpio].flags) == 0) return NULL; #ifdef CONFIG_DEBUG_FS return gpio_desc[gpio].label; #else return "?"; #endif } EXPORT_SYMBOL_GPL(gpiochip_is_requested); /* Drivers MUST set GPIO direction before making get/set calls. In * some cases this is done in early boot, before IRQs are enabled. * * As a rule these aren't called more than once (except for drivers * using the open-drain emulation idiom) so these are natural places * to accumulate extra debugging checks. Note that we can't (yet) * rely on gpio_request() having been called beforehand. */ int gpio_direction_input(unsigned gpio) { unsigned long flags; struct gpio_chip *chip; struct gpio_desc *desc = &gpio_desc[gpio]; int status = -EINVAL; spin_lock_irqsave(&gpio_lock, flags); if (!gpio_is_valid(gpio)) goto fail; chip = desc->chip; if (!chip || !chip->get || !chip->direction_input) goto fail; gpio -= chip->base; if (gpio >= chip->ngpio) goto fail; status = gpio_ensure_requested(desc, gpio); if (status < 0) goto fail; /* now we know the gpio is valid and chip won't vanish */ spin_unlock_irqrestore(&gpio_lock, flags); might_sleep_if(chip->can_sleep); if (status) { status = chip->request(chip, gpio); if (status < 0) { pr_debug("GPIO-%d: chip request fail, %d\n", chip->base + gpio, status); /* and it's not available to anyone else ... * gpio_request() is the fully clean solution. */ goto lose; } } status = chip->direction_input(chip, gpio); if (status == 0) clear_bit(FLAG_IS_OUT, &desc->flags); trace_gpio_direction(chip->base + gpio, 1, status); lose: return status; fail: spin_unlock_irqrestore(&gpio_lock, flags); if (status) pr_debug("%s: gpio-%d status %d\n", __func__, gpio, status); return status; } EXPORT_SYMBOL_GPL(gpio_direction_input); int gpio_direction_output(unsigned gpio, int value) { unsigned long flags; struct gpio_chip *chip; struct gpio_desc *desc = &gpio_desc[gpio]; int status = -EINVAL; /* Open drain pin should not be driven to 1 */ if (value && test_bit(FLAG_OPEN_DRAIN, &desc->flags)) return gpio_direction_input(gpio); /* Open source pin should not be driven to 0 */ if (!value && test_bit(FLAG_OPEN_SOURCE, &desc->flags)) return gpio_direction_input(gpio); spin_lock_irqsave(&gpio_lock, flags); if (!gpio_is_valid(gpio)) goto fail; chip = desc->chip; if (!chip || !chip->set || !chip->direction_output) goto fail; gpio -= chip->base; if (gpio >= chip->ngpio) goto fail; status = gpio_ensure_requested(desc, gpio); if (status < 0) goto fail; /* now we know the gpio is valid and chip won't vanish */ spin_unlock_irqrestore(&gpio_lock, flags); might_sleep_if(chip->can_sleep); if (status) { status = chip->request(chip, gpio); if (status < 0) { pr_debug("GPIO-%d: chip request fail, %d\n", chip->base + gpio, status); /* and it's not available to anyone else ... * gpio_request() is the fully clean solution. 
*/ goto lose; } } status = chip->direction_output(chip, gpio, value); if (status == 0) set_bit(FLAG_IS_OUT, &desc->flags); trace_gpio_value(chip->base + gpio, 0, value); trace_gpio_direction(chip->base + gpio, 0, status); lose: return status; fail: spin_unlock_irqrestore(&gpio_lock, flags); if (status) pr_debug("%s: gpio-%d status %d\n", __func__, gpio, status); return status; } EXPORT_SYMBOL_GPL(gpio_direction_output); /** * gpio_set_debounce - sets @debounce time for a @gpio * @gpio: the gpio to set debounce time * @debounce: debounce time is microseconds */ int gpio_set_debounce(unsigned gpio, unsigned debounce) { unsigned long flags; struct gpio_chip *chip; struct gpio_desc *desc = &gpio_desc[gpio]; int status = -EINVAL; spin_lock_irqsave(&gpio_lock, flags); if (!gpio_is_valid(gpio)) goto fail; chip = desc->chip; if (!chip || !chip->set || !chip->set_debounce) goto fail; gpio -= chip->base; if (gpio >= chip->ngpio) goto fail; status = gpio_ensure_requested(desc, gpio); if (status < 0) goto fail; /* now we know the gpio is valid and chip won't vanish */ spin_unlock_irqrestore(&gpio_lock, flags); might_sleep_if(chip->can_sleep); return chip->set_debounce(chip, gpio, debounce); fail: spin_unlock_irqrestore(&gpio_lock, flags); if (status) pr_debug("%s: gpio-%d status %d\n", __func__, gpio, status); return status; } EXPORT_SYMBOL_GPL(gpio_set_debounce); /* I/O calls are only valid after configuration completed; the relevant * "is this a valid GPIO" error checks should already have been done. * * "Get" operations are often inlinable as reading a pin value register, * and masking the relevant bit in that register. * * When "set" operations are inlinable, they involve writing that mask to * one register to set a low value, or a different register to set it high. * Otherwise locking is needed, so there may be little value to inlining. * *------------------------------------------------------------------------ * * IMPORTANT!!! The hot paths -- get/set value -- assume that callers * have requested the GPIO. That can include implicit requesting by * a direction setting call. Marking a gpio as requested locks its chip * in memory, guaranteeing that these table lookups need no more locking * and that gpiochip_remove() will fail. * * REVISIT when debugging, consider adding some instrumentation to ensure * that the GPIO was actually requested. */ /** * __gpio_get_value() - return a gpio's value * @gpio: gpio whose value will be returned * Context: any * * This is used directly or indirectly to implement gpio_get_value(). * It returns the zero or nonzero value provided by the associated * gpio_chip.get() method; or zero if no such method is provided. */ int __gpio_get_value(unsigned gpio) { struct gpio_chip *chip; int value; chip = gpio_to_chip(gpio); /* Should be using gpio_get_value_cansleep() */ WARN_ON(chip->can_sleep); value = chip->get ? chip->get(chip, gpio - chip->base) : 0; trace_gpio_value(gpio, 1, value); return value; } EXPORT_SYMBOL_GPL(__gpio_get_value); /* * _gpio_set_open_drain_value() - Set the open drain gpio's value. * @gpio: Gpio whose state need to be set. * @chip: Gpio chip. * @value: Non-zero for setting it HIGH otherise it will set to LOW. 
*/ static void _gpio_set_open_drain_value(unsigned gpio, struct gpio_chip *chip, int value) { int err = 0; if (value) { err = chip->direction_input(chip, gpio - chip->base); if (!err) clear_bit(FLAG_IS_OUT, &gpio_desc[gpio].flags); } else { err = chip->direction_output(chip, gpio - chip->base, 0); if (!err) set_bit(FLAG_IS_OUT, &gpio_desc[gpio].flags); } trace_gpio_direction(gpio, value, err); if (err < 0) pr_err("%s: Error in set_value for open drain gpio%d err %d\n", __func__, gpio, err); } /* * _gpio_set_open_source() - Set the open source gpio's value. * @gpio: Gpio whose state need to be set. * @chip: Gpio chip. * @value: Non-zero for setting it HIGH otherise it will set to LOW. */ static void _gpio_set_open_source_value(unsigned gpio, struct gpio_chip *chip, int value) { int err = 0; if (value) { err = chip->direction_output(chip, gpio - chip->base, 1); if (!err) set_bit(FLAG_IS_OUT, &gpio_desc[gpio].flags); } else { err = chip->direction_input(chip, gpio - chip->base); if (!err) clear_bit(FLAG_IS_OUT, &gpio_desc[gpio].flags); } trace_gpio_direction(gpio, !value, err); if (err < 0) pr_err("%s: Error in set_value for open source gpio%d err %d\n", __func__, gpio, err); } /** * __gpio_set_value() - assign a gpio's value * @gpio: gpio whose value will be assigned * @value: value to assign * Context: any * * This is used directly or indirectly to implement gpio_set_value(). * It invokes the associated gpio_chip.set() method. */ void __gpio_set_value(unsigned gpio, int value) { struct gpio_chip *chip; chip = gpio_to_chip(gpio); /* Should be using gpio_set_value_cansleep() */ WARN_ON(chip->can_sleep); trace_gpio_value(gpio, 0, value); if (test_bit(FLAG_OPEN_DRAIN, &gpio_desc[gpio].flags)) _gpio_set_open_drain_value(gpio, chip, value); else if (test_bit(FLAG_OPEN_SOURCE, &gpio_desc[gpio].flags)) _gpio_set_open_source_value(gpio, chip, value); else chip->set(chip, gpio - chip->base, value); } EXPORT_SYMBOL_GPL(__gpio_set_value); /** * __gpio_cansleep() - report whether gpio value access will sleep * @gpio: gpio in question * Context: any * * This is used directly or indirectly to implement gpio_cansleep(). It * returns nonzero if access reading or writing the GPIO value can sleep. */ int __gpio_cansleep(unsigned gpio) { struct gpio_chip *chip; /* only call this on GPIOs that are valid! */ chip = gpio_to_chip(gpio); return chip->can_sleep; } EXPORT_SYMBOL_GPL(__gpio_cansleep); /** * __gpio_to_irq() - return the IRQ corresponding to a GPIO * @gpio: gpio whose IRQ will be returned (already requested) * Context: any * * This is used directly or indirectly to implement gpio_to_irq(). * It returns the number of the IRQ signaled by this (input) GPIO, * or a negative errno. */ int __gpio_to_irq(unsigned gpio) { struct gpio_chip *chip; chip = gpio_to_chip(gpio); return chip->to_irq ? chip->to_irq(chip, gpio - chip->base) : -ENXIO; } EXPORT_SYMBOL_GPL(__gpio_to_irq); /* There's no value in making it easy to inline GPIO calls that may sleep. * Common examples include ones connected to I2C or SPI chips. */ int gpio_get_value_cansleep(unsigned gpio) { struct gpio_chip *chip; int value; might_sleep_if(extra_checks); chip = gpio_to_chip(gpio); value = chip->get ? 
chip->get(chip, gpio - chip->base) : 0; trace_gpio_value(gpio, 1, value); return value; } EXPORT_SYMBOL_GPL(gpio_get_value_cansleep); void gpio_set_value_cansleep(unsigned gpio, int value) { struct gpio_chip *chip; might_sleep_if(extra_checks); chip = gpio_to_chip(gpio); trace_gpio_value(gpio, 0, value); if (test_bit(FLAG_OPEN_DRAIN, &gpio_desc[gpio].flags)) _gpio_set_open_drain_value(gpio, chip, value); else if (test_bit(FLAG_OPEN_SOURCE, &gpio_desc[gpio].flags)) _gpio_set_open_source_value(gpio, chip, value); else chip->set(chip, gpio - chip->base, value); } EXPORT_SYMBOL_GPL(gpio_set_value_cansleep); #ifdef CONFIG_DEBUG_FS static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip) { unsigned i; unsigned gpio = chip->base; struct gpio_desc *gdesc = &gpio_desc[gpio]; int is_out; for (i = 0; i < chip->ngpio; i++, gpio++, gdesc++) { if (!test_bit(FLAG_REQUESTED, &gdesc->flags)) continue; is_out = test_bit(FLAG_IS_OUT, &gdesc->flags); seq_printf(s, " gpio-%-3d (%-20.20s) %s %s", gpio, gdesc->label, is_out ? "out" : "in ", chip->get ? (chip->get(chip, i) ? "hi" : "lo") : "? "); seq_printf(s, "\n"); } } static int gpiolib_show(struct seq_file *s, void *unused) { struct gpio_chip *chip = NULL; unsigned gpio; int started = 0; /* REVISIT this isn't locked against gpio_chip removal ... */ for (gpio = 0; gpio_is_valid(gpio); gpio++) { struct device *dev; if (chip == gpio_desc[gpio].chip) continue; chip = gpio_desc[gpio].chip; if (!chip) continue; seq_printf(s, "%sGPIOs %d-%d", started ? "\n" : "", chip->base, chip->base + chip->ngpio - 1); dev = chip->dev; if (dev) seq_printf(s, ", %s/%s", dev->bus ? dev->bus->name : "no-bus", dev_name(dev)); if (chip->label) seq_printf(s, ", %s", chip->label); if (chip->can_sleep) seq_printf(s, ", can sleep"); seq_printf(s, ":\n"); started = 1; if (chip->dbg_show) chip->dbg_show(s, chip); else gpiolib_dbg_show(s, chip); } return 0; } static int gpiolib_open(struct inode *inode, struct file *file) { return single_open(file, gpiolib_show, NULL); } static const struct file_operations gpiolib_operations = { .open = gpiolib_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init gpiolib_debugfs_init(void) { /* /sys/kernel/debug/gpio */ (void) debugfs_create_file("gpio", S_IFREG | S_IRUGO, NULL, NULL, &gpiolib_operations); return 0; } subsys_initcall(gpiolib_debugfs_init); #endif /* DEBUG_FS */
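For context, the request/direction/value helpers exported by this file are normally consumed by drivers in the pattern sketched below. This is a hedged example rather than code from the tree: the GPIO number, label, and function name are placeholders, and only calls defined above (gpio_request_one, gpio_set_value_cansleep, gpio_free) are used.

#include <linux/gpio.h>

#define EXAMPLE_RESET_GPIO	42	/* placeholder; a real driver gets this from platform data */

static int example_pulse_reset(void)
{
	int err;

	/* request the line and configure it as an output driven high;
	 * per gpio_request_one() above, GPIOF_INIT_HIGH without GPIOF_DIR_IN
	 * means "output, initial value 1" */
	err = gpio_request_one(EXAMPLE_RESET_GPIO, GPIOF_INIT_HIGH, "example-reset");
	if (err)
		return err;

	/* drive the line low; the _cansleep variant also works for
	 * I2C/SPI expander-backed GPIOs, not just SoC-integrated ones */
	gpio_set_value_cansleep(EXAMPLE_RESET_GPIO, 0);

	gpio_free(EXAMPLE_RESET_GPIO);
	return 0;
}

Exposing the same line to userspace afterwards would simply be a call to gpio_export(EXAMPLE_RESET_GPIO, false), after which the /sys/class/gpio/gpioN attributes documented in the comment block above apply.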
gpl-2.0
opensgn/N8000
net/mac80211/cfg.c
1832
55507
/* * mac80211 configuration hooks for cfg80211 * * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> * * This file is GPLv2 as found in COPYING. */ #include <linux/ieee80211.h> #include <linux/nl80211.h> #include <linux/rtnetlink.h> #include <linux/slab.h> #include <net/net_namespace.h> #include <linux/rcupdate.h> #include <net/cfg80211.h> #include "ieee80211_i.h" #include "driver-ops.h" #include "cfg.h" #include "rate.h" #include "mesh.h" static struct net_device *ieee80211_add_iface(struct wiphy *wiphy, char *name, enum nl80211_iftype type, u32 *flags, struct vif_params *params) { struct ieee80211_local *local = wiphy_priv(wiphy); struct net_device *dev; struct ieee80211_sub_if_data *sdata; int err; err = ieee80211_if_add(local, name, &dev, type, params); if (err) return ERR_PTR(err); if (type == NL80211_IFTYPE_MONITOR && flags) { sdata = IEEE80211_DEV_TO_SUB_IF(dev); sdata->u.mntr_flags = *flags; } return dev; } static int ieee80211_del_iface(struct wiphy *wiphy, struct net_device *dev) { ieee80211_if_remove(IEEE80211_DEV_TO_SUB_IF(dev)); return 0; } static int ieee80211_change_iface(struct wiphy *wiphy, struct net_device *dev, enum nl80211_iftype type, u32 *flags, struct vif_params *params) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); int ret; ret = ieee80211_if_change_type(sdata, type); if (ret) return ret; if (type == NL80211_IFTYPE_AP_VLAN && params && params->use_4addr == 0) rcu_assign_pointer(sdata->u.vlan.sta, NULL); else if (type == NL80211_IFTYPE_STATION && params && params->use_4addr >= 0) sdata->u.mgd.use_4addr = params->use_4addr; if (sdata->vif.type == NL80211_IFTYPE_MONITOR && flags) { struct ieee80211_local *local = sdata->local; if (ieee80211_sdata_running(sdata)) { /* * Prohibit MONITOR_FLAG_COOK_FRAMES to be * changed while the interface is up. * Else we would need to add a lot of cruft * to update everything: * cooked_mntrs, monitor and all fif_* counters * reconfigure hardware */ if ((*flags & MONITOR_FLAG_COOK_FRAMES) != (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)) return -EBUSY; ieee80211_adjust_monitor_flags(sdata, -1); sdata->u.mntr_flags = *flags; ieee80211_adjust_monitor_flags(sdata, 1); ieee80211_configure_filter(local); } else { /* * Because the interface is down, ieee80211_do_stop * and ieee80211_do_open take care of "everything" * mentioned in the comment above. 
*/ sdata->u.mntr_flags = *flags; } } return 0; } static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, u8 key_idx, bool pairwise, const u8 *mac_addr, struct key_params *params) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct sta_info *sta = NULL; struct ieee80211_key *key; int err; if (!ieee80211_sdata_running(sdata)) return -ENETDOWN; /* reject WEP and TKIP keys if WEP failed to initialize */ switch (params->cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_WEP104: if (IS_ERR(sdata->local->wep_tx_tfm)) return -EINVAL; break; default: break; } key = ieee80211_key_alloc(params->cipher, key_idx, params->key_len, params->key, params->seq_len, params->seq); if (IS_ERR(key)) return PTR_ERR(key); if (pairwise) key->conf.flags |= IEEE80211_KEY_FLAG_PAIRWISE; mutex_lock(&sdata->local->sta_mtx); if (mac_addr) { if (ieee80211_vif_is_mesh(&sdata->vif)) sta = sta_info_get(sdata, mac_addr); else sta = sta_info_get_bss(sdata, mac_addr); if (!sta) { ieee80211_key_free(sdata->local, key); err = -ENOENT; goto out_unlock; } } err = ieee80211_key_link(key, sdata, sta); if (err) ieee80211_key_free(sdata->local, key); out_unlock: mutex_unlock(&sdata->local->sta_mtx); return err; } static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, u8 key_idx, bool pairwise, const u8 *mac_addr) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct sta_info *sta; struct ieee80211_key *key = NULL; int ret; mutex_lock(&local->sta_mtx); mutex_lock(&local->key_mtx); if (mac_addr) { ret = -ENOENT; sta = sta_info_get_bss(sdata, mac_addr); if (!sta) goto out_unlock; if (pairwise) key = key_mtx_dereference(local, sta->ptk); else key = key_mtx_dereference(local, sta->gtk[key_idx]); } else key = key_mtx_dereference(local, sdata->keys[key_idx]); if (!key) { ret = -ENOENT; goto out_unlock; } __ieee80211_key_free(key); ret = 0; out_unlock: mutex_unlock(&local->key_mtx); mutex_unlock(&local->sta_mtx); return ret; } static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev, u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie, void (*callback)(void *cookie, struct key_params *params)) { struct ieee80211_sub_if_data *sdata; struct sta_info *sta = NULL; u8 seq[6] = {0}; struct key_params params; struct ieee80211_key *key = NULL; u32 iv32; u16 iv16; int err = -ENOENT; sdata = IEEE80211_DEV_TO_SUB_IF(dev); rcu_read_lock(); if (mac_addr) { sta = sta_info_get_bss(sdata, mac_addr); if (!sta) goto out; if (pairwise) key = rcu_dereference(sta->ptk); else if (key_idx < NUM_DEFAULT_KEYS) key = rcu_dereference(sta->gtk[key_idx]); } else key = rcu_dereference(sdata->keys[key_idx]); if (!key) goto out; memset(&params, 0, sizeof(params)); params.cipher = key->conf.cipher; switch (key->conf.cipher) { case WLAN_CIPHER_SUITE_TKIP: iv32 = key->u.tkip.tx.iv32; iv16 = key->u.tkip.tx.iv16; if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) drv_get_tkip_seq(sdata->local, key->conf.hw_key_idx, &iv32, &iv16); seq[0] = iv16 & 0xff; seq[1] = (iv16 >> 8) & 0xff; seq[2] = iv32 & 0xff; seq[3] = (iv32 >> 8) & 0xff; seq[4] = (iv32 >> 16) & 0xff; seq[5] = (iv32 >> 24) & 0xff; params.seq = seq; params.seq_len = 6; break; case WLAN_CIPHER_SUITE_CCMP: seq[0] = key->u.ccmp.tx_pn[5]; seq[1] = key->u.ccmp.tx_pn[4]; seq[2] = key->u.ccmp.tx_pn[3]; seq[3] = key->u.ccmp.tx_pn[2]; seq[4] = key->u.ccmp.tx_pn[1]; seq[5] = key->u.ccmp.tx_pn[0]; params.seq = seq; params.seq_len = 6; break; 
case WLAN_CIPHER_SUITE_AES_CMAC: seq[0] = key->u.aes_cmac.tx_pn[5]; seq[1] = key->u.aes_cmac.tx_pn[4]; seq[2] = key->u.aes_cmac.tx_pn[3]; seq[3] = key->u.aes_cmac.tx_pn[2]; seq[4] = key->u.aes_cmac.tx_pn[1]; seq[5] = key->u.aes_cmac.tx_pn[0]; params.seq = seq; params.seq_len = 6; break; } params.key = key->conf.key; params.key_len = key->conf.keylen; callback(cookie, &params); err = 0; out: rcu_read_unlock(); return err; } static int ieee80211_config_default_key(struct wiphy *wiphy, struct net_device *dev, u8 key_idx, bool uni, bool multi) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); ieee80211_set_default_key(sdata, key_idx, uni, multi); return 0; } static int ieee80211_config_default_mgmt_key(struct wiphy *wiphy, struct net_device *dev, u8 key_idx) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); ieee80211_set_default_mgmt_key(sdata, key_idx); return 0; } static void rate_idx_to_bitrate(struct rate_info *rate, struct sta_info *sta, int idx) { if (!(rate->flags & RATE_INFO_FLAGS_MCS)) { struct ieee80211_supported_band *sband; sband = sta->local->hw.wiphy->bands[ sta->local->hw.conf.channel->band]; rate->legacy = sband->bitrates[idx].bitrate; } else rate->mcs = idx; } static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) { struct ieee80211_sub_if_data *sdata = sta->sdata; struct timespec uptime; sinfo->generation = sdata->local->sta_generation; sinfo->filled = STATION_INFO_INACTIVE_TIME | STATION_INFO_RX_BYTES | STATION_INFO_TX_BYTES | STATION_INFO_RX_PACKETS | STATION_INFO_TX_PACKETS | STATION_INFO_TX_RETRIES | STATION_INFO_TX_FAILED | STATION_INFO_TX_BITRATE | STATION_INFO_RX_BITRATE | STATION_INFO_RX_DROP_MISC | STATION_INFO_BSS_PARAM | STATION_INFO_CONNECTED_TIME; do_posix_clock_monotonic_gettime(&uptime); sinfo->connected_time = uptime.tv_sec - sta->last_connected; sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx); sinfo->rx_bytes = sta->rx_bytes; sinfo->tx_bytes = sta->tx_bytes; sinfo->rx_packets = sta->rx_packets; sinfo->tx_packets = sta->tx_packets; sinfo->tx_retries = sta->tx_retry_count; sinfo->tx_failed = sta->tx_retry_failed; sinfo->rx_dropped_misc = sta->rx_dropped; if ((sta->local->hw.flags & IEEE80211_HW_SIGNAL_DBM) || (sta->local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)) { sinfo->filled |= STATION_INFO_SIGNAL | STATION_INFO_SIGNAL_AVG; sinfo->signal = (s8)sta->last_signal; sinfo->signal_avg = (s8) -ewma_read(&sta->avg_signal); } sinfo->txrate.flags = 0; if (sta->last_tx_rate.flags & IEEE80211_TX_RC_MCS) sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS; if (sta->last_tx_rate.flags & IEEE80211_TX_RC_40_MHZ_WIDTH) sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH; if (sta->last_tx_rate.flags & IEEE80211_TX_RC_SHORT_GI) sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; rate_idx_to_bitrate(&sinfo->txrate, sta, sta->last_tx_rate.idx); sinfo->rxrate.flags = 0; if (sta->last_rx_rate_flag & RX_FLAG_HT) sinfo->rxrate.flags |= RATE_INFO_FLAGS_MCS; if (sta->last_rx_rate_flag & RX_FLAG_40MHZ) sinfo->rxrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH; if (sta->last_rx_rate_flag & RX_FLAG_SHORT_GI) sinfo->rxrate.flags |= RATE_INFO_FLAGS_SHORT_GI; rate_idx_to_bitrate(&sinfo->rxrate, sta, sta->last_rx_rate_idx); if (ieee80211_vif_is_mesh(&sdata->vif)) { #ifdef CONFIG_MAC80211_MESH sinfo->filled |= STATION_INFO_LLID | STATION_INFO_PLID | STATION_INFO_PLINK_STATE; sinfo->llid = le16_to_cpu(sta->llid); sinfo->plid = le16_to_cpu(sta->plid); sinfo->plink_state = sta->plink_state; #endif } sinfo->bss_param.flags = 0; if 
(sdata->vif.bss_conf.use_cts_prot) sinfo->bss_param.flags |= BSS_PARAM_FLAGS_CTS_PROT; if (sdata->vif.bss_conf.use_short_preamble) sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE; if (sdata->vif.bss_conf.use_short_slot) sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME; sinfo->bss_param.dtim_period = sdata->local->hw.conf.ps_dtim_period; sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int; } static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *mac, struct station_info *sinfo) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct sta_info *sta; int ret = -ENOENT; rcu_read_lock(); sta = sta_info_get_by_idx(sdata, idx); if (sta) { ret = 0; memcpy(mac, sta->sta.addr, ETH_ALEN); sta_set_sinfo(sta, sinfo); } rcu_read_unlock(); return ret; } static int ieee80211_dump_survey(struct wiphy *wiphy, struct net_device *dev, int idx, struct survey_info *survey) { struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); return drv_get_survey(local, idx, survey); } static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev, u8 *mac, struct station_info *sinfo) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct sta_info *sta; int ret = -ENOENT; rcu_read_lock(); sta = sta_info_get_bss(sdata, mac); if (sta) { ret = 0; sta_set_sinfo(sta, sinfo); } rcu_read_unlock(); return ret; } /* * This handles both adding a beacon and setting new beacon info */ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata, struct beacon_parameters *params) { struct beacon_data *new, *old; int new_head_len, new_tail_len; int size; int err = -EINVAL; old = rtnl_dereference(sdata->u.ap.beacon); /* head must not be zero-length */ if (params->head && !params->head_len) return -EINVAL; /* * This is a kludge. beacon interval should really be part * of the beacon information. */ if (params->interval && (sdata->vif.bss_conf.beacon_int != params->interval)) { sdata->vif.bss_conf.beacon_int = params->interval; ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_INT); } /* Need to have a beacon head if we don't have one yet */ if (!params->head && !old) return err; /* sorry, no way to start beaconing without dtim period */ if (!params->dtim_period && !old) return err; /* new or old head? */ if (params->head) new_head_len = params->head_len; else new_head_len = old->head_len; /* new or old tail? */ if (params->tail || !old) /* params->tail_len will be zero for !params->tail */ new_tail_len = params->tail_len; else new_tail_len = old->tail_len; size = sizeof(*new) + new_head_len + new_tail_len; new = kzalloc(size, GFP_KERNEL); if (!new) return -ENOMEM; /* start filling the new info now */ /* new or old dtim period? 
*/ if (params->dtim_period) new->dtim_period = params->dtim_period; else new->dtim_period = old->dtim_period; /* * pointers go into the block we allocated, * memory is | beacon_data | head | tail | */ new->head = ((u8 *) new) + sizeof(*new); new->tail = new->head + new_head_len; new->head_len = new_head_len; new->tail_len = new_tail_len; /* copy in head */ if (params->head) memcpy(new->head, params->head, new_head_len); else memcpy(new->head, old->head, new_head_len); /* copy in optional tail */ if (params->tail) memcpy(new->tail, params->tail, new_tail_len); else if (old) memcpy(new->tail, old->tail, new_tail_len); sdata->vif.bss_conf.dtim_period = new->dtim_period; rcu_assign_pointer(sdata->u.ap.beacon, new); synchronize_rcu(); kfree(old); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON); return 0; } static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev, struct beacon_parameters *params) { struct ieee80211_sub_if_data *sdata; struct beacon_data *old; sdata = IEEE80211_DEV_TO_SUB_IF(dev); old = rtnl_dereference(sdata->u.ap.beacon); if (old) return -EALREADY; return ieee80211_config_beacon(sdata, params); } static int ieee80211_set_beacon(struct wiphy *wiphy, struct net_device *dev, struct beacon_parameters *params) { struct ieee80211_sub_if_data *sdata; struct beacon_data *old; sdata = IEEE80211_DEV_TO_SUB_IF(dev); old = rtnl_dereference(sdata->u.ap.beacon); if (!old) return -ENOENT; return ieee80211_config_beacon(sdata, params); } static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev) { struct ieee80211_sub_if_data *sdata; struct beacon_data *old; sdata = IEEE80211_DEV_TO_SUB_IF(dev); old = rtnl_dereference(sdata->u.ap.beacon); if (!old) return -ENOENT; rcu_assign_pointer(sdata->u.ap.beacon, NULL); synchronize_rcu(); kfree(old); ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); return 0; } /* Layer 2 Update frame (802.2 Type 1 LLC XID Update response) */ struct iapp_layer2_update { u8 da[ETH_ALEN]; /* broadcast */ u8 sa[ETH_ALEN]; /* STA addr */ __be16 len; /* 6 */ u8 dsap; /* 0 */ u8 ssap; /* 0 */ u8 control; u8 xid_info[3]; } __packed; static void ieee80211_send_layer2_update(struct sta_info *sta) { struct iapp_layer2_update *msg; struct sk_buff *skb; /* Send Level 2 Update Frame to update forwarding tables in layer 2 * bridge devices */ skb = dev_alloc_skb(sizeof(*msg)); if (!skb) return; msg = (struct iapp_layer2_update *)skb_put(skb, sizeof(*msg)); /* 802.2 Type 1 Logical Link Control (LLC) Exchange Identifier (XID) * Update response frame; IEEE Std 802.2-1998, 5.4.1.2.1 */ memset(msg->da, 0xff, ETH_ALEN); memcpy(msg->sa, sta->sta.addr, ETH_ALEN); msg->len = htons(6); msg->dsap = 0; msg->ssap = 0x01; /* NULL LSAP, CR Bit: Response */ msg->control = 0xaf; /* XID response lsb.1111F101. 
* F=0 (no poll command; unsolicited frame) */ msg->xid_info[0] = 0x81; /* XID format identifier */ msg->xid_info[1] = 1; /* LLC types/classes: Type 1 LLC */ msg->xid_info[2] = 0; /* XID sender's receive window size (RW) */ skb->dev = sta->sdata->dev; skb->protocol = eth_type_trans(skb, sta->sdata->dev); memset(skb->cb, 0, sizeof(skb->cb)); netif_rx_ni(skb); } static void sta_apply_parameters(struct ieee80211_local *local, struct sta_info *sta, struct station_parameters *params) { unsigned long flags; u32 rates; int i, j; struct ieee80211_supported_band *sband; struct ieee80211_sub_if_data *sdata = sta->sdata; u32 mask, set; sband = local->hw.wiphy->bands[local->oper_channel->band]; spin_lock_irqsave(&sta->flaglock, flags); mask = params->sta_flags_mask; set = params->sta_flags_set; if (mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) { sta->flags &= ~WLAN_STA_AUTHORIZED; if (set & BIT(NL80211_STA_FLAG_AUTHORIZED)) sta->flags |= WLAN_STA_AUTHORIZED; } if (mask & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) { sta->flags &= ~WLAN_STA_SHORT_PREAMBLE; if (set & BIT(NL80211_STA_FLAG_SHORT_PREAMBLE)) sta->flags |= WLAN_STA_SHORT_PREAMBLE; } if (mask & BIT(NL80211_STA_FLAG_WME)) { sta->flags &= ~WLAN_STA_WME; if (set & BIT(NL80211_STA_FLAG_WME)) sta->flags |= WLAN_STA_WME; } if (mask & BIT(NL80211_STA_FLAG_MFP)) { sta->flags &= ~WLAN_STA_MFP; if (set & BIT(NL80211_STA_FLAG_MFP)) sta->flags |= WLAN_STA_MFP; } if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) { sta->flags &= ~WLAN_STA_AUTH; if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED)) sta->flags |= WLAN_STA_AUTH; } spin_unlock_irqrestore(&sta->flaglock, flags); /* * cfg80211 validates this (1-2007) and allows setting the AID * only when creating a new station entry */ if (params->aid) sta->sta.aid = params->aid; /* * FIXME: updating the following information is racy when this * function is called from ieee80211_change_station(). * However, all this information should be static so * maybe we should just reject attemps to change it. 
*/ if (params->listen_interval >= 0) sta->listen_interval = params->listen_interval; if (params->supported_rates) { rates = 0; for (i = 0; i < params->supported_rates_len; i++) { int rate = (params->supported_rates[i] & 0x7f) * 5; for (j = 0; j < sband->n_bitrates; j++) { if (sband->bitrates[j].bitrate == rate) rates |= BIT(j); } } sta->sta.supp_rates[local->oper_channel->band] = rates; } if (params->ht_capa) ieee80211_ht_cap_ie_to_sta_ht_cap(sband, params->ht_capa, &sta->sta.ht_cap); if (ieee80211_vif_is_mesh(&sdata->vif)) { #ifdef CONFIG_MAC80211_MESH if (sdata->u.mesh.security & IEEE80211_MESH_SEC_SECURED) switch (params->plink_state) { case NL80211_PLINK_LISTEN: case NL80211_PLINK_ESTAB: case NL80211_PLINK_BLOCKED: sta->plink_state = params->plink_state; break; default: /* nothing */ break; } else switch (params->plink_action) { case PLINK_ACTION_OPEN: mesh_plink_open(sta); break; case PLINK_ACTION_BLOCK: mesh_plink_block(sta); break; } #endif } } static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev, u8 *mac, struct station_parameters *params) { struct ieee80211_local *local = wiphy_priv(wiphy); struct sta_info *sta; struct ieee80211_sub_if_data *sdata; int err; int layer2_update; if (params->vlan) { sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && sdata->vif.type != NL80211_IFTYPE_AP) return -EINVAL; } else sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (compare_ether_addr(mac, sdata->vif.addr) == 0) return -EINVAL; if (is_multicast_ether_addr(mac)) return -EINVAL; sta = sta_info_alloc(sdata, mac, GFP_KERNEL); if (!sta) return -ENOMEM; sta->flags = WLAN_STA_AUTH | WLAN_STA_ASSOC; sta_apply_parameters(local, sta, params); rate_control_rate_init(sta); layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN || sdata->vif.type == NL80211_IFTYPE_AP; err = sta_info_insert_rcu(sta); if (err) { rcu_read_unlock(); return err; } if (layer2_update) ieee80211_send_layer2_update(sta); rcu_read_unlock(); return 0; } static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev, u8 *mac) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_sub_if_data *sdata; sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (mac) return sta_info_destroy_addr_bss(sdata, mac); sta_info_flush(local, sdata); return 0; } static int ieee80211_change_station(struct wiphy *wiphy, struct net_device *dev, u8 *mac, struct station_parameters *params) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = wiphy_priv(wiphy); struct sta_info *sta; struct ieee80211_sub_if_data *vlansdata; rcu_read_lock(); sta = sta_info_get_bss(sdata, mac); if (!sta) { rcu_read_unlock(); return -ENOENT; } if (params->vlan && params->vlan != sta->sdata->dev) { vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); if (vlansdata->vif.type != NL80211_IFTYPE_AP_VLAN && vlansdata->vif.type != NL80211_IFTYPE_AP) { rcu_read_unlock(); return -EINVAL; } if (params->vlan->ieee80211_ptr->use_4addr) { if (vlansdata->u.vlan.sta) { rcu_read_unlock(); return -EBUSY; } rcu_assign_pointer(vlansdata->u.vlan.sta, sta); } sta->sdata = vlansdata; ieee80211_send_layer2_update(sta); } sta_apply_parameters(local, sta, params); rcu_read_unlock(); if (sdata->vif.type == NL80211_IFTYPE_STATION && params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)) ieee80211_recalc_ps(local, -1); return 0; } #ifdef CONFIG_MAC80211_MESH static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev, u8 *dst, u8 *next_hop) { struct 
ieee80211_sub_if_data *sdata; struct mesh_path *mpath; struct sta_info *sta; int err; sdata = IEEE80211_DEV_TO_SUB_IF(dev); rcu_read_lock(); sta = sta_info_get(sdata, next_hop); if (!sta) { rcu_read_unlock(); return -ENOENT; } err = mesh_path_add(dst, sdata); if (err) { rcu_read_unlock(); return err; } mpath = mesh_path_lookup(dst, sdata); if (!mpath) { rcu_read_unlock(); return -ENXIO; } mesh_path_fix_nexthop(mpath, sta); rcu_read_unlock(); return 0; } static int ieee80211_del_mpath(struct wiphy *wiphy, struct net_device *dev, u8 *dst) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (dst) return mesh_path_del(dst, sdata); mesh_path_flush(sdata); return 0; } static int ieee80211_change_mpath(struct wiphy *wiphy, struct net_device *dev, u8 *dst, u8 *next_hop) { struct ieee80211_sub_if_data *sdata; struct mesh_path *mpath; struct sta_info *sta; sdata = IEEE80211_DEV_TO_SUB_IF(dev); rcu_read_lock(); sta = sta_info_get(sdata, next_hop); if (!sta) { rcu_read_unlock(); return -ENOENT; } mpath = mesh_path_lookup(dst, sdata); if (!mpath) { rcu_read_unlock(); return -ENOENT; } mesh_path_fix_nexthop(mpath, sta); rcu_read_unlock(); return 0; } static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop, struct mpath_info *pinfo) { struct sta_info *next_hop_sta = rcu_dereference(mpath->next_hop); if (next_hop_sta) memcpy(next_hop, next_hop_sta->sta.addr, ETH_ALEN); else memset(next_hop, 0, ETH_ALEN); pinfo->generation = mesh_paths_generation; pinfo->filled = MPATH_INFO_FRAME_QLEN | MPATH_INFO_SN | MPATH_INFO_METRIC | MPATH_INFO_EXPTIME | MPATH_INFO_DISCOVERY_TIMEOUT | MPATH_INFO_DISCOVERY_RETRIES | MPATH_INFO_FLAGS; pinfo->frame_qlen = mpath->frame_queue.qlen; pinfo->sn = mpath->sn; pinfo->metric = mpath->metric; if (time_before(jiffies, mpath->exp_time)) pinfo->exptime = jiffies_to_msecs(mpath->exp_time - jiffies); pinfo->discovery_timeout = jiffies_to_msecs(mpath->discovery_timeout); pinfo->discovery_retries = mpath->discovery_retries; pinfo->flags = 0; if (mpath->flags & MESH_PATH_ACTIVE) pinfo->flags |= NL80211_MPATH_FLAG_ACTIVE; if (mpath->flags & MESH_PATH_RESOLVING) pinfo->flags |= NL80211_MPATH_FLAG_RESOLVING; if (mpath->flags & MESH_PATH_SN_VALID) pinfo->flags |= NL80211_MPATH_FLAG_SN_VALID; if (mpath->flags & MESH_PATH_FIXED) pinfo->flags |= NL80211_MPATH_FLAG_FIXED; if (mpath->flags & MESH_PATH_RESOLVING) pinfo->flags |= NL80211_MPATH_FLAG_RESOLVING; pinfo->flags = mpath->flags; } static int ieee80211_get_mpath(struct wiphy *wiphy, struct net_device *dev, u8 *dst, u8 *next_hop, struct mpath_info *pinfo) { struct ieee80211_sub_if_data *sdata; struct mesh_path *mpath; sdata = IEEE80211_DEV_TO_SUB_IF(dev); rcu_read_lock(); mpath = mesh_path_lookup(dst, sdata); if (!mpath) { rcu_read_unlock(); return -ENOENT; } memcpy(dst, mpath->dst, ETH_ALEN); mpath_set_pinfo(mpath, next_hop, pinfo); rcu_read_unlock(); return 0; } static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *dst, u8 *next_hop, struct mpath_info *pinfo) { struct ieee80211_sub_if_data *sdata; struct mesh_path *mpath; sdata = IEEE80211_DEV_TO_SUB_IF(dev); rcu_read_lock(); mpath = mesh_path_lookup_by_idx(idx, sdata); if (!mpath) { rcu_read_unlock(); return -ENOENT; } memcpy(dst, mpath->dst, ETH_ALEN); mpath_set_pinfo(mpath, next_hop, pinfo); rcu_read_unlock(); return 0; } static int ieee80211_get_mesh_config(struct wiphy *wiphy, struct net_device *dev, struct mesh_config *conf) { struct ieee80211_sub_if_data *sdata; sdata = IEEE80211_DEV_TO_SUB_IF(dev); memcpy(conf, 
&(sdata->u.mesh.mshcfg), sizeof(struct mesh_config)); return 0; } static inline bool _chg_mesh_attr(enum nl80211_meshconf_params parm, u32 mask) { return (mask >> (parm-1)) & 0x1; } static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh, const struct mesh_setup *setup) { u8 *new_ie; const u8 *old_ie; /* allocate information elements */ new_ie = NULL; old_ie = ifmsh->ie; if (setup->ie_len) { new_ie = kmemdup(setup->ie, setup->ie_len, GFP_KERNEL); if (!new_ie) return -ENOMEM; } ifmsh->ie_len = setup->ie_len; ifmsh->ie = new_ie; kfree(old_ie); /* now copy the rest of the setup parameters */ ifmsh->mesh_id_len = setup->mesh_id_len; memcpy(ifmsh->mesh_id, setup->mesh_id, ifmsh->mesh_id_len); ifmsh->mesh_pp_id = setup->path_sel_proto; ifmsh->mesh_pm_id = setup->path_metric; ifmsh->security = IEEE80211_MESH_SEC_NONE; if (setup->is_authenticated) ifmsh->security |= IEEE80211_MESH_SEC_AUTHED; if (setup->is_secure) ifmsh->security |= IEEE80211_MESH_SEC_SECURED; return 0; } static int ieee80211_update_mesh_config(struct wiphy *wiphy, struct net_device *dev, u32 mask, const struct mesh_config *nconf) { struct mesh_config *conf; struct ieee80211_sub_if_data *sdata; struct ieee80211_if_mesh *ifmsh; sdata = IEEE80211_DEV_TO_SUB_IF(dev); ifmsh = &sdata->u.mesh; /* Set the config options which we are interested in setting */ conf = &(sdata->u.mesh.mshcfg); if (_chg_mesh_attr(NL80211_MESHCONF_RETRY_TIMEOUT, mask)) conf->dot11MeshRetryTimeout = nconf->dot11MeshRetryTimeout; if (_chg_mesh_attr(NL80211_MESHCONF_CONFIRM_TIMEOUT, mask)) conf->dot11MeshConfirmTimeout = nconf->dot11MeshConfirmTimeout; if (_chg_mesh_attr(NL80211_MESHCONF_HOLDING_TIMEOUT, mask)) conf->dot11MeshHoldingTimeout = nconf->dot11MeshHoldingTimeout; if (_chg_mesh_attr(NL80211_MESHCONF_MAX_PEER_LINKS, mask)) conf->dot11MeshMaxPeerLinks = nconf->dot11MeshMaxPeerLinks; if (_chg_mesh_attr(NL80211_MESHCONF_MAX_RETRIES, mask)) conf->dot11MeshMaxRetries = nconf->dot11MeshMaxRetries; if (_chg_mesh_attr(NL80211_MESHCONF_TTL, mask)) conf->dot11MeshTTL = nconf->dot11MeshTTL; if (_chg_mesh_attr(NL80211_MESHCONF_ELEMENT_TTL, mask)) conf->dot11MeshTTL = nconf->element_ttl; if (_chg_mesh_attr(NL80211_MESHCONF_AUTO_OPEN_PLINKS, mask)) conf->auto_open_plinks = nconf->auto_open_plinks; if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, mask)) conf->dot11MeshHWMPmaxPREQretries = nconf->dot11MeshHWMPmaxPREQretries; if (_chg_mesh_attr(NL80211_MESHCONF_PATH_REFRESH_TIME, mask)) conf->path_refresh_time = nconf->path_refresh_time; if (_chg_mesh_attr(NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT, mask)) conf->min_discovery_timeout = nconf->min_discovery_timeout; if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT, mask)) conf->dot11MeshHWMPactivePathTimeout = nconf->dot11MeshHWMPactivePathTimeout; if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, mask)) conf->dot11MeshHWMPpreqMinInterval = nconf->dot11MeshHWMPpreqMinInterval; if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, mask)) conf->dot11MeshHWMPnetDiameterTraversalTime = nconf->dot11MeshHWMPnetDiameterTraversalTime; if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_ROOTMODE, mask)) { conf->dot11MeshHWMPRootMode = nconf->dot11MeshHWMPRootMode; ieee80211_mesh_root_setup(ifmsh); } return 0; } static int ieee80211_join_mesh(struct wiphy *wiphy, struct net_device *dev, const struct mesh_config *conf, const struct mesh_setup *setup) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; int err; memcpy(&ifmsh->mshcfg, conf, 
sizeof(struct mesh_config)); err = copy_mesh_setup(ifmsh, setup); if (err) return err; ieee80211_start_mesh(sdata); return 0; } static int ieee80211_leave_mesh(struct wiphy *wiphy, struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); ieee80211_stop_mesh(sdata); return 0; } #endif static int ieee80211_change_bss(struct wiphy *wiphy, struct net_device *dev, struct bss_parameters *params) { struct ieee80211_sub_if_data *sdata; u32 changed = 0; sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (params->use_cts_prot >= 0) { sdata->vif.bss_conf.use_cts_prot = params->use_cts_prot; changed |= BSS_CHANGED_ERP_CTS_PROT; } if (params->use_short_preamble >= 0) { sdata->vif.bss_conf.use_short_preamble = params->use_short_preamble; changed |= BSS_CHANGED_ERP_PREAMBLE; } if (!sdata->vif.bss_conf.use_short_slot && sdata->local->hw.conf.channel->band == IEEE80211_BAND_5GHZ) { sdata->vif.bss_conf.use_short_slot = true; changed |= BSS_CHANGED_ERP_SLOT; } if (params->use_short_slot_time >= 0) { sdata->vif.bss_conf.use_short_slot = params->use_short_slot_time; changed |= BSS_CHANGED_ERP_SLOT; } if (params->basic_rates) { int i, j; u32 rates = 0; struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_supported_band *sband = wiphy->bands[local->oper_channel->band]; for (i = 0; i < params->basic_rates_len; i++) { int rate = (params->basic_rates[i] & 0x7f) * 5; for (j = 0; j < sband->n_bitrates; j++) { if (sband->bitrates[j].bitrate == rate) rates |= BIT(j); } } sdata->vif.bss_conf.basic_rates = rates; changed |= BSS_CHANGED_BASIC_RATES; } if (params->ap_isolate >= 0) { if (params->ap_isolate) sdata->flags |= IEEE80211_SDATA_DONT_BRIDGE_PACKETS; else sdata->flags &= ~IEEE80211_SDATA_DONT_BRIDGE_PACKETS; } if (params->ht_opmode >= 0) { sdata->vif.bss_conf.ht_operation_mode = (u16) params->ht_opmode; changed |= BSS_CHANGED_HT; } ieee80211_bss_info_change_notify(sdata, changed); return 0; } static int ieee80211_set_txq_params(struct wiphy *wiphy, struct ieee80211_txq_params *params) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_tx_queue_params p; if (!local->ops->conf_tx) return -EOPNOTSUPP; memset(&p, 0, sizeof(p)); p.aifs = params->aifs; p.cw_max = params->cwmax; p.cw_min = params->cwmin; p.txop = params->txop; /* * Setting tx queue params disables u-apsd because it's only * called in master mode. 
*/ p.uapsd = false; if (drv_conf_tx(local, params->queue, &p)) { wiphy_debug(local->hw.wiphy, "failed to set TX queue parameters for queue %d\n", params->queue); return -EINVAL; } return 0; } static int ieee80211_set_channel(struct wiphy *wiphy, struct net_device *netdev, struct ieee80211_channel *chan, enum nl80211_channel_type channel_type) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_sub_if_data *sdata = NULL; struct ieee80211_channel *old_oper; enum nl80211_channel_type old_oper_type; enum nl80211_channel_type old_vif_oper_type= NL80211_CHAN_NO_HT; if (netdev) sdata = IEEE80211_DEV_TO_SUB_IF(netdev); switch (ieee80211_get_channel_mode(local, NULL)) { case CHAN_MODE_HOPPING: return -EBUSY; case CHAN_MODE_FIXED: if (local->oper_channel != chan) return -EBUSY; if (!sdata && local->_oper_channel_type == channel_type) return 0; break; case CHAN_MODE_UNDEFINED: break; } if (sdata) old_vif_oper_type = sdata->vif.bss_conf.channel_type; old_oper_type = local->_oper_channel_type; if (!ieee80211_set_channel_type(local, sdata, channel_type)) return -EBUSY; old_oper = local->oper_channel; local->oper_channel = chan; /* Update driver if changes were actually made. */ if ((old_oper != local->oper_channel) || (old_oper_type != local->_oper_channel_type)) ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); if ((sdata && sdata->vif.type != NL80211_IFTYPE_MONITOR) && old_vif_oper_type != sdata->vif.bss_conf.channel_type) ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT); return 0; } #ifdef CONFIG_PM static int ieee80211_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wowlan) { return __ieee80211_suspend(wiphy_priv(wiphy), wowlan); } static int ieee80211_resume(struct wiphy *wiphy) { return __ieee80211_resume(wiphy_priv(wiphy)); } #else #define ieee80211_suspend NULL #define ieee80211_resume NULL #endif static int ieee80211_scan(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_scan_request *req) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); switch (ieee80211_vif_type_p2p(&sdata->vif)) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_P2P_CLIENT: break; case NL80211_IFTYPE_P2P_GO: if (sdata->local->ops->hw_scan) break; /* * FIXME: implement NoA while scanning in software, * for now fall through to allow scanning only when * beaconing hasn't been configured yet */ case NL80211_IFTYPE_AP: if (sdata->u.ap.beacon) return -EOPNOTSUPP; break; default: return -EOPNOTSUPP; } return ieee80211_request_scan(sdata, req); } static int ieee80211_sched_scan_start(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_sched_scan_request *req) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (!sdata->local->ops->sched_scan_start) return -EOPNOTSUPP; return ieee80211_request_sched_scan_start(sdata, req); } static int ieee80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (!sdata->local->ops->sched_scan_stop) return -EOPNOTSUPP; return ieee80211_request_sched_scan_stop(sdata); } static int ieee80211_auth(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_auth_request *req) { return ieee80211_mgd_auth(IEEE80211_DEV_TO_SUB_IF(dev), req); } static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_assoc_request *req) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 
switch (ieee80211_get_channel_mode(local, sdata)) { case CHAN_MODE_HOPPING: return -EBUSY; case CHAN_MODE_FIXED: if (local->oper_channel == req->bss->channel) break; return -EBUSY; case CHAN_MODE_UNDEFINED: break; } return ieee80211_mgd_assoc(IEEE80211_DEV_TO_SUB_IF(dev), req); } static int ieee80211_deauth(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_deauth_request *req, void *cookie) { return ieee80211_mgd_deauth(IEEE80211_DEV_TO_SUB_IF(dev), req, cookie); } static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_disassoc_request *req, void *cookie) { return ieee80211_mgd_disassoc(IEEE80211_DEV_TO_SUB_IF(dev), req, cookie); } static int ieee80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ibss_params *params) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); switch (ieee80211_get_channel_mode(local, sdata)) { case CHAN_MODE_HOPPING: return -EBUSY; case CHAN_MODE_FIXED: if (!params->channel_fixed) return -EBUSY; if (local->oper_channel == params->channel) break; return -EBUSY; case CHAN_MODE_UNDEFINED: break; } return ieee80211_ibss_join(sdata, params); } static int ieee80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); return ieee80211_ibss_leave(sdata); } static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) { struct ieee80211_local *local = wiphy_priv(wiphy); int err; if (changed & WIPHY_PARAM_FRAG_THRESHOLD) { err = drv_set_frag_threshold(local, wiphy->frag_threshold); if (err) return err; } if (changed & WIPHY_PARAM_COVERAGE_CLASS) { err = drv_set_coverage_class(local, wiphy->coverage_class); if (err) return err; } if (changed & WIPHY_PARAM_RTS_THRESHOLD) { err = drv_set_rts_threshold(local, wiphy->rts_threshold); if (err) return err; } if (changed & WIPHY_PARAM_RETRY_SHORT) local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; if (changed & WIPHY_PARAM_RETRY_LONG) local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; if (changed & (WIPHY_PARAM_RETRY_SHORT | WIPHY_PARAM_RETRY_LONG)) ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_RETRY_LIMITS); return 0; } static int ieee80211_set_tx_power(struct wiphy *wiphy, enum nl80211_tx_power_setting type, int mbm) { struct ieee80211_local *local = wiphy_priv(wiphy); struct ieee80211_channel *chan = local->hw.conf.channel; u32 changes = 0; switch (type) { case NL80211_TX_POWER_AUTOMATIC: local->user_power_level = -1; break; case NL80211_TX_POWER_LIMITED: if (mbm < 0 || (mbm % 100)) return -EOPNOTSUPP; local->user_power_level = MBM_TO_DBM(mbm); break; case NL80211_TX_POWER_FIXED: if (mbm < 0 || (mbm % 100)) return -EOPNOTSUPP; /* TODO: move to cfg80211 when it knows the channel */ if (MBM_TO_DBM(mbm) > chan->max_power) return -EINVAL; local->user_power_level = MBM_TO_DBM(mbm); break; } ieee80211_hw_config(local, changes); return 0; } static int ieee80211_get_tx_power(struct wiphy *wiphy, int *dbm) { struct ieee80211_local *local = wiphy_priv(wiphy); *dbm = local->hw.conf.power_level; return 0; } static int ieee80211_set_wds_peer(struct wiphy *wiphy, struct net_device *dev, const u8 *addr) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); memcpy(&sdata->u.wds.remote_addr, addr, ETH_ALEN); return 0; } static void ieee80211_rfkill_poll(struct wiphy *wiphy) { struct ieee80211_local *local = wiphy_priv(wiphy); drv_rfkill_poll(local); } #ifdef CONFIG_NL80211_TESTMODE 
static int ieee80211_testmode_cmd(struct wiphy *wiphy, void *data, int len) { struct ieee80211_local *local = wiphy_priv(wiphy); if (!local->ops->testmode_cmd) return -EOPNOTSUPP; return local->ops->testmode_cmd(&local->hw, data, len); } #endif int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata, enum ieee80211_smps_mode smps_mode) { const u8 *ap; enum ieee80211_smps_mode old_req; int err; lockdep_assert_held(&sdata->u.mgd.mtx); old_req = sdata->u.mgd.req_smps; sdata->u.mgd.req_smps = smps_mode; if (old_req == smps_mode && smps_mode != IEEE80211_SMPS_AUTOMATIC) return 0; /* * If not associated, or current association is not an HT * association, there's no need to send an action frame. */ if (!sdata->u.mgd.associated || sdata->vif.bss_conf.channel_type == NL80211_CHAN_NO_HT) { mutex_lock(&sdata->local->iflist_mtx); ieee80211_recalc_smps(sdata->local); mutex_unlock(&sdata->local->iflist_mtx); return 0; } ap = sdata->u.mgd.associated->bssid; if (smps_mode == IEEE80211_SMPS_AUTOMATIC) { if (sdata->u.mgd.powersave) smps_mode = IEEE80211_SMPS_DYNAMIC; else smps_mode = IEEE80211_SMPS_OFF; } /* send SM PS frame to AP */ err = ieee80211_send_smps_action(sdata, smps_mode, ap, ap); if (err) sdata->u.mgd.req_smps = old_req; return err; } static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, bool enabled, int timeout) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); if (sdata->vif.type != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS)) return -EOPNOTSUPP; if (enabled == sdata->u.mgd.powersave && timeout == local->dynamic_ps_forced_timeout) return 0; sdata->u.mgd.powersave = enabled; local->dynamic_ps_forced_timeout = timeout; /* no change, but if automatic follow powersave */ mutex_lock(&sdata->u.mgd.mtx); __ieee80211_request_smps(sdata, sdata->u.mgd.req_smps); mutex_unlock(&sdata->u.mgd.mtx); if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS) ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); ieee80211_recalc_ps(local, -1); return 0; } static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy, struct net_device *dev, s32 rssi_thold, u32 rssi_hyst) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); struct ieee80211_vif *vif = &sdata->vif; struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; if (rssi_thold == bss_conf->cqm_rssi_thold && rssi_hyst == bss_conf->cqm_rssi_hyst) return 0; bss_conf->cqm_rssi_thold = rssi_thold; bss_conf->cqm_rssi_hyst = rssi_hyst; if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)) { if (sdata->vif.type != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; return 0; } /* tell the driver upon association, unless already associated */ if (sdata->u.mgd.associated) ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_CQM); return 0; } static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *dev, const u8 *addr, const struct cfg80211_bitrate_mask *mask) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); int i, ret; if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) { ret = drv_set_bitrate_mask(local, sdata, mask); if (ret) return ret; } for (i = 0; i < IEEE80211_NUM_BANDS; i++) sdata->rc_rateidx_mask[i] = mask->control[i].legacy; return 0; } static int ieee80211_remain_on_channel_hw(struct ieee80211_local *local, struct 
net_device *dev, struct ieee80211_channel *chan, enum nl80211_channel_type chantype, unsigned int duration, u64 *cookie) { int ret; u32 random_cookie; lockdep_assert_held(&local->mtx); if (local->hw_roc_cookie) return -EBUSY; /* must be nonzero */ random_cookie = random32() | 1; *cookie = random_cookie; local->hw_roc_dev = dev; local->hw_roc_cookie = random_cookie; local->hw_roc_channel = chan; local->hw_roc_channel_type = chantype; local->hw_roc_duration = duration; ret = drv_remain_on_channel(local, chan, chantype, duration); if (ret) { local->hw_roc_channel = NULL; local->hw_roc_cookie = 0; } return ret; } static int ieee80211_remain_on_channel(struct wiphy *wiphy, struct net_device *dev, struct ieee80211_channel *chan, enum nl80211_channel_type channel_type, unsigned int duration, u64 *cookie) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; if (local->ops->remain_on_channel) { int ret; mutex_lock(&local->mtx); ret = ieee80211_remain_on_channel_hw(local, dev, chan, channel_type, duration, cookie); local->hw_roc_for_tx = false; mutex_unlock(&local->mtx); return ret; } return ieee80211_wk_remain_on_channel(sdata, chan, channel_type, duration, cookie); } static int ieee80211_cancel_remain_on_channel_hw(struct ieee80211_local *local, u64 cookie) { int ret; lockdep_assert_held(&local->mtx); if (local->hw_roc_cookie != cookie) return -ENOENT; ret = drv_cancel_remain_on_channel(local); if (ret) return ret; local->hw_roc_cookie = 0; local->hw_roc_channel = NULL; ieee80211_recalc_idle(local); return 0; } static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy, struct net_device *dev, u64 cookie) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; if (local->ops->cancel_remain_on_channel) { int ret; mutex_lock(&local->mtx); ret = ieee80211_cancel_remain_on_channel_hw(local, cookie); mutex_unlock(&local->mtx); return ret; } return ieee80211_wk_cancel_remain_on_channel(sdata, cookie); } static enum work_done_result ieee80211_offchan_tx_done(struct ieee80211_work *wk, struct sk_buff *skb) { /* * Use the data embedded in the work struct for reporting * here so if the driver mangled the SKB before dropping * it (which is the only way we really should get here) * then we don't report mangled data. * * If there was no wait time, then by the time we get here * the driver will likely not have reported the status yet, * so in that case userspace will have to deal with it. 
*/ if (wk->offchan_tx.wait && !wk->offchan_tx.status) cfg80211_mgmt_tx_status(wk->sdata->dev, (unsigned long) wk->offchan_tx.frame, wk->ie, wk->ie_len, false, GFP_KERNEL); return WORK_DONE_DESTROY; } static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev, struct ieee80211_channel *chan, bool offchan, enum nl80211_channel_type channel_type, bool channel_type_valid, unsigned int wait, const u8 *buf, size_t len, u64 *cookie) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct sta_info *sta; struct ieee80211_work *wk; const struct ieee80211_mgmt *mgmt = (void *)buf; u32 flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX | IEEE80211_TX_CTL_REQ_TX_STATUS; bool is_offchan = false; /* Check that we are on the requested channel for transmission */ if (chan != local->tmp_channel && chan != local->oper_channel) is_offchan = true; if (channel_type_valid && (channel_type != local->tmp_channel_type && channel_type != local->_oper_channel_type)) is_offchan = true; if (chan == local->hw_roc_channel) { /* TODO: check channel type? */ is_offchan = false; flags |= IEEE80211_TX_CTL_TX_OFFCHAN; } if (is_offchan && !offchan) return -EBUSY; switch (sdata->vif.type) { case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_AP: case NL80211_IFTYPE_AP_VLAN: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_MESH_POINT: if (!ieee80211_is_action(mgmt->frame_control) || mgmt->u.action.category == WLAN_CATEGORY_PUBLIC) break; rcu_read_lock(); sta = sta_info_get(sdata, mgmt->da); rcu_read_unlock(); if (!sta) return -ENOLINK; break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: break; default: return -EOPNOTSUPP; } skb = dev_alloc_skb(local->hw.extra_tx_headroom + len); if (!skb) return -ENOMEM; skb_reserve(skb, local->hw.extra_tx_headroom); memcpy(skb_put(skb, len), buf, len); IEEE80211_SKB_CB(skb)->flags = flags; skb->dev = sdata->dev; *cookie = (unsigned long) skb; if (is_offchan && local->ops->offchannel_tx) { int ret; IEEE80211_SKB_CB(skb)->band = chan->band; mutex_lock(&local->mtx); if (local->hw_offchan_tx_cookie) { mutex_unlock(&local->mtx); return -EBUSY; } /* TODO: bitrate control, TX processing? */ ret = drv_offchannel_tx(local, skb, chan, channel_type, wait); if (ret == 0) local->hw_offchan_tx_cookie = *cookie; mutex_unlock(&local->mtx); /* * Allow driver to return 1 to indicate it wants to have the * frame transmitted with a remain_on_channel + regular TX. */ if (ret != 1) return ret; } if (is_offchan && local->ops->remain_on_channel) { unsigned int duration; int ret; mutex_lock(&local->mtx); /* * If the duration is zero, then the driver * wouldn't actually do anything. Set it to * 100 for now. * * TODO: cancel the off-channel operation * when we get the SKB's TX status and * the wait time was zero before. */ duration = 100; if (wait) duration = wait; ret = ieee80211_remain_on_channel_hw(local, dev, chan, channel_type, duration, cookie); if (ret) { kfree_skb(skb); mutex_unlock(&local->mtx); return ret; } local->hw_roc_for_tx = true; local->hw_roc_duration = wait; /* * queue up frame for transmission after * ieee80211_ready_on_channel call */ /* modify cookie to prevent API mismatches */ *cookie ^= 2; IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN; local->hw_roc_skb = skb; local->hw_roc_skb_for_status = skb; mutex_unlock(&local->mtx); return 0; } /* * Can transmit right away if the channel was the * right one and there's no wait involved... 
If a * wait is involved, we might otherwise not be on * the right channel for long enough! */ if (!is_offchan && !wait && !sdata->vif.bss_conf.idle) { ieee80211_tx_skb(sdata, skb); return 0; } wk = kzalloc(sizeof(*wk) + len, GFP_KERNEL); if (!wk) { kfree_skb(skb); return -ENOMEM; } wk->type = IEEE80211_WORK_OFFCHANNEL_TX; wk->chan = chan; wk->chan_type = channel_type; wk->sdata = sdata; wk->done = ieee80211_offchan_tx_done; wk->offchan_tx.frame = skb; wk->offchan_tx.wait = wait; wk->ie_len = len; memcpy(wk->ie, buf, len); ieee80211_add_work(wk); return 0; } static int ieee80211_mgmt_tx_cancel_wait(struct wiphy *wiphy, struct net_device *dev, u64 cookie) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct ieee80211_work *wk; int ret = -ENOENT; mutex_lock(&local->mtx); if (local->ops->offchannel_tx_cancel_wait && local->hw_offchan_tx_cookie == cookie) { ret = drv_offchannel_tx_cancel_wait(local); if (!ret) local->hw_offchan_tx_cookie = 0; mutex_unlock(&local->mtx); return ret; } if (local->ops->cancel_remain_on_channel) { cookie ^= 2; ret = ieee80211_cancel_remain_on_channel_hw(local, cookie); if (ret == 0) { kfree_skb(local->hw_roc_skb); local->hw_roc_skb = NULL; local->hw_roc_skb_for_status = NULL; } mutex_unlock(&local->mtx); return ret; } list_for_each_entry(wk, &local->work_list, list) { if (wk->sdata != sdata) continue; if (wk->type != IEEE80211_WORK_OFFCHANNEL_TX) continue; if (cookie != (unsigned long) wk->offchan_tx.frame) continue; wk->timeout = jiffies; ieee80211_queue_work(&local->hw, &local->work_work); ret = 0; break; } mutex_unlock(&local->mtx); return ret; } static void ieee80211_mgmt_frame_register(struct wiphy *wiphy, struct net_device *dev, u16 frame_type, bool reg) { struct ieee80211_local *local = wiphy_priv(wiphy); if (frame_type != (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_REQ)) return; if (reg) local->probe_req_reg++; else local->probe_req_reg--; ieee80211_queue_work(&local->hw, &local->reconfig_filter); } static int ieee80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant) { struct ieee80211_local *local = wiphy_priv(wiphy); if (local->started) return -EOPNOTSUPP; return drv_set_antenna(local, tx_ant, rx_ant); } static int ieee80211_get_antenna(struct wiphy *wiphy, u32 *tx_ant, u32 *rx_ant) { struct ieee80211_local *local = wiphy_priv(wiphy); return drv_get_antenna(local, tx_ant, rx_ant); } static int ieee80211_set_ringparam(struct wiphy *wiphy, u32 tx, u32 rx) { struct ieee80211_local *local = wiphy_priv(wiphy); return drv_set_ringparam(local, tx, rx); } static void ieee80211_get_ringparam(struct wiphy *wiphy, u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max) { struct ieee80211_local *local = wiphy_priv(wiphy); drv_get_ringparam(local, tx, tx_max, rx, rx_max); } struct cfg80211_ops mac80211_config_ops = { .add_virtual_intf = ieee80211_add_iface, .del_virtual_intf = ieee80211_del_iface, .change_virtual_intf = ieee80211_change_iface, .add_key = ieee80211_add_key, .del_key = ieee80211_del_key, .get_key = ieee80211_get_key, .set_default_key = ieee80211_config_default_key, .set_default_mgmt_key = ieee80211_config_default_mgmt_key, .add_beacon = ieee80211_add_beacon, .set_beacon = ieee80211_set_beacon, .del_beacon = ieee80211_del_beacon, .add_station = ieee80211_add_station, .del_station = ieee80211_del_station, .change_station = ieee80211_change_station, .get_station = ieee80211_get_station, .dump_station = ieee80211_dump_station, .dump_survey = ieee80211_dump_survey, #ifdef 
CONFIG_MAC80211_MESH .add_mpath = ieee80211_add_mpath, .del_mpath = ieee80211_del_mpath, .change_mpath = ieee80211_change_mpath, .get_mpath = ieee80211_get_mpath, .dump_mpath = ieee80211_dump_mpath, .update_mesh_config = ieee80211_update_mesh_config, .get_mesh_config = ieee80211_get_mesh_config, .join_mesh = ieee80211_join_mesh, .leave_mesh = ieee80211_leave_mesh, #endif .change_bss = ieee80211_change_bss, .set_txq_params = ieee80211_set_txq_params, .set_channel = ieee80211_set_channel, .suspend = ieee80211_suspend, .resume = ieee80211_resume, .scan = ieee80211_scan, .sched_scan_start = ieee80211_sched_scan_start, .sched_scan_stop = ieee80211_sched_scan_stop, .auth = ieee80211_auth, .assoc = ieee80211_assoc, .deauth = ieee80211_deauth, .disassoc = ieee80211_disassoc, .join_ibss = ieee80211_join_ibss, .leave_ibss = ieee80211_leave_ibss, .set_wiphy_params = ieee80211_set_wiphy_params, .set_tx_power = ieee80211_set_tx_power, .get_tx_power = ieee80211_get_tx_power, .set_wds_peer = ieee80211_set_wds_peer, .rfkill_poll = ieee80211_rfkill_poll, CFG80211_TESTMODE_CMD(ieee80211_testmode_cmd) .set_power_mgmt = ieee80211_set_power_mgmt, .set_bitrate_mask = ieee80211_set_bitrate_mask, .remain_on_channel = ieee80211_remain_on_channel, .cancel_remain_on_channel = ieee80211_cancel_remain_on_channel, .mgmt_tx = ieee80211_mgmt_tx, .mgmt_tx_cancel_wait = ieee80211_mgmt_tx_cancel_wait, .set_cqm_rssi_config = ieee80211_set_cqm_rssi_config, .mgmt_frame_register = ieee80211_mgmt_frame_register, .set_antenna = ieee80211_set_antenna, .get_antenna = ieee80211_get_antenna, .set_ringparam = ieee80211_set_ringparam, .get_ringparam = ieee80211_get_ringparam, };
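/*
 * Usage sketch (illustrative; "my_priv" is an assumed driver-private struct,
 * not something defined in this file): cfg80211 consumes an ops table like
 * mac80211_config_ops above through wiphy_new() and wiphy_register(); in
 * mac80211 that wiring is done by ieee80211_alloc_hw(), roughly:
 *
 *	struct wiphy *wiphy;
 *
 *	wiphy = wiphy_new(&mac80211_config_ops, sizeof(struct my_priv));
 *	if (!wiphy)
 *		return NULL;
 *	... set wiphy->bands, wiphy->interface_modes, wiphy->flags ...
 *	if (wiphy_register(wiphy)) {
 *		wiphy_free(wiphy);
 *		return NULL;
 *	}
 *	... nl80211 requests are now dispatched to the handlers above ...
 */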
gpl-2.0
MikeC84/mac_kernel_htc_flounder
drivers/crypto/omap-sham.c
2088
45688
/* * Cryptographic API. * * Support for OMAP SHA1/MD5 HW acceleration. * * Copyright (c) 2010 Nokia Corporation * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com> * Copyright (c) 2011 Texas Instruments Incorporated * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * Some ideas are from old omap-sha1-md5.c driver. */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/err.h> #include <linux/device.h> #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/scatterlist.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/omap-dma.h> #include <linux/pm_runtime.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/delay.h> #include <linux/crypto.h> #include <linux/cryptohash.h> #include <crypto/scatterwalk.h> #include <crypto/algapi.h> #include <crypto/sha.h> #include <crypto/hash.h> #include <crypto/internal/hash.h> #define SHA1_MD5_BLOCK_SIZE SHA1_BLOCK_SIZE #define MD5_DIGEST_SIZE 16 #define DST_MAXBURST 16 #define DMA_MIN (DST_MAXBURST * sizeof(u32)) #define SHA_REG_IDIGEST(dd, x) ((dd)->pdata->idigest_ofs + ((x)*0x04)) #define SHA_REG_DIN(dd, x) ((dd)->pdata->din_ofs + ((x) * 0x04)) #define SHA_REG_DIGCNT(dd) ((dd)->pdata->digcnt_ofs) #define SHA_REG_ODIGEST(x) (0x00 + ((x) * 0x04)) #define SHA_REG_CTRL 0x18 #define SHA_REG_CTRL_LENGTH (0xFFFFFFFF << 5) #define SHA_REG_CTRL_CLOSE_HASH (1 << 4) #define SHA_REG_CTRL_ALGO_CONST (1 << 3) #define SHA_REG_CTRL_ALGO (1 << 2) #define SHA_REG_CTRL_INPUT_READY (1 << 1) #define SHA_REG_CTRL_OUTPUT_READY (1 << 0) #define SHA_REG_REV(dd) ((dd)->pdata->rev_ofs) #define SHA_REG_MASK(dd) ((dd)->pdata->mask_ofs) #define SHA_REG_MASK_DMA_EN (1 << 3) #define SHA_REG_MASK_IT_EN (1 << 2) #define SHA_REG_MASK_SOFTRESET (1 << 1) #define SHA_REG_AUTOIDLE (1 << 0) #define SHA_REG_SYSSTATUS(dd) ((dd)->pdata->sysstatus_ofs) #define SHA_REG_SYSSTATUS_RESETDONE (1 << 0) #define SHA_REG_MODE 0x44 #define SHA_REG_MODE_HMAC_OUTER_HASH (1 << 7) #define SHA_REG_MODE_HMAC_KEY_PROC (1 << 5) #define SHA_REG_MODE_CLOSE_HASH (1 << 4) #define SHA_REG_MODE_ALGO_CONSTANT (1 << 3) #define SHA_REG_MODE_ALGO_MASK (3 << 1) #define SHA_REG_MODE_ALGO_MD5_128 (0 << 1) #define SHA_REG_MODE_ALGO_SHA1_160 (1 << 1) #define SHA_REG_MODE_ALGO_SHA2_224 (2 << 1) #define SHA_REG_MODE_ALGO_SHA2_256 (3 << 1) #define SHA_REG_LENGTH 0x48 #define SHA_REG_IRQSTATUS 0x118 #define SHA_REG_IRQSTATUS_CTX_RDY (1 << 3) #define SHA_REG_IRQSTATUS_PARTHASH_RDY (1 << 2) #define SHA_REG_IRQSTATUS_INPUT_RDY (1 << 1) #define SHA_REG_IRQSTATUS_OUTPUT_RDY (1 << 0) #define SHA_REG_IRQENA 0x11C #define SHA_REG_IRQENA_CTX_RDY (1 << 3) #define SHA_REG_IRQENA_PARTHASH_RDY (1 << 2) #define SHA_REG_IRQENA_INPUT_RDY (1 << 1) #define SHA_REG_IRQENA_OUTPUT_RDY (1 << 0) #define DEFAULT_TIMEOUT_INTERVAL HZ /* mostly device flags */ #define FLAGS_BUSY 0 #define FLAGS_FINAL 1 #define FLAGS_DMA_ACTIVE 2 #define FLAGS_OUTPUT_READY 3 #define FLAGS_INIT 4 #define FLAGS_CPU 5 #define FLAGS_DMA_READY 6 #define FLAGS_AUTO_XOR 7 #define FLAGS_BE32_SHA1 8 /* context flags */ #define FLAGS_FINUP 16 #define FLAGS_SG 17 #define FLAGS_MODE_SHIFT 18 #define FLAGS_MODE_MASK (SHA_REG_MODE_ALGO_MASK \ << (FLAGS_MODE_SHIFT - 1)) #define 
FLAGS_MODE_MD5 (SHA_REG_MODE_ALGO_MD5_128 \ << (FLAGS_MODE_SHIFT - 1)) #define FLAGS_MODE_SHA1 (SHA_REG_MODE_ALGO_SHA1_160 \ << (FLAGS_MODE_SHIFT - 1)) #define FLAGS_MODE_SHA224 (SHA_REG_MODE_ALGO_SHA2_224 \ << (FLAGS_MODE_SHIFT - 1)) #define FLAGS_MODE_SHA256 (SHA_REG_MODE_ALGO_SHA2_256 \ << (FLAGS_MODE_SHIFT - 1)) #define FLAGS_HMAC 20 #define FLAGS_ERROR 21 #define OP_UPDATE 1 #define OP_FINAL 2 #define OMAP_ALIGN_MASK (sizeof(u32)-1) #define OMAP_ALIGNED __attribute__((aligned(sizeof(u32)))) #define BUFLEN PAGE_SIZE struct omap_sham_dev; struct omap_sham_reqctx { struct omap_sham_dev *dd; unsigned long flags; unsigned long op; u8 digest[SHA256_DIGEST_SIZE] OMAP_ALIGNED; size_t digcnt; size_t bufcnt; size_t buflen; dma_addr_t dma_addr; /* walk state */ struct scatterlist *sg; struct scatterlist sgl; unsigned int offset; /* offset in current sg */ unsigned int total; /* total request */ u8 buffer[0] OMAP_ALIGNED; }; struct omap_sham_hmac_ctx { struct crypto_shash *shash; u8 ipad[SHA1_MD5_BLOCK_SIZE] OMAP_ALIGNED; u8 opad[SHA1_MD5_BLOCK_SIZE] OMAP_ALIGNED; }; struct omap_sham_ctx { struct omap_sham_dev *dd; unsigned long flags; /* fallback stuff */ struct crypto_shash *fallback; struct omap_sham_hmac_ctx base[0]; }; #define OMAP_SHAM_QUEUE_LENGTH 1 struct omap_sham_algs_info { struct ahash_alg *algs_list; unsigned int size; unsigned int registered; }; struct omap_sham_pdata { struct omap_sham_algs_info *algs_info; unsigned int algs_info_size; unsigned long flags; int digest_size; void (*copy_hash)(struct ahash_request *req, int out); void (*write_ctrl)(struct omap_sham_dev *dd, size_t length, int final, int dma); void (*trigger)(struct omap_sham_dev *dd, size_t length); int (*poll_irq)(struct omap_sham_dev *dd); irqreturn_t (*intr_hdlr)(int irq, void *dev_id); u32 odigest_ofs; u32 idigest_ofs; u32 din_ofs; u32 digcnt_ofs; u32 rev_ofs; u32 mask_ofs; u32 sysstatus_ofs; u32 major_mask; u32 major_shift; u32 minor_mask; u32 minor_shift; }; struct omap_sham_dev { struct list_head list; unsigned long phys_base; struct device *dev; void __iomem *io_base; int irq; spinlock_t lock; int err; unsigned int dma; struct dma_chan *dma_lch; struct tasklet_struct done_task; unsigned long flags; struct crypto_queue queue; struct ahash_request *req; const struct omap_sham_pdata *pdata; }; struct omap_sham_drv { struct list_head dev_list; spinlock_t lock; unsigned long flags; }; static struct omap_sham_drv sham = { .dev_list = LIST_HEAD_INIT(sham.dev_list), .lock = __SPIN_LOCK_UNLOCKED(sham.lock), }; static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset) { return __raw_readl(dd->io_base + offset); } static inline void omap_sham_write(struct omap_sham_dev *dd, u32 offset, u32 value) { __raw_writel(value, dd->io_base + offset); } static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address, u32 value, u32 mask) { u32 val; val = omap_sham_read(dd, address); val &= ~mask; val |= value; omap_sham_write(dd, address, val); } static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit) { unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL; while (!(omap_sham_read(dd, offset) & bit)) { if (time_is_before_jiffies(timeout)) return -ETIMEDOUT; } return 0; } static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); struct omap_sham_dev *dd = ctx->dd; u32 *hash = (u32 *)ctx->digest; int i; for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) { if (out) hash[i] = omap_sham_read(dd, 
SHA_REG_IDIGEST(dd, i)); else omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]); } } static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); struct omap_sham_dev *dd = ctx->dd; int i; if (ctx->flags & BIT(FLAGS_HMAC)) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req); struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); struct omap_sham_hmac_ctx *bctx = tctx->base; u32 *opad = (u32 *)bctx->opad; for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) { if (out) opad[i] = omap_sham_read(dd, SHA_REG_ODIGEST(i)); else omap_sham_write(dd, SHA_REG_ODIGEST(i), opad[i]); } } omap_sham_copy_hash_omap2(req, out); } static void omap_sham_copy_ready_hash(struct ahash_request *req) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); u32 *in = (u32 *)ctx->digest; u32 *hash = (u32 *)req->result; int i, d, big_endian = 0; if (!hash) return; switch (ctx->flags & FLAGS_MODE_MASK) { case FLAGS_MODE_MD5: d = MD5_DIGEST_SIZE / sizeof(u32); break; case FLAGS_MODE_SHA1: /* OMAP2 SHA1 is big endian */ if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags)) big_endian = 1; d = SHA1_DIGEST_SIZE / sizeof(u32); break; case FLAGS_MODE_SHA224: d = SHA224_DIGEST_SIZE / sizeof(u32); break; case FLAGS_MODE_SHA256: d = SHA256_DIGEST_SIZE / sizeof(u32); break; default: d = 0; } if (big_endian) for (i = 0; i < d; i++) hash[i] = be32_to_cpu(in[i]); else for (i = 0; i < d; i++) hash[i] = le32_to_cpu(in[i]); } static int omap_sham_hw_init(struct omap_sham_dev *dd) { pm_runtime_get_sync(dd->dev); if (!test_bit(FLAGS_INIT, &dd->flags)) { set_bit(FLAGS_INIT, &dd->flags); dd->err = 0; } return 0; } static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length, int final, int dma) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); u32 val = length << 5, mask; if (likely(ctx->digcnt)) omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt); omap_sham_write_mask(dd, SHA_REG_MASK(dd), SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0), SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN); /* * Setting ALGO_CONST only for the first iteration * and CLOSE_HASH only for the last one. */ if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1) val |= SHA_REG_CTRL_ALGO; if (!ctx->digcnt) val |= SHA_REG_CTRL_ALGO_CONST; if (final) val |= SHA_REG_CTRL_CLOSE_HASH; mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH | SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH; omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask); } static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length) { } static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd) { return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY); } static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset, u32 *value, int count) { for (; count--; value++, offset += 4) omap_sham_write(dd, offset, *value); } static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length, int final, int dma) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); u32 val, mask; /* * Setting ALGO_CONST only for the first iteration and * CLOSE_HASH only for the last one. Note that flags mode bits * correspond to algorithm encoding in mode register. 
*/ val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT - 1); if (!ctx->digcnt) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req); struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); struct omap_sham_hmac_ctx *bctx = tctx->base; val |= SHA_REG_MODE_ALGO_CONSTANT; if (ctx->flags & BIT(FLAGS_HMAC)) { val |= SHA_REG_MODE_HMAC_KEY_PROC; omap_sham_write_n(dd, SHA_REG_ODIGEST(0), (u32 *)bctx->ipad, SHA1_BLOCK_SIZE / sizeof(u32)); ctx->digcnt += SHA1_BLOCK_SIZE; } } if (final) { val |= SHA_REG_MODE_CLOSE_HASH; if (ctx->flags & BIT(FLAGS_HMAC)) val |= SHA_REG_MODE_HMAC_OUTER_HASH; } mask = SHA_REG_MODE_ALGO_CONSTANT | SHA_REG_MODE_CLOSE_HASH | SHA_REG_MODE_ALGO_MASK | SHA_REG_MODE_HMAC_OUTER_HASH | SHA_REG_MODE_HMAC_KEY_PROC; dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags); omap_sham_write_mask(dd, SHA_REG_MODE, val, mask); omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY); omap_sham_write_mask(dd, SHA_REG_MASK(dd), SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0), SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN); } static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length) { omap_sham_write(dd, SHA_REG_LENGTH, length); } static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd) { return omap_sham_wait(dd, SHA_REG_IRQSTATUS, SHA_REG_IRQSTATUS_INPUT_RDY); } static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, size_t length, int final) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); int count, len32; const u32 *buffer = (const u32 *)buf; dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", ctx->digcnt, length, final); dd->pdata->write_ctrl(dd, length, final, 0); dd->pdata->trigger(dd, length); /* should be non-zero before next lines to disable clocks later */ ctx->digcnt += length; if (dd->pdata->poll_irq(dd)) return -ETIMEDOUT; if (final) set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ set_bit(FLAGS_CPU, &dd->flags); len32 = DIV_ROUND_UP(length, sizeof(u32)); for (count = 0; count < len32; count++) omap_sham_write(dd, SHA_REG_DIN(dd, count), buffer[count]); return -EINPROGRESS; } static void omap_sham_dma_callback(void *param) { struct omap_sham_dev *dd = param; set_bit(FLAGS_DMA_READY, &dd->flags); tasklet_schedule(&dd->done_task); } static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, size_t length, int final, int is_sg) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); struct dma_async_tx_descriptor *tx; struct dma_slave_config cfg; int len32, ret; dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n", ctx->digcnt, length, final); memset(&cfg, 0, sizeof(cfg)); cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0); cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; cfg.dst_maxburst = DST_MAXBURST; ret = dmaengine_slave_config(dd->dma_lch, &cfg); if (ret) { pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret); return ret; } len32 = DIV_ROUND_UP(length, DMA_MIN) * DMA_MIN; if (is_sg) { /* * The SG entry passed in may not have the 'length' member * set correctly so use a local SG entry (sgl) with the * proper value for 'length' instead. If this is not done, * the dmaengine may try to DMA the incorrect amount of data. 
*/ sg_init_table(&ctx->sgl, 1); ctx->sgl.page_link = ctx->sg->page_link; ctx->sgl.offset = ctx->sg->offset; sg_dma_len(&ctx->sgl) = len32; sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg); tx = dmaengine_prep_slave_sg(dd->dma_lch, &ctx->sgl, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); } else { tx = dmaengine_prep_slave_single(dd->dma_lch, dma_addr, len32, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); } if (!tx) { dev_err(dd->dev, "prep_slave_sg/single() failed\n"); return -EINVAL; } tx->callback = omap_sham_dma_callback; tx->callback_param = dd; dd->pdata->write_ctrl(dd, length, final, 1); ctx->digcnt += length; if (final) set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */ set_bit(FLAGS_DMA_ACTIVE, &dd->flags); dmaengine_submit(tx); dma_async_issue_pending(dd->dma_lch); dd->pdata->trigger(dd, length); return -EINPROGRESS; } static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx, const u8 *data, size_t length) { size_t count = min(length, ctx->buflen - ctx->bufcnt); count = min(count, ctx->total); if (count <= 0) return 0; memcpy(ctx->buffer + ctx->bufcnt, data, count); ctx->bufcnt += count; return count; } static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx) { size_t count; while (ctx->sg) { count = omap_sham_append_buffer(ctx, sg_virt(ctx->sg) + ctx->offset, ctx->sg->length - ctx->offset); if (!count) break; ctx->offset += count; ctx->total -= count; if (ctx->offset == ctx->sg->length) { ctx->sg = sg_next(ctx->sg); if (ctx->sg) ctx->offset = 0; else ctx->total = 0; } } return 0; } static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd, struct omap_sham_reqctx *ctx, size_t length, int final) { int ret; ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen, DMA_TO_DEVICE); if (dma_mapping_error(dd->dev, ctx->dma_addr)) { dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen); return -EINVAL; } ctx->flags &= ~BIT(FLAGS_SG); ret = omap_sham_xmit_dma(dd, ctx->dma_addr, length, final, 0); if (ret != -EINPROGRESS) dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen, DMA_TO_DEVICE); return ret; } static int omap_sham_update_dma_slow(struct omap_sham_dev *dd) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); unsigned int final; size_t count; omap_sham_append_sg(ctx); final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total; dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n", ctx->bufcnt, ctx->digcnt, final); if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) { count = ctx->bufcnt; ctx->bufcnt = 0; return omap_sham_xmit_dma_map(dd, ctx, count, final); } return 0; } /* Start address alignment */ #define SG_AA(sg) (IS_ALIGNED(sg->offset, sizeof(u32))) /* SHA1 block size alignment */ #define SG_SA(sg) (IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE)) static int omap_sham_update_dma_start(struct omap_sham_dev *dd) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); unsigned int length, final, tail; struct scatterlist *sg; int ret; if (!ctx->total) return 0; if (ctx->bufcnt || ctx->offset) return omap_sham_update_dma_slow(dd); /* * Don't use the sg interface when the transfer size is less * than the number of elements in a DMA frame. Otherwise, * the dmaengine infrastructure will calculate that it needs * to transfer 0 frames which ultimately fails. 
*/ if (ctx->total < (DST_MAXBURST * sizeof(u32))) return omap_sham_update_dma_slow(dd); dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n", ctx->digcnt, ctx->bufcnt, ctx->total); sg = ctx->sg; if (!SG_AA(sg)) return omap_sham_update_dma_slow(dd); if (!sg_is_last(sg) && !SG_SA(sg)) /* size is not SHA1_BLOCK_SIZE aligned */ return omap_sham_update_dma_slow(dd); length = min(ctx->total, sg->length); if (sg_is_last(sg)) { if (!(ctx->flags & BIT(FLAGS_FINUP))) { /* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */ tail = length & (SHA1_MD5_BLOCK_SIZE - 1); /* without finup() we need one block to close hash */ if (!tail) tail = SHA1_MD5_BLOCK_SIZE; length -= tail; } } if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) { dev_err(dd->dev, "dma_map_sg error\n"); return -EINVAL; } ctx->flags |= BIT(FLAGS_SG); ctx->total -= length; ctx->offset = length; /* offset where to start slow */ final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total; ret = omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final, 1); if (ret != -EINPROGRESS) dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); return ret; } static int omap_sham_update_cpu(struct omap_sham_dev *dd) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); int bufcnt; omap_sham_append_sg(ctx); bufcnt = ctx->bufcnt; ctx->bufcnt = 0; return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, 1); } static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); dmaengine_terminate_all(dd->dma_lch); if (ctx->flags & BIT(FLAGS_SG)) { dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); if (ctx->sg->length == ctx->offset) { ctx->sg = sg_next(ctx->sg); if (ctx->sg) ctx->offset = 0; } } else { dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen, DMA_TO_DEVICE); } return 0; } static int omap_sham_init(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); struct omap_sham_reqctx *ctx = ahash_request_ctx(req); struct omap_sham_dev *dd = NULL, *tmp; spin_lock_bh(&sham.lock); if (!tctx->dd) { list_for_each_entry(tmp, &sham.dev_list, list) { dd = tmp; break; } tctx->dd = dd; } else { dd = tctx->dd; } spin_unlock_bh(&sham.lock); ctx->dd = dd; ctx->flags = 0; dev_dbg(dd->dev, "init: digest size: %d\n", crypto_ahash_digestsize(tfm)); switch (crypto_ahash_digestsize(tfm)) { case MD5_DIGEST_SIZE: ctx->flags |= FLAGS_MODE_MD5; break; case SHA1_DIGEST_SIZE: ctx->flags |= FLAGS_MODE_SHA1; break; case SHA224_DIGEST_SIZE: ctx->flags |= FLAGS_MODE_SHA224; break; case SHA256_DIGEST_SIZE: ctx->flags |= FLAGS_MODE_SHA256; break; } ctx->bufcnt = 0; ctx->digcnt = 0; ctx->buflen = BUFLEN; if (tctx->flags & BIT(FLAGS_HMAC)) { if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) { struct omap_sham_hmac_ctx *bctx = tctx->base; memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE); ctx->bufcnt = SHA1_MD5_BLOCK_SIZE; } ctx->flags |= BIT(FLAGS_HMAC); } return 0; } static int omap_sham_update_req(struct omap_sham_dev *dd) { struct ahash_request *req = dd->req; struct omap_sham_reqctx *ctx = ahash_request_ctx(req); int err; dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n", ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0); if (ctx->flags & BIT(FLAGS_CPU)) err = omap_sham_update_cpu(dd); else err = omap_sham_update_dma_start(dd); /* wait for dma completion before can take more data */ dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt); return err; } static int omap_sham_final_req(struct omap_sham_dev 
*dd) { struct ahash_request *req = dd->req; struct omap_sham_reqctx *ctx = ahash_request_ctx(req); int err = 0, use_dma = 1; if (ctx->bufcnt <= DMA_MIN) /* faster to handle last block with cpu */ use_dma = 0; if (use_dma) err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1); else err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1); ctx->bufcnt = 0; dev_dbg(dd->dev, "final_req: err: %d\n", err); return err; } static int omap_sham_finish_hmac(struct ahash_request *req) { struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); struct omap_sham_hmac_ctx *bctx = tctx->base; int bs = crypto_shash_blocksize(bctx->shash); int ds = crypto_shash_digestsize(bctx->shash); struct { struct shash_desc shash; char ctx[crypto_shash_descsize(bctx->shash)]; } desc; desc.shash.tfm = bctx->shash; desc.shash.flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */ return crypto_shash_init(&desc.shash) ?: crypto_shash_update(&desc.shash, bctx->opad, bs) ?: crypto_shash_finup(&desc.shash, req->result, ds, req->result); } static int omap_sham_finish(struct ahash_request *req) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); struct omap_sham_dev *dd = ctx->dd; int err = 0; if (ctx->digcnt) { omap_sham_copy_ready_hash(req); if ((ctx->flags & BIT(FLAGS_HMAC)) && !test_bit(FLAGS_AUTO_XOR, &dd->flags)) err = omap_sham_finish_hmac(req); } dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt); return err; } static void omap_sham_finish_req(struct ahash_request *req, int err) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); struct omap_sham_dev *dd = ctx->dd; if (!err) { dd->pdata->copy_hash(req, 1); if (test_bit(FLAGS_FINAL, &dd->flags)) err = omap_sham_finish(req); } else { ctx->flags |= BIT(FLAGS_ERROR); } /* atomic operation is not needed here */ dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) | BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY)); pm_runtime_put(dd->dev); if (req->base.complete) req->base.complete(&req->base, err); /* handle new request */ tasklet_schedule(&dd->done_task); } static int omap_sham_handle_queue(struct omap_sham_dev *dd, struct ahash_request *req) { struct crypto_async_request *async_req, *backlog; struct omap_sham_reqctx *ctx; unsigned long flags; int err = 0, ret = 0; spin_lock_irqsave(&dd->lock, flags); if (req) ret = ahash_enqueue_request(&dd->queue, req); if (test_bit(FLAGS_BUSY, &dd->flags)) { spin_unlock_irqrestore(&dd->lock, flags); return ret; } backlog = crypto_get_backlog(&dd->queue); async_req = crypto_dequeue_request(&dd->queue); if (async_req) set_bit(FLAGS_BUSY, &dd->flags); spin_unlock_irqrestore(&dd->lock, flags); if (!async_req) return ret; if (backlog) backlog->complete(backlog, -EINPROGRESS); req = ahash_request_cast(async_req); dd->req = req; ctx = ahash_request_ctx(req); dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", ctx->op, req->nbytes); err = omap_sham_hw_init(dd); if (err) goto err1; if (ctx->digcnt) /* request has changed - restore hash */ dd->pdata->copy_hash(req, 0); if (ctx->op == OP_UPDATE) { err = omap_sham_update_req(dd); if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP))) /* no final() after finup() */ err = omap_sham_final_req(dd); } else if (ctx->op == OP_FINAL) { err = omap_sham_final_req(dd); } err1: if (err != -EINPROGRESS) /* done_task will not finish it, so do it here */ omap_sham_finish_req(req, err); dev_dbg(dd->dev, "exit, err: %d\n", err); return ret; } static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) { struct omap_sham_reqctx *ctx = 
ahash_request_ctx(req); struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); struct omap_sham_dev *dd = tctx->dd; ctx->op = op; return omap_sham_handle_queue(dd, req); } static int omap_sham_update(struct ahash_request *req) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); if (!req->nbytes) return 0; ctx->total = req->nbytes; ctx->sg = req->src; ctx->offset = 0; if (ctx->flags & BIT(FLAGS_FINUP)) { if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) { /* * OMAP HW accel works only with buffers >= 9 * will switch to bypass in final() * final has the same request and data */ omap_sham_append_sg(ctx); return 0; } else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) { /* * faster to use CPU for short transfers */ ctx->flags |= BIT(FLAGS_CPU); } } else if (ctx->bufcnt + ctx->total < ctx->buflen) { omap_sham_append_sg(ctx); return 0; } return omap_sham_enqueue(req, OP_UPDATE); } static int omap_sham_shash_digest(struct crypto_shash *shash, u32 flags, const u8 *data, unsigned int len, u8 *out) { struct { struct shash_desc shash; char ctx[crypto_shash_descsize(shash)]; } desc; desc.shash.tfm = shash; desc.shash.flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP; return crypto_shash_digest(&desc.shash, data, len, out); } static int omap_sham_final_shash(struct ahash_request *req) { struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); struct omap_sham_reqctx *ctx = ahash_request_ctx(req); return omap_sham_shash_digest(tctx->fallback, req->base.flags, ctx->buffer, ctx->bufcnt, req->result); } static int omap_sham_final(struct ahash_request *req) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); ctx->flags |= BIT(FLAGS_FINUP); if (ctx->flags & BIT(FLAGS_ERROR)) return 0; /* uncompleted hash is not needed */ /* OMAP HW accel works only with buffers >= 9 */ /* HMAC is always >= 9 because ipad == block size */ if ((ctx->digcnt + ctx->bufcnt) < 9) return omap_sham_final_shash(req); else if (ctx->bufcnt) return omap_sham_enqueue(req, OP_FINAL); /* copy ready hash (+ finalize hmac) */ return omap_sham_finish(req); } static int omap_sham_finup(struct ahash_request *req) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); int err1, err2; ctx->flags |= BIT(FLAGS_FINUP); err1 = omap_sham_update(req); if (err1 == -EINPROGRESS || err1 == -EBUSY) return err1; /* * final() has to be always called to cleanup resources * even if udpate() failed, except EINPROGRESS */ err2 = omap_sham_final(req); return err1 ?: err2; } static int omap_sham_digest(struct ahash_request *req) { return omap_sham_init(req) ?: omap_sham_finup(req); } static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm); struct omap_sham_hmac_ctx *bctx = tctx->base; int bs = crypto_shash_blocksize(bctx->shash); int ds = crypto_shash_digestsize(bctx->shash); struct omap_sham_dev *dd = NULL, *tmp; int err, i; spin_lock_bh(&sham.lock); if (!tctx->dd) { list_for_each_entry(tmp, &sham.dev_list, list) { dd = tmp; break; } tctx->dd = dd; } else { dd = tctx->dd; } spin_unlock_bh(&sham.lock); err = crypto_shash_setkey(tctx->fallback, key, keylen); if (err) return err; if (keylen > bs) { err = omap_sham_shash_digest(bctx->shash, crypto_shash_get_flags(bctx->shash), key, keylen, bctx->ipad); if (err) return err; keylen = ds; } else { memcpy(bctx->ipad, key, keylen); } memset(bctx->ipad + keylen, 0, bs - keylen); if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) { memcpy(bctx->opad, bctx->ipad, bs); for (i = 0; i < bs; i++) { bctx->ipad[i] ^= 0x36; bctx->opad[i] 
^= 0x5c; } } return err; } static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) { struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm); const char *alg_name = crypto_tfm_alg_name(tfm); /* Allocate a fallback and abort if it failed. */ tctx->fallback = crypto_alloc_shash(alg_name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(tctx->fallback)) { pr_err("omap-sham: fallback driver '%s' " "could not be loaded.\n", alg_name); return PTR_ERR(tctx->fallback); } crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct omap_sham_reqctx) + BUFLEN); if (alg_base) { struct omap_sham_hmac_ctx *bctx = tctx->base; tctx->flags |= BIT(FLAGS_HMAC); bctx->shash = crypto_alloc_shash(alg_base, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(bctx->shash)) { pr_err("omap-sham: base driver '%s' " "could not be loaded.\n", alg_base); crypto_free_shash(tctx->fallback); return PTR_ERR(bctx->shash); } } return 0; } static int omap_sham_cra_init(struct crypto_tfm *tfm) { return omap_sham_cra_init_alg(tfm, NULL); } static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm) { return omap_sham_cra_init_alg(tfm, "sha1"); } static int omap_sham_cra_sha224_init(struct crypto_tfm *tfm) { return omap_sham_cra_init_alg(tfm, "sha224"); } static int omap_sham_cra_sha256_init(struct crypto_tfm *tfm) { return omap_sham_cra_init_alg(tfm, "sha256"); } static int omap_sham_cra_md5_init(struct crypto_tfm *tfm) { return omap_sham_cra_init_alg(tfm, "md5"); } static void omap_sham_cra_exit(struct crypto_tfm *tfm) { struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm); crypto_free_shash(tctx->fallback); tctx->fallback = NULL; if (tctx->flags & BIT(FLAGS_HMAC)) { struct omap_sham_hmac_ctx *bctx = tctx->base; crypto_free_shash(bctx->shash); } } static struct ahash_alg algs_sha1_md5[] = { { .init = omap_sham_init, .update = omap_sham_update, .final = omap_sham_final, .finup = omap_sham_finup, .digest = omap_sham_digest, .halg.digestsize = SHA1_DIGEST_SIZE, .halg.base = { .cra_name = "sha1", .cra_driver_name = "omap-sha1", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_init, .cra_exit = omap_sham_cra_exit, } }, { .init = omap_sham_init, .update = omap_sham_update, .final = omap_sham_final, .finup = omap_sham_finup, .digest = omap_sham_digest, .halg.digestsize = MD5_DIGEST_SIZE, .halg.base = { .cra_name = "md5", .cra_driver_name = "omap-md5", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx), .cra_alignmask = OMAP_ALIGN_MASK, .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_init, .cra_exit = omap_sham_cra_exit, } }, { .init = omap_sham_init, .update = omap_sham_update, .final = omap_sham_final, .finup = omap_sham_finup, .digest = omap_sham_digest, .setkey = omap_sham_setkey, .halg.digestsize = SHA1_DIGEST_SIZE, .halg.base = { .cra_name = "hmac(sha1)", .cra_driver_name = "omap-hmac-sha1", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx) + sizeof(struct omap_sham_hmac_ctx), .cra_alignmask = OMAP_ALIGN_MASK, .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_sha1_init, 
.cra_exit = omap_sham_cra_exit, } }, { .init = omap_sham_init, .update = omap_sham_update, .final = omap_sham_final, .finup = omap_sham_finup, .digest = omap_sham_digest, .setkey = omap_sham_setkey, .halg.digestsize = MD5_DIGEST_SIZE, .halg.base = { .cra_name = "hmac(md5)", .cra_driver_name = "omap-hmac-md5", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx) + sizeof(struct omap_sham_hmac_ctx), .cra_alignmask = OMAP_ALIGN_MASK, .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_md5_init, .cra_exit = omap_sham_cra_exit, } } }; /* OMAP4 has some algs in addition to what OMAP2 has */ static struct ahash_alg algs_sha224_sha256[] = { { .init = omap_sham_init, .update = omap_sham_update, .final = omap_sham_final, .finup = omap_sham_finup, .digest = omap_sham_digest, .halg.digestsize = SHA224_DIGEST_SIZE, .halg.base = { .cra_name = "sha224", .cra_driver_name = "omap-sha224", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA224_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_init, .cra_exit = omap_sham_cra_exit, } }, { .init = omap_sham_init, .update = omap_sham_update, .final = omap_sham_final, .finup = omap_sham_finup, .digest = omap_sham_digest, .halg.digestsize = SHA256_DIGEST_SIZE, .halg.base = { .cra_name = "sha256", .cra_driver_name = "omap-sha256", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_init, .cra_exit = omap_sham_cra_exit, } }, { .init = omap_sham_init, .update = omap_sham_update, .final = omap_sham_final, .finup = omap_sham_finup, .digest = omap_sham_digest, .setkey = omap_sham_setkey, .halg.digestsize = SHA224_DIGEST_SIZE, .halg.base = { .cra_name = "hmac(sha224)", .cra_driver_name = "omap-hmac-sha224", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA224_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx) + sizeof(struct omap_sham_hmac_ctx), .cra_alignmask = OMAP_ALIGN_MASK, .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_sha224_init, .cra_exit = omap_sham_cra_exit, } }, { .init = omap_sham_init, .update = omap_sham_update, .final = omap_sham_final, .finup = omap_sham_finup, .digest = omap_sham_digest, .setkey = omap_sham_setkey, .halg.digestsize = SHA256_DIGEST_SIZE, .halg.base = { .cra_name = "hmac(sha256)", .cra_driver_name = "omap-hmac-sha256", .cra_priority = 100, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx) + sizeof(struct omap_sham_hmac_ctx), .cra_alignmask = OMAP_ALIGN_MASK, .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_sha256_init, .cra_exit = omap_sham_cra_exit, } }, }; static void omap_sham_done_task(unsigned long data) { struct omap_sham_dev *dd = (struct omap_sham_dev *)data; int err = 0; if (!test_bit(FLAGS_BUSY, &dd->flags)) { omap_sham_handle_queue(dd, NULL); return; } if (test_bit(FLAGS_CPU, &dd->flags)) { if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) goto finish; } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) { if 
(test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) { omap_sham_update_dma_stop(dd); if (dd->err) { err = dd->err; goto finish; } } if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) { /* hash or semi-hash ready */ clear_bit(FLAGS_DMA_READY, &dd->flags); err = omap_sham_update_dma_start(dd); if (err != -EINPROGRESS) goto finish; } } return; finish: dev_dbg(dd->dev, "update done: err: %d\n", err); /* finish curent request */ omap_sham_finish_req(dd->req, err); } static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd) { if (!test_bit(FLAGS_BUSY, &dd->flags)) { dev_warn(dd->dev, "Interrupt when no active requests.\n"); } else { set_bit(FLAGS_OUTPUT_READY, &dd->flags); tasklet_schedule(&dd->done_task); } return IRQ_HANDLED; } static irqreturn_t omap_sham_irq_omap2(int irq, void *dev_id) { struct omap_sham_dev *dd = dev_id; if (unlikely(test_bit(FLAGS_FINAL, &dd->flags))) /* final -> allow device to go to power-saving mode */ omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH); omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY, SHA_REG_CTRL_OUTPUT_READY); omap_sham_read(dd, SHA_REG_CTRL); return omap_sham_irq_common(dd); } static irqreturn_t omap_sham_irq_omap4(int irq, void *dev_id) { struct omap_sham_dev *dd = dev_id; omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN); return omap_sham_irq_common(dd); } static struct omap_sham_algs_info omap_sham_algs_info_omap2[] = { { .algs_list = algs_sha1_md5, .size = ARRAY_SIZE(algs_sha1_md5), }, }; static const struct omap_sham_pdata omap_sham_pdata_omap2 = { .algs_info = omap_sham_algs_info_omap2, .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap2), .flags = BIT(FLAGS_BE32_SHA1), .digest_size = SHA1_DIGEST_SIZE, .copy_hash = omap_sham_copy_hash_omap2, .write_ctrl = omap_sham_write_ctrl_omap2, .trigger = omap_sham_trigger_omap2, .poll_irq = omap_sham_poll_irq_omap2, .intr_hdlr = omap_sham_irq_omap2, .idigest_ofs = 0x00, .din_ofs = 0x1c, .digcnt_ofs = 0x14, .rev_ofs = 0x5c, .mask_ofs = 0x60, .sysstatus_ofs = 0x64, .major_mask = 0xf0, .major_shift = 4, .minor_mask = 0x0f, .minor_shift = 0, }; #ifdef CONFIG_OF static struct omap_sham_algs_info omap_sham_algs_info_omap4[] = { { .algs_list = algs_sha1_md5, .size = ARRAY_SIZE(algs_sha1_md5), }, { .algs_list = algs_sha224_sha256, .size = ARRAY_SIZE(algs_sha224_sha256), }, }; static const struct omap_sham_pdata omap_sham_pdata_omap4 = { .algs_info = omap_sham_algs_info_omap4, .algs_info_size = ARRAY_SIZE(omap_sham_algs_info_omap4), .flags = BIT(FLAGS_AUTO_XOR), .digest_size = SHA256_DIGEST_SIZE, .copy_hash = omap_sham_copy_hash_omap4, .write_ctrl = omap_sham_write_ctrl_omap4, .trigger = omap_sham_trigger_omap4, .poll_irq = omap_sham_poll_irq_omap4, .intr_hdlr = omap_sham_irq_omap4, .idigest_ofs = 0x020, .din_ofs = 0x080, .digcnt_ofs = 0x040, .rev_ofs = 0x100, .mask_ofs = 0x110, .sysstatus_ofs = 0x114, .major_mask = 0x0700, .major_shift = 8, .minor_mask = 0x003f, .minor_shift = 0, }; static const struct of_device_id omap_sham_of_match[] = { { .compatible = "ti,omap2-sham", .data = &omap_sham_pdata_omap2, }, { .compatible = "ti,omap4-sham", .data = &omap_sham_pdata_omap4, }, {}, }; MODULE_DEVICE_TABLE(of, omap_sham_of_match); static int omap_sham_get_res_of(struct omap_sham_dev *dd, struct device *dev, struct resource *res) { struct device_node *node = dev->of_node; const struct of_device_id *match; int err = 0; match = of_match_device(of_match_ptr(omap_sham_of_match), dev); if (!match) { dev_err(dev, "no compatible OF match\n"); err = -EINVAL; goto err; 
} err = of_address_to_resource(node, 0, res); if (err < 0) { dev_err(dev, "can't translate OF node address\n"); err = -EINVAL; goto err; } dd->irq = of_irq_to_resource(node, 0, NULL); if (!dd->irq) { dev_err(dev, "can't translate OF irq value\n"); err = -EINVAL; goto err; } dd->dma = -1; /* Dummy value that's unused */ dd->pdata = match->data; err: return err; } #else static const struct of_device_id omap_sham_of_match[] = { {}, }; static int omap_sham_get_res_of(struct omap_sham_dev *dd, struct device *dev, struct resource *res) { return -EINVAL; } #endif static int omap_sham_get_res_pdev(struct omap_sham_dev *dd, struct platform_device *pdev, struct resource *res) { struct device *dev = &pdev->dev; struct resource *r; int err = 0; /* Get the base address */ r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) { dev_err(dev, "no MEM resource info\n"); err = -ENODEV; goto err; } memcpy(res, r, sizeof(*res)); /* Get the IRQ */ dd->irq = platform_get_irq(pdev, 0); if (dd->irq < 0) { dev_err(dev, "no IRQ resource info\n"); err = dd->irq; goto err; } /* Get the DMA */ r = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (!r) { dev_err(dev, "no DMA resource info\n"); err = -ENODEV; goto err; } dd->dma = r->start; /* Only OMAP2/3 can be non-DT */ dd->pdata = &omap_sham_pdata_omap2; err: return err; } static int omap_sham_probe(struct platform_device *pdev) { struct omap_sham_dev *dd; struct device *dev = &pdev->dev; struct resource res; dma_cap_mask_t mask; int err, i, j; u32 rev; dd = kzalloc(sizeof(struct omap_sham_dev), GFP_KERNEL); if (dd == NULL) { dev_err(dev, "unable to alloc data struct.\n"); err = -ENOMEM; goto data_err; } dd->dev = dev; platform_set_drvdata(pdev, dd); INIT_LIST_HEAD(&dd->list); spin_lock_init(&dd->lock); tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd); crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH); err = (dev->of_node) ? 
omap_sham_get_res_of(dd, dev, &res) : omap_sham_get_res_pdev(dd, pdev, &res); if (err) goto res_err; dd->io_base = devm_request_and_ioremap(dev, &res); if (!dd->io_base) { dev_err(dev, "can't ioremap\n"); err = -ENOMEM; goto res_err; } dd->phys_base = res.start; err = request_irq(dd->irq, dd->pdata->intr_hdlr, IRQF_TRIGGER_LOW, dev_name(dev), dd); if (err) { dev_err(dev, "unable to request irq.\n"); goto res_err; } dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); dd->dma_lch = dma_request_slave_channel_compat(mask, omap_dma_filter_fn, &dd->dma, dev, "rx"); if (!dd->dma_lch) { dev_err(dev, "unable to obtain RX DMA engine channel %u\n", dd->dma); err = -ENXIO; goto dma_err; } dd->flags |= dd->pdata->flags; pm_runtime_enable(dev); pm_runtime_get_sync(dev); rev = omap_sham_read(dd, SHA_REG_REV(dd)); pm_runtime_put_sync(&pdev->dev); dev_info(dev, "hw accel on OMAP rev %u.%u\n", (rev & dd->pdata->major_mask) >> dd->pdata->major_shift, (rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift); spin_lock(&sham.lock); list_add_tail(&dd->list, &sham.dev_list); spin_unlock(&sham.lock); for (i = 0; i < dd->pdata->algs_info_size; i++) { for (j = 0; j < dd->pdata->algs_info[i].size; j++) { err = crypto_register_ahash( &dd->pdata->algs_info[i].algs_list[j]); if (err) goto err_algs; dd->pdata->algs_info[i].registered++; } } return 0; err_algs: for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) crypto_unregister_ahash( &dd->pdata->algs_info[i].algs_list[j]); pm_runtime_disable(dev); dma_release_channel(dd->dma_lch); dma_err: free_irq(dd->irq, dd); res_err: kfree(dd); dd = NULL; data_err: dev_err(dev, "initialization failed.\n"); return err; } static int omap_sham_remove(struct platform_device *pdev) { static struct omap_sham_dev *dd; int i, j; dd = platform_get_drvdata(pdev); if (!dd) return -ENODEV; spin_lock(&sham.lock); list_del(&dd->list); spin_unlock(&sham.lock); for (i = dd->pdata->algs_info_size - 1; i >= 0; i--) for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) crypto_unregister_ahash( &dd->pdata->algs_info[i].algs_list[j]); tasklet_kill(&dd->done_task); pm_runtime_disable(&pdev->dev); dma_release_channel(dd->dma_lch); free_irq(dd->irq, dd); kfree(dd); dd = NULL; return 0; } #ifdef CONFIG_PM_SLEEP static int omap_sham_suspend(struct device *dev) { pm_runtime_put_sync(dev); return 0; } static int omap_sham_resume(struct device *dev) { pm_runtime_get_sync(dev); return 0; } #endif static const struct dev_pm_ops omap_sham_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(omap_sham_suspend, omap_sham_resume) }; static struct platform_driver omap_sham_driver = { .probe = omap_sham_probe, .remove = omap_sham_remove, .driver = { .name = "omap-sham", .owner = THIS_MODULE, .pm = &omap_sham_pm_ops, .of_match_table = omap_sham_of_match, }, }; module_platform_driver(omap_sham_driver); MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support."); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Dmitry Kasatkin");
gpl-2.0
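The record above ends with the omap-sham driver's module metadata and license tag. One detail worth illustrating from it is the software HMAC key schedule in omap_sham_setkey(): on hardware without the AUTO_XOR capability the driver builds the inner and outer pads itself by zero-padding the key to the block size and XORing every byte with 0x36 and 0x5c. The stand-alone user-space sketch below mirrors that arithmetic under the assumption of a 64-byte (SHA-1/MD5) block; hmac_prepare_pads() is an illustrative name rather than a driver symbol, and the step that first digests over-long keys with the base hash is only noted in a comment.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define BLOCK_SIZE 64  /* corresponds to SHA1_MD5_BLOCK_SIZE in the driver */

/*
 * Minimal sketch of the pad preparation done in omap_sham_setkey()
 * when FLAGS_AUTO_XOR is not set: the key is zero-padded to the block
 * size, copied to both pads, and XORed with 0x36 (inner) / 0x5c (outer).
 * Keys longer than the block size would first be digested down to the
 * hash size by the base shash; that step is omitted here.
 */
static void hmac_prepare_pads(const uint8_t *key, size_t keylen,
                              uint8_t ipad[BLOCK_SIZE],
                              uint8_t opad[BLOCK_SIZE])
{
    size_t i;

    if (keylen > BLOCK_SIZE)
        keylen = BLOCK_SIZE;   /* real driver digests the key instead */

    memcpy(ipad, key, keylen);
    memset(ipad + keylen, 0, BLOCK_SIZE - keylen);
    memcpy(opad, ipad, BLOCK_SIZE);

    for (i = 0; i < BLOCK_SIZE; i++) {
        ipad[i] ^= 0x36;
        opad[i] ^= 0x5c;
    }
}

int main(void)
{
    uint8_t ipad[BLOCK_SIZE], opad[BLOCK_SIZE];

    hmac_prepare_pads((const uint8_t *)"secret-key", 10, ipad, opad);
    printf("ipad[0]=0x%02x opad[0]=0x%02x\n", ipad[0], opad[0]);
    return 0;
}

In the driver itself the resulting ipad is queued as the first block of the hardware hash (omap_sham_init() copies it into ctx->buffer), while opad is kept back for the software finup() performed in omap_sham_finish_hmac().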
SM-G920P/TeamSPR
net/netfilter/ipvs/ip_vs_dh.c
2088
6617
/* * IPVS: Destination Hashing scheduling module * * Authors: Wensong Zhang <wensong@gnuchina.org> * * Inspired by the consistent hashing scheduler patch from * Thomas Proell <proellt@gmx.de> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Changes: * */ /* * The dh algorithm is to select server by the hash key of destination IP * address. The pseudo code is as follows: * * n <- servernode[dest_ip]; * if (n is dead) OR * (n is overloaded) OR (n.weight <= 0) then * return NULL; * * return n; * * Notes that servernode is a 256-bucket hash table that maps the hash * index derived from packet destination IP address to the current server * array. If the dh scheduler is used in cache cluster, it is good to * combine it with cache_bypass feature. When the statically assigned * server is dead or overloaded, the load balancer can bypass the cache * server and send requests to the original server directly. * */ #define KMSG_COMPONENT "IPVS" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/ip.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <net/ip_vs.h> /* * IPVS DH bucket */ struct ip_vs_dh_bucket { struct ip_vs_dest __rcu *dest; /* real server (cache) */ }; /* * for IPVS DH entry hash table */ #ifndef CONFIG_IP_VS_DH_TAB_BITS #define CONFIG_IP_VS_DH_TAB_BITS 8 #endif #define IP_VS_DH_TAB_BITS CONFIG_IP_VS_DH_TAB_BITS #define IP_VS_DH_TAB_SIZE (1 << IP_VS_DH_TAB_BITS) #define IP_VS_DH_TAB_MASK (IP_VS_DH_TAB_SIZE - 1) struct ip_vs_dh_state { struct ip_vs_dh_bucket buckets[IP_VS_DH_TAB_SIZE]; struct rcu_head rcu_head; }; /* * Returns hash value for IPVS DH entry */ static inline unsigned int ip_vs_dh_hashkey(int af, const union nf_inet_addr *addr) { __be32 addr_fold = addr->ip; #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) addr_fold = addr->ip6[0]^addr->ip6[1]^ addr->ip6[2]^addr->ip6[3]; #endif return (ntohl(addr_fold)*2654435761UL) & IP_VS_DH_TAB_MASK; } /* * Get ip_vs_dest associated with supplied parameters. */ static inline struct ip_vs_dest * ip_vs_dh_get(int af, struct ip_vs_dh_state *s, const union nf_inet_addr *addr) { return rcu_dereference(s->buckets[ip_vs_dh_hashkey(af, addr)].dest); } /* * Assign all the hash buckets of the specified table with the service. */ static int ip_vs_dh_reassign(struct ip_vs_dh_state *s, struct ip_vs_service *svc) { int i; struct ip_vs_dh_bucket *b; struct list_head *p; struct ip_vs_dest *dest; bool empty; b = &s->buckets[0]; p = &svc->destinations; empty = list_empty(p); for (i=0; i<IP_VS_DH_TAB_SIZE; i++) { dest = rcu_dereference_protected(b->dest, 1); if (dest) ip_vs_dest_put(dest); if (empty) RCU_INIT_POINTER(b->dest, NULL); else { if (p == &svc->destinations) p = p->next; dest = list_entry(p, struct ip_vs_dest, n_list); ip_vs_dest_hold(dest); RCU_INIT_POINTER(b->dest, dest); p = p->next; } b++; } return 0; } /* * Flush all the hash buckets of the specified table. 
*/ static void ip_vs_dh_flush(struct ip_vs_dh_state *s) { int i; struct ip_vs_dh_bucket *b; struct ip_vs_dest *dest; b = &s->buckets[0]; for (i=0; i<IP_VS_DH_TAB_SIZE; i++) { dest = rcu_dereference_protected(b->dest, 1); if (dest) { ip_vs_dest_put(dest); RCU_INIT_POINTER(b->dest, NULL); } b++; } } static int ip_vs_dh_init_svc(struct ip_vs_service *svc) { struct ip_vs_dh_state *s; /* allocate the DH table for this service */ s = kzalloc(sizeof(struct ip_vs_dh_state), GFP_KERNEL); if (s == NULL) return -ENOMEM; svc->sched_data = s; IP_VS_DBG(6, "DH hash table (memory=%Zdbytes) allocated for " "current service\n", sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE); /* assign the hash buckets with current dests */ ip_vs_dh_reassign(s, svc); return 0; } static void ip_vs_dh_done_svc(struct ip_vs_service *svc) { struct ip_vs_dh_state *s = svc->sched_data; /* got to clean up hash buckets here */ ip_vs_dh_flush(s); /* release the table itself */ kfree_rcu(s, rcu_head); IP_VS_DBG(6, "DH hash table (memory=%Zdbytes) released\n", sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE); } static int ip_vs_dh_dest_changed(struct ip_vs_service *svc, struct ip_vs_dest *dest) { struct ip_vs_dh_state *s = svc->sched_data; /* assign the hash buckets with the updated service */ ip_vs_dh_reassign(s, svc); return 0; } /* * If the dest flags is set with IP_VS_DEST_F_OVERLOAD, * consider that the server is overloaded here. */ static inline int is_overloaded(struct ip_vs_dest *dest) { return dest->flags & IP_VS_DEST_F_OVERLOAD; } /* * Destination hashing scheduling */ static struct ip_vs_dest * ip_vs_dh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) { struct ip_vs_dest *dest; struct ip_vs_dh_state *s; struct ip_vs_iphdr iph; ip_vs_fill_iph_addr_only(svc->af, skb, &iph); IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); s = (struct ip_vs_dh_state *) svc->sched_data; dest = ip_vs_dh_get(svc->af, s, &iph.daddr); if (!dest || !(dest->flags & IP_VS_DEST_F_AVAILABLE) || atomic_read(&dest->weight) <= 0 || is_overloaded(dest)) { ip_vs_scheduler_err(svc, "no destination available"); return NULL; } IP_VS_DBG_BUF(6, "DH: destination IP address %s --> server %s:%d\n", IP_VS_DBG_ADDR(svc->af, &iph.daddr), IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port)); return dest; } /* * IPVS DH Scheduler structure */ static struct ip_vs_scheduler ip_vs_dh_scheduler = { .name = "dh", .refcnt = ATOMIC_INIT(0), .module = THIS_MODULE, .n_list = LIST_HEAD_INIT(ip_vs_dh_scheduler.n_list), .init_service = ip_vs_dh_init_svc, .done_service = ip_vs_dh_done_svc, .add_dest = ip_vs_dh_dest_changed, .del_dest = ip_vs_dh_dest_changed, .schedule = ip_vs_dh_schedule, }; static int __init ip_vs_dh_init(void) { return register_ip_vs_scheduler(&ip_vs_dh_scheduler); } static void __exit ip_vs_dh_cleanup(void) { unregister_ip_vs_scheduler(&ip_vs_dh_scheduler); synchronize_rcu(); } module_init(ip_vs_dh_init); module_exit(ip_vs_dh_cleanup); MODULE_LICENSE("GPL");
gpl-2.0
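To make the scheduler description at the head of ip_vs_dh.c concrete, the stand-alone sketch below reproduces the IPv4 bucket selection performed by ip_vs_dh_hashkey(): the destination address is converted to host order, multiplied by the golden-ratio constant 2654435761 and masked down to the 256-entry table, so a given destination IP always lands on the same bucket and therefore the same real server. The string table and the address 203.0.113.42 are illustrative stand-ins; the kernel table holds RCU-protected struct ip_vs_dest pointers that ip_vs_dh_reassign() fills round-robin from the service's destination list.

#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>
#include <arpa/inet.h>

#define DH_TAB_BITS 8
#define DH_TAB_SIZE (1 << DH_TAB_BITS)
#define DH_TAB_MASK (DH_TAB_SIZE - 1)

/* Same arithmetic as ip_vs_dh_hashkey() for the IPv4 (AF_INET) case. */
static unsigned int dh_hashkey(uint32_t daddr_be)
{
    return (ntohl(daddr_be) * 2654435761UL) & DH_TAB_MASK;
}

int main(void)
{
    /* Illustrative bucket table: each slot stands in for a real server. */
    const char *buckets[DH_TAB_SIZE];
    struct in_addr dst;
    unsigned int i, key;

    for (i = 0; i < DH_TAB_SIZE; i++)
        buckets[i] = (i & 1) ? "cache-b" : "cache-a";

    if (inet_pton(AF_INET, "203.0.113.42", &dst) != 1)
        return 1;

    /* The same destination IP always hashes to the same bucket/server. */
    key = dh_hashkey(dst.s_addr);
    printf("dest 203.0.113.42 -> bucket %u -> %s\n", key, buckets[key]);
    return 0;
}

This fixed mapping is also why the file's comment pairs dh with the cache_bypass feature: when the statically hashed server is dead, overloaded or has weight <= 0, ip_vs_dh_schedule() simply returns NULL instead of re-hashing to a different node.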
hellsgod/hells-Core-N6P
drivers/staging/csr/unifi_pdu_processing.c
2088
168622
/* * --------------------------------------------------------------------------- * FILE: unifi_pdu_processing.c * * PURPOSE: * This file provides the PDU handling functionality before it gets sent to unfi and after * receiving a PDU from unifi * * Copyright (C) 2010 by Cambridge Silicon Radio Ltd. * * Refer to LICENSE.txt included with this source code for details on * the license terms. * * --------------------------------------------------------------------------- */ #include <linux/types.h> #include <linux/etherdevice.h> #include <linux/vmalloc.h> #include "csr_wifi_hip_unifi.h" #include "csr_wifi_hip_conversions.h" #include "csr_time.h" #include "unifi_priv.h" #include <net/pkt_sched.h> #ifdef CSR_SUPPORT_SME static void _update_buffered_pkt_params_after_alignment(unifi_priv_t *priv, bulk_data_param_t *bulkdata, tx_buffered_packets_t* buffered_pkt) { struct sk_buff *skb ; u32 align_offset; if (priv == NULL || bulkdata == NULL || buffered_pkt == NULL){ return; } skb = (struct sk_buff*)bulkdata->d[0].os_net_buf_ptr; align_offset = (u32)(long)(bulkdata->d[0].os_data_ptr) & (CSR_WIFI_ALIGN_BYTES-1); if(align_offset){ skb_pull(skb,align_offset); } buffered_pkt->bulkdata.os_data_ptr = bulkdata->d[0].os_data_ptr; buffered_pkt->bulkdata.data_length = bulkdata->d[0].data_length; buffered_pkt->bulkdata.os_net_buf_ptr = bulkdata->d[0].os_net_buf_ptr; buffered_pkt->bulkdata.net_buf_length = bulkdata->d[0].net_buf_length; } #endif void unifi_frame_ma_packet_req(unifi_priv_t *priv, CSR_PRIORITY priority, CSR_RATE TransmitRate, CSR_CLIENT_TAG hostTag, u16 interfaceTag, CSR_TRANSMISSION_CONTROL transmissionControl, CSR_PROCESS_ID leSenderProcessId, u8 *peerMacAddress, CSR_SIGNAL *signal) { CSR_MA_PACKET_REQUEST *req = &signal->u.MaPacketRequest; netInterface_priv_t *interfacePriv; u8 ba_session_idx = 0; ba_session_tx_struct *ba_session = NULL; u8 *ba_addr = NULL; interfacePriv = priv->interfacePriv[interfaceTag]; unifi_trace(priv, UDBG5, "In unifi_frame_ma_packet_req, Frame for Peer: %pMF\n", peerMacAddress); signal->SignalPrimitiveHeader.SignalId = CSR_MA_PACKET_REQUEST_ID; signal->SignalPrimitiveHeader.ReceiverProcessId = 0; signal->SignalPrimitiveHeader.SenderProcessId = leSenderProcessId; /* Fill the MA-PACKET.req */ req->Priority = priority; unifi_trace(priv, UDBG3, "Tx Frame with Priority: 0x%x\n", req->Priority); /* A value of 0 is used for auto selection of rates. But for P2P GO case * for action frames the rate is governed by SME. 
Hence instead of 0, * the rate is filled in with the value passed here */ req->TransmitRate = TransmitRate; /* packets from netdev then no confirm required but packets from * Nme/Sme eapol data frames requires the confirmation */ req->TransmissionControl = transmissionControl; req->VirtualInterfaceIdentifier = uf_get_vif_identifier(interfacePriv->interfaceMode,interfaceTag); memcpy(req->Ra.x, peerMacAddress, ETH_ALEN); if (hostTag == 0xffffffff) { req->HostTag = interfacePriv->tag++; req->HostTag |= 0x40000000; unifi_trace(priv, UDBG3, "new host tag assigned = 0x%x\n", req->HostTag); interfacePriv->tag &= 0x0fffffff; } else { req->HostTag = hostTag; unifi_trace(priv, UDBG3, "host tag got from SME = 0x%x\n", req->HostTag); } /* check if BA session exists for the peer MAC address on same tID */ if(interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP || interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO){ ba_addr = peerMacAddress; }else{ ba_addr = interfacePriv->bssid.a; } for (ba_session_idx=0; ba_session_idx < MAX_SUPPORTED_BA_SESSIONS_TX; ba_session_idx++){ ba_session = interfacePriv->ba_session_tx[ba_session_idx]; if (ba_session){ if ((!memcmp(ba_session->macAddress.a, ba_addr, ETH_ALEN)) && (ba_session->tID == priority)){ req->TransmissionControl |= CSR_ALLOW_BA; break; } } } unifi_trace(priv, UDBG5, "leaving unifi_frame_ma_packet_req\n"); } #ifdef CSR_SUPPORT_SME #define TRANSMISSION_CONTROL_TRIGGER_MASK 0x0001 #define TRANSMISSION_CONTROL_EOSP_MASK 0x0002 static int frame_and_send_queued_pdu(unifi_priv_t* priv,tx_buffered_packets_t* buffered_pkt, CsrWifiRouterCtrlStaInfo_t *staRecord,u8 moreData , u8 eosp) { CSR_SIGNAL signal; bulk_data_param_t bulkdata; int result; u8 toDs, fromDs, macHeaderLengthInBytes = MAC_HEADER_SIZE; u8 *qc; u16 *fc = (u16*)(buffered_pkt->bulkdata.os_data_ptr); unsigned long lock_flags; unifi_trace(priv, UDBG3, "frame_and_send_queued_pdu with moreData: %d , EOSP: %d\n",moreData,eosp); unifi_frame_ma_packet_req(priv, buffered_pkt->priority, buffered_pkt->rate, buffered_pkt->hostTag, buffered_pkt->interfaceTag, buffered_pkt->transmissionControl, buffered_pkt->leSenderProcessId, buffered_pkt->peerMacAddress.a, &signal); bulkdata.d[0].os_data_ptr = buffered_pkt->bulkdata.os_data_ptr; bulkdata.d[0].data_length = buffered_pkt->bulkdata.data_length; bulkdata.d[0].os_net_buf_ptr = buffered_pkt->bulkdata.os_net_buf_ptr; bulkdata.d[0].net_buf_length = buffered_pkt->bulkdata.net_buf_length; bulkdata.d[1].os_data_ptr = NULL; bulkdata.d[1].data_length = 0; bulkdata.d[1].os_net_buf_ptr =0; bulkdata.d[1].net_buf_length =0; if(moreData) { *fc |= cpu_to_le16(IEEE802_11_FC_MOREDATA_MASK); } else { *fc &= cpu_to_le16(~IEEE802_11_FC_MOREDATA_MASK); } if((staRecord != NULL)&& (staRecord->wmmOrQosEnabled == TRUE)) { unifi_trace(priv, UDBG3, "frame_and_send_queued_pdu WMM Enabled: %d \n",staRecord->wmmOrQosEnabled); toDs = (*fc & cpu_to_le16(IEEE802_11_FC_TO_DS_MASK))?1 : 0; fromDs = (*fc & cpu_to_le16(IEEE802_11_FC_FROM_DS_MASK))? 
1: 0; switch(le16_to_cpu(*fc) & IEEE80211_FC_SUBTYPE_MASK) { case IEEE802_11_FC_TYPE_QOS_DATA & IEEE80211_FC_SUBTYPE_MASK: case IEEE802_11_FC_TYPE_QOS_NULL & IEEE80211_FC_SUBTYPE_MASK: /* If both are set then the Address4 exists (only for AP) */ if (fromDs && toDs) { /* 6 is the size of Address4 field */ macHeaderLengthInBytes += (QOS_CONTROL_HEADER_SIZE + 6); } else { macHeaderLengthInBytes += QOS_CONTROL_HEADER_SIZE; } /* If order bit set then HT control field is the part of MAC header */ if (*fc & cpu_to_le16(IEEE80211_FC_ORDER_MASK)) { macHeaderLengthInBytes += HT_CONTROL_HEADER_SIZE; qc = (u8*)(buffered_pkt->bulkdata.os_data_ptr + (macHeaderLengthInBytes-6)); } else { qc = (u8*)(buffered_pkt->bulkdata.os_data_ptr + (macHeaderLengthInBytes-2)); } *qc = eosp ? *qc | (1 << 4) : *qc & (~(1 << 4)); break; default: if (fromDs && toDs) macHeaderLengthInBytes += 6; } } result = ul_send_signal_unpacked(priv, &signal, &bulkdata); if(result){ _update_buffered_pkt_params_after_alignment(priv, &bulkdata,buffered_pkt); } /* Decrement the packet counts queued in driver */ if (result != -ENOSPC) { /* protect entire counter updation by disabling preemption */ if (!priv->noOfPktQueuedInDriver) { unifi_error(priv, "packets queued in driver 0 still decrementing\n"); } else { spin_lock_irqsave(&priv->tx_q_lock,lock_flags); priv->noOfPktQueuedInDriver--; spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags); } /* Sta Record is available for all unicast (except genericMgt Frames) & in other case its NULL */ if (staRecord) { spin_lock_irqsave(&priv->staRecord_lock,lock_flags); if (!staRecord->noOfPktQueued) { unifi_error(priv, "packets queued in driver per station is 0 still decrementing\n"); } else { staRecord->noOfPktQueued--; } /* if the STA alive probe frame has failed then reset the saved host tag */ if (result){ if (staRecord->nullDataHostTag == buffered_pkt->hostTag){ staRecord->nullDataHostTag = INVALID_HOST_TAG; } } spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags); } } return result; } #ifdef CSR_SUPPORT_SME static void set_eosp_transmit_ctrl(unifi_priv_t *priv, struct list_head *txList) { /* dequeue the tx data packets from the appropriate queue */ tx_buffered_packets_t *tx_q_item = NULL; struct list_head *listHead; struct list_head *placeHolder; unsigned long lock_flags; unifi_trace(priv, UDBG5, "entering set_eosp_transmit_ctrl\n"); /* check for list empty */ if (list_empty(txList)) { unifi_warning(priv, "In set_eosp_transmit_ctrl, the list is empty\n"); return; } /* return the last node , and modify it. 
*/ spin_lock_irqsave(&priv->tx_q_lock,lock_flags); list_for_each_prev_safe(listHead, placeHolder, txList) { tx_q_item = list_entry(listHead, tx_buffered_packets_t, q); tx_q_item->transmissionControl |= TRANSMISSION_CONTROL_EOSP_MASK; tx_q_item->transmissionControl = (tx_q_item->transmissionControl & ~(CSR_NO_CONFIRM_REQUIRED)); unifi_trace(priv, UDBG1, "set_eosp_transmit_ctrl Transmission Control = 0x%x hostTag = 0x%x \n",tx_q_item->transmissionControl,tx_q_item->hostTag); unifi_trace(priv,UDBG3,"in set_eosp_transmit_ctrl no.of buffered frames %d\n",priv->noOfPktQueuedInDriver); break; } spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags); unifi_trace(priv, UDBG1,"List Empty %d\n",list_empty(txList)); unifi_trace(priv, UDBG5, "leaving set_eosp_transmit_ctrl\n"); return; } static void send_vif_availibility_rsp(unifi_priv_t *priv,CSR_VIF_IDENTIFIER vif,CSR_RESULT_CODE resultCode) { CSR_SIGNAL signal; CSR_MA_VIF_AVAILABILITY_RESPONSE *rsp; bulk_data_param_t *bulkdata = NULL; int r; unifi_trace(priv, UDBG3, "send_vif_availibility_rsp : invoked with resultCode = %d \n", resultCode); memset(&signal,0,sizeof(CSR_SIGNAL)); rsp = &signal.u.MaVifAvailabilityResponse; rsp->VirtualInterfaceIdentifier = vif; rsp->ResultCode = resultCode; signal.SignalPrimitiveHeader.SignalId = CSR_MA_VIF_AVAILABILITY_RESPONSE_ID; signal.SignalPrimitiveHeader.ReceiverProcessId = 0; signal.SignalPrimitiveHeader.SenderProcessId = priv->netdev_client->sender_id; /* Send the signal to UniFi */ r = ul_send_signal_unpacked(priv, &signal, bulkdata); if(r) { unifi_error(priv,"Availibility response sending failed %x status %d\n",vif,r); } else { unifi_trace(priv, UDBG3, "send_vif_availibility_rsp : status = %d \n", r); } } #endif static void verify_and_accomodate_tx_packet(unifi_priv_t *priv) { tx_buffered_packets_t *tx_q_item; unsigned long lock_flags; struct list_head *listHead, *list; struct list_head *placeHolder; u8 i, j,eospFramedeleted=0; u8 thresholdExcedeDueToBroadcast = TRUE; /* it will be made it interface Specific in the future when multi interfaces are supported , right now interface 0 is considered */ netInterface_priv_t *interfacePriv = priv->interfacePriv[0]; CsrWifiRouterCtrlStaInfo_t *staInfo = NULL; unifi_trace(priv, UDBG3, "entering verify_and_accomodate_tx_packet\n"); for(i = 0; i < UNIFI_MAX_CONNECTIONS; i++) { staInfo = interfacePriv->staInfo[i]; if (staInfo && (staInfo->noOfPktQueued >= CSR_WIFI_DRIVER_MAX_PKT_QUEUING_THRESHOLD_PER_PEER)) { /* remove the first(oldest) packet from the all the access catogory, since data * packets for station record crossed the threshold limit (64 for AP supporting * 8 peers) */ unifi_trace(priv,UDBG3,"number of station pkts queued= %d for sta id = %d\n", staInfo->noOfPktQueued, staInfo->aid); for(j = 0; j < MAX_ACCESS_CATOGORY; j++) { list = &staInfo->dataPdu[j]; spin_lock_irqsave(&priv->tx_q_lock,lock_flags); list_for_each_safe(listHead, placeHolder, list) { tx_q_item = list_entry(listHead, tx_buffered_packets_t, q); list_del(listHead); thresholdExcedeDueToBroadcast = FALSE; unifi_net_data_free(priv, &tx_q_item->bulkdata); kfree(tx_q_item); tx_q_item = NULL; if (!priv->noOfPktQueuedInDriver) { unifi_error(priv, "packets queued in driver 0 still decrementing in %s\n", __FUNCTION__); } else { /* protection provided by spinlock */ priv->noOfPktQueuedInDriver--; } /* Sta Record is available for all unicast (except genericMgt Frames) & in other case its NULL */ if (!staInfo->noOfPktQueued) { unifi_error(priv, "packets queued in driver per station is 0 still decrementing in 
%s\n", __FUNCTION__); } else { spin_lock(&priv->staRecord_lock); staInfo->noOfPktQueued--; spin_unlock(&priv->staRecord_lock); } break; } spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags); } } } if (thresholdExcedeDueToBroadcast && interfacePriv->noOfbroadcastPktQueued > CSR_WIFI_DRIVER_MINIMUM_BROADCAST_PKT_THRESHOLD ) { /* Remove the packets from genericMulticastOrBroadCastFrames queue * (the max packets in driver is reached due to broadcast/multicast frames) */ spin_lock_irqsave(&priv->tx_q_lock,lock_flags); list_for_each_safe(listHead, placeHolder, &interfacePriv->genericMulticastOrBroadCastFrames) { tx_q_item = list_entry(listHead, tx_buffered_packets_t, q); if(eospFramedeleted){ tx_q_item->transmissionControl |= TRANSMISSION_CONTROL_EOSP_MASK; tx_q_item->transmissionControl = (tx_q_item->transmissionControl & ~(CSR_NO_CONFIRM_REQUIRED)); unifi_trace(priv, UDBG1,"updating eosp for next packet hostTag:= 0x%x ",tx_q_item->hostTag); eospFramedeleted =0; break; } if(tx_q_item->transmissionControl & TRANSMISSION_CONTROL_EOSP_MASK ){ eospFramedeleted = 1; } unifi_trace(priv,UDBG1, "freeing of multicast packets ToC = 0x%x hostTag = 0x%x \n",tx_q_item->transmissionControl,tx_q_item->hostTag); list_del(listHead); unifi_net_data_free(priv, &tx_q_item->bulkdata); kfree(tx_q_item); priv->noOfPktQueuedInDriver--; spin_lock(&priv->staRecord_lock); interfacePriv->noOfbroadcastPktQueued--; spin_unlock(&priv->staRecord_lock); if(!eospFramedeleted){ break; } } spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags); } unifi_trace(priv, UDBG3, "leaving verify_and_accomodate_tx_packet\n"); } static CsrResult enque_tx_data_pdu(unifi_priv_t *priv, bulk_data_param_t *bulkdata, struct list_head *list, CSR_SIGNAL *signal, u8 requeueOnSamePos) { /* queue the tx data packets on to appropriate queue */ CSR_MA_PACKET_REQUEST *req = &signal->u.MaPacketRequest; tx_buffered_packets_t *tx_q_item; unsigned long lock_flags; unifi_trace(priv, UDBG5, "entering enque_tx_data_pdu\n"); if(!list) { unifi_error(priv,"List is not specified\n"); return CSR_RESULT_FAILURE; } /* Removes aged packets & adds the incoming packet */ if (priv->noOfPktQueuedInDriver >= CSR_WIFI_DRIVER_SUPPORT_FOR_MAX_PKT_QUEUEING) { unifi_trace(priv,UDBG3,"number of pkts queued= %d \n", priv->noOfPktQueuedInDriver); verify_and_accomodate_tx_packet(priv); } tx_q_item = kmalloc(sizeof(tx_buffered_packets_t), GFP_ATOMIC); if (tx_q_item == NULL) { unifi_error(priv, "Failed to allocate %d bytes for tx packet record\n", sizeof(tx_buffered_packets_t)); return CSR_RESULT_FAILURE; } /* disable the preemption */ spin_lock_irqsave(&priv->tx_q_lock,lock_flags); INIT_LIST_HEAD(&tx_q_item->q); /* fill the tx_q structure members */ tx_q_item->bulkdata.os_data_ptr = bulkdata->d[0].os_data_ptr; tx_q_item->bulkdata.data_length = bulkdata->d[0].data_length; tx_q_item->bulkdata.os_net_buf_ptr = bulkdata->d[0].os_net_buf_ptr; tx_q_item->bulkdata.net_buf_length = bulkdata->d[0].net_buf_length; tx_q_item->interfaceTag = req->VirtualInterfaceIdentifier & 0xff; tx_q_item->hostTag = req->HostTag; tx_q_item->leSenderProcessId = signal->SignalPrimitiveHeader.SenderProcessId; tx_q_item->transmissionControl = req->TransmissionControl; tx_q_item->priority = req->Priority; tx_q_item->rate = req->TransmitRate; memcpy(tx_q_item->peerMacAddress.a, req->Ra.x, ETH_ALEN); if (requeueOnSamePos) { list_add(&tx_q_item->q, list); } else { list_add_tail(&tx_q_item->q, list); } /* Count of packet queued in driver */ priv->noOfPktQueuedInDriver++; 
spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags); unifi_trace(priv, UDBG5, "leaving enque_tx_data_pdu\n"); return CSR_RESULT_SUCCESS; } #ifdef CSR_WIFI_REQUEUE_PACKET_TO_HAL CsrResult unifi_reque_ma_packet_request (void *ospriv, u32 host_tag, u16 txStatus, bulk_data_desc_t *bulkDataDesc) { CsrResult status = CSR_RESULT_SUCCESS; unifi_priv_t *priv = (unifi_priv_t*)ospriv; netInterface_priv_t *interfacePriv; struct list_head *list = NULL; CsrWifiRouterCtrlStaInfo_t *staRecord = NULL; bulk_data_param_t bulkData; CSR_SIGNAL signal; CSR_PRIORITY priority = 0; u16 interfaceTag = 0; unifi_TrafficQueue priority_q; u16 frameControl = 0, frameType = 0; unsigned long lock_flags; interfacePriv = priv->interfacePriv[interfaceTag]; /* If the current mode is not AP or P2PGO then just return failure * to clear the hip slot */ if(!((interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP) || (interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO))) { return CSR_RESULT_FAILURE; } unifi_trace(priv, UDBG6, "unifi_reque_ma_packet_request: host_tag = 0x%x\n", host_tag); staRecord = CsrWifiRouterCtrlGetStationRecordFromPeerMacAddress(priv, (((u8 *) bulkDataDesc->os_data_ptr) + 4), interfaceTag); if (NULL == staRecord) { unifi_trace(priv, UDBG5, "unifi_reque_ma_packet_request: Invalid STA record \n"); return CSR_RESULT_FAILURE; } /* Update TIM if MA-PACKET.cfm fails with status as Tx-retry-limit or No-BSS and then just return failure * to clear the hip slot associated with the Packet */ if (CSR_TX_RETRY_LIMIT == txStatus || CSR_TX_NO_BSS == txStatus) { if (staRecord->timSet == CSR_WIFI_TIM_RESET || staRecord->timSet == CSR_WIFI_TIM_RESETTING) { unifi_trace(priv, UDBG2, "unifi_reque_ma_packet_request: CFM failed with Retry Limit or No BSS-->update TIM\n"); if (!staRecord->timRequestPendingFlag) { update_tim(priv, staRecord->aid, 1, interfaceTag, staRecord->assignedHandle); } else { /* Cache the TimSet value so that it will processed immidiatly after * completing the current setTim Request */ staRecord->updateTimReqQueued = 1; unifi_trace(priv, UDBG6, "unifi_reque_ma_packet_request: One more UpdateTim Request(:%d)Queued for AID %x\n", staRecord->updateTimReqQueued, staRecord->aid); } } return CSR_RESULT_FAILURE; } else if ((CSR_TX_LIFETIME == txStatus) || (CSR_TX_BLOCK_ACK_TIMEOUT == txStatus) || (CSR_TX_FAIL_TRANSMISSION_VIF_INTERRUPTED == txStatus) || (CSR_TX_REJECTED_PEER_STATION_SLEEPING == txStatus) || (CSR_TX_REJECTED_DTIM_STARTED == txStatus)) { /* Extract the Frame control and the frame type */ frameControl = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(bulkDataDesc->os_data_ptr); frameType = ((frameControl & IEEE80211_FC_TYPE_MASK) >> FRAME_CONTROL_TYPE_FIELD_OFFSET); /* Mgmt frames will not be re-queued for Tx * so just return failure to clear the hip slot */ if (IEEE802_11_FRAMETYPE_MANAGEMENT == frameType) { return CSR_RESULT_FAILURE; } else if (IEEE802_11_FRAMETYPE_DATA == frameType) { /* QOS NULL and DATA NULL frames will not be re-queued for Tx * so just return failure to clear the hip slot */ if ((((frameControl & IEEE80211_FC_SUBTYPE_MASK) >> FRAME_CONTROL_SUBTYPE_FIELD_OFFSET) == QOS_DATA_NULL) || (((frameControl & IEEE80211_FC_SUBTYPE_MASK) >> FRAME_CONTROL_SUBTYPE_FIELD_OFFSET)== DATA_NULL )) { return CSR_RESULT_FAILURE; } } /* Extract the Packet priority */ if (TRUE == staRecord->wmmOrQosEnabled) { u16 qosControl = 0; u8 dataFrameType = 0; dataFrameType =((frameControl & IEEE80211_FC_SUBTYPE_MASK) >> 4); if (dataFrameType == QOS_DATA) { /* QoS control field is offset from frame control 
by 2 (frame control) * + 2 (duration/ID) + 2 (sequence control) + 3*ETH_ALEN or 4*ETH_ALEN */ if((frameControl & IEEE802_11_FC_TO_DS_MASK) && (frameControl & IEEE802_11_FC_FROM_DS_MASK)) { qosControl= CSR_GET_UINT16_FROM_LITTLE_ENDIAN(bulkDataDesc->os_data_ptr + 30); } else { qosControl = CSR_GET_UINT16_FROM_LITTLE_ENDIAN(bulkDataDesc->os_data_ptr + 24); } } priority = (CSR_PRIORITY)(qosControl & IEEE802_11_QC_TID_MASK); if (priority < CSR_QOS_UP0 || priority > CSR_QOS_UP7) { unifi_trace(priv, UDBG5, "unifi_reque_ma_packet_request: Invalid priority:%x \n", priority); return CSR_RESULT_FAILURE; } } else { priority = CSR_CONTENTION; } /* Frame Bulk data to requeue it back to HAL Queues */ bulkData.d[0].os_data_ptr = bulkDataDesc->os_data_ptr; bulkData.d[0].data_length = bulkDataDesc->data_length; bulkData.d[0].os_net_buf_ptr = bulkDataDesc->os_net_buf_ptr; bulkData.d[0].net_buf_length = bulkDataDesc->net_buf_length; bulkData.d[1].os_data_ptr = NULL; bulkData.d[1].os_net_buf_ptr = NULL; bulkData.d[1].data_length = bulkData.d[1].net_buf_length = 0; /* Initialize signal to zero */ memset(&signal, 0, sizeof(CSR_SIGNAL)); /* Frame MA Packet Req */ unifi_frame_ma_packet_req(priv, priority, 0, host_tag, interfaceTag, CSR_NO_CONFIRM_REQUIRED, priv->netdev_client->sender_id, staRecord->peerMacAddress.a, &signal); /* Find the Q-Priority */ priority_q = unifi_frame_priority_to_queue(priority); list = &staRecord->dataPdu[priority_q]; /* Place the Packet on to HAL Queue */ status = enque_tx_data_pdu(priv, &bulkData, list, &signal, TRUE); /* Update the Per-station queued packet counter */ if (!status) { spin_lock_irqsave(&priv->staRecord_lock, lock_flags); staRecord->noOfPktQueued++; spin_unlock_irqrestore(&priv->staRecord_lock, lock_flags); } } else { /* Packet will not be re-queued for any of the other MA Packet Tx failure * reasons so just return failure to clear the hip slot */ return CSR_RESULT_FAILURE; } return status; } #endif static void is_all_ac_deliver_enabled_and_moredata(CsrWifiRouterCtrlStaInfo_t *staRecord, u8 *allDeliveryEnabled, u8 *dataAvailable) { u8 i; *allDeliveryEnabled = TRUE; for (i = 0 ;i < MAX_ACCESS_CATOGORY; i++) { if (!IS_DELIVERY_ENABLED(staRecord->powersaveMode[i])) { /* One is is not Delivery Enabled */ *allDeliveryEnabled = FALSE; break; } } if (*allDeliveryEnabled) { *dataAvailable = (!list_empty(&staRecord->dataPdu[0]) || !list_empty(&staRecord->dataPdu[1]) ||!list_empty(&staRecord->dataPdu[2]) ||!list_empty(&staRecord->dataPdu[3]) ||!list_empty(&staRecord->mgtFrames)); } } /* * --------------------------------------------------------------------------- * uf_handle_tim_cfm * * * This function updates tim status in host depending confirm status from firmware * * Arguments: * priv Pointer to device private context struct * cfm CSR_MLME_SET_TIM_CONFIRM * receiverProcessId SenderProcessID to fetch handle & timSet status * * --------------------------------------------------------------------------- */ void uf_handle_tim_cfm(unifi_priv_t *priv, CSR_MLME_SET_TIM_CONFIRM *cfm, u16 receiverProcessId) { u8 handle = CSR_WIFI_GET_STATION_HANDLE_FROM_RECEIVER_ID(receiverProcessId); u8 timSetStatus = CSR_WIFI_GET_TIMSET_STATE_FROM_RECEIVER_ID(receiverProcessId); u16 interfaceTag = (cfm->VirtualInterfaceIdentifier & 0xff); netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag]; CsrWifiRouterCtrlStaInfo_t *staRecord = NULL; /* This variable holds what TIM value we wanted to set in firmware */ u16 timSetValue = 0; /* Irrespective of interface the count maintained */ 
static u8 retryCount = 0; unsigned long lock_flags; unifi_trace(priv, UDBG3, "entering %s, handle = %x, timSetStatus = %x\n", __FUNCTION__, handle, timSetStatus); if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) { unifi_warning(priv, "bad interfaceTag = %x\n", interfaceTag); return; } if ((handle != CSR_WIFI_BROADCAST_OR_MULTICAST_HANDLE) && (handle >= UNIFI_MAX_CONNECTIONS)) { unifi_warning(priv, "bad station Handle = %x\n", handle); return; } if (handle != CSR_WIFI_BROADCAST_OR_MULTICAST_HANDLE) { spin_lock_irqsave(&priv->staRecord_lock,lock_flags); if ((staRecord = ((CsrWifiRouterCtrlStaInfo_t *) (interfacePriv->staInfo[handle]))) == NULL) { spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags); unifi_warning(priv, "uf_handle_tim_cfm: station record is NULL handle = %x\n", handle); return; } spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags); } switch(timSetStatus) { case CSR_WIFI_TIM_SETTING: timSetValue = CSR_WIFI_TIM_SET; break; case CSR_WIFI_TIM_RESETTING: timSetValue = CSR_WIFI_TIM_RESET; break; default: unifi_warning(priv, "timSet state is %x: Debug\n", timSetStatus); return; } /* check TIM confirm for success/failures */ switch(cfm->ResultCode) { case CSR_RC_SUCCESS: if (handle != CSR_WIFI_BROADCAST_OR_MULTICAST_HANDLE) { /* Unicast frame & station record available */ if (timSetStatus == staRecord->timSet) { staRecord->timSet = timSetValue; /* fh_cmd_q can also be full at some point of time!, * resetting count as queue is cleaned by firmware at this point */ retryCount = 0; unifi_trace(priv, UDBG2, "tim (%s) successfully in firmware\n", (timSetValue)?"SET":"RESET"); } else { unifi_trace(priv, UDBG3, "receiver processID = %x, success: request & confirm states are not matching in TIM cfm: Debug status = %x, staRecord->timSet = %x, handle = %x\n", receiverProcessId, timSetStatus, staRecord->timSet, handle); } /* Reset TIM pending flag to send next TIM request */ staRecord->timRequestPendingFlag = FALSE; /* Make sure that one more UpdateTim request is queued, if Queued its value * should be CSR_WIFI_TIM_SET or CSR_WIFI_TIM_RESET */ if (0xFF != staRecord->updateTimReqQueued) { /* Process the UpdateTim Request which is queued while previous UpdateTim was in progress */ if (staRecord->timSet != staRecord->updateTimReqQueued) { unifi_trace(priv, UDBG2, "uf_handle_tim_cfm : Processing Queued UpdateTimReq \n"); update_tim(priv, staRecord->aid, staRecord->updateTimReqQueued, interfaceTag, handle); staRecord->updateTimReqQueued = 0xFF; } } } else { interfacePriv->bcTimSet = timSetValue; /* fh_cmd_q can also be full at some point of time!, * resetting count as queue is cleaned by firmware at this point */ retryCount = 0; unifi_trace(priv, UDBG3, "tim (%s) successfully for broadcast frame in firmware\n", (timSetValue)?"SET":"RESET"); /* Reset DTIM pending flag to send next DTIM request */ interfacePriv->bcTimSetReqPendingFlag = FALSE; /* Make sure that one more UpdateDTim request is queued, if Queued its value * should be CSR_WIFI_TIM_SET or CSR_WIFI_TIM_RESET */ if (0xFF != interfacePriv->bcTimSetReqQueued) { /* Process the UpdateTim Request which is queued while previous UpdateTim was in progress */ if (interfacePriv->bcTimSet != interfacePriv->bcTimSetReqQueued) { unifi_trace(priv, UDBG2, "uf_handle_tim_cfm : Processing Queued UpdateDTimReq \n"); update_tim(priv, 0, interfacePriv->bcTimSetReqQueued, interfaceTag, 0xFFFFFFFF); interfacePriv->bcTimSetReqQueued = 0xFF; } } } break; case CSR_RC_INVALID_PARAMETERS: case CSR_RC_INSUFFICIENT_RESOURCE: /* check for max retry limit & send again 
* MAX_RETRY_LIMIT is not maintained for each set of transactions..Its generic * If failure crosses this Limit, we have to take a call to FIX */ if (retryCount > UNIFI_MAX_RETRY_LIMIT) { u8 moreData = FALSE; retryCount = 0; /* Because of continuos traffic in fh_cmd_q the tim set request is failing (exceeding retry limit) * but if we didn't synchronize our timSet varible state with firmware then it can cause below issues * cond 1. We want to SET tim in firmware if its fails & max retry limit reached * -> If host set's the timSet to 1, we wont try to send(as max retry reached) update tim but * firmware is not updated with queue(TIM) status so it wont set TIM in beacon finally host start piling * up data & wont try to set tim in firmware (This can cause worser performance) * cond 2. We want to reset tim in firmware it fails & reaches max retry limit * -> If host sets the timSet to Zero, it wont try to set a TIM request unless we wont have any packets * to be queued, so beacon unnecessarily advertizes the TIM */ if(staRecord) { if(!staRecord->wmmOrQosEnabled) { moreData = (!list_empty(&staRecord->dataPdu[UNIFI_TRAFFIC_Q_CONTENTION]) || !list_empty(&staRecord->dataPdu[UNIFI_TRAFFIC_Q_VO]) || !list_empty(&staRecord->mgtFrames)); } else { /* Peer is QSTA */ u8 allDeliveryEnabled = 0, dataAvailable = 0; /* Check if all AC's are Delivery Enabled */ is_all_ac_deliver_enabled_and_moredata(staRecord, &allDeliveryEnabled, &dataAvailable); /*check for more data in non-delivery enabled queues*/ moreData = (uf_is_more_data_for_non_delivery_ac(staRecord) || (allDeliveryEnabled && dataAvailable)); } /* To avoid cond 1 & 2, check internal Queues status, if we have more Data then set RESET the timSet(0), * so we are trying to be in sync with firmware & next packets before queuing atleast try to * set TIM in firmware otherwise it SET timSet(1) */ if (moreData) { staRecord->timSet = CSR_WIFI_TIM_RESET; } else { staRecord->timSet = CSR_WIFI_TIM_SET; } } else { /* Its a broadcast frames */ moreData = (!list_empty(&interfacePriv->genericMulticastOrBroadCastMgtFrames) || !list_empty(&interfacePriv->genericMulticastOrBroadCastFrames)); if (moreData) { update_tim(priv, 0, CSR_WIFI_TIM_SET, interfaceTag, 0xFFFFFFFF); } else { update_tim(priv, 0, CSR_WIFI_TIM_RESET, interfaceTag, 0xFFFFFFFF); } } unifi_error(priv, "no of error's for TIM setting crossed the Limit: verify\n"); return; } retryCount++; if (handle != CSR_WIFI_BROADCAST_OR_MULTICAST_HANDLE) { if (timSetStatus == staRecord->timSet) { unifi_warning(priv, "tim request failed, retry for AID = %x\n", staRecord->aid); update_tim(priv, staRecord->aid, timSetValue, interfaceTag, handle); } else { unifi_trace(priv, UDBG1, "failure: request & confirm states are not matching in TIM cfm: Debug status = %x, staRecord->timSet = %x\n", timSetStatus, staRecord->timSet); } } else { unifi_warning(priv, "tim request failed, retry for broadcast frames\n"); update_tim(priv, 0, timSetValue, interfaceTag, 0xFFFFFFFF); } break; default: unifi_warning(priv, "tim update request failed resultcode = %x\n", cfm->ResultCode); } unifi_trace(priv, UDBG2, "leaving %s\n", __FUNCTION__); } /* * --------------------------------------------------------------------------- * update_tim * * * This function updates tim status in firmware for AID[1 to UNIFI_MAX_CONNECTIONS] or * AID[0] for broadcast/multicast packets. 
* * NOTE: The LSB (least significant BYTE) of senderId while sending this MLME premitive * has been modified(utilized) as below * * SenderID in signal's SignalPrimitiveHeader is 2 byte the lowe byte bitmap is below * * station handle(6 bits) timSet Status (2 bits) * --------------------- ---------------------- * 0 0 0 0 0 0 | 0 0 * * timSet Status can be one of below: * * CSR_WIFI_TIM_RESET * CSR_WIFI_TIM_RESETTING * CSR_WIFI_TIM_SET * CSR_WIFI_TIM_SETTING * * Arguments: * priv Pointer to device private context struct * aid can be 1 t0 UNIFI_MAX_CONNECTIONS & 0 means multicast/broadcast * setTim value SET(1) / RESET(0) * interfaceTag the interfaceID on which activity going on * handle from (0 <= handle < UNIFI_MAX_CONNECTIONS) * * --------------------------------------------------------------------------- */ void update_tim(unifi_priv_t * priv, u16 aid, u8 setTim, u16 interfaceTag, u32 handle) { CSR_SIGNAL signal; s32 r; CSR_MLME_SET_TIM_REQUEST *req = &signal.u.MlmeSetTimRequest; bulk_data_param_t *bulkdata = NULL; netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag]; u8 senderIdLsb = 0; CsrWifiRouterCtrlStaInfo_t *staRecord = NULL; u32 oldTimSetStatus = 0, timSetStatus = 0; unifi_trace(priv, UDBG5, "entering the update_tim routine\n"); if (handle == 0xFFFFFFFF) { handle &= CSR_WIFI_BROADCAST_OR_MULTICAST_HANDLE; if (setTim == interfacePriv->bcTimSet) { unifi_trace(priv, UDBG3, "update_tim, Drop:Hdl=%x, timval=%d, globalTim=%d\n", handle, setTim, interfacePriv->bcTimSet); return; } } else if ((handle != 0xFFFFFFFF) && (handle >= UNIFI_MAX_CONNECTIONS)) { unifi_warning(priv, "bad station Handle = %x\n", handle); return; } if (setTim) { timSetStatus = CSR_WIFI_TIM_SETTING; } else { timSetStatus = CSR_WIFI_TIM_RESETTING; } if (handle != CSR_WIFI_BROADCAST_OR_MULTICAST_HANDLE) { if ((staRecord = ((CsrWifiRouterCtrlStaInfo_t *) (interfacePriv->staInfo[handle]))) == NULL) { unifi_warning(priv, "station record is NULL in update_tim: handle = %x :debug\n", handle); return; } /* In case of signal sending failed, revert back to old state */ oldTimSetStatus = staRecord->timSet; staRecord->timSet = timSetStatus; } /* pack senderID LSB */ senderIdLsb = CSR_WIFI_PACK_SENDER_ID_LSB_FOR_TIM_REQ(handle, timSetStatus); /* initialize signal to zero */ memset(&signal, 0, sizeof(CSR_SIGNAL)); /* Frame the MLME-SET-TIM request */ signal.SignalPrimitiveHeader.SignalId = CSR_MLME_SET_TIM_REQUEST_ID; signal.SignalPrimitiveHeader.ReceiverProcessId = 0; CSR_COPY_UINT16_TO_LITTLE_ENDIAN(((priv->netdev_client->sender_id & 0xff00) | senderIdLsb), (u8*)&signal.SignalPrimitiveHeader.SenderProcessId); /* set The virtual interfaceIdentifier, aid, tim value */ req->VirtualInterfaceIdentifier = uf_get_vif_identifier(interfacePriv->interfaceMode,interfaceTag); req->AssociationId = aid; req->TimValue = setTim; unifi_trace(priv, UDBG2, "update_tim:AID %x,senderIdLsb = 0x%x, handle = 0x%x, timSetStatus = %x, sender proceesID = %x \n", aid,senderIdLsb, handle, timSetStatus, signal.SignalPrimitiveHeader.SenderProcessId); /* Send the signal to UniFi */ r = ul_send_signal_unpacked(priv, &signal, bulkdata); if (r) { /* No need to free bulk data, as TIM request doesn't carries any data */ unifi_error(priv, "Error queueing CSR_MLME_SET_TIM_REQUEST signal\n"); if (staRecord) { staRecord->timSet = oldTimSetStatus ; } else { /* MLME_SET_TIM.req sending failed here for AID0, so revert back our bcTimSet status */ interfacePriv->bcTimSet = !setTim; } } else { /* Update tim request pending flag and ensure no more TIM set 
requests are send for the same station until TIM confirm is received */ if (staRecord) { staRecord->timRequestPendingFlag = TRUE; } else { /* Update tim request (for AID 0) pending flag and ensure no more DTIM set requests are send * for the same station until TIM confirm is received */ interfacePriv->bcTimSetReqPendingFlag = TRUE; } } unifi_trace(priv, UDBG5, "leaving the update_tim routine\n"); } static void process_peer_active_transition(unifi_priv_t * priv, CsrWifiRouterCtrlStaInfo_t *staRecord, u16 interfaceTag) { int r,i; u8 spaceAvail[4] = {TRUE,TRUE,TRUE,TRUE}; tx_buffered_packets_t * buffered_pkt = NULL; unsigned long lock_flags; netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag]; unifi_trace(priv, UDBG5, "entering process_peer_active_transition\n"); if(IS_DTIM_ACTIVE(interfacePriv->dtimActive,interfacePriv->multicastPduHostTag)) { /* giving more priority to multicast packets so delaying unicast packets*/ unifi_trace(priv,UDBG2, "Multicast transmission is going on so resume unicast transmission after DTIM over\n"); /* As station is active now, even though AP is not able to send frames to it * because of DTIM, it needs to reset the TIM here */ if (!staRecord->timRequestPendingFlag){ if((staRecord->timSet == CSR_WIFI_TIM_SET) || (staRecord->timSet == CSR_WIFI_TIM_SETTING)){ update_tim(priv, staRecord->aid, 0, interfaceTag, staRecord->assignedHandle); } } else { /* Cache the TimSet value so that it will processed immidiatly after * completing the current setTim Request */ staRecord->updateTimReqQueued = 0; unifi_trace(priv, UDBG6, "update_tim : One more UpdateTim Request (Tim value:%d) Queued for AID %x\n", staRecord->updateTimReqQueued, staRecord->aid); } return; } while((buffered_pkt=dequeue_tx_data_pdu(priv, &staRecord->mgtFrames))) { buffered_pkt->transmissionControl &= ~(TRANSMISSION_CONTROL_TRIGGER_MASK|TRANSMISSION_CONTROL_EOSP_MASK); if((r=frame_and_send_queued_pdu(priv,buffered_pkt,staRecord,0,FALSE)) == -ENOSPC) { unifi_trace(priv, UDBG2, "p_p_a_t:(ENOSPC) Mgt Frame queueing \n"); /* Enqueue at the head of the queue */ spin_lock_irqsave(&priv->tx_q_lock,lock_flags); list_add(&buffered_pkt->q, &staRecord->mgtFrames); spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags); priv->pausedStaHandle[3]=(u8)(staRecord->assignedHandle); spaceAvail[3] = FALSE; break; } else { if(r){ unifi_trace (priv, UDBG1, " HIP validation failure : PDU sending failed \n"); /* the PDU failed where we can't do any thing so free the storage */ unifi_net_data_free(priv, &buffered_pkt->bulkdata); } kfree(buffered_pkt); } } if (!staRecord->timRequestPendingFlag) { if (staRecord->txSuspend) { if(staRecord->timSet == CSR_WIFI_TIM_SET) { update_tim(priv,staRecord->aid,0,interfaceTag, staRecord->assignedHandle); } return; } } else { /* Cache the TimSet value so that it will processed immidiatly after * completing the current setTim Request */ staRecord->updateTimReqQueued = 0; unifi_trace(priv, UDBG6, "update_tim : One more UpdateTim Request (Tim value:%d) Queued for AID %x\n", staRecord->updateTimReqQueued, staRecord->aid); } for(i=3;i>=0;i--) { if(!spaceAvail[i]) continue; unifi_trace(priv, UDBG6, "p_p_a_t:data pkt sending for AC %d \n",i); while((buffered_pkt=dequeue_tx_data_pdu(priv, &staRecord->dataPdu[i]))) { buffered_pkt->transmissionControl &= ~(TRANSMISSION_CONTROL_TRIGGER_MASK|TRANSMISSION_CONTROL_EOSP_MASK); if((r=frame_and_send_queued_pdu(priv,buffered_pkt,staRecord,0,FALSE)) == -ENOSPC) { /* Clear the trigger bit transmission control*/ /* Enqueue at the head of the queue */ 
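                    /* -ENOSPC from frame_and_send_queued_pdu() means the HIP layer had no
                     * room for the signal; putting the PDU back at the head of dataPdu[i]
                     * preserves ordering, and pausedStaHandle[i] records the station so
                     * uf_send_buffered_frames() can resume from it later. */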
spin_lock_irqsave(&priv->tx_q_lock,lock_flags); list_add(&buffered_pkt->q, &staRecord->dataPdu[i]); spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags); priv->pausedStaHandle[i]=(u8)(staRecord->assignedHandle); break; } else { if(r){ unifi_trace (priv, UDBG1, " HIP validation failure : PDU sending failed \n"); /* the PDU failed where we can't do any thing so free the storage */ unifi_net_data_free(priv, &buffered_pkt->bulkdata); } kfree(buffered_pkt); } } } if (!staRecord->timRequestPendingFlag){ if((staRecord->timSet == CSR_WIFI_TIM_SET) || (staRecord->timSet == CSR_WIFI_TIM_SETTING)) { unifi_trace(priv, UDBG3, "p_p_a_t:resetting tim .....\n"); update_tim(priv,staRecord->aid,0,interfaceTag, staRecord->assignedHandle); } } else { /* Cache the TimSet value so that it will processed immidiatly after * completing the current setTim Request */ staRecord->updateTimReqQueued = 0; unifi_trace(priv, UDBG6, "update_tim : One more UpdateTim Request (Tim value:%d) Queued for AID %x\n", staRecord->updateTimReqQueued, staRecord->aid); } unifi_trace(priv, UDBG5, "leaving process_peer_active_transition\n"); } void uf_process_ma_pkt_cfm_for_ap(unifi_priv_t *priv,u16 interfaceTag, const CSR_MA_PACKET_CONFIRM *pkt_cfm) { netInterface_priv_t *interfacePriv; u8 i; CsrWifiRouterCtrlStaInfo_t *staRecord = NULL; interfacePriv = priv->interfacePriv[interfaceTag]; if(pkt_cfm->HostTag == interfacePriv->multicastPduHostTag) { unifi_trace(priv,UDBG2,"CFM for marked Multicast Tag = %x\n",interfacePriv->multicastPduHostTag); interfacePriv->multicastPduHostTag = 0xffffffff; resume_suspended_uapsd(priv,interfaceTag); resume_unicast_buffered_frames(priv,interfaceTag); if(list_empty(&interfacePriv->genericMulticastOrBroadCastMgtFrames) && list_empty(&interfacePriv->genericMulticastOrBroadCastFrames)) { unifi_trace(priv,UDBG1,"Resetting multicastTIM"); if (!interfacePriv->bcTimSetReqPendingFlag) { update_tim(priv,0,CSR_WIFI_TIM_RESET,interfaceTag, 0xFFFFFFFF); } else { /* Cache the DTimSet value so that it will processed immidiatly after * completing the current setDTim Request */ interfacePriv->bcTimSetReqQueued = CSR_WIFI_TIM_RESET; unifi_trace(priv, UDBG2, "uf_process_ma_pkt_cfm_for_ap : One more UpdateDTim Request(%d) Queued \n", interfacePriv->bcTimSetReqQueued); } } return; } /* Check if it is a Confirm for null data frame used * for probing station activity */ for(i =0; i < UNIFI_MAX_CONNECTIONS; i++) { staRecord = (CsrWifiRouterCtrlStaInfo_t *) (interfacePriv->staInfo[i]); if (staRecord && (staRecord->nullDataHostTag == pkt_cfm->HostTag)) { unifi_trace(priv, UDBG1, "CFM for Inactive probe Null frame (tag = %x, status = %d)\n", pkt_cfm->HostTag, pkt_cfm->TransmissionStatus ); staRecord->nullDataHostTag = INVALID_HOST_TAG; if(pkt_cfm->TransmissionStatus == CSR_TX_RETRY_LIMIT){ u32 now; u32 inactive_time; unifi_trace(priv, UDBG1, "Nulldata to probe STA ALIVE Failed with retry limit\n"); /* Recheck if there is some activity after null data is sent. * * If still there is no activity then send a disconnected indication * to SME to delete the station record. 
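                 *
                 * The inactivity interval below is computed from lastActivity with
                 * CsrTimeGet()/CsrTimeSub() (allowing for a single timer wrap) and is
                 * compared against STA_INACTIVE_TIMEOUT_VAL before any queued frames
                 * are auto-confirmed and the disconnected indication is sent.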
*/ if (staRecord->activity_flag){ return; } now = CsrTimeGet(NULL); if (staRecord->lastActivity > now) { /* simple timer wrap (for 1 wrap) */ inactive_time = CsrTimeAdd((u32)CsrTimeSub(CSR_SCHED_TIME_MAX, staRecord->lastActivity), now); } else { inactive_time = (u32)CsrTimeSub(now, staRecord->lastActivity); } if (inactive_time >= STA_INACTIVE_TIMEOUT_VAL) { struct list_head send_cfm_list; u8 j; /* The SME/NME may be waiting for confirmation for requested frames to this station. * Though this is --VERY UNLIKELY-- in case of station in active mode. But still as a * a defensive check, it loops through buffered frames for this station and if confirmation * is requested, send auto confirmation with failure status. Also flush the frames so * that these are not processed again in PEER_DEL_REQ handler. */ INIT_LIST_HEAD(&send_cfm_list); uf_prepare_send_cfm_list_for_queued_pkts(priv, &send_cfm_list, &(staRecord->mgtFrames)); uf_flush_list(priv, &(staRecord->mgtFrames)); for(j = 0; j < MAX_ACCESS_CATOGORY; j++){ uf_prepare_send_cfm_list_for_queued_pkts(priv, &send_cfm_list, &(staRecord->dataPdu[j])); uf_flush_list(priv,&(staRecord->dataPdu[j])); } send_auto_ma_packet_confirm(priv, staRecord->interfacePriv, &send_cfm_list); unifi_warning(priv, "uf_process_ma_pkt_cfm_for_ap: Router Disconnected IND Peer (%x-%x-%x-%x-%x-%x)\n", staRecord->peerMacAddress.a[0], staRecord->peerMacAddress.a[1], staRecord->peerMacAddress.a[2], staRecord->peerMacAddress.a[3], staRecord->peerMacAddress.a[4], staRecord->peerMacAddress.a[5]); CsrWifiRouterCtrlConnectedIndSend(priv->CSR_WIFI_SME_IFACEQUEUE, 0, staRecord->interfacePriv->InterfaceTag, staRecord->peerMacAddress, CSR_WIFI_ROUTER_CTRL_PEER_DISCONNECTED); } } else if (pkt_cfm->TransmissionStatus == CSR_TX_SUCCESSFUL) { staRecord->activity_flag = TRUE; } } } } #endif u16 uf_get_vif_identifier (CsrWifiRouterCtrlMode mode, u16 tag) { switch(mode) { case CSR_WIFI_ROUTER_CTRL_MODE_STA: case CSR_WIFI_ROUTER_CTRL_MODE_P2PCLI: return (0x02<<8|tag); case CSR_WIFI_ROUTER_CTRL_MODE_AP: case CSR_WIFI_ROUTER_CTRL_MODE_P2PGO: return (0x03<<8|tag); case CSR_WIFI_ROUTER_CTRL_MODE_IBSS: return (0x01<<8|tag); case CSR_WIFI_ROUTER_CTRL_MODE_MONITOR: return (0x04<<8|tag); case CSR_WIFI_ROUTER_CTRL_MODE_AMP: return (0x05<<8|tag); default: return tag; } } #ifdef CSR_SUPPORT_SME /* * --------------------------------------------------------------------------- * update_macheader * * * These functions updates mac header for intra BSS packet * routing. * NOTE: This function always has to be called in rx context which * is in bh thread context since GFP_KERNEL is used. In soft IRQ/ Interrupt * context shouldn't be used * * Arguments: * priv Pointer to device private context struct * skb Socket buffer containing data packet to transmit * newSkb Socket buffer containing data packet + Mac header if no sufficient headroom in skb * priority to append QOS control header in Mac header * bulkdata if newSkb allocated then bulkdata updated to send to unifi * interfaceTag the interfaceID on which activity going on * macHeaderLengthInBytes no. of bytes of mac header in received frame * qosDestination used to append Qos control field * * Returns: * Zero on success or -1 on error. 
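 *
 * Example (intra-BSS forwarding): a frame received with ToDS=1/FromDS=0
 * is re-sent with ToDS=0/FromDS=1; Address1 becomes the final
 * destination, Address2 the AP BSSID and Address3 the original source,
 * and the QoS control field is added or stripped to match the
 * destination station's QoS capability.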
* --------------------------------------------------------------------------- */ static int update_macheader(unifi_priv_t *priv, struct sk_buff *skb, struct sk_buff *newSkb, CSR_PRIORITY *priority, bulk_data_param_t *bulkdata, u16 interfaceTag, u8 macHeaderLengthInBytes, u8 qosDestination) { u16 *fc = NULL; u8 direction = 0, toDs, fromDs; u8 *bufPtr = NULL; u8 sa[ETH_ALEN], da[ETH_ALEN]; netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag]; int headroom; u8 macHeaderBuf[IEEE802_11_DATA_FRAME_MAC_HEADER_SIZE] = {0}; unifi_trace(priv, UDBG5, "entering the update_macheader function\n"); /* temporary buffer for the Mac header storage */ memcpy(macHeaderBuf, skb->data, macHeaderLengthInBytes); /* remove the Macheader from the skb */ skb_pull(skb, macHeaderLengthInBytes); /* get the skb headroom for skb_push check */ headroom = skb_headroom(skb); /* pointer to frame control field */ fc = (u16*) macHeaderBuf; toDs = (*fc & cpu_to_le16(IEEE802_11_FC_TO_DS_MASK))?1 : 0; fromDs = (*fc & cpu_to_le16(IEEE802_11_FC_FROM_DS_MASK))? 1: 0; unifi_trace(priv, UDBG5, "In update_macheader function, fromDs = %x, toDs = %x\n", fromDs, toDs); direction = ((fromDs | (toDs << 1)) & 0x3); /* Address1 or 3 from the macheader */ memcpy(da, macHeaderBuf+4+toDs*12, ETH_ALEN); /* Address2, 3 or 4 from the mac header */ memcpy(sa, macHeaderBuf+10+fromDs*(6+toDs*8), ETH_ALEN); unifi_trace(priv, UDBG3, "update_macheader:direction = %x\n", direction); /* update the toDs, fromDs & address fields in Mac header */ switch(direction) { case 2: /* toDs = 1 & fromDs = 0 , toAp when frames received from peer * while sending this packet to Destination the Mac header changed * as fromDs = 1 & toDs = 0, fromAp */ *fc &= cpu_to_le16(~IEEE802_11_FC_TO_DS_MASK); *fc |= cpu_to_le16(IEEE802_11_FC_FROM_DS_MASK); /* Address1: MAC address of the actual destination (4 = 2+2) */ memcpy(macHeaderBuf + 4, da, ETH_ALEN); /* Address2: The MAC address of the AP (10 = 2+2+6) */ memcpy(macHeaderBuf + 10, &interfacePriv->bssid, ETH_ALEN); /* Address3: MAC address of the actual source from mac header (16 = 2+2+6+6) */ memcpy(macHeaderBuf + 16, sa, ETH_ALEN); break; case 3: unifi_trace(priv, UDBG3, "when both the toDs & fromDS set, NOT SUPPORTED\n"); break; default: unifi_trace(priv, UDBG3, "problem in decoding packet in update_macheader \n"); return -1; } /* frameType is Data always, Validation is done before calling this function */ /* check for the souce station type */ switch(le16_to_cpu(*fc) & IEEE80211_FC_SUBTYPE_MASK) { case IEEE802_11_FC_TYPE_QOS_DATA & IEEE80211_FC_SUBTYPE_MASK: /* No need to modify the qos control field */ if (!qosDestination) { /* If source Sta is QOS enabled & if this bit set, then HTC is supported by * peer station & htc field present in macHeader */ if (*fc & cpu_to_le16(IEEE80211_FC_ORDER_MASK)) { /* HT control field present in Mac header * 6 = sizeof(qosControl) + sizeof(htc) */ macHeaderLengthInBytes -= 6; } else { macHeaderLengthInBytes -= 2; } /* Destination STA is non qos so change subtype to DATA */ *fc &= cpu_to_le16(~IEEE80211_FC_SUBTYPE_MASK); *fc |= cpu_to_le16(IEEE802_11_FC_TYPE_DATA); /* remove the qos control field & HTC(if present). 
new macHeaderLengthInBytes is less than old * macHeaderLengthInBytes so no need to verify skb headroom */ if (headroom < macHeaderLengthInBytes) { unifi_trace(priv, UDBG1, " sufficient headroom not there to push updated mac header \n"); return -1; } bufPtr = (u8 *) skb_push(skb, macHeaderLengthInBytes); /* update bulk data os_data_ptr */ bulkdata->d[0].os_data_ptr = skb->data; bulkdata->d[0].os_net_buf_ptr = (unsigned char*)skb; bulkdata->d[0].data_length = skb->len; } else { /* pointing to QOS control field */ u8 qc; if (*fc & cpu_to_le16(IEEE80211_FC_ORDER_MASK)) { qc = *((u8*)(macHeaderBuf + (macHeaderLengthInBytes - 4 - 2))); } else { qc = *((u8*)(macHeaderBuf + (macHeaderLengthInBytes - 2))); } if ((qc & IEEE802_11_QC_TID_MASK) > 7) { *priority = 7; } else { *priority = qc & IEEE802_11_QC_TID_MASK; } unifi_trace(priv, UDBG1, "Incoming packet priority from QSTA is %x\n", *priority); if (headroom < macHeaderLengthInBytes) { unifi_trace(priv, UDBG3, " sufficient headroom not there to push updated mac header \n"); return -1; } bufPtr = (u8 *) skb_push(skb, macHeaderLengthInBytes); } break; default: { bulk_data_param_t data_ptrs; CsrResult csrResult; unifi_trace(priv, UDBG5, "normal Data packet, NO QOS \n"); if (qosDestination) { u8 qc = 0; unifi_trace(priv, UDBG3, "destination is QOS station \n"); /* Set Ma-Packet.req UP to UP0 */ *priority = CSR_QOS_UP0; /* prepare the qos control field */ qc |= CSR_QOS_UP0; /* no Amsdu is in ap buffer so eosp is left 0 */ if (da[0] & 0x1) { /* multicast/broadcast frames, no acknowledgement needed */ qc |= 1 << 5; } /* update new Mac header Length with 2 = sizeof(qos control) */ macHeaderLengthInBytes += 2; /* received DATA frame but destiantion is QOS station so update subtype to QOS*/ *fc &= cpu_to_le16(~IEEE80211_FC_SUBTYPE_MASK); *fc |= cpu_to_le16(IEEE802_11_FC_TYPE_QOS_DATA); /* appendQosControlOffset = macHeaderLengthInBytes - 2, since source sta is not QOS */ macHeaderBuf[macHeaderLengthInBytes - 2] = qc; /* txopLimit is 0 */ macHeaderBuf[macHeaderLengthInBytes - 1] = 0; if (headroom < macHeaderLengthInBytes) { csrResult = unifi_net_data_malloc(priv, &data_ptrs.d[0], skb->len + macHeaderLengthInBytes); if (csrResult != CSR_RESULT_SUCCESS) { unifi_error(priv, " failed to allocate request_data. in update_macheader func\n"); return -1; } newSkb = (struct sk_buff *)(data_ptrs.d[0].os_net_buf_ptr); newSkb->len = skb->len + macHeaderLengthInBytes; memcpy((void*)data_ptrs.d[0].os_data_ptr + macHeaderLengthInBytes, skb->data, skb->len); bulkdata->d[0].os_data_ptr = newSkb->data; bulkdata->d[0].os_net_buf_ptr = (unsigned char*)newSkb; bulkdata->d[0].data_length = newSkb->len; bufPtr = (u8*)data_ptrs.d[0].os_data_ptr; /* The old skb will not be used again */ kfree_skb(skb); } else { /* skb headroom is sufficient to append Macheader */ bufPtr = (u8*)skb_push(skb, macHeaderLengthInBytes); bulkdata->d[0].os_data_ptr = skb->data; bulkdata->d[0].os_net_buf_ptr = (unsigned char*)skb; bulkdata->d[0].data_length = skb->len; } } else { unifi_trace(priv, UDBG3, "destination is not a QSTA\n"); if (headroom < macHeaderLengthInBytes) { csrResult = unifi_net_data_malloc(priv, &data_ptrs.d[0], skb->len + macHeaderLengthInBytes); if (csrResult != CSR_RESULT_SUCCESS) { unifi_error(priv, " failed to allocate request_data. 
in update_macheader func\n"); return -1; } newSkb = (struct sk_buff *)(data_ptrs.d[0].os_net_buf_ptr); newSkb->len = skb->len + macHeaderLengthInBytes; memcpy((void*)data_ptrs.d[0].os_data_ptr + macHeaderLengthInBytes, skb->data, skb->len); bulkdata->d[0].os_data_ptr = newSkb->data; bulkdata->d[0].os_net_buf_ptr = (unsigned char*)newSkb; bulkdata->d[0].data_length = newSkb->len; bufPtr = (u8*)data_ptrs.d[0].os_data_ptr; /* The old skb will not be used again */ kfree_skb(skb); } else { /* skb headroom is sufficient to append Macheader */ bufPtr = (u8*)skb_push(skb, macHeaderLengthInBytes); bulkdata->d[0].os_data_ptr = skb->data; bulkdata->d[0].os_net_buf_ptr = (unsigned char*)skb; bulkdata->d[0].data_length = skb->len; } } } } /* prepare the complete skb, by pushing the MAC header to the beginning of the skb->data */ unifi_trace(priv, UDBG5, "updated Mac Header: %d \n",macHeaderLengthInBytes); memcpy(bufPtr, macHeaderBuf, macHeaderLengthInBytes); unifi_trace(priv, UDBG5, "leaving the update_macheader function\n"); return 0; } /* * --------------------------------------------------------------------------- * uf_ap_process_data_pdu * * * Takes care of intra BSS admission control & routing packets within BSS * * Arguments: * priv Pointer to device private context struct * skb Socket buffer containing data packet to transmit * ehdr ethernet header to fetch priority of packet * srcStaInfo source stations record for connection verification * packed_signal * signal_len * signal MA-PACKET.indication signal * bulkdata if newSkb allocated then bulkdata updated to send to unifi * macHeaderLengthInBytes no. of bytes of mac header in received frame * * Returns: * Zero on success(ap processing complete) or -1 if packet also have to be sent to NETDEV. * --------------------------------------------------------------------------- */ int uf_ap_process_data_pdu(unifi_priv_t *priv, struct sk_buff *skb, struct ethhdr *ehdr, CsrWifiRouterCtrlStaInfo_t * srcStaInfo, const CSR_SIGNAL *signal, bulk_data_param_t *bulkdata, u8 macHeaderLengthInBytes) { const CSR_MA_PACKET_INDICATION *ind = &(signal->u.MaPacketIndication); u16 interfaceTag = (ind->VirtualInterfaceIdentifier & 0x00ff); struct sk_buff *newSkb = NULL; /* pointer to skb or private skb created using skb_copy() */ struct sk_buff *skbPtr = skb; u8 sendToNetdev = FALSE; u8 qosDestination = FALSE; CSR_PRIORITY priority = CSR_CONTENTION; CsrWifiRouterCtrlStaInfo_t *dstStaInfo = NULL; netInterface_priv_t *interfacePriv; unifi_trace(priv, UDBG5, "entering uf_ap_process_data_pdu %d\n",macHeaderLengthInBytes); /* InterfaceTag validation from MA_PACKET.indication */ if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) { unifi_trace(priv, UDBG1, "Interface Tag is Invalid in uf_ap_process_data_pdu\n"); unifi_net_data_free(priv, &bulkdata->d[0]); return 0; } interfacePriv = priv->interfacePriv[interfaceTag]; if((interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO) && (interfacePriv->intraBssEnabled == FALSE)) { unifi_trace(priv, UDBG2, "uf_ap_process_data_pdu:P2P GO intrabssEnabled?= %d\n", interfacePriv->intraBssEnabled); /*In P2P GO case, if intraBSS distribution Disabled then don't do IntraBSS routing */ /* If destination in our BSS then drop otherwise give packet to netdev */ dstStaInfo = CsrWifiRouterCtrlGetStationRecordFromPeerMacAddress(priv, ehdr->h_dest, interfaceTag); if (dstStaInfo) { unifi_net_data_free(priv, &bulkdata->d[0]); return 0; } /* May be associated P2PCLI trying to send the packets on backbone (Netdev) */ return -1; } if(!memcmp(ehdr->h_dest, 
interfacePriv->bssid.a, ETH_ALEN)) { /* This packet will be given to the TCP/IP stack since this packet is for us(AP) * No routing needed */ unifi_trace(priv, UDBG4, "destination address is csr_ap\n"); return -1; } /* fetch the destination record from station record database */ dstStaInfo = CsrWifiRouterCtrlGetStationRecordFromPeerMacAddress(priv, ehdr->h_dest, interfaceTag); /* AP mode processing, & if packet is unicast */ if(!dstStaInfo) { if (!(ehdr->h_dest[0] & 0x1)) { /* destination not in station record & its a unicast packet, so pass the packet to network stack */ unifi_trace(priv, UDBG3, "unicast frame & destination record not exist, send to netdev proto = %x\n", htons(skb->protocol)); return -1; } else { /* packet is multicast/broadcast */ /* copy the skb to skbPtr, send skb to netdev & skbPtr to multicast/broad cast list */ unifi_trace(priv, UDBG5, "skb_copy, in uf_ap_process_data_pdu, protocol = %x\n", htons(skb->protocol)); skbPtr = skb_copy(skb, GFP_KERNEL); if(skbPtr == NULL) { /* We don't have memory to don't send the frame in BSS*/ unifi_notice(priv, "broacast/multicast frame can't be sent in BSS No memeory: proto = %x\n", htons(skb->protocol)); return -1; } sendToNetdev = TRUE; } } else { /* validate the Peer & Destination Station record */ if (uf_process_station_records_for_sending_data(priv, interfaceTag, srcStaInfo, dstStaInfo)) { unifi_notice(priv, "uf_ap_process_data_pdu: station record validation failed \n"); interfacePriv->stats.rx_errors++; unifi_net_data_free(priv, &bulkdata->d[0]); return 0; } } /* BroadCast packet received and it's been sent as non QOS packets. * Since WMM spec not mandates broadcast/multicast to be sent as QOS data only, * if all Peers are QSTA */ if(sendToNetdev) { /* BroadCast packet and it's been sent as non QOS packets */ qosDestination = FALSE; } else if(dstStaInfo && (dstStaInfo->wmmOrQosEnabled == TRUE)) { qosDestination = TRUE; } unifi_trace(priv, UDBG3, "uf_ap_process_data_pdu QoS destination = %s\n", (qosDestination)? "TRUE": "FALSE"); /* packet is allowed to send to unifi, update the Mac header */ if (update_macheader(priv, skbPtr, newSkb, &priority, bulkdata, interfaceTag, macHeaderLengthInBytes, qosDestination)) { interfacePriv->stats.rx_errors++; unifi_notice(priv, "(Packet Drop) failed to update the Mac header in uf_ap_process_data_pdu\n"); if (sendToNetdev) { /* Free's the skb_copy(skbPtr) data since packet processing failed */ bulkdata->d[0].os_data_ptr = skbPtr->data; bulkdata->d[0].os_net_buf_ptr = (unsigned char*)skbPtr; bulkdata->d[0].data_length = skbPtr->len; unifi_net_data_free(priv, &bulkdata->d[0]); } return -1; } unifi_trace(priv, UDBG3, "Mac Header updated...calling uf_process_ma_packet_req \n"); /* Packet is ready to send to unifi ,transmissionControl = 0x0004, confirmation is not needed for data packets */ if (uf_process_ma_packet_req(priv, ehdr->h_dest, 0xffffffff, interfaceTag, CSR_NO_CONFIRM_REQUIRED, (CSR_RATE)0,priority, priv->netdev_client->sender_id, bulkdata)) { if (sendToNetdev) { unifi_trace(priv, UDBG1, "In uf_ap_process_data_pdu, (Packet Drop) uf_process_ma_packet_req failed. freeing skb_copy data (original data sent to Netdev)\n"); /* Free's the skb_copy(skbPtr) data since packet processing failed */ bulkdata->d[0].os_data_ptr = skbPtr->data; bulkdata->d[0].os_net_buf_ptr = (unsigned char*)skbPtr; bulkdata->d[0].data_length = skbPtr->len; unifi_net_data_free(priv, &bulkdata->d[0]); } else { /* This free's the skb data */ unifi_trace(priv, UDBG1, "In uf_ap_process_data_pdu, (Packet Drop). 
Unicast data so freeing original skb \n"); unifi_net_data_free(priv, &bulkdata->d[0]); } } unifi_trace(priv, UDBG5, "leaving uf_ap_process_data_pdu\n"); if (sendToNetdev) { /* The packet is multicast/broadcast, so after AP processing packet has to * be sent to netdev, if peer port state is open */ unifi_trace(priv, UDBG4, "Packet will be routed to NetDev\n"); return -1; } /* Ap handled the packet & its a unicast packet, no need to send to netdev */ return 0; } #endif CsrResult uf_process_ma_packet_req(unifi_priv_t *priv, u8 *peerMacAddress, CSR_CLIENT_TAG hostTag, u16 interfaceTag, CSR_TRANSMISSION_CONTROL transmissionControl, CSR_RATE TransmitRate, CSR_PRIORITY priority, CSR_PROCESS_ID leSenderProcessId, bulk_data_param_t *bulkdata) { CsrResult status = CSR_RESULT_SUCCESS; CSR_SIGNAL signal; int result; #ifdef CSR_SUPPORT_SME CsrWifiRouterCtrlStaInfo_t *staRecord = NULL; const u8 *macHdrLocation = bulkdata->d[0].os_data_ptr; CsrWifiPacketType pktType; int frameType = 0; u8 queuePacketDozing = FALSE; u32 priority_q; u16 frmCtrl; struct list_head * list = NULL; /* List to which buffered PDUs are to be enqueued*/ u8 setBcTim=FALSE; netInterface_priv_t *interfacePriv; u8 requeueOnSamePos = FALSE; u32 handle = 0xFFFFFFFF; unsigned long lock_flags; unifi_trace(priv, UDBG5, "entering uf_process_ma_packet_req, peer: %pMF\n", peerMacAddress); if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) { unifi_error(priv, "interfaceTag >= CSR_WIFI_NUM_INTERFACES, interfacetag = %d\n", interfaceTag); return CSR_RESULT_FAILURE; } interfacePriv = priv->interfacePriv[interfaceTag]; /* fetch the station record for corresponding peer mac address */ if ((staRecord = CsrWifiRouterCtrlGetStationRecordFromPeerMacAddress(priv, peerMacAddress, interfaceTag))) { handle = staRecord->assignedHandle; } /* Frame ma-packet.req, this is saved/transmitted depend on queue state */ unifi_frame_ma_packet_req(priv, priority, TransmitRate, hostTag, interfaceTag, transmissionControl, leSenderProcessId, peerMacAddress, &signal); /* Since it's common path between STA & AP mode, in case of STA packet * need not to be queued but in AP case we have to queue PDU's in * different scenarios */ switch(interfacePriv->interfaceMode) { case CSR_WIFI_ROUTER_CTRL_MODE_AP: case CSR_WIFI_ROUTER_CTRL_MODE_P2PGO: /* For this mode processing done below */ break; default: /* In case of STA/IBSS/P2PCLI/AMP, no checks needed send the packet down & return */ unifi_trace(priv, UDBG5, "In %s, interface mode is %x \n", __FUNCTION__, interfacePriv->interfaceMode); if (interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_NONE) { unifi_warning(priv, "In %s, interface mode NONE \n", __FUNCTION__); } if ((result = ul_send_signal_unpacked(priv, &signal, bulkdata))) { status = CSR_RESULT_FAILURE; } return status; } /* -----Only AP/P2pGO mode handling falls below----- */ /* convert priority to queue */ priority_q = unifi_frame_priority_to_queue((CSR_PRIORITY) priority); /* check the powersave status of the peer */ if (staRecord && (staRecord->currentPeerState == CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_POWER_SAVE)) { /* Peer is dozing & packet have to be delivered, so buffer the packet & * update the TIM */ queuePacketDozing = TRUE; } /* find the type of frame unicast or mulicast/broadcast */ if (*peerMacAddress & 0x1) { /* Multicast/broadCast data are always triggered by vif_availability.ind * at the DTIM */ pktType = CSR_WIFI_MULTICAST_PDU; } else { pktType = CSR_WIFI_UNICAST_PDU; } /* Fetch the frame control field from mac header & check for frame type */ frmCtrl = 
CSR_GET_UINT16_FROM_LITTLE_ENDIAN(macHdrLocation); /* Processing done according to Frame/Packet type */ frameType = ((frmCtrl & 0x000c) >> FRAME_CONTROL_TYPE_FIELD_OFFSET); switch(frameType) { case IEEE802_11_FRAMETYPE_MANAGEMENT: switch(pktType) { case CSR_WIFI_UNICAST_PDU: unifi_trace(priv, UDBG5, "management unicast PDU in uf_process_ma_packet_req \n"); /* push the packet in to the queue with appropriate mgt list */ if (!staRecord) { /* push the packet to the unifi if list is empty (if packet lost how to re-enque) */ if (list_empty(&interfacePriv->genericMgtFrames)) { #ifdef CSR_SUPPORT_SME if(!(IS_DTIM_ACTIVE(interfacePriv->dtimActive,interfacePriv->multicastPduHostTag))) { #endif unifi_trace(priv, UDBG3, "genericMgtFrames list is empty uf_process_ma_packet_req \n"); result = ul_send_signal_unpacked(priv, &signal, bulkdata); /* reque only on ENOSPC */ if(result == -ENOSPC) { /* requeue the failed packet to genericMgtFrame with same position */ unifi_trace(priv, UDBG1, "(ENOSPC) Sending genericMgtFrames Failed so buffering\n"); list = &interfacePriv->genericMgtFrames; requeueOnSamePos = TRUE; } #ifdef CSR_SUPPORT_SME }else{ list = &interfacePriv->genericMgtFrames; unifi_trace(priv, UDBG3, "genericMgtFrames queue empty and dtim started\n hosttag is 0x%x,\n",signal.u.MaPacketRequest.HostTag); update_eosp_to_head_of_broadcast_list_head(priv,interfaceTag); } #endif } else { /* Queue the packet to genericMgtFrame of unifi_priv_t data structure */ list = &interfacePriv->genericMgtFrames; unifi_trace(priv, UDBG2, "genericMgtFrames queue not empty\n"); } } else { /* check peer power state */ if (queuePacketDozing || !list_empty(&staRecord->mgtFrames) || IS_DTIM_ACTIVE(interfacePriv->dtimActive,interfacePriv->multicastPduHostTag)) { /* peer is in dozing mode, so queue packet in mgt frame list of station record */ /*if multicast traffic is going on, buffer the unicast packets*/ list = &staRecord->mgtFrames; unifi_trace(priv, UDBG1, "staRecord->MgtFrames list empty? = %s, handle = %d, queuePacketDozing = %d\n", (list_empty(&staRecord->mgtFrames))? 
"YES": "NO", staRecord->assignedHandle, queuePacketDozing); if(IS_DTIM_ACTIVE(interfacePriv->dtimActive,interfacePriv->multicastPduHostTag)){ update_eosp_to_head_of_broadcast_list_head(priv,interfaceTag); } } else { unifi_trace(priv, UDBG5, "staRecord->mgtFrames list is empty uf_process_ma_packet_req \n"); result = ul_send_signal_unpacked(priv, &signal, bulkdata); if(result == -ENOSPC) { /* requeue the failed packet to staRecord->mgtFrames with same position */ list = &staRecord->mgtFrames; requeueOnSamePos = TRUE; unifi_trace(priv, UDBG1, "(ENOSPC) Sending MgtFrames Failed handle = %d so buffering\n",staRecord->assignedHandle); priv->pausedStaHandle[0]=(u8)(staRecord->assignedHandle); } else if (result) { status = CSR_RESULT_FAILURE; } } } break; case CSR_WIFI_MULTICAST_PDU: unifi_trace(priv, UDBG5, "management multicast/broadcast PDU in uf_process_ma_packet_req 'QUEUE it' \n"); /* Queue the packet to genericMulticastOrBroadCastMgtFrames of unifi_priv_t data structure * will be sent when we receive VIF AVAILABILITY from firmware as part of DTIM */ list = &interfacePriv->genericMulticastOrBroadCastMgtFrames; if((interfacePriv->interfaceMode != CSR_WIFI_ROUTER_CTRL_MODE_IBSS) && (list_empty(&interfacePriv->genericMulticastOrBroadCastMgtFrames))) { setBcTim=TRUE; } break; default: unifi_error(priv, "condition never meets: packet type unrecognized\n"); } break; case IEEE802_11_FRAMETYPE_DATA: switch(pktType) { case CSR_WIFI_UNICAST_PDU: unifi_trace(priv, UDBG5, "data unicast PDU in uf_process_ma_packet_req \n"); /* check peer power state, list status & peer port status */ if(!staRecord) { unifi_error(priv, "In %s unicast but staRecord = NULL\n", __FUNCTION__); return CSR_RESULT_FAILURE; } else if (queuePacketDozing || isRouterBufferEnabled(priv,priority_q)|| !list_empty(&staRecord->dataPdu[priority_q]) || IS_DTIM_ACTIVE(interfacePriv->dtimActive,interfacePriv->multicastPduHostTag)) { /* peer is in dozing mode, so queue packet in mgt frame list of station record */ /* if multicast traffic is going on, buffet the unicast packets */ unifi_trace(priv, UDBG2, "Enqueued to staRecord->dataPdu[%d] queuePacketDozing=%d,\ Buffering enabled = %d \n", priority_q,queuePacketDozing,isRouterBufferEnabled(priv,priority_q)); list = &staRecord->dataPdu[priority_q]; } else { unifi_trace(priv, UDBG5, "staRecord->dataPdu[%d] list is empty uf_process_ma_packet_req \n", priority_q); /* Pdu allowed to send to unifi */ result = ul_send_signal_unpacked(priv, &signal, bulkdata); if(result == -ENOSPC) { /* requeue the failed packet to staRecord->dataPdu[priority_q] with same position */ unifi_trace(priv, UDBG1, "(ENOSPC) Sending Unicast DataPDU to queue %d Failed so buffering\n",priority_q); requeueOnSamePos = TRUE; list = &staRecord->dataPdu[priority_q]; priv->pausedStaHandle[priority_q]=(u8)(staRecord->assignedHandle); if(!isRouterBufferEnabled(priv,priority_q)) { unifi_error(priv,"Buffering Not enabled for queue %d \n",priority_q); } } else if (result) { status = CSR_RESULT_FAILURE; } } break; case CSR_WIFI_MULTICAST_PDU: unifi_trace(priv, UDBG5, "data multicast/broadcast PDU in uf_process_ma_packet_req \n"); /* Queue the packet to genericMulticastOrBroadCastFrames list of unifi_priv_t data structure * will be sent when we receive VIF AVAILABILITY from firmware as part of DTIM */ list = &interfacePriv->genericMulticastOrBroadCastFrames; if(list_empty(&interfacePriv->genericMulticastOrBroadCastFrames)) { setBcTim = TRUE; } break; default: unifi_error(priv, "condition never meets: packet type un recognized\n"); } 
break; default: unifi_error(priv, "unrecognized frame type\n"); } if(list) { status = enque_tx_data_pdu(priv, bulkdata,list, &signal,requeueOnSamePos); /* Record no. of packet queued for each peer */ if (staRecord && (pktType == CSR_WIFI_UNICAST_PDU) && (!status)) { spin_lock_irqsave(&priv->staRecord_lock,lock_flags); staRecord->noOfPktQueued++; spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags); } else if ((pktType == CSR_WIFI_MULTICAST_PDU) && (!status)) { /* If broadcast Tim is set && queuing is successful, then only update TIM */ spin_lock_irqsave(&priv->staRecord_lock,lock_flags); interfacePriv->noOfbroadcastPktQueued++; spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags); } } /* If broadcast Tim is set && queuing is successful, then only update TIM */ if(setBcTim && !status) { unifi_trace(priv, UDBG3, "tim set due to broadcast pkt\n"); if (!interfacePriv->bcTimSetReqPendingFlag) { update_tim(priv,0,CSR_WIFI_TIM_SET,interfaceTag, handle); } else { /* Cache the TimSet value so that it will processed immidiatly after * completing the current setTim Request */ interfacePriv->bcTimSetReqQueued = CSR_WIFI_TIM_SET; unifi_trace(priv, UDBG2, "uf_process_ma_packet_req : One more UpdateDTim Request(:%d) Queued \n", interfacePriv->bcTimSetReqQueued); } } else if(staRecord && staRecord->currentPeerState == CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_POWER_SAVE) { if(staRecord->timSet == CSR_WIFI_TIM_RESET || staRecord->timSet == CSR_WIFI_TIM_RESETTING) { if(!staRecord->wmmOrQosEnabled) { if(!list_empty(&staRecord->mgtFrames) || !list_empty(&staRecord->dataPdu[3]) || !list_empty(&staRecord->dataPdu[UNIFI_TRAFFIC_Q_CONTENTION])) { unifi_trace(priv, UDBG3, "tim set due to unicast pkt & peer in powersave\n"); if (!staRecord->timRequestPendingFlag){ update_tim(priv,staRecord->aid,1,interfaceTag, handle); } else { /* Cache the TimSet value so that it will processed immidiatly after * completing the current setTim Request */ staRecord->updateTimReqQueued = 1; unifi_trace(priv, UDBG6, "update_tim : One more UpdateTim Request (Tim value:%d) Queued for AID %x\n", staRecord->updateTimReqQueued, staRecord->aid); } } } else { /* Check for non delivery enable(i.e trigger enable), all delivery enable & legacy AC for TIM update in firmware */ u8 allDeliveryEnabled = 0, dataAvailable = 0; /* Check if all AC's are Delivery Enabled */ is_all_ac_deliver_enabled_and_moredata(staRecord, &allDeliveryEnabled, &dataAvailable); if (uf_is_more_data_for_non_delivery_ac(staRecord) || (allDeliveryEnabled && dataAvailable) || (!list_empty(&staRecord->mgtFrames))) { if (!staRecord->timRequestPendingFlag) { update_tim(priv,staRecord->aid,1,interfaceTag, handle); } else { /* Cache the TimSet value so that it will processed immidiatly after * completing the current setTim Request */ staRecord->updateTimReqQueued = 1; unifi_trace(priv, UDBG6, "update_tim : One more UpdateTim Request (Tim value:%d) Queued for AID %x\n", staRecord->updateTimReqQueued, staRecord->aid); } } } } } if((list) && (pktType == CSR_WIFI_UNICAST_PDU && !queuePacketDozing) && !(isRouterBufferEnabled(priv,priority_q)) && !(IS_DTIM_ACTIVE(interfacePriv->dtimActive,interfacePriv->multicastPduHostTag))) { unifi_trace(priv, UDBG2, "buffering cleared for queue = %d So resending buffered frames\n",priority_q); uf_send_buffered_frames(priv, priority_q); } unifi_trace(priv, UDBG5, "leaving uf_process_ma_packet_req \n"); return status; #else #ifdef CSR_NATIVE_LINUX if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) { unifi_error(priv, "interfaceTag >= 
CSR_WIFI_NUM_INTERFACES, interfacetag = %d\n", interfaceTag); return CSR_RESULT_FAILURE; } /* Frame ma-packet.req, this is saved/transmitted depend on queue state */ unifi_frame_ma_packet_req(priv, priority, TransmitRate, hostTag, interfaceTag, transmissionControl, leSenderProcessId, peerMacAddress, &signal); result = ul_send_signal_unpacked(priv, &signal, bulkdata); if (result) { return CSR_RESULT_FAILURE; } #endif return status; #endif } #ifdef CSR_SUPPORT_SME s8 uf_get_protection_bit_from_interfacemode(unifi_priv_t *priv, u16 interfaceTag, const u8 *daddr) { s8 protection = 0; netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag]; switch(interfacePriv->interfaceMode) { case CSR_WIFI_ROUTER_CTRL_MODE_STA: case CSR_WIFI_ROUTER_CTRL_MODE_P2PCLI: case CSR_WIFI_ROUTER_CTRL_MODE_AMP: case CSR_WIFI_ROUTER_CTRL_MODE_IBSS: protection = interfacePriv->protect; break; case CSR_WIFI_ROUTER_CTRL_MODE_AP: case CSR_WIFI_ROUTER_CTRL_MODE_P2PGO: { CsrWifiRouterCtrlStaInfo_t *dstStaInfo = NULL; if (daddr[0] & 0x1) { unifi_trace(priv, UDBG3, "broadcast/multicast packet in send_ma_pkt_request\n"); /* In this mode, the protect member of priv structure has an information of how * AP/P2PGO has started, & the member updated in set mode request for AP/P2PGO */ protection = interfacePriv->protect; } else { /* fetch the destination record from station record database */ dstStaInfo = CsrWifiRouterCtrlGetStationRecordFromPeerMacAddress(priv, daddr, interfaceTag); if (!dstStaInfo) { unifi_trace(priv, UDBG3, "peer not found in station record in send_ma_pkt_request\n"); return -1; } protection = dstStaInfo->protection; } } break; default: unifi_trace(priv, UDBG2, "mode unknown in send_ma_pkt_request\n"); } return protection; } #endif #ifdef CSR_SUPPORT_SME u8 send_multicast_frames(unifi_priv_t *priv, u16 interfaceTag) { int r; tx_buffered_packets_t * buffered_pkt = NULL; u8 moreData = FALSE; u8 pduSent =0; unsigned long lock_flags; netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag]; u32 hostTag = 0xffffffff; if(!isRouterBufferEnabled(priv,UNIFI_TRAFFIC_Q_VO)) { while((interfacePriv->dtimActive)&& (buffered_pkt=dequeue_tx_data_pdu(priv,&interfacePriv->genericMulticastOrBroadCastMgtFrames))) { buffered_pkt->transmissionControl |= (TRANSMISSION_CONTROL_TRIGGER_MASK); moreData = (buffered_pkt->transmissionControl & TRANSMISSION_CONTROL_EOSP_MASK)?FALSE:TRUE; unifi_trace(priv,UDBG2,"DTIM Occurred for interface:sending Mgt packet %d\n",interfaceTag); if((r=frame_and_send_queued_pdu(priv,buffered_pkt,NULL,moreData,FALSE)) == -ENOSPC) { unifi_trace(priv,UDBG1,"frame_and_send_queued_pdu failed with ENOSPC for host tag = %x\n", buffered_pkt->hostTag); /* Enqueue at the head of the queue */ spin_lock_irqsave(&priv->tx_q_lock,lock_flags); list_add(&buffered_pkt->q, &interfacePriv->genericMulticastOrBroadCastMgtFrames); spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags); break; } else { unifi_trace(priv,UDBG1,"send_multicast_frames: Send genericMulticastOrBroadCastMgtFrames (%x, %x)\n", buffered_pkt->hostTag, r); if(r) { unifi_net_data_free(priv, &buffered_pkt->bulkdata); } if(!moreData) { interfacePriv->dtimActive = FALSE; if(!r) { hostTag = buffered_pkt->hostTag; pduSent++; } else { send_vif_availibility_rsp(priv,uf_get_vif_identifier(interfacePriv->interfaceMode,interfaceTag),CSR_RC_UNSPECIFIED_FAILURE); } } /* Buffered frame sent successfully */ spin_lock_irqsave(&priv->staRecord_lock,lock_flags); interfacePriv->noOfbroadcastPktQueued--; 
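                /* noOfbroadcastPktQueued mirrors the increment done in
                 * uf_process_ma_packet_req() and is updated under staRecord_lock. */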
spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags); kfree(buffered_pkt); } } } if(!isRouterBufferEnabled(priv,UNIFI_TRAFFIC_Q_CONTENTION)) { while((interfacePriv->dtimActive)&& (buffered_pkt=dequeue_tx_data_pdu(priv,&interfacePriv->genericMulticastOrBroadCastFrames))) { buffered_pkt->transmissionControl |= TRANSMISSION_CONTROL_TRIGGER_MASK; moreData = (buffered_pkt->transmissionControl & TRANSMISSION_CONTROL_EOSP_MASK)?FALSE:TRUE; if((r=frame_and_send_queued_pdu(priv,buffered_pkt,NULL,moreData,FALSE)) == -ENOSPC) { /* Clear the trigger bit transmission control*/ buffered_pkt->transmissionControl &= ~(TRANSMISSION_CONTROL_TRIGGER_MASK); /* Enqueue at the head of the queue */ spin_lock_irqsave(&priv->tx_q_lock,lock_flags); list_add(&buffered_pkt->q, &interfacePriv->genericMulticastOrBroadCastFrames); spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags); break; } else { if(r) { unifi_trace(priv,UDBG1,"send_multicast_frames: Send genericMulticastOrBroadCastFrame failed (%x, %x)\n", buffered_pkt->hostTag, r); unifi_net_data_free(priv, &buffered_pkt->bulkdata); } if(!moreData) { interfacePriv->dtimActive = FALSE; if(!r) { pduSent ++; hostTag = buffered_pkt->hostTag; } else { send_vif_availibility_rsp(priv,uf_get_vif_identifier(interfacePriv->interfaceMode,interfaceTag),CSR_RC_UNSPECIFIED_FAILURE); } } /* Buffered frame sent successfully */ spin_lock_irqsave(&priv->staRecord_lock,lock_flags); interfacePriv->noOfbroadcastPktQueued--; spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags); kfree(buffered_pkt); } } } if((interfacePriv->dtimActive == FALSE)) { /* Record the host Tag*/ unifi_trace(priv,UDBG2,"send_multicast_frames: Recorded hostTag of EOSP packet: = 0x%x\n",hostTag); interfacePriv->multicastPduHostTag = hostTag; } return pduSent; } #endif void uf_process_ma_vif_availibility_ind(unifi_priv_t *priv,u8 *sigdata, u32 siglen) { #ifdef CSR_SUPPORT_SME CSR_SIGNAL signal; CSR_MA_VIF_AVAILABILITY_INDICATION *ind; int r; u16 interfaceTag; u8 pduSent =0; CSR_RESULT_CODE resultCode = CSR_RC_SUCCESS; netInterface_priv_t *interfacePriv; unifi_trace(priv, UDBG3, "uf_process_ma_vif_availibility_ind: Process signal 0x%.4X\n", *((u16*)sigdata)); r = read_unpack_signal(sigdata, &signal); if (r) { unifi_error(priv, "uf_process_ma_vif_availibility_ind: Received unknown signal 0x%.4X.\n", CSR_GET_UINT16_FROM_LITTLE_ENDIAN(sigdata)); return; } ind = &signal.u.MaVifAvailabilityIndication; interfaceTag=ind->VirtualInterfaceIdentifier & 0xff; if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) { unifi_error(priv, "in vif_availability_ind interfaceTag is wrong\n"); return; } interfacePriv = priv->interfacePriv[interfaceTag]; if(ind->Multicast) { if(list_empty(&interfacePriv->genericMulticastOrBroadCastFrames) && list_empty(&interfacePriv->genericMulticastOrBroadCastMgtFrames)) { /* This condition can occur because of a potential race where the TIM is not yet reset as host is waiting for confirm but it is sent by firmware and DTIM occurs*/ unifi_notice(priv,"ma_vif_availibility_ind recevied for multicast but queues are empty%d\n",interfaceTag); send_vif_availibility_rsp(priv,ind->VirtualInterfaceIdentifier,CSR_RC_NO_BUFFERED_BROADCAST_MULTICAST_FRAMES); interfacePriv->dtimActive = FALSE; if(interfacePriv->multicastPduHostTag == 0xffffffff) { unifi_notice(priv,"ma_vif_availibility_ind recevied for multicast but queues are empty%d\n",interfaceTag); /* This may be an extra request in very rare race conditions but it is fine as it would atleast remove the potential lock up */ if 
(!interfacePriv->bcTimSetReqPendingFlag) { update_tim(priv,0,CSR_WIFI_TIM_RESET,interfaceTag, 0xFFFFFFFF); } else { /* Cache the TimSet value so that it will processed immidiatly after * completing the current setTim Request */ interfacePriv->bcTimSetReqQueued = CSR_WIFI_TIM_RESET; unifi_trace(priv, UDBG2, "uf_process_ma_vif_availibility_ind : One more UpdateDTim Request(%d) Queued \n", interfacePriv->bcTimSetReqQueued); } } return; } if(interfacePriv->dtimActive) { unifi_trace(priv,UDBG2,"DTIM Occurred for already active DTIM interface %d\n",interfaceTag); return; } else { unifi_trace(priv,UDBG2,"DTIM Occurred for interface %d\n",interfaceTag); if(list_empty(&interfacePriv->genericMulticastOrBroadCastFrames)) { set_eosp_transmit_ctrl(priv,&interfacePriv->genericMulticastOrBroadCastMgtFrames); } else { set_eosp_transmit_ctrl(priv,&interfacePriv->genericMulticastOrBroadCastFrames); } } interfacePriv->dtimActive = TRUE; pduSent = send_multicast_frames(priv,interfaceTag); } else { unifi_error(priv,"Interface switching is not supported %d\n",interfaceTag); resultCode = CSR_RC_NOT_SUPPORTED; send_vif_availibility_rsp(priv,ind->VirtualInterfaceIdentifier,CSR_RC_NOT_SUPPORTED); } #endif } #ifdef CSR_SUPPORT_SME #define GET_ACTIVE_INTERFACE_TAG(priv) 0 static u8 uf_is_more_data_for_delivery_ac(unifi_priv_t *priv, CsrWifiRouterCtrlStaInfo_t *staRecord) { s8 i; for(i=UNIFI_TRAFFIC_Q_VO; i >= UNIFI_TRAFFIC_Q_BK; i--) { if(((staRecord->powersaveMode[i]==CSR_WIFI_AC_DELIVERY_ONLY_ENABLE) ||(staRecord->powersaveMode[i]==CSR_WIFI_AC_TRIGGER_AND_DELIVERY_ENABLED)) &&(!list_empty(&staRecord->dataPdu[i]))) { unifi_trace(priv,UDBG2,"uf_is_more_data_for_delivery_ac: Data Available AC = %d\n", i); return TRUE; } } unifi_trace(priv,UDBG2,"uf_is_more_data_for_delivery_ac: Data NOT Available \n"); return FALSE; } static u8 uf_is_more_data_for_usp_delivery(unifi_priv_t *priv, CsrWifiRouterCtrlStaInfo_t *staRecord, unifi_TrafficQueue queue) { s8 i; for(i = queue; i >= UNIFI_TRAFFIC_Q_BK; i--) { if(((staRecord->powersaveMode[i]==CSR_WIFI_AC_DELIVERY_ONLY_ENABLE) ||(staRecord->powersaveMode[i]==CSR_WIFI_AC_TRIGGER_AND_DELIVERY_ENABLED)) &&(!list_empty(&staRecord->dataPdu[i]))) { unifi_trace(priv,UDBG2,"uf_is_more_data_for_usp_delivery: Data Available AC = %d\n", i); return TRUE; } } unifi_trace(priv,UDBG2,"uf_is_more_data_for_usp_delivery: Data NOT Available \n"); return FALSE; } /* * --------------------------------------------------------------------------- * uf_send_buffered_data_from_delivery_ac * * This function takes care of * -> Parsing the delivery enabled queue & sending frame down to HIP * -> Setting EOSP=1 when USP to be terminated * -> Depending on MAX SP length services the USP * * NOTE:This function always called from uf_handle_uspframes_delivery(), Dont * call this function from any other location in code * * Arguments: * priv Pointer to device private context struct * vif interface specific HIP vif instance * staInfo peer for which UAPSD to be scheduled * queue AC from which Data to be sent in USP * txList access category for processing list * --------------------------------------------------------------------------- */ void uf_send_buffered_data_from_delivery_ac(unifi_priv_t *priv, CsrWifiRouterCtrlStaInfo_t * staInfo, u8 queue, struct list_head *txList) { u16 interfaceTag = GET_ACTIVE_INTERFACE_TAG(priv); tx_buffered_packets_t * buffered_pkt = NULL; unsigned long lock_flags; u8 eosp=FALSE; s8 r =0; u8 moreData = FALSE; netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag]; 
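    /* U-APSD service period: every PDU sent from an active session carries
     * the TRIGGER bit; EOSP is set on the last frame, i.e. when no more
     * delivery-enabled data remains or when maxSpLength frames have been
     * sent.  A DTIM becoming active or an -ENOSPC from the HIP layer
     * suspends the USP (uspSuspend = TRUE) and re-queues the PDU so the
     * delivery can be resumed later. */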
unifi_trace(priv, UDBG2, "++uf_send_buffered_data_from_delivery_ac, active=%x\n", staInfo->uapsdActive); if (queue > UNIFI_TRAFFIC_Q_VO) { return; } while((buffered_pkt=dequeue_tx_data_pdu(priv, txList))) { if((IS_DTIM_ACTIVE(interfacePriv->dtimActive,interfacePriv->multicastPduHostTag))) { unifi_trace(priv, UDBG2, "uf_send_buffered_data_from_delivery_ac: DTIM Active, suspend UAPSD, staId: 0x%x\n", staInfo->aid); /* Once resume called, the U-APSD delivery operation will resume */ spin_lock_irqsave(&priv->staRecord_lock,lock_flags); staInfo->uspSuspend = TRUE; spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags); /* re-queueing the packet as DTIM started */ spin_lock_irqsave(&priv->tx_q_lock,lock_flags); list_add(&buffered_pkt->q,txList); spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags); break; } buffered_pkt->transmissionControl &= ~(TRANSMISSION_CONTROL_TRIGGER_MASK | TRANSMISSION_CONTROL_EOSP_MASK); if((staInfo->wmmOrQosEnabled == TRUE)&&(staInfo->uapsdActive == TRUE)) { buffered_pkt->transmissionControl = TRANSMISSION_CONTROL_TRIGGER_MASK; /* Check All delivery enables Ac for more data, because caller of this * function not aware about last packet * (First check in moreData fetching helps in draining out Mgt frames Q) */ moreData = (!list_empty(txList) || uf_is_more_data_for_usp_delivery(priv, staInfo, queue)); if(staInfo->noOfSpFramesSent == (staInfo->maxSpLength - 1)) { moreData = FALSE; } if(moreData == FALSE) { eosp = TRUE; buffered_pkt->transmissionControl = (TRANSMISSION_CONTROL_TRIGGER_MASK | TRANSMISSION_CONTROL_EOSP_MASK); } } else { /* Non QoS and non U-APSD */ unifi_warning(priv, "uf_send_buffered_data_from_delivery_ac: non U-APSD !!! \n"); } unifi_trace(priv,UDBG2,"uf_send_buffered_data_from_delivery_ac : MoreData:%d, EOSP:%d\n",moreData,eosp); if((r=frame_and_send_queued_pdu(priv,buffered_pkt,staInfo,moreData,eosp)) == -ENOSPC) { unifi_trace(priv, UDBG2, "uf_send_buffered_data_from_delivery_ac: UASPD suspended, ENOSPC in hipQ=%x\n", queue); /* Once resume called, the U-APSD delivery operation will resume */ spin_lock_irqsave(&priv->staRecord_lock,lock_flags); staInfo->uspSuspend = TRUE; spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags); spin_lock_irqsave(&priv->tx_q_lock,lock_flags); list_add(&buffered_pkt->q,txList); spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags); priv->pausedStaHandle[queue]=(u8)(staInfo->assignedHandle); break; } else { if(r){ /* the PDU failed where we can't do any thing so free the storage */ unifi_net_data_free(priv, &buffered_pkt->bulkdata); } kfree(buffered_pkt); spin_lock_irqsave(&priv->staRecord_lock,lock_flags); staInfo->noOfSpFramesSent++; if((!moreData) || (staInfo->noOfSpFramesSent == staInfo->maxSpLength)) { unifi_trace(priv, UDBG2, "uf_send_buffered_data_from_delivery_ac: Terminating USP\n"); staInfo->uapsdActive = FALSE; staInfo->uspSuspend = FALSE; staInfo->noOfSpFramesSent = 0; spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags); break; } spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags); } } unifi_trace(priv, UDBG2, "--uf_send_buffered_data_from_delivery_ac, active=%x\n", staInfo->uapsdActive); } void uf_send_buffered_data_from_ac(unifi_priv_t *priv, CsrWifiRouterCtrlStaInfo_t * staInfo, u8 queue, struct list_head *txList) { tx_buffered_packets_t * buffered_pkt = NULL; unsigned long lock_flags; u8 eosp=FALSE; u8 moreData = FALSE; s8 r =0; unifi_trace(priv,UDBG2,"uf_send_buffered_data_from_ac :\n"); while(!isRouterBufferEnabled(priv,queue) && ((buffered_pkt=dequeue_tx_data_pdu(priv, txList))!=NULL)){ 
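        /* Non U-APSD flush: keep draining this list while router buffering is
         * not enabled for the queue; TRIGGER/EOSP are cleared and moreData/eosp
         * stay FALSE for every PDU.  On -ENOSPC the PDU is re-queued at the
         * head and, when a station record is given, its handle is recorded in
         * pausedStaHandle[]. */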
buffered_pkt->transmissionControl &= ~(TRANSMISSION_CONTROL_TRIGGER_MASK|TRANSMISSION_CONTROL_EOSP_MASK); unifi_trace(priv,UDBG3,"uf_send_buffered_data_from_ac : MoreData:%d, EOSP:%d\n",moreData,eosp); if((r=frame_and_send_queued_pdu(priv,buffered_pkt,staInfo,moreData,eosp)) == -ENOSPC) { /* Enqueue at the head of the queue */ spin_lock_irqsave(&priv->tx_q_lock,lock_flags); list_add(&buffered_pkt->q,txList); spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags); if(staInfo != NULL){ priv->pausedStaHandle[queue]=(u8)(staInfo->assignedHandle); } unifi_trace(priv,UDBG3," uf_send_buffered_data_from_ac: PDU sending failed .. no space for queue %d \n",queue); } else { if(r){ /* the PDU failed where we can't do any thing so free the storage */ unifi_net_data_free(priv, &buffered_pkt->bulkdata); } kfree(buffered_pkt); } } } void uf_send_buffered_frames(unifi_priv_t *priv,unifi_TrafficQueue q) { u16 interfaceTag = GET_ACTIVE_INTERFACE_TAG(priv); u32 startIndex=0,endIndex=0; CsrWifiRouterCtrlStaInfo_t * staInfo = NULL; u8 queue; u8 moreData = FALSE; netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag]; if(!((interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_AP) || (interfacePriv->interfaceMode == CSR_WIFI_ROUTER_CTRL_MODE_P2PGO))) return; queue = (q<=3)?q:0; if(interfacePriv->dtimActive) { /* this function updates dtimActive*/ send_multicast_frames(priv,interfaceTag); if(!interfacePriv->dtimActive) { moreData = (!list_empty(&interfacePriv->genericMulticastOrBroadCastMgtFrames) || !list_empty(&interfacePriv->genericMulticastOrBroadCastFrames)); if(!moreData) { if (!interfacePriv->bcTimSetReqPendingFlag) { update_tim(priv,0,CSR_WIFI_TIM_RESET,interfaceTag, 0XFFFFFFFF); } else { /* Cache the TimSet value so that it will processed immidiatly after * completing the current setTim Request */ interfacePriv->bcTimSetReqQueued = CSR_WIFI_TIM_RESET; unifi_trace(priv, UDBG2, "uf_send_buffered_frames : One more UpdateDTim Request(%d) Queued \n", interfacePriv->bcTimSetReqQueued); } } } else { moreData = (!list_empty(&interfacePriv->genericMulticastOrBroadCastMgtFrames) || !list_empty(&interfacePriv->genericMulticastOrBroadCastFrames)); if(!moreData) { /* This should never happen but if it happens, we need a way out */ unifi_error(priv,"ERROR: No More Data but DTIM is active sending Response\n"); send_vif_availibility_rsp(priv,uf_get_vif_identifier(interfacePriv->interfaceMode,interfaceTag),CSR_RC_NO_BUFFERED_BROADCAST_MULTICAST_FRAMES); interfacePriv->dtimActive = FALSE; } } return; } if(priv->pausedStaHandle[queue] > 7) { priv->pausedStaHandle[queue] = 0; } if(queue == UNIFI_TRAFFIC_Q_VO) { unifi_trace(priv,UDBG2,"uf_send_buffered_frames : trying mgt from queue=%d\n",queue); for(startIndex= 0; startIndex < UNIFI_MAX_CONNECTIONS;startIndex++) { staInfo = CsrWifiRouterCtrlGetStationRecordFromHandle(priv,startIndex,interfaceTag); if(!staInfo ) { continue; } else if((staInfo->currentPeerState == CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_POWER_SAVE) &&(staInfo->uapsdActive == FALSE) ) { continue; } if((staInfo != NULL)&&(staInfo->currentPeerState == CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_ACTIVE) &&(staInfo->uapsdActive == FALSE)){ /*Non-UAPSD case push the management frames out*/ if(!list_empty(&staInfo->mgtFrames)){ uf_send_buffered_data_from_ac(priv,staInfo, UNIFI_TRAFFIC_Q_VO, &staInfo->mgtFrames); } } if(isRouterBufferEnabled(priv,queue)) { unifi_notice(priv,"uf_send_buffered_frames : No space Left for queue = %d\n",queue); break; } } /*push generic management frames out*/ 
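        /* Generic management frames, which are not tied to any station record,
         * are flushed on the VO queue only after the per-station management
         * queues above have been serviced. */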
if(!list_empty(&interfacePriv->genericMgtFrames)) { unifi_trace(priv,UDBG2,"uf_send_buffered_frames : trying generic mgt from queue=%d\n",queue); uf_send_buffered_data_from_ac(priv,staInfo, UNIFI_TRAFFIC_Q_VO, &interfacePriv->genericMgtFrames); } } unifi_trace(priv,UDBG2,"uf_send_buffered_frames : Resume called for Queue=%d\n",queue); unifi_trace(priv,UDBG2,"uf_send_buffered_frames : start=%d end=%d\n",startIndex,endIndex); startIndex = priv->pausedStaHandle[queue]; endIndex = (startIndex + UNIFI_MAX_CONNECTIONS -1) % UNIFI_MAX_CONNECTIONS; while(startIndex != endIndex) { staInfo = CsrWifiRouterCtrlGetStationRecordFromHandle(priv,startIndex,interfaceTag); if(!staInfo) { startIndex ++; if(startIndex >= UNIFI_MAX_CONNECTIONS) { startIndex = 0; } continue; } else if((staInfo->currentPeerState == CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_POWER_SAVE) &&(staInfo->uapsdActive == FALSE)) { startIndex ++; if(startIndex >= UNIFI_MAX_CONNECTIONS) { startIndex = 0; } continue; } /* Peer is active or U-APSD is active so send PDUs to the peer */ unifi_trace(priv,UDBG2,"uf_send_buffered_frames : trying data from queue=%d\n",queue); if((staInfo != NULL)&&(staInfo->currentPeerState == CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_ACTIVE) &&(staInfo->uapsdActive == FALSE)) { if(!list_empty(&staInfo->dataPdu[queue])) { /*Non-UAPSD case push the AC frames out*/ uf_send_buffered_data_from_ac(priv, staInfo, queue, (&staInfo->dataPdu[queue])); } } startIndex ++; if(startIndex >= UNIFI_MAX_CONNECTIONS) { startIndex = 0; } } if(isRouterBufferEnabled(priv,queue)) { priv->pausedStaHandle[queue] = endIndex; } else { priv->pausedStaHandle[queue] = 0; } /* U-APSD might have stopped because of ENOSPC in lib_hip (pause activity). * So restart it if U-APSD was active with any of the station */ unifi_trace(priv, UDBG4, "csrWifiHipSendBufferedFrames: UAPSD Resume Q=%x\n", queue); resume_suspended_uapsd(priv, interfaceTag); } u8 uf_is_more_data_for_non_delivery_ac(CsrWifiRouterCtrlStaInfo_t *staRecord) { u8 i; for(i=0;i<=3;i++) { if(((staRecord->powersaveMode[i]==CSR_WIFI_AC_TRIGGER_ONLY_ENABLED) ||(staRecord->powersaveMode[i]==CSR_WIFI_AC_LEGACY_POWER_SAVE)) &&(!list_empty(&staRecord->dataPdu[i]))){ return TRUE; } } if(((staRecord->powersaveMode[UNIFI_TRAFFIC_Q_VO]==CSR_WIFI_AC_TRIGGER_ONLY_ENABLED) ||(staRecord->powersaveMode[UNIFI_TRAFFIC_Q_VO]==CSR_WIFI_AC_LEGACY_POWER_SAVE)) &&(!list_empty(&staRecord->mgtFrames))){ return TRUE; } return FALSE; } int uf_process_station_records_for_sending_data(unifi_priv_t *priv,u16 interfaceTag, CsrWifiRouterCtrlStaInfo_t *srcStaInfo, CsrWifiRouterCtrlStaInfo_t *dstStaInfo) { netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag]; unifi_trace(priv, UDBG5, "entering uf_process_station_records_for_sending_data\n"); if (srcStaInfo->currentPeerState == CSR_WIFI_ROUTER_CTRL_PEER_DISCONNECTED) { unifi_error(priv, "Peer State not connected AID = %x, handle = %x, control port state = %x\n", srcStaInfo->aid, srcStaInfo->assignedHandle, srcStaInfo->peerControlledPort->port_action); return -1; } switch (interfacePriv->interfaceMode) { case CSR_WIFI_ROUTER_CTRL_MODE_P2PGO: case CSR_WIFI_ROUTER_CTRL_MODE_AP: unifi_trace(priv, UDBG5, "mode is AP/P2PGO\n"); break; default: unifi_warning(priv, "mode is nor AP neither P2PGO, packet cant be xmit\n"); return -1; } switch(dstStaInfo->peerControlledPort->port_action) { case CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_CLOSED_DISCARD: case CSR_WIFI_ROUTER_CTRL_PORT_ACTION_8021X_PORT_CLOSED_BLOCK: unifi_trace(priv, UDBG5, "destination port is closed/blocked, 
discarding the packet\n"); return -1; default: unifi_trace(priv, UDBG5, "destination port state is open\n"); } /* port state is open, destination station record is valid, Power save state is * validated in uf_process_ma_packet_req function */ unifi_trace(priv, UDBG5, "leaving uf_process_station_records_for_sending_data\n"); return 0; } /* * --------------------------------------------------------------------------- * uf_handle_uspframes_delivery * * This function takes care of handling USP session for peer, when * -> trigger frame from peer * -> suspended USP to be processed (resumed) * * NOTE: uf_send_buffered_data_from_delivery_ac() always called from this function, Dont * make a direct call to uf_send_buffered_data_from_delivery_ac() from any other part of * code * * Arguments: * priv Pointer to device private context struct * staInfo peer for which UAPSD to be scheduled * interfaceTag virtual interface tag * --------------------------------------------------------------------------- */ static void uf_handle_uspframes_delivery(unifi_priv_t * priv, CsrWifiRouterCtrlStaInfo_t *staInfo, u16 interfaceTag) { s8 i; u8 allDeliveryEnabled = 0, dataAvailable = 0; netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag]; unsigned long lock_flags; unifi_trace(priv, UDBG2, " ++ uf_handle_uspframes_delivery, uapsd active=%x, suspended?=%x\n", staInfo->uapsdActive, staInfo->uspSuspend); /* Check for Buffered frames according to priority order & deliver it * 1. AC_VO delivery enable & Mgt frames available * 2. Process remaining Ac's from order AC_VO to AC_BK */ /* USP initiated by WMMPS enabled peer & SET the status flag to TRUE */ if (!staInfo->uspSuspend && staInfo->uapsdActive) { unifi_notice(priv, "uf_handle_uspframes_delivery: U-APSD already active! 
STA=%x:%x:%x:%x:%x:%x\n", staInfo->peerMacAddress.a[0], staInfo->peerMacAddress.a[1], staInfo->peerMacAddress.a[2], staInfo->peerMacAddress.a[3], staInfo->peerMacAddress.a[4], staInfo->peerMacAddress.a[5]); return; } spin_lock_irqsave(&priv->staRecord_lock,lock_flags); staInfo->uapsdActive = TRUE; staInfo->uspSuspend = FALSE; spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags); if(((staInfo->powersaveMode[UNIFI_TRAFFIC_Q_VO]==CSR_WIFI_AC_TRIGGER_AND_DELIVERY_ENABLED)|| (staInfo->powersaveMode[UNIFI_TRAFFIC_Q_VO]==CSR_WIFI_AC_DELIVERY_ONLY_ENABLE)) && (!list_empty(&staInfo->mgtFrames))) { /* Management queue has data && UNIFI_TRAFFIC_Q_VO is delivery enable */ unifi_trace(priv, UDBG4, "uf_handle_uspframes_delivery: Sending buffered management frames\n"); uf_send_buffered_data_from_delivery_ac(priv, staInfo, UNIFI_TRAFFIC_Q_VO, &staInfo->mgtFrames); } if (!uf_is_more_data_for_delivery_ac(priv, staInfo)) { /* All delivery enable AC's are empty, so QNULL to be sent to terminate the USP * NOTE: If we have sent Mgt frame also, we must send QNULL followed to terminate USP */ if (!staInfo->uspSuspend) { spin_lock_irqsave(&priv->staRecord_lock,lock_flags); staInfo->uapsdActive = FALSE; spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags); unifi_trace(priv, UDBG2, "uf_handle_uspframes_delivery: sending QNull for trigger\n"); uf_send_qos_null(priv, interfaceTag, staInfo->peerMacAddress.a, (CSR_PRIORITY) staInfo->triggerFramePriority, staInfo); staInfo->triggerFramePriority = CSR_QOS_UP0; } else { unifi_trace(priv, UDBG2, "uf_handle_uspframes_delivery: MgtQ xfer suspended\n"); } } else { for(i = UNIFI_TRAFFIC_Q_VO; i >= UNIFI_TRAFFIC_Q_BK; i--) { if(((staInfo->powersaveMode[i]==CSR_WIFI_AC_DELIVERY_ONLY_ENABLE) ||(staInfo->powersaveMode[i]==CSR_WIFI_AC_TRIGGER_AND_DELIVERY_ENABLED)) && (!list_empty(&staInfo->dataPdu[i]))) { /* Deliver Data according to AC priority (from VO to BK) as part of USP */ unifi_trace(priv, UDBG4, "uf_handle_uspframes_delivery: Buffered data frames from Queue (%d) for USP\n", i); uf_send_buffered_data_from_delivery_ac(priv, staInfo, i, &staInfo->dataPdu[i]); } if ((!staInfo->uapsdActive) || (staInfo->uspSuspend && IS_DTIM_ACTIVE(interfacePriv->dtimActive,interfacePriv->multicastPduHostTag))) { /* If DTIM active found on one AC, No need to parse the remaining AC's * as USP suspended. 
Break out of loop */ unifi_trace(priv, UDBG2, "uf_handle_uspframes_delivery: suspend=%x, DTIM=%x, USP terminated=%s\n", staInfo->uspSuspend, IS_DTIM_ACTIVE(interfacePriv->dtimActive,interfacePriv->multicastPduHostTag), staInfo->uapsdActive?"NO":"YES"); break; } } } /* Depending on the USP status, update the TIM accordingly for delivery enabled AC only * (since we are not manipulating any Non-delivery list(AC)) */ is_all_ac_deliver_enabled_and_moredata(staInfo, &allDeliveryEnabled, &dataAvailable); if ((allDeliveryEnabled && !dataAvailable)) { if ((staInfo->timSet != CSR_WIFI_TIM_RESET) || (staInfo->timSet != CSR_WIFI_TIM_RESETTING)) { staInfo->updateTimReqQueued = (u8) CSR_WIFI_TIM_RESET; unifi_trace(priv, UDBG4, " --uf_handle_uspframes_delivery, UAPSD timset\n"); if (!staInfo->timRequestPendingFlag) { update_tim(priv, staInfo->aid, 0, interfaceTag, staInfo->assignedHandle); } } } unifi_trace(priv, UDBG2, " --uf_handle_uspframes_delivery, uapsd active=%x, suspend?=%x\n", staInfo->uapsdActive, staInfo->uspSuspend); } void uf_process_wmm_deliver_ac_uapsd(unifi_priv_t * priv, CsrWifiRouterCtrlStaInfo_t * srcStaInfo, u16 qosControl, u16 interfaceTag) { CSR_PRIORITY priority; unifi_TrafficQueue priority_q; unsigned long lock_flags; unifi_trace(priv, UDBG2, "++uf_process_wmm_deliver_ac_uapsd: uapsdactive?=%x\n", srcStaInfo->uapsdActive); /* If recceived Frames trigger Frame and Devlivery enabled AC has data * then transmit from High priorty delivery enabled AC */ priority = (CSR_PRIORITY)(qosControl & IEEE802_11_QC_TID_MASK); priority_q = unifi_frame_priority_to_queue((CSR_PRIORITY) priority); if((srcStaInfo->powersaveMode[priority_q]==CSR_WIFI_AC_TRIGGER_ONLY_ENABLED) ||(srcStaInfo->powersaveMode[priority_q]==CSR_WIFI_AC_TRIGGER_AND_DELIVERY_ENABLED)) { spin_lock_irqsave(&priv->staRecord_lock,lock_flags); srcStaInfo->triggerFramePriority = priority; spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags); unifi_trace(priv, UDBG2, "uf_process_wmm_deliver_ac_uapsd: trigger frame, Begin U-APSD, triggerQ=%x\n", priority_q); uf_handle_uspframes_delivery(priv, srcStaInfo, interfaceTag); } unifi_trace(priv, UDBG2, "--uf_process_wmm_deliver_ac_uapsd: uapsdactive?=%x\n", srcStaInfo->uapsdActive); } void uf_send_qos_null(unifi_priv_t * priv,u16 interfaceTag, const u8 *da,CSR_PRIORITY priority,CsrWifiRouterCtrlStaInfo_t * srcStaInfo) { bulk_data_param_t bulkdata; CsrResult csrResult; struct sk_buff *skb, *newSkb = NULL; CsrWifiMacAddress peerAddress; netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag]; CSR_TRANSMISSION_CONTROL transmissionControl = (TRANSMISSION_CONTROL_EOSP_MASK | TRANSMISSION_CONTROL_TRIGGER_MASK); int r; CSR_SIGNAL signal; u32 priority_q; CSR_RATE transmitRate = 0; /* Send a Null Frame to Peer, * 32= size of mac header */ csrResult = unifi_net_data_malloc(priv, &bulkdata.d[0], MAC_HEADER_SIZE + QOS_CONTROL_HEADER_SIZE); if (csrResult != CSR_RESULT_SUCCESS) { unifi_error(priv, " failed to allocate request_data. 
in uf_send_qos_null func\n"); return ; } skb = (struct sk_buff *)(bulkdata.d[0].os_net_buf_ptr); skb->len = 0; bulkdata.d[0].os_data_ptr = skb->data; bulkdata.d[0].os_net_buf_ptr = (unsigned char*)skb; bulkdata.d[0].net_buf_length = bulkdata.d[0].data_length = skb->len; bulkdata.d[1].os_data_ptr = NULL; bulkdata.d[1].os_net_buf_ptr = NULL; bulkdata.d[1].net_buf_length = bulkdata.d[1].data_length = 0; /* For null frames protection bit should not be set in MAC header, so passing value 0 below for protection field */ if (prepare_and_add_macheader(priv, skb, newSkb, priority, &bulkdata, interfaceTag, da, interfacePriv->bssid.a, 0)) { unifi_error(priv, "failed to create MAC header\n"); unifi_net_data_free(priv, &bulkdata.d[0]); return; } memcpy(peerAddress.a, ((u8 *) bulkdata.d[0].os_data_ptr) + 4, ETH_ALEN); /* convert priority to queue */ priority_q = unifi_frame_priority_to_queue((CSR_PRIORITY) priority); /* Frame ma-packet.req, this is saved/transmitted depend on queue state * send the null frame at data rate of 1 Mb/s for AP or 6 Mb/s for P2PGO */ switch (interfacePriv->interfaceMode) { case CSR_WIFI_ROUTER_CTRL_MODE_AP: transmitRate = 2; break; case CSR_WIFI_ROUTER_CTRL_MODE_P2PGO: transmitRate = 12; break; default: transmitRate = 0; } unifi_frame_ma_packet_req(priv, priority, transmitRate, 0xffffffff, interfaceTag, transmissionControl, priv->netdev_client->sender_id, peerAddress.a, &signal); r = ul_send_signal_unpacked(priv, &signal, &bulkdata); if(r) { unifi_error(priv, "failed to send QOS data null packet result: %d\n",r); unifi_net_data_free(priv, &bulkdata.d[0]); } return; } void uf_send_nulldata(unifi_priv_t * priv,u16 interfaceTag, const u8 *da,CSR_PRIORITY priority,CsrWifiRouterCtrlStaInfo_t * srcStaInfo) { bulk_data_param_t bulkdata; CsrResult csrResult; struct sk_buff *skb, *newSkb = NULL; CsrWifiMacAddress peerAddress; netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag]; CSR_TRANSMISSION_CONTROL transmissionControl = 0; int r; CSR_SIGNAL signal; u32 priority_q; CSR_RATE transmitRate = 0; CSR_MA_PACKET_REQUEST *req = &signal.u.MaPacketRequest; unsigned long lock_flags; /* Send a Null Frame to Peer, size = 24 for MAC header */ csrResult = unifi_net_data_malloc(priv, &bulkdata.d[0], MAC_HEADER_SIZE); if (csrResult != CSR_RESULT_SUCCESS) { unifi_error(priv, "uf_send_nulldata: Failed to allocate memory for NULL frame\n"); return ; } skb = (struct sk_buff *)(bulkdata.d[0].os_net_buf_ptr); skb->len = 0; bulkdata.d[0].os_data_ptr = skb->data; bulkdata.d[0].os_net_buf_ptr = (unsigned char*)skb; bulkdata.d[0].net_buf_length = bulkdata.d[0].data_length = skb->len; bulkdata.d[1].os_data_ptr = NULL; bulkdata.d[1].os_net_buf_ptr = NULL; bulkdata.d[1].net_buf_length = bulkdata.d[1].data_length = 0; /* For null frames protection bit should not be set in MAC header, so passing value 0 below for protection field */ if (prepare_and_add_macheader(priv, skb, newSkb, priority, &bulkdata, interfaceTag, da, interfacePriv->bssid.a, 0)) { unifi_error(priv, "uf_send_nulldata: Failed to create MAC header\n"); unifi_net_data_free(priv, &bulkdata.d[0]); return; } memcpy(peerAddress.a, ((u8 *) bulkdata.d[0].os_data_ptr) + 4, ETH_ALEN); /* convert priority to queue */ priority_q = unifi_frame_priority_to_queue((CSR_PRIORITY) priority); transmissionControl &= ~(CSR_NO_CONFIRM_REQUIRED); /* Frame ma-packet.req, this is saved/transmitted depend on queue state * send the null frame at data rate of 1 Mb/s for AP or 6 Mb/s for P2PGO */ switch (interfacePriv->interfaceMode) { case 
CSR_WIFI_ROUTER_CTRL_MODE_AP: transmitRate = 2; break; case CSR_WIFI_ROUTER_CTRL_MODE_P2PGO: transmitRate = 12; break; default: transmitRate = 0; } unifi_frame_ma_packet_req(priv, priority, transmitRate, INVALID_HOST_TAG, interfaceTag, transmissionControl, priv->netdev_client->sender_id, peerAddress.a, &signal); /* Save host tag to check the status on reception of MA packet confirm */ srcStaInfo->nullDataHostTag = req->HostTag; unifi_trace(priv, UDBG1, "uf_send_nulldata: STA AID = %d hostTag = %x\n", srcStaInfo->aid, req->HostTag); r = ul_send_signal_unpacked(priv, &signal, &bulkdata); if(r == -ENOSPC) { unifi_trace(priv, UDBG1, "uf_send_nulldata: ENOSPC Requeue the Null frame\n"); enque_tx_data_pdu(priv, &bulkdata, &srcStaInfo->dataPdu[priority_q], &signal, 1); spin_lock_irqsave(&priv->staRecord_lock,lock_flags); srcStaInfo->noOfPktQueued++; spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags); } if(r && r != -ENOSPC){ unifi_error(priv, "uf_send_nulldata: Failed to send Null frame Error = %d\n",r); unifi_net_data_free(priv, &bulkdata.d[0]); srcStaInfo->nullDataHostTag = INVALID_HOST_TAG; } return; } u8 uf_check_broadcast_bssid(unifi_priv_t *priv, const bulk_data_param_t *bulkdata) { u8 *bssid = NULL; static const CsrWifiMacAddress broadcast_address = {{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}}; u8 toDs, fromDs; toDs = (((bulkdata->d[0].os_data_ptr)[1]) & 0x01) ? 1 : 0; fromDs =(((bulkdata->d[0].os_data_ptr)[1]) & 0x02) ? 1 : 0; if (toDs && fromDs) { unifi_trace(priv, UDBG6, "Address 4 present, Don't try to find BSSID\n"); bssid = NULL; } else if((toDs == 0) && (fromDs ==0)) { /* BSSID is Address 3 */ bssid = (u8 *) (bulkdata->d[0].os_data_ptr + 4 + (2 * ETH_ALEN)); } else if(toDs) { /* BSSID is Address 1 */ bssid = (u8 *) (bulkdata->d[0].os_data_ptr + 4); } else if(fromDs) { /* BSSID is Address 2 */ bssid = (u8 *) (bulkdata->d[0].os_data_ptr + 4 + ETH_ALEN); } if (memcmp(broadcast_address.a, bssid, ETH_ALEN)== 0) { return TRUE; } else { return FALSE; } } u8 uf_process_pm_bit_for_peer(unifi_priv_t * priv, CsrWifiRouterCtrlStaInfo_t * srcStaInfo, u8 pmBit,u16 interfaceTag) { u8 moreData = FALSE; u8 powerSaveChanged = FALSE; unsigned long lock_flags; unifi_trace(priv, UDBG3, "entering uf_process_pm_bit_for_peer\n"); if (pmBit) { priv->allPeerDozing |= (0x01 << (srcStaInfo->assignedHandle)); } else { priv->allPeerDozing &= ~(0x01 << (srcStaInfo->assignedHandle)); } if(pmBit) { if(srcStaInfo->currentPeerState == CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_ACTIVE) { /* disable the preemption */ spin_lock_irqsave(&priv->staRecord_lock,lock_flags); srcStaInfo->currentPeerState =CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_POWER_SAVE; powerSaveChanged = TRUE; /* enable the preemption */ spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags); } else { return powerSaveChanged; } } else { if(srcStaInfo->currentPeerState == CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_POWER_SAVE) { /* disable the preemption */ spin_lock_irqsave(&priv->staRecord_lock,lock_flags); srcStaInfo->currentPeerState = CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_ACTIVE; powerSaveChanged = TRUE; /* enable the preemption */ spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags); }else { return powerSaveChanged; } } if(srcStaInfo->currentPeerState == CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_ACTIVE) { unifi_trace(priv,UDBG3, "Peer with AID = %d is active now\n",srcStaInfo->aid); process_peer_active_transition(priv,srcStaInfo,interfaceTag); } else { unifi_trace(priv,UDBG3, "Peer with AID = %d is in PS Now\n",srcStaInfo->aid); /* Set TIM if needed */ 
if(!srcStaInfo->wmmOrQosEnabled) { moreData = (!list_empty(&srcStaInfo->mgtFrames) || !list_empty(&srcStaInfo->dataPdu[UNIFI_TRAFFIC_Q_VO])|| !list_empty(&srcStaInfo->dataPdu[UNIFI_TRAFFIC_Q_CONTENTION])); if(moreData && (srcStaInfo->timSet == CSR_WIFI_TIM_RESET)) { unifi_trace(priv, UDBG3, "This condition should not occur\n"); if (!srcStaInfo->timRequestPendingFlag){ update_tim(priv,srcStaInfo->aid,1,interfaceTag, srcStaInfo->assignedHandle); } else { /* Cache the TimSet value so that it will processed immidiatly after * completing the current setTim Request */ srcStaInfo->updateTimReqQueued = 1; unifi_trace(priv, UDBG6, "update_tim : One more UpdateTim Request (Tim value:%d) Queued for AID %x\n", srcStaInfo->updateTimReqQueued, srcStaInfo->aid); } } } else { u8 allDeliveryEnabled = 0, dataAvailable = 0; unifi_trace(priv, UDBG5, "Qos in AP Mode\n"); /* Check if all AC's are Delivery Enabled */ is_all_ac_deliver_enabled_and_moredata(srcStaInfo, &allDeliveryEnabled, &dataAvailable); /*check for more data in non-delivery enabled queues*/ moreData = (uf_is_more_data_for_non_delivery_ac(srcStaInfo) || (allDeliveryEnabled && dataAvailable)); if(moreData && (srcStaInfo->timSet == CSR_WIFI_TIM_RESET)) { if (!srcStaInfo->timRequestPendingFlag){ update_tim(priv,srcStaInfo->aid,1,interfaceTag, srcStaInfo->assignedHandle); } else { /* Cache the TimSet value so that it will processed immidiatly after * completing the current setTim Request */ srcStaInfo->updateTimReqQueued = 1; unifi_trace(priv, UDBG6, "update_tim : One more UpdateTim Request (Tim value:%d) Queued for AID %x\n", srcStaInfo->updateTimReqQueued, srcStaInfo->aid); } } } } unifi_trace(priv, UDBG3, "leaving uf_process_pm_bit_for_peer\n"); return powerSaveChanged; } void uf_process_ps_poll(unifi_priv_t *priv,u8* sa,u8* da,u8 pmBit,u16 interfaceTag) { CsrWifiRouterCtrlStaInfo_t *staRecord = CsrWifiRouterCtrlGetStationRecordFromPeerMacAddress(priv, sa, interfaceTag); tx_buffered_packets_t * buffered_pkt = NULL; CsrWifiMacAddress peerMacAddress; unsigned long lock_flags; s8 r =0; u8 moreData = FALSE; netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag]; unifi_trace(priv, UDBG3, "entering uf_process_ps_poll\n"); if(!staRecord) { memcpy(peerMacAddress.a,sa,ETH_ALEN); unifi_trace(priv, UDBG3, "In uf_process_ps_poll, sta record not found:unexpected frame addr = %x:%x:%x:%x:%x:%x\n", sa[0], sa[1],sa[2], sa[3], sa[4],sa[5]); CsrWifiRouterCtrlUnexpectedFrameIndSend(priv->CSR_WIFI_SME_IFACEQUEUE,0,interfaceTag,peerMacAddress); return; } uf_process_pm_bit_for_peer(priv,staRecord,pmBit,interfaceTag); /* Update station last activity time */ staRecord->activity_flag = TRUE; /* This should not change the PM bit as PS-POLL has PM bit always set */ if(!pmBit) { unifi_notice (priv," PM bit reset in PS-POLL\n"); return; } if(IS_DTIM_ACTIVE(interfacePriv->dtimActive,interfacePriv->multicastPduHostTag)) { /* giving more priority to multicast packets so dropping ps-poll*/ unifi_notice (priv," multicast transmission is going on so don't take action on PS-POLL\n"); return; } if(!staRecord->wmmOrQosEnabled) { if((buffered_pkt=dequeue_tx_data_pdu(priv, &staRecord->mgtFrames))) { buffered_pkt->transmissionControl |= TRANSMISSION_CONTROL_TRIGGER_MASK; moreData = (!list_empty(&staRecord->dataPdu[UNIFI_TRAFFIC_Q_CONTENTION]) || !list_empty(&staRecord->dataPdu[UNIFI_TRAFFIC_Q_VO]) || !list_empty(&staRecord->mgtFrames)); buffered_pkt->transmissionControl |= (TRANSMISSION_CONTROL_TRIGGER_MASK | TRANSMISSION_CONTROL_EOSP_MASK); 
if((r=frame_and_send_queued_pdu(priv,buffered_pkt,staRecord,moreData,FALSE)) == -ENOSPC) { /* Clear the trigger bit transmission control*/ buffered_pkt->transmissionControl &= ~(TRANSMISSION_CONTROL_TRIGGER_MASK | TRANSMISSION_CONTROL_EOSP_MASK); /* Enqueue at the head of the queue */ spin_lock_irqsave(&priv->tx_q_lock,lock_flags); list_add(&buffered_pkt->q, &staRecord->mgtFrames); spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags); unifi_trace(priv, UDBG1, "(ENOSPC) PS-POLL received : PDU sending failed \n"); priv->pausedStaHandle[3]=(u8)(staRecord->assignedHandle); } else { if(r){ unifi_trace (priv, UDBG1, " HIP validation failure : PDU sending failed \n"); /* the PDU failed where we can't do any thing so free the storage */ unifi_net_data_free(priv, &buffered_pkt->bulkdata); } kfree(buffered_pkt); } } else if((buffered_pkt=dequeue_tx_data_pdu(priv, &staRecord->dataPdu[UNIFI_TRAFFIC_Q_VO]))) { buffered_pkt->transmissionControl |= TRANSMISSION_CONTROL_TRIGGER_MASK; moreData = (!list_empty(&staRecord->dataPdu[UNIFI_TRAFFIC_Q_CONTENTION]) || !list_empty(&staRecord->dataPdu[UNIFI_TRAFFIC_Q_VO])); buffered_pkt->transmissionControl |= (TRANSMISSION_CONTROL_TRIGGER_MASK | TRANSMISSION_CONTROL_EOSP_MASK); if((r=frame_and_send_queued_pdu(priv,buffered_pkt,staRecord,moreData,FALSE)) == -ENOSPC) { /* Clear the trigger bit transmission control*/ buffered_pkt->transmissionControl &= ~(TRANSMISSION_CONTROL_TRIGGER_MASK | TRANSMISSION_CONTROL_EOSP_MASK); /* Enqueue at the head of the queue */ spin_lock_irqsave(&priv->tx_q_lock,lock_flags); list_add(&buffered_pkt->q, &staRecord->dataPdu[UNIFI_TRAFFIC_Q_VO]); spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags); priv->pausedStaHandle[3]=(u8)(staRecord->assignedHandle); unifi_trace(priv, UDBG1, "(ENOSPC) PS-POLL received : PDU sending failed \n"); } else { if(r){ unifi_trace (priv, UDBG1, " HIP validation failure : PDU sending failed \n"); /* the PDU failed where we can't do any thing so free the storage */ unifi_net_data_free(priv, &buffered_pkt->bulkdata); } kfree(buffered_pkt); } } else if((buffered_pkt=dequeue_tx_data_pdu(priv, &staRecord->dataPdu[UNIFI_TRAFFIC_Q_CONTENTION]))) { buffered_pkt->transmissionControl |= TRANSMISSION_CONTROL_TRIGGER_MASK; moreData = !list_empty(&staRecord->dataPdu[UNIFI_TRAFFIC_Q_CONTENTION]); buffered_pkt->transmissionControl |= (TRANSMISSION_CONTROL_TRIGGER_MASK | TRANSMISSION_CONTROL_EOSP_MASK); if((r=frame_and_send_queued_pdu(priv,buffered_pkt,staRecord,moreData,FALSE)) == -ENOSPC) { /* Clear the trigger bit transmission control*/ buffered_pkt->transmissionControl &= ~(TRANSMISSION_CONTROL_TRIGGER_MASK | TRANSMISSION_CONTROL_EOSP_MASK); /* Enqueue at the head of the queue */ spin_lock_irqsave(&priv->tx_q_lock,lock_flags); list_add(&buffered_pkt->q, &staRecord->dataPdu[UNIFI_TRAFFIC_Q_CONTENTION]); spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags); priv->pausedStaHandle[0]=(u8)(staRecord->assignedHandle); unifi_trace(priv, UDBG1, "(ENOSPC) PS-POLL received : PDU sending failed \n"); } else { if(r){ unifi_trace (priv, UDBG1, " HIP validation failure : PDU sending failed \n"); /* the PDU failed where we can't do any thing so free the storage */ unifi_net_data_free(priv, &buffered_pkt->bulkdata); } kfree(buffered_pkt); } } else { /* Actually since we have sent an ACK, there * there is no need to send a NULL frame*/ } moreData = (!list_empty(&staRecord->dataPdu[UNIFI_TRAFFIC_Q_VO]) || !list_empty(&staRecord->dataPdu[UNIFI_TRAFFIC_Q_CONTENTION]) || !list_empty(&staRecord->mgtFrames)); if(!moreData && (staRecord->timSet == 
CSR_WIFI_TIM_SET)) { unifi_trace(priv, UDBG3, "more data = NULL, set tim to 0 in uf_process_ps_poll\n"); if (!staRecord->timRequestPendingFlag){ update_tim(priv,staRecord->aid,0,interfaceTag, staRecord->assignedHandle); } else { /* Cache the TimSet value so that it will processed immidiatly after * completing the current setTim Request */ staRecord->updateTimReqQueued = 0; unifi_trace(priv, UDBG6, "update_tim : One more UpdateTim Request (Tim value:%d) Queued for AID %x\n", staRecord->updateTimReqQueued, staRecord->aid); } } } else { u8 allDeliveryEnabled = 0, dataAvailable = 0; unifi_trace(priv, UDBG3,"Qos Support station.Processing PS-Poll\n"); /*Send Data From Management Frames*/ /* Priority orders for delivering the buffered packets are * 1. Deliver the Management frames if there * 2. Other access category frames which are non deliver enable including UNIFI_TRAFFIC_Q_VO * priority is from VO->BK */ /* Check if all AC's are Delivery Enabled */ is_all_ac_deliver_enabled_and_moredata(staRecord, &allDeliveryEnabled, &dataAvailable); if (allDeliveryEnabled) { unifi_trace(priv, UDBG3, "uf_process_ps_poll: All ACs are delivery enable so Sending QOS Null in response of Ps-poll\n"); uf_send_qos_null(priv,interfaceTag,sa,CSR_QOS_UP0,staRecord); return; } if (!list_empty(&staRecord->mgtFrames)) { if ((buffered_pkt=dequeue_tx_data_pdu(priv, &staRecord->mgtFrames))) { /* We dont have packets in non delivery enabled UNIFI_TRAFFIC_Q_VO, So we are looking in management * queue of the station record */ moreData = uf_is_more_data_for_non_delivery_ac(staRecord); buffered_pkt->transmissionControl |= (TRANSMISSION_CONTROL_TRIGGER_MASK | TRANSMISSION_CONTROL_EOSP_MASK); /* Last parameter is EOSP & its false always for PS-POLL processing */ if((r=frame_and_send_queued_pdu(priv,buffered_pkt,staRecord,moreData,FALSE)) == -ENOSPC) { /* Clear the trigger bit transmission control*/ buffered_pkt->transmissionControl &= ~(TRANSMISSION_CONTROL_TRIGGER_MASK | TRANSMISSION_CONTROL_EOSP_MASK); /* Enqueue at the head of the queue */ spin_lock_irqsave(&priv->tx_q_lock,lock_flags); list_add(&buffered_pkt->q, &staRecord->mgtFrames); spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags); priv->pausedStaHandle[0]=(u8)(staRecord->assignedHandle); unifi_trace(priv, UDBG1, "(ENOSPC) PS-POLL received : PDU sending failed \n"); } else { if(r){ unifi_trace (priv, UDBG1, " HIP validation failure : PDU sending failed \n"); /* the PDU failed where we can't do any thing so free the storage */ unifi_net_data_free(priv, &buffered_pkt->bulkdata); } kfree(buffered_pkt); } } else { unifi_error(priv, "uf_process_ps_poll: Mgt frame list empty!! 
\n"); } } else { s8 i; /* We dont have buffered packet in mangement frame queue (1 failed), So proceed with condition 2 * UNIFI_TRAFFIC_Q_VO -> VI -> BE -> BK */ for(i= 3; i>=0; i--) { if (!IS_DELIVERY_ENABLED(staRecord->powersaveMode[i])) { /* Send One packet, if queue is NULL then continue */ if((buffered_pkt=dequeue_tx_data_pdu(priv, &staRecord->dataPdu[i]))) { moreData = uf_is_more_data_for_non_delivery_ac(staRecord); buffered_pkt->transmissionControl |= (TRANSMISSION_CONTROL_TRIGGER_MASK | TRANSMISSION_CONTROL_EOSP_MASK); /* Last parameter is EOSP & its false always for PS-POLL processing */ if((r=frame_and_send_queued_pdu(priv,buffered_pkt,staRecord,moreData,FALSE)) == -ENOSPC) { /* Clear the trigger bit transmission control*/ buffered_pkt->transmissionControl &= ~(TRANSMISSION_CONTROL_TRIGGER_MASK | TRANSMISSION_CONTROL_EOSP_MASK); /* Enqueue at the head of the queue */ spin_lock_irqsave(&priv->tx_q_lock,lock_flags); list_add(&buffered_pkt->q, &staRecord->dataPdu[i]); spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags); priv->pausedStaHandle[0]=(u8)(staRecord->assignedHandle); unifi_trace(priv, UDBG1, "(ENOSPC) PS-POLL received : PDU sending failed \n"); } else { if(r) { unifi_trace (priv, UDBG1, " HIP validation failure : PDU sending failed \n"); /* the PDU failed where we can't do any thing so free the storage */ unifi_net_data_free(priv, &buffered_pkt->bulkdata); } kfree(buffered_pkt); } break; } } } } /* Check if all AC's are Delivery Enabled */ is_all_ac_deliver_enabled_and_moredata(staRecord, &allDeliveryEnabled, &dataAvailable); /*check for more data in non-delivery enabled queues*/ moreData = (uf_is_more_data_for_non_delivery_ac(staRecord) || (allDeliveryEnabled && dataAvailable)); if(!moreData && (staRecord->timSet == CSR_WIFI_TIM_SET)) { unifi_trace(priv, UDBG3, "more data = NULL, set tim to 0 in uf_process_ps_poll\n"); if (!staRecord->timRequestPendingFlag){ update_tim(priv,staRecord->aid,0,interfaceTag, staRecord->assignedHandle); } else { /* Cache the TimSet value so that it will processed immidiatly after * completing the current setTim Request */ staRecord->updateTimReqQueued = 0; unifi_trace(priv, UDBG6, "update_tim : One more UpdateTim Request (Tim value:%d) Queued for AID %x\n", staRecord->updateTimReqQueued, staRecord->aid); } } } unifi_trace(priv, UDBG3, "leaving uf_process_ps_poll\n"); } void add_to_send_cfm_list(unifi_priv_t * priv, tx_buffered_packets_t *tx_q_item, struct list_head *frames_need_cfm_list) { tx_buffered_packets_t *send_cfm_list_item = NULL; send_cfm_list_item = kmalloc(sizeof(tx_buffered_packets_t), GFP_ATOMIC); if(send_cfm_list_item == NULL){ unifi_warning(priv, "%s: Failed to allocate memory for new list item \n"); return; } INIT_LIST_HEAD(&send_cfm_list_item->q); send_cfm_list_item->hostTag = tx_q_item->hostTag; send_cfm_list_item->interfaceTag = tx_q_item->interfaceTag; send_cfm_list_item->transmissionControl = tx_q_item->transmissionControl; send_cfm_list_item->leSenderProcessId = tx_q_item->leSenderProcessId; send_cfm_list_item->rate = tx_q_item->rate; memcpy(send_cfm_list_item->peerMacAddress.a, tx_q_item->peerMacAddress.a, ETH_ALEN); send_cfm_list_item->priority = tx_q_item->priority; list_add_tail(&send_cfm_list_item->q, frames_need_cfm_list); } void uf_prepare_send_cfm_list_for_queued_pkts(unifi_priv_t * priv, struct list_head *frames_need_cfm_list, struct list_head * list) { tx_buffered_packets_t *tx_q_item = NULL; struct list_head *listHead; struct list_head *placeHolder; unsigned long lock_flags; 
spin_lock_irqsave(&priv->tx_q_lock,lock_flags); /* Search through the list and if confirmation required for any frames, add it to the send_cfm list */ list_for_each_safe(listHead, placeHolder, list) { tx_q_item = list_entry(listHead, tx_buffered_packets_t, q); if(!tx_q_item) { unifi_error(priv, "Entry should exist, otherwise it is a (BUG)\n"); continue; } /* check if confirmation is requested and if the sender ID is not netdevice client then save the entry in the list for need cfms */ if (!(tx_q_item->transmissionControl & CSR_NO_CONFIRM_REQUIRED) && (tx_q_item->leSenderProcessId != priv->netdev_client->sender_id)){ unifi_trace(priv, UDBG1, "%s: SenderProcessID=%x host tag=%x transmission control=%x\n", __FUNCTION__, tx_q_item->leSenderProcessId, tx_q_item->hostTag, tx_q_item->transmissionControl); add_to_send_cfm_list(priv, tx_q_item, frames_need_cfm_list); } } spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags); } void uf_flush_list(unifi_priv_t * priv, struct list_head * list) { tx_buffered_packets_t *tx_q_item; struct list_head *listHead; struct list_head *placeHolder; unsigned long lock_flags; unifi_trace(priv, UDBG5, "entering the uf_flush_list \n"); spin_lock_irqsave(&priv->tx_q_lock,lock_flags); /* go through list, delete & free memory */ list_for_each_safe(listHead, placeHolder, list) { tx_q_item = list_entry(listHead, tx_buffered_packets_t, q); if(!tx_q_item) { unifi_error(priv, "entry should exists, otherwise crashes (bug)\n"); } unifi_trace(priv, UDBG5, "proccess_tx: in uf_flush_list peerMacAddress=%02X%02X%02X%02X%02X%02X senderProcessId=%x\n", tx_q_item->peerMacAddress.a[0], tx_q_item->peerMacAddress.a[1], tx_q_item->peerMacAddress.a[2], tx_q_item->peerMacAddress.a[3], tx_q_item->peerMacAddress.a[4], tx_q_item->peerMacAddress.a[5], tx_q_item->leSenderProcessId); list_del(listHead); /* free the allocated memory */ unifi_net_data_free(priv, &tx_q_item->bulkdata); kfree(tx_q_item); tx_q_item = NULL; if (!priv->noOfPktQueuedInDriver) { unifi_error(priv, "packets queued in driver 0 still decrementing in %s\n", __FUNCTION__); } else { priv->noOfPktQueuedInDriver--; } } spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags); } tx_buffered_packets_t *dequeue_tx_data_pdu(unifi_priv_t *priv, struct list_head *txList) { /* dequeue the tx data packets from the appropriate queue */ tx_buffered_packets_t *tx_q_item = NULL; struct list_head *listHead; struct list_head *placeHolder; unsigned long lock_flags; unifi_trace(priv, UDBG5, "entering dequeue_tx_data_pdu\n"); /* check for list empty */ if (list_empty(txList)) { unifi_trace(priv, UDBG5, "In dequeue_tx_data_pdu, the list is empty\n"); return NULL; } /* Verification, if packet count is negetive */ if (priv->noOfPktQueuedInDriver == 0xFFFF) { unifi_warning(priv, "no packet available in queue: debug"); return NULL; } /* return first node after header, & delete from the list && atleast one item exist */ spin_lock_irqsave(&priv->tx_q_lock,lock_flags); list_for_each_safe(listHead, placeHolder, txList) { tx_q_item = list_entry(listHead, tx_buffered_packets_t, q); list_del(listHead); break; } spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags); if (tx_q_item) { unifi_trace(priv, UDBG5, "proccess_tx: In dequeue_tx_data_pdu peerMacAddress=%02X%02X%02X%02X%02X%02X senderProcessId=%x\n", tx_q_item->peerMacAddress.a[0], tx_q_item->peerMacAddress.a[1], tx_q_item->peerMacAddress.a[2], tx_q_item->peerMacAddress.a[3], tx_q_item->peerMacAddress.a[4], tx_q_item->peerMacAddress.a[5], tx_q_item->leSenderProcessId); } unifi_trace(priv, UDBG5, "leaving 
dequeue_tx_data_pdu\n"); return tx_q_item; } /* generic function to get the station record handler */ CsrWifiRouterCtrlStaInfo_t *CsrWifiRouterCtrlGetStationRecordFromPeerMacAddress(unifi_priv_t *priv, const u8 *peerMacAddress, u16 interfaceTag) { u8 i; netInterface_priv_t *interfacePriv; unsigned long lock_flags; if (interfaceTag >= CSR_WIFI_NUM_INTERFACES) { unifi_error(priv, "interfaceTag is not proper, interfaceTag = %d\n", interfaceTag); return NULL; } interfacePriv = priv->interfacePriv[interfaceTag]; /* disable the preemption until station record is fetched */ spin_lock_irqsave(&priv->staRecord_lock,lock_flags); for (i = 0; i < UNIFI_MAX_CONNECTIONS; i++) { if (interfacePriv->staInfo[i]!= NULL) { if (!memcmp(((CsrWifiRouterCtrlStaInfo_t *) (interfacePriv->staInfo[i]))->peerMacAddress.a, peerMacAddress, ETH_ALEN)) { /* enable the preemption as station record is fetched */ spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags); unifi_trace(priv, UDBG5, "peer entry found in station record\n"); return ((CsrWifiRouterCtrlStaInfo_t *) (interfacePriv->staInfo[i])); } } } /* enable the preemption as station record is fetched */ spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags); unifi_trace(priv, UDBG5, "peer entry not found in station record\n"); return NULL; } /* generic function to get the station record handler from the handle */ CsrWifiRouterCtrlStaInfo_t * CsrWifiRouterCtrlGetStationRecordFromHandle(unifi_priv_t *priv, u32 handle, u16 interfaceTag) { netInterface_priv_t *interfacePriv; if ((handle >= UNIFI_MAX_CONNECTIONS) || (interfaceTag >= CSR_WIFI_NUM_INTERFACES)) { unifi_error(priv, "handle/interfaceTag is not proper, handle = %d, interfaceTag = %d\n", handle, interfaceTag); return NULL; } interfacePriv = priv->interfacePriv[interfaceTag]; return ((CsrWifiRouterCtrlStaInfo_t *) (interfacePriv->staInfo[handle])); } /* Function to do inactivity */ void uf_check_inactivity(unifi_priv_t *priv, u16 interfaceTag, u32 currentTime) { u32 i; CsrWifiRouterCtrlStaInfo_t *staInfo; u32 elapsedTime; /* Time in microseconds */ netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag]; CsrWifiMacAddress peerMacAddress; unsigned long lock_flags; if (interfacePriv == NULL) { unifi_trace(priv, UDBG3, "uf_check_inactivity: Interface priv is NULL \n"); return; } spin_lock_irqsave(&priv->staRecord_lock,lock_flags); /* Go through the list of stations to check for inactivity */ for(i = 0; i < UNIFI_MAX_CONNECTIONS; i++) { staInfo = CsrWifiRouterCtrlGetStationRecordFromHandle(priv, i, interfaceTag); if(!staInfo ) { continue; } unifi_trace(priv, UDBG3, "Running Inactivity handler Time %xus station's last activity %xus\n", currentTime, staInfo->lastActivity); elapsedTime = (currentTime >= staInfo->lastActivity)? 
(currentTime - staInfo->lastActivity): (~((u32)0) - staInfo->lastActivity + currentTime); spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags); if (elapsedTime > MAX_INACTIVITY_INTERVAL) { memcpy((u8*)&peerMacAddress, (u8*)&staInfo->peerMacAddress, sizeof(CsrWifiMacAddress)); /* Indicate inactivity for the station */ unifi_trace(priv, UDBG3, "Station %x:%x:%x:%x:%x:%x inactive since %xus\n sending Inactive Ind\n", peerMacAddress.a[0], peerMacAddress.a[1], peerMacAddress.a[2], peerMacAddress.a[3], peerMacAddress.a[4], peerMacAddress.a[5], elapsedTime); CsrWifiRouterCtrlStaInactiveIndSend(priv->CSR_WIFI_SME_IFACEQUEUE, 0, interfaceTag, peerMacAddress); } } interfacePriv->last_inactivity_check = currentTime; } /* Function to update activity of a station */ void uf_update_sta_activity(unifi_priv_t *priv, u16 interfaceTag, const u8 *peerMacAddress) { u32 elapsedTime, currentTime; /* Time in microseconds */ u32 timeHi; /* Not used - Time in microseconds */ CsrWifiRouterCtrlStaInfo_t *staInfo; netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag]; unsigned long lock_flags; if (interfacePriv == NULL) { unifi_trace(priv, UDBG3, "uf_check_inactivity: Interface priv is NULL \n"); return; } currentTime = CsrTimeGet(&timeHi); staInfo = CsrWifiRouterCtrlGetStationRecordFromPeerMacAddress(priv, peerMacAddress, interfaceTag); if (staInfo == NULL) { unifi_trace(priv, UDBG4, "Sta does not exist yet"); return; } spin_lock_irqsave(&priv->staRecord_lock,lock_flags); /* Update activity */ staInfo->lastActivity = currentTime; /* See if inactivity handler needs to be run * Here it is theoretically possible that the counter may have wrapped around. But * since we just want to know when to run the inactivity handler it does not really matter. * Especially since this is data path it makes sense in keeping it simple and avoiding * 64 bit handling */ elapsedTime = (currentTime >= interfacePriv->last_inactivity_check)? 
(currentTime - interfacePriv->last_inactivity_check): (~((u32)0) - interfacePriv->last_inactivity_check + currentTime); spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags); /* Check if it is time to run the inactivity handler */ if (elapsedTime > INACTIVITY_CHECK_INTERVAL) { uf_check_inactivity(priv, interfaceTag, currentTime); } } void resume_unicast_buffered_frames(unifi_priv_t *priv, u16 interfaceTag) { netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag]; u8 i; int j; tx_buffered_packets_t * buffered_pkt = NULL; u8 hipslotFree[4] = {TRUE,TRUE,TRUE,TRUE}; int r; unsigned long lock_flags; while(!isRouterBufferEnabled(priv,3) && ((buffered_pkt=dequeue_tx_data_pdu(priv,&interfacePriv->genericMgtFrames))!=NULL)) { buffered_pkt->transmissionControl &= ~(TRANSMISSION_CONTROL_TRIGGER_MASK|TRANSMISSION_CONTROL_EOSP_MASK); if((r=frame_and_send_queued_pdu(priv,buffered_pkt,NULL,0,FALSE)) == -ENOSPC) { /* Enqueue at the head of the queue */ spin_lock_irqsave(&priv->tx_q_lock,lock_flags); list_add(&buffered_pkt->q, &interfacePriv->genericMgtFrames); spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags); hipslotFree[3]=FALSE; break; }else { if(r){ unifi_trace (priv, UDBG1, " HIP validation failure : PDU sending failed \n"); /* the PDU failed where we can't do any thing so free the storage */ unifi_net_data_free(priv, &buffered_pkt->bulkdata); } kfree(buffered_pkt); } } for(i = 0; i < UNIFI_MAX_CONNECTIONS; i++) { CsrWifiRouterCtrlStaInfo_t *staInfo = interfacePriv->staInfo[i]; if(!hipslotFree[0] && !hipslotFree[1] && !hipslotFree[2] && !hipslotFree[3]) { unifi_trace(priv, UDBG3, "(ENOSPC) in resume_unicast_buffered_frames:: hip slots are full \n"); break; } if (staInfo && (staInfo->currentPeerState == CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_ACTIVE)) { while((( TRUE == hipslotFree[3] ) && (buffered_pkt=dequeue_tx_data_pdu(priv, &staInfo->mgtFrames)))) { buffered_pkt->transmissionControl &= ~(TRANSMISSION_CONTROL_TRIGGER_MASK|TRANSMISSION_CONTROL_EOSP_MASK); if((r=frame_and_send_queued_pdu(priv,buffered_pkt,staInfo,0,FALSE)) == -ENOSPC) { unifi_trace(priv, UDBG3, "(ENOSPC) in resume_unicast_buffered_frames:: hip slots are full for voice queue\n"); /* Enqueue at the head of the queue */ spin_lock_irqsave(&priv->tx_q_lock,lock_flags); list_add(&buffered_pkt->q, &staInfo->mgtFrames); spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags); priv->pausedStaHandle[3]=(u8)(staInfo->assignedHandle); hipslotFree[3] = FALSE; break; } else { if(r){ unifi_trace (priv, UDBG1, " HIP validation failure : PDU sending failed \n"); /* the PDU failed where we can't do any thing so free the storage */ unifi_net_data_free(priv, &buffered_pkt->bulkdata); } kfree(buffered_pkt); } } for(j=3;j>=0;j--) { if(!hipslotFree[j]) continue; while((buffered_pkt=dequeue_tx_data_pdu(priv, &staInfo->dataPdu[j]))) { buffered_pkt->transmissionControl &= ~(TRANSMISSION_CONTROL_TRIGGER_MASK|TRANSMISSION_CONTROL_EOSP_MASK); if((r=frame_and_send_queued_pdu(priv,buffered_pkt,staInfo,0,FALSE)) == -ENOSPC) { /* Enqueue at the head of the queue */ spin_lock_irqsave(&priv->tx_q_lock,lock_flags); list_add(&buffered_pkt->q, &staInfo->dataPdu[j]); spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags); priv->pausedStaHandle[j]=(u8)(staInfo->assignedHandle); hipslotFree[j]=FALSE; break; } else { if(r){ unifi_trace (priv, UDBG1, " HIP validation failure : PDU sending failed \n"); /* the PDU failed where we can't do any thing so free the storage */ unifi_net_data_free(priv, &buffered_pkt->bulkdata); } kfree(buffered_pkt); } } } } } } void 
update_eosp_to_head_of_broadcast_list_head(unifi_priv_t *priv,u16 interfaceTag) { netInterface_priv_t *interfacePriv = priv->interfacePriv[interfaceTag]; unsigned long lock_flags; struct list_head *listHead; struct list_head *placeHolder; tx_buffered_packets_t *tx_q_item; if (interfacePriv->noOfbroadcastPktQueued) { /* Update the EOSP to the HEAD of b/c list * because we have received any mgmt packet so it should not hold for long time * peer may time out. */ spin_lock_irqsave(&priv->tx_q_lock,lock_flags); list_for_each_safe(listHead, placeHolder, &interfacePriv->genericMulticastOrBroadCastFrames) { tx_q_item = list_entry(listHead, tx_buffered_packets_t, q); tx_q_item->transmissionControl |= TRANSMISSION_CONTROL_EOSP_MASK; tx_q_item->transmissionControl = (tx_q_item->transmissionControl & ~(CSR_NO_CONFIRM_REQUIRED)); unifi_trace(priv, UDBG1,"updating eosp for list Head hostTag:= 0x%x ",tx_q_item->hostTag); break; } spin_unlock_irqrestore(&priv->tx_q_lock,lock_flags); } } /* * --------------------------------------------------------------------------- * resume_suspended_uapsd * * This function takes care processing packets of Unscheduled Service Period, * which been suspended earlier due to DTIM/HIP ENOSPC scenarios * * Arguments: * priv Pointer to device private context struct * interfaceTag For which resume should happen * --------------------------------------------------------------------------- */ void resume_suspended_uapsd(unifi_priv_t* priv,u16 interfaceTag) { u8 startIndex; CsrWifiRouterCtrlStaInfo_t * staInfo = NULL; unsigned long lock_flags; unifi_trace(priv, UDBG2, "++resume_suspended_uapsd: \n"); for(startIndex= 0; startIndex < UNIFI_MAX_CONNECTIONS;startIndex++) { staInfo = CsrWifiRouterCtrlGetStationRecordFromHandle(priv,startIndex,interfaceTag); if(!staInfo || !staInfo->wmmOrQosEnabled) { continue; } else if((staInfo->currentPeerState == CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_POWER_SAVE) &&staInfo->uapsdActive && staInfo->uspSuspend) { /* U-APSD Still active & previously suspended either ENOSPC of FH queues OR * due to DTIM activity */ uf_handle_uspframes_delivery(priv, staInfo, interfaceTag); } else { unifi_trace(priv, UDBG2, "resume_suspended_uapsd: PS state=%x, uapsdActive?=%x, suspend?=%x\n", staInfo->currentPeerState, staInfo->uapsdActive, staInfo->uspSuspend); if (staInfo->currentPeerState == CSR_WIFI_ROUTER_CTRL_PEER_CONNECTED_ACTIVE) { spin_lock_irqsave(&priv->staRecord_lock,lock_flags); staInfo->uapsdActive = FALSE; staInfo->uspSuspend = FALSE; spin_unlock_irqrestore(&priv->staRecord_lock,lock_flags); } } } unifi_trace(priv, UDBG2, "--resume_suspended_uapsd:\n"); } #endif
gpl-2.0
qizy09/PMFS-forked
sound/drivers/opl4/opl4_synth.c
3368
23322
/* * OPL4 MIDI synthesizer functions * * Copyright (c) 2003 by Clemens Ladisch <clemens@ladisch.de> * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * Alternatively, this software may be distributed and/or modified under the * terms of the GNU General Public License as published by the Free Software * Foundation; either version 2 of the License, or (at your option) any later * version. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include "opl4_local.h" #include <linux/delay.h> #include <asm/io.h> #include <sound/asoundef.h> /* GM2 controllers */ #ifndef MIDI_CTL_RELEASE_TIME #define MIDI_CTL_RELEASE_TIME 0x48 #define MIDI_CTL_ATTACK_TIME 0x49 #define MIDI_CTL_DECAY_TIME 0x4b #define MIDI_CTL_VIBRATO_RATE 0x4c #define MIDI_CTL_VIBRATO_DEPTH 0x4d #define MIDI_CTL_VIBRATO_DELAY 0x4e #endif /* * This table maps 100/128 cents to F_NUMBER. 
*/ static const s16 snd_opl4_pitch_map[0x600] = { 0x000,0x000,0x001,0x001,0x002,0x002,0x003,0x003, 0x004,0x004,0x005,0x005,0x006,0x006,0x006,0x007, 0x007,0x008,0x008,0x009,0x009,0x00a,0x00a,0x00b, 0x00b,0x00c,0x00c,0x00d,0x00d,0x00d,0x00e,0x00e, 0x00f,0x00f,0x010,0x010,0x011,0x011,0x012,0x012, 0x013,0x013,0x014,0x014,0x015,0x015,0x015,0x016, 0x016,0x017,0x017,0x018,0x018,0x019,0x019,0x01a, 0x01a,0x01b,0x01b,0x01c,0x01c,0x01d,0x01d,0x01e, 0x01e,0x01e,0x01f,0x01f,0x020,0x020,0x021,0x021, 0x022,0x022,0x023,0x023,0x024,0x024,0x025,0x025, 0x026,0x026,0x027,0x027,0x028,0x028,0x029,0x029, 0x029,0x02a,0x02a,0x02b,0x02b,0x02c,0x02c,0x02d, 0x02d,0x02e,0x02e,0x02f,0x02f,0x030,0x030,0x031, 0x031,0x032,0x032,0x033,0x033,0x034,0x034,0x035, 0x035,0x036,0x036,0x037,0x037,0x038,0x038,0x038, 0x039,0x039,0x03a,0x03a,0x03b,0x03b,0x03c,0x03c, 0x03d,0x03d,0x03e,0x03e,0x03f,0x03f,0x040,0x040, 0x041,0x041,0x042,0x042,0x043,0x043,0x044,0x044, 0x045,0x045,0x046,0x046,0x047,0x047,0x048,0x048, 0x049,0x049,0x04a,0x04a,0x04b,0x04b,0x04c,0x04c, 0x04d,0x04d,0x04e,0x04e,0x04f,0x04f,0x050,0x050, 0x051,0x051,0x052,0x052,0x053,0x053,0x054,0x054, 0x055,0x055,0x056,0x056,0x057,0x057,0x058,0x058, 0x059,0x059,0x05a,0x05a,0x05b,0x05b,0x05c,0x05c, 0x05d,0x05d,0x05e,0x05e,0x05f,0x05f,0x060,0x060, 0x061,0x061,0x062,0x062,0x063,0x063,0x064,0x064, 0x065,0x065,0x066,0x066,0x067,0x067,0x068,0x068, 0x069,0x069,0x06a,0x06a,0x06b,0x06b,0x06c,0x06c, 0x06d,0x06d,0x06e,0x06e,0x06f,0x06f,0x070,0x071, 0x071,0x072,0x072,0x073,0x073,0x074,0x074,0x075, 0x075,0x076,0x076,0x077,0x077,0x078,0x078,0x079, 0x079,0x07a,0x07a,0x07b,0x07b,0x07c,0x07c,0x07d, 0x07d,0x07e,0x07e,0x07f,0x07f,0x080,0x081,0x081, 0x082,0x082,0x083,0x083,0x084,0x084,0x085,0x085, 0x086,0x086,0x087,0x087,0x088,0x088,0x089,0x089, 0x08a,0x08a,0x08b,0x08b,0x08c,0x08d,0x08d,0x08e, 0x08e,0x08f,0x08f,0x090,0x090,0x091,0x091,0x092, 0x092,0x093,0x093,0x094,0x094,0x095,0x096,0x096, 0x097,0x097,0x098,0x098,0x099,0x099,0x09a,0x09a, 0x09b,0x09b,0x09c,0x09c,0x09d,0x09d,0x09e,0x09f, 0x09f,0x0a0,0x0a0,0x0a1,0x0a1,0x0a2,0x0a2,0x0a3, 0x0a3,0x0a4,0x0a4,0x0a5,0x0a6,0x0a6,0x0a7,0x0a7, 0x0a8,0x0a8,0x0a9,0x0a9,0x0aa,0x0aa,0x0ab,0x0ab, 0x0ac,0x0ad,0x0ad,0x0ae,0x0ae,0x0af,0x0af,0x0b0, 0x0b0,0x0b1,0x0b1,0x0b2,0x0b2,0x0b3,0x0b4,0x0b4, 0x0b5,0x0b5,0x0b6,0x0b6,0x0b7,0x0b7,0x0b8,0x0b8, 0x0b9,0x0ba,0x0ba,0x0bb,0x0bb,0x0bc,0x0bc,0x0bd, 0x0bd,0x0be,0x0be,0x0bf,0x0c0,0x0c0,0x0c1,0x0c1, 0x0c2,0x0c2,0x0c3,0x0c3,0x0c4,0x0c4,0x0c5,0x0c6, 0x0c6,0x0c7,0x0c7,0x0c8,0x0c8,0x0c9,0x0c9,0x0ca, 0x0cb,0x0cb,0x0cc,0x0cc,0x0cd,0x0cd,0x0ce,0x0ce, 0x0cf,0x0d0,0x0d0,0x0d1,0x0d1,0x0d2,0x0d2,0x0d3, 0x0d3,0x0d4,0x0d5,0x0d5,0x0d6,0x0d6,0x0d7,0x0d7, 0x0d8,0x0d8,0x0d9,0x0da,0x0da,0x0db,0x0db,0x0dc, 0x0dc,0x0dd,0x0de,0x0de,0x0df,0x0df,0x0e0,0x0e0, 0x0e1,0x0e1,0x0e2,0x0e3,0x0e3,0x0e4,0x0e4,0x0e5, 0x0e5,0x0e6,0x0e7,0x0e7,0x0e8,0x0e8,0x0e9,0x0e9, 0x0ea,0x0eb,0x0eb,0x0ec,0x0ec,0x0ed,0x0ed,0x0ee, 0x0ef,0x0ef,0x0f0,0x0f0,0x0f1,0x0f1,0x0f2,0x0f3, 0x0f3,0x0f4,0x0f4,0x0f5,0x0f5,0x0f6,0x0f7,0x0f7, 0x0f8,0x0f8,0x0f9,0x0f9,0x0fa,0x0fb,0x0fb,0x0fc, 0x0fc,0x0fd,0x0fd,0x0fe,0x0ff,0x0ff,0x100,0x100, 0x101,0x101,0x102,0x103,0x103,0x104,0x104,0x105, 0x106,0x106,0x107,0x107,0x108,0x108,0x109,0x10a, 0x10a,0x10b,0x10b,0x10c,0x10c,0x10d,0x10e,0x10e, 0x10f,0x10f,0x110,0x111,0x111,0x112,0x112,0x113, 0x114,0x114,0x115,0x115,0x116,0x116,0x117,0x118, 0x118,0x119,0x119,0x11a,0x11b,0x11b,0x11c,0x11c, 0x11d,0x11e,0x11e,0x11f,0x11f,0x120,0x120,0x121, 0x122,0x122,0x123,0x123,0x124,0x125,0x125,0x126, 0x126,0x127,0x128,0x128,0x129,0x129,0x12a,0x12b, 
0x12b,0x12c,0x12c,0x12d,0x12e,0x12e,0x12f,0x12f, 0x130,0x131,0x131,0x132,0x132,0x133,0x134,0x134, 0x135,0x135,0x136,0x137,0x137,0x138,0x138,0x139, 0x13a,0x13a,0x13b,0x13b,0x13c,0x13d,0x13d,0x13e, 0x13e,0x13f,0x140,0x140,0x141,0x141,0x142,0x143, 0x143,0x144,0x144,0x145,0x146,0x146,0x147,0x148, 0x148,0x149,0x149,0x14a,0x14b,0x14b,0x14c,0x14c, 0x14d,0x14e,0x14e,0x14f,0x14f,0x150,0x151,0x151, 0x152,0x153,0x153,0x154,0x154,0x155,0x156,0x156, 0x157,0x157,0x158,0x159,0x159,0x15a,0x15b,0x15b, 0x15c,0x15c,0x15d,0x15e,0x15e,0x15f,0x160,0x160, 0x161,0x161,0x162,0x163,0x163,0x164,0x165,0x165, 0x166,0x166,0x167,0x168,0x168,0x169,0x16a,0x16a, 0x16b,0x16b,0x16c,0x16d,0x16d,0x16e,0x16f,0x16f, 0x170,0x170,0x171,0x172,0x172,0x173,0x174,0x174, 0x175,0x175,0x176,0x177,0x177,0x178,0x179,0x179, 0x17a,0x17a,0x17b,0x17c,0x17c,0x17d,0x17e,0x17e, 0x17f,0x180,0x180,0x181,0x181,0x182,0x183,0x183, 0x184,0x185,0x185,0x186,0x187,0x187,0x188,0x188, 0x189,0x18a,0x18a,0x18b,0x18c,0x18c,0x18d,0x18e, 0x18e,0x18f,0x190,0x190,0x191,0x191,0x192,0x193, 0x193,0x194,0x195,0x195,0x196,0x197,0x197,0x198, 0x199,0x199,0x19a,0x19a,0x19b,0x19c,0x19c,0x19d, 0x19e,0x19e,0x19f,0x1a0,0x1a0,0x1a1,0x1a2,0x1a2, 0x1a3,0x1a4,0x1a4,0x1a5,0x1a6,0x1a6,0x1a7,0x1a8, 0x1a8,0x1a9,0x1a9,0x1aa,0x1ab,0x1ab,0x1ac,0x1ad, 0x1ad,0x1ae,0x1af,0x1af,0x1b0,0x1b1,0x1b1,0x1b2, 0x1b3,0x1b3,0x1b4,0x1b5,0x1b5,0x1b6,0x1b7,0x1b7, 0x1b8,0x1b9,0x1b9,0x1ba,0x1bb,0x1bb,0x1bc,0x1bd, 0x1bd,0x1be,0x1bf,0x1bf,0x1c0,0x1c1,0x1c1,0x1c2, 0x1c3,0x1c3,0x1c4,0x1c5,0x1c5,0x1c6,0x1c7,0x1c7, 0x1c8,0x1c9,0x1c9,0x1ca,0x1cb,0x1cb,0x1cc,0x1cd, 0x1cd,0x1ce,0x1cf,0x1cf,0x1d0,0x1d1,0x1d1,0x1d2, 0x1d3,0x1d3,0x1d4,0x1d5,0x1d5,0x1d6,0x1d7,0x1d7, 0x1d8,0x1d9,0x1d9,0x1da,0x1db,0x1db,0x1dc,0x1dd, 0x1dd,0x1de,0x1df,0x1df,0x1e0,0x1e1,0x1e1,0x1e2, 0x1e3,0x1e4,0x1e4,0x1e5,0x1e6,0x1e6,0x1e7,0x1e8, 0x1e8,0x1e9,0x1ea,0x1ea,0x1eb,0x1ec,0x1ec,0x1ed, 0x1ee,0x1ee,0x1ef,0x1f0,0x1f0,0x1f1,0x1f2,0x1f3, 0x1f3,0x1f4,0x1f5,0x1f5,0x1f6,0x1f7,0x1f7,0x1f8, 0x1f9,0x1f9,0x1fa,0x1fb,0x1fb,0x1fc,0x1fd,0x1fe, 0x1fe,0x1ff,0x200,0x200,0x201,0x202,0x202,0x203, 0x204,0x205,0x205,0x206,0x207,0x207,0x208,0x209, 0x209,0x20a,0x20b,0x20b,0x20c,0x20d,0x20e,0x20e, 0x20f,0x210,0x210,0x211,0x212,0x212,0x213,0x214, 0x215,0x215,0x216,0x217,0x217,0x218,0x219,0x21a, 0x21a,0x21b,0x21c,0x21c,0x21d,0x21e,0x21e,0x21f, 0x220,0x221,0x221,0x222,0x223,0x223,0x224,0x225, 0x226,0x226,0x227,0x228,0x228,0x229,0x22a,0x22b, 0x22b,0x22c,0x22d,0x22d,0x22e,0x22f,0x230,0x230, 0x231,0x232,0x232,0x233,0x234,0x235,0x235,0x236, 0x237,0x237,0x238,0x239,0x23a,0x23a,0x23b,0x23c, 0x23c,0x23d,0x23e,0x23f,0x23f,0x240,0x241,0x241, 0x242,0x243,0x244,0x244,0x245,0x246,0x247,0x247, 0x248,0x249,0x249,0x24a,0x24b,0x24c,0x24c,0x24d, 0x24e,0x24f,0x24f,0x250,0x251,0x251,0x252,0x253, 0x254,0x254,0x255,0x256,0x257,0x257,0x258,0x259, 0x259,0x25a,0x25b,0x25c,0x25c,0x25d,0x25e,0x25f, 0x25f,0x260,0x261,0x262,0x262,0x263,0x264,0x265, 0x265,0x266,0x267,0x267,0x268,0x269,0x26a,0x26a, 0x26b,0x26c,0x26d,0x26d,0x26e,0x26f,0x270,0x270, 0x271,0x272,0x273,0x273,0x274,0x275,0x276,0x276, 0x277,0x278,0x279,0x279,0x27a,0x27b,0x27c,0x27c, 0x27d,0x27e,0x27f,0x27f,0x280,0x281,0x282,0x282, 0x283,0x284,0x285,0x285,0x286,0x287,0x288,0x288, 0x289,0x28a,0x28b,0x28b,0x28c,0x28d,0x28e,0x28e, 0x28f,0x290,0x291,0x291,0x292,0x293,0x294,0x294, 0x295,0x296,0x297,0x298,0x298,0x299,0x29a,0x29b, 0x29b,0x29c,0x29d,0x29e,0x29e,0x29f,0x2a0,0x2a1, 0x2a1,0x2a2,0x2a3,0x2a4,0x2a5,0x2a5,0x2a6,0x2a7, 0x2a8,0x2a8,0x2a9,0x2aa,0x2ab,0x2ab,0x2ac,0x2ad, 0x2ae,0x2af,0x2af,0x2b0,0x2b1,0x2b2,0x2b2,0x2b3, 
0x2b4,0x2b5,0x2b5,0x2b6,0x2b7,0x2b8,0x2b9,0x2b9, 0x2ba,0x2bb,0x2bc,0x2bc,0x2bd,0x2be,0x2bf,0x2c0, 0x2c0,0x2c1,0x2c2,0x2c3,0x2c4,0x2c4,0x2c5,0x2c6, 0x2c7,0x2c7,0x2c8,0x2c9,0x2ca,0x2cb,0x2cb,0x2cc, 0x2cd,0x2ce,0x2ce,0x2cf,0x2d0,0x2d1,0x2d2,0x2d2, 0x2d3,0x2d4,0x2d5,0x2d6,0x2d6,0x2d7,0x2d8,0x2d9, 0x2da,0x2da,0x2db,0x2dc,0x2dd,0x2dd,0x2de,0x2df, 0x2e0,0x2e1,0x2e1,0x2e2,0x2e3,0x2e4,0x2e5,0x2e5, 0x2e6,0x2e7,0x2e8,0x2e9,0x2e9,0x2ea,0x2eb,0x2ec, 0x2ed,0x2ed,0x2ee,0x2ef,0x2f0,0x2f1,0x2f1,0x2f2, 0x2f3,0x2f4,0x2f5,0x2f5,0x2f6,0x2f7,0x2f8,0x2f9, 0x2f9,0x2fa,0x2fb,0x2fc,0x2fd,0x2fd,0x2fe,0x2ff, 0x300,0x301,0x302,0x302,0x303,0x304,0x305,0x306, 0x306,0x307,0x308,0x309,0x30a,0x30a,0x30b,0x30c, 0x30d,0x30e,0x30f,0x30f,0x310,0x311,0x312,0x313, 0x313,0x314,0x315,0x316,0x317,0x318,0x318,0x319, 0x31a,0x31b,0x31c,0x31c,0x31d,0x31e,0x31f,0x320, 0x321,0x321,0x322,0x323,0x324,0x325,0x326,0x326, 0x327,0x328,0x329,0x32a,0x32a,0x32b,0x32c,0x32d, 0x32e,0x32f,0x32f,0x330,0x331,0x332,0x333,0x334, 0x334,0x335,0x336,0x337,0x338,0x339,0x339,0x33a, 0x33b,0x33c,0x33d,0x33e,0x33e,0x33f,0x340,0x341, 0x342,0x343,0x343,0x344,0x345,0x346,0x347,0x348, 0x349,0x349,0x34a,0x34b,0x34c,0x34d,0x34e,0x34e, 0x34f,0x350,0x351,0x352,0x353,0x353,0x354,0x355, 0x356,0x357,0x358,0x359,0x359,0x35a,0x35b,0x35c, 0x35d,0x35e,0x35f,0x35f,0x360,0x361,0x362,0x363, 0x364,0x364,0x365,0x366,0x367,0x368,0x369,0x36a, 0x36a,0x36b,0x36c,0x36d,0x36e,0x36f,0x370,0x370, 0x371,0x372,0x373,0x374,0x375,0x376,0x377,0x377, 0x378,0x379,0x37a,0x37b,0x37c,0x37d,0x37d,0x37e, 0x37f,0x380,0x381,0x382,0x383,0x383,0x384,0x385, 0x386,0x387,0x388,0x389,0x38a,0x38a,0x38b,0x38c, 0x38d,0x38e,0x38f,0x390,0x391,0x391,0x392,0x393, 0x394,0x395,0x396,0x397,0x398,0x398,0x399,0x39a, 0x39b,0x39c,0x39d,0x39e,0x39f,0x39f,0x3a0,0x3a1, 0x3a2,0x3a3,0x3a4,0x3a5,0x3a6,0x3a7,0x3a7,0x3a8, 0x3a9,0x3aa,0x3ab,0x3ac,0x3ad,0x3ae,0x3ae,0x3af, 0x3b0,0x3b1,0x3b2,0x3b3,0x3b4,0x3b5,0x3b6,0x3b6, 0x3b7,0x3b8,0x3b9,0x3ba,0x3bb,0x3bc,0x3bd,0x3be, 0x3bf,0x3bf,0x3c0,0x3c1,0x3c2,0x3c3,0x3c4,0x3c5, 0x3c6,0x3c7,0x3c7,0x3c8,0x3c9,0x3ca,0x3cb,0x3cc, 0x3cd,0x3ce,0x3cf,0x3d0,0x3d1,0x3d1,0x3d2,0x3d3, 0x3d4,0x3d5,0x3d6,0x3d7,0x3d8,0x3d9,0x3da,0x3da, 0x3db,0x3dc,0x3dd,0x3de,0x3df,0x3e0,0x3e1,0x3e2, 0x3e3,0x3e4,0x3e4,0x3e5,0x3e6,0x3e7,0x3e8,0x3e9, 0x3ea,0x3eb,0x3ec,0x3ed,0x3ee,0x3ef,0x3ef,0x3f0, 0x3f1,0x3f2,0x3f3,0x3f4,0x3f5,0x3f6,0x3f7,0x3f8, 0x3f9,0x3fa,0x3fa,0x3fb,0x3fc,0x3fd,0x3fe,0x3ff }; /* * Attenuation according to GM recommendations, in -0.375 dB units. * table[v] = 40 * log(v / 127) / -0.375 */ static unsigned char snd_opl4_volume_table[128] = { 255,224,192,173,160,150,141,134, 128,122,117,113,109,105,102, 99, 96, 93, 90, 88, 85, 83, 81, 79, 77, 75, 73, 71, 70, 68, 67, 65, 64, 62, 61, 59, 58, 57, 56, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 39, 39, 38, 37, 36, 35, 34, 34, 33, 32, 31, 31, 30, 29, 29, 28, 27, 27, 26, 25, 25, 24, 24, 23, 22, 22, 21, 21, 20, 19, 19, 18, 18, 17, 17, 16, 16, 15, 15, 14, 14, 13, 13, 12, 12, 11, 11, 10, 10, 9, 9, 9, 8, 8, 7, 7, 6, 6, 6, 5, 5, 4, 4, 4, 3, 3, 2, 2, 2, 1, 1, 0, 0, 0 }; /* * Initializes all voices. 
*/ void snd_opl4_synth_reset(struct snd_opl4 *opl4) { unsigned long flags; int i; spin_lock_irqsave(&opl4->reg_lock, flags); for (i = 0; i < OPL4_MAX_VOICES; i++) snd_opl4_write(opl4, OPL4_REG_MISC + i, OPL4_DAMP_BIT); spin_unlock_irqrestore(&opl4->reg_lock, flags); INIT_LIST_HEAD(&opl4->off_voices); INIT_LIST_HEAD(&opl4->on_voices); memset(opl4->voices, 0, sizeof(opl4->voices)); for (i = 0; i < OPL4_MAX_VOICES; i++) { opl4->voices[i].number = i; list_add_tail(&opl4->voices[i].list, &opl4->off_voices); } snd_midi_channel_set_clear(opl4->chset); } /* * Shuts down all voices. */ void snd_opl4_synth_shutdown(struct snd_opl4 *opl4) { unsigned long flags; int i; spin_lock_irqsave(&opl4->reg_lock, flags); for (i = 0; i < OPL4_MAX_VOICES; i++) snd_opl4_write(opl4, OPL4_REG_MISC + i, opl4->voices[i].reg_misc & ~OPL4_KEY_ON_BIT); spin_unlock_irqrestore(&opl4->reg_lock, flags); } /* * Executes the callback for all voices playing the specified note. */ static void snd_opl4_do_for_note(struct snd_opl4 *opl4, int note, struct snd_midi_channel *chan, void (*func)(struct snd_opl4 *opl4, struct opl4_voice *voice)) { int i; unsigned long flags; struct opl4_voice *voice; spin_lock_irqsave(&opl4->reg_lock, flags); for (i = 0; i < OPL4_MAX_VOICES; i++) { voice = &opl4->voices[i]; if (voice->chan == chan && voice->note == note) { func(opl4, voice); } } spin_unlock_irqrestore(&opl4->reg_lock, flags); } /* * Executes the callback for all voices of the specified channel. */ static void snd_opl4_do_for_channel(struct snd_opl4 *opl4, struct snd_midi_channel *chan, void (*func)(struct snd_opl4 *opl4, struct opl4_voice *voice)) { int i; unsigned long flags; struct opl4_voice *voice; spin_lock_irqsave(&opl4->reg_lock, flags); for (i = 0; i < OPL4_MAX_VOICES; i++) { voice = &opl4->voices[i]; if (voice->chan == chan) { func(opl4, voice); } } spin_unlock_irqrestore(&opl4->reg_lock, flags); } /* * Executes the callback for all active voices.
*/ static void snd_opl4_do_for_all(struct snd_opl4 *opl4, void (*func)(struct snd_opl4 *opl4, struct opl4_voice *voice)) { int i; unsigned long flags; struct opl4_voice *voice; spin_lock_irqsave(&opl4->reg_lock, flags); for (i = 0; i < OPL4_MAX_VOICES; i++) { voice = &opl4->voices[i]; if (voice->chan) func(opl4, voice); } spin_unlock_irqrestore(&opl4->reg_lock, flags); } static void snd_opl4_update_volume(struct snd_opl4 *opl4, struct opl4_voice *voice) { int att; att = voice->sound->tone_attenuate; att += snd_opl4_volume_table[opl4->chset->gs_master_volume & 0x7f]; att += snd_opl4_volume_table[voice->chan->gm_volume & 0x7f]; att += snd_opl4_volume_table[voice->chan->gm_expression & 0x7f]; att += snd_opl4_volume_table[voice->velocity]; att = 0x7f - (0x7f - att) * (voice->sound->volume_factor) / 0xfe - volume_boost; if (att < 0) att = 0; else if (att > 0x7e) att = 0x7e; snd_opl4_write(opl4, OPL4_REG_LEVEL + voice->number, (att << 1) | voice->level_direct); voice->level_direct = 0; } static void snd_opl4_update_pan(struct snd_opl4 *opl4, struct opl4_voice *voice) { int pan = voice->sound->panpot; if (!voice->chan->drum_channel) pan += (voice->chan->control[MIDI_CTL_MSB_PAN] - 0x40) >> 3; if (pan < -7) pan = -7; else if (pan > 7) pan = 7; voice->reg_misc = (voice->reg_misc & ~OPL4_PAN_POT_MASK) | (pan & OPL4_PAN_POT_MASK); snd_opl4_write(opl4, OPL4_REG_MISC + voice->number, voice->reg_misc); } static void snd_opl4_update_vibrato_depth(struct snd_opl4 *opl4, struct opl4_voice *voice) { int depth; if (voice->chan->drum_channel) return; depth = (7 - voice->sound->vibrato) * (voice->chan->control[MIDI_CTL_VIBRATO_DEPTH] & 0x7f); depth = (depth >> 7) + voice->sound->vibrato; voice->reg_lfo_vibrato &= ~OPL4_VIBRATO_DEPTH_MASK; voice->reg_lfo_vibrato |= depth & OPL4_VIBRATO_DEPTH_MASK; snd_opl4_write(opl4, OPL4_REG_LFO_VIBRATO + voice->number, voice->reg_lfo_vibrato); } static void snd_opl4_update_pitch(struct snd_opl4 *opl4, struct opl4_voice *voice) { struct snd_midi_channel *chan = voice->chan; int note, pitch, octave; note = chan->drum_channel ? 60 : voice->note; /* * pitch is in 100/128 cents, so 0x80 is one semitone and * 0x600 is one octave. 
*/ pitch = ((note - 60) << 7) * voice->sound->key_scaling / 100 + (60 << 7); pitch += voice->sound->pitch_offset; if (!chan->drum_channel) pitch += chan->gm_rpn_coarse_tuning; pitch += chan->gm_rpn_fine_tuning >> 7; pitch += chan->midi_pitchbend * chan->gm_rpn_pitch_bend_range / 0x2000; if (pitch < 0) pitch = 0; else if (pitch >= 0x6000) pitch = 0x5fff; octave = pitch / 0x600 - 8; pitch = snd_opl4_pitch_map[pitch % 0x600]; snd_opl4_write(opl4, OPL4_REG_OCTAVE + voice->number, (octave << 4) | ((pitch >> 7) & OPL4_F_NUMBER_HIGH_MASK)); voice->reg_f_number = (voice->reg_f_number & OPL4_TONE_NUMBER_BIT8) | ((pitch << 1) & OPL4_F_NUMBER_LOW_MASK); snd_opl4_write(opl4, OPL4_REG_F_NUMBER + voice->number, voice->reg_f_number); } static void snd_opl4_update_tone_parameters(struct snd_opl4 *opl4, struct opl4_voice *voice) { snd_opl4_write(opl4, OPL4_REG_ATTACK_DECAY1 + voice->number, voice->sound->reg_attack_decay1); snd_opl4_write(opl4, OPL4_REG_LEVEL_DECAY2 + voice->number, voice->sound->reg_level_decay2); snd_opl4_write(opl4, OPL4_REG_RELEASE_CORRECTION + voice->number, voice->sound->reg_release_correction); snd_opl4_write(opl4, OPL4_REG_TREMOLO + voice->number, voice->sound->reg_tremolo); } /* allocate one voice */ static struct opl4_voice *snd_opl4_get_voice(struct snd_opl4 *opl4) { /* first, try to get the oldest key-off voice */ if (!list_empty(&opl4->off_voices)) return list_entry(opl4->off_voices.next, struct opl4_voice, list); /* then get the oldest key-on voice */ snd_BUG_ON(list_empty(&opl4->on_voices)); return list_entry(opl4->on_voices.next, struct opl4_voice, list); } static void snd_opl4_wait_for_wave_headers(struct snd_opl4 *opl4) { int timeout = 200; while ((inb(opl4->fm_port) & OPL4_STATUS_LOAD) && --timeout > 0) udelay(10); } void snd_opl4_note_on(void *private_data, int note, int vel, struct snd_midi_channel *chan) { struct snd_opl4 *opl4 = private_data; const struct opl4_region_ptr *regions; struct opl4_voice *voice[2]; const struct opl4_sound *sound[2]; int voices = 0, i; unsigned long flags; /* determine the number of voices and voice parameters */ i = chan->drum_channel ? 
0x80 : (chan->midi_program & 0x7f); regions = &snd_yrw801_regions[i]; for (i = 0; i < regions->count; i++) { if (note >= regions->regions[i].key_min && note <= regions->regions[i].key_max) { sound[voices] = &regions->regions[i].sound; if (++voices >= 2) break; } } /* allocate and initialize the needed voices */ spin_lock_irqsave(&opl4->reg_lock, flags); for (i = 0; i < voices; i++) { voice[i] = snd_opl4_get_voice(opl4); list_move_tail(&voice[i]->list, &opl4->on_voices); voice[i]->chan = chan; voice[i]->note = note; voice[i]->velocity = vel & 0x7f; voice[i]->sound = sound[i]; } /* set tone number (triggers header loading) */ for (i = 0; i < voices; i++) { voice[i]->reg_f_number = (sound[i]->tone >> 8) & OPL4_TONE_NUMBER_BIT8; snd_opl4_write(opl4, OPL4_REG_F_NUMBER + voice[i]->number, voice[i]->reg_f_number); snd_opl4_write(opl4, OPL4_REG_TONE_NUMBER + voice[i]->number, sound[i]->tone & 0xff); } /* set parameters which can be set while loading */ for (i = 0; i < voices; i++) { voice[i]->reg_misc = OPL4_LFO_RESET_BIT; snd_opl4_update_pan(opl4, voice[i]); snd_opl4_update_pitch(opl4, voice[i]); voice[i]->level_direct = OPL4_LEVEL_DIRECT_BIT; snd_opl4_update_volume(opl4, voice[i]); } spin_unlock_irqrestore(&opl4->reg_lock, flags); /* wait for completion of loading */ snd_opl4_wait_for_wave_headers(opl4); /* set remaining parameters */ spin_lock_irqsave(&opl4->reg_lock, flags); for (i = 0; i < voices; i++) { snd_opl4_update_tone_parameters(opl4, voice[i]); voice[i]->reg_lfo_vibrato = voice[i]->sound->reg_lfo_vibrato; snd_opl4_update_vibrato_depth(opl4, voice[i]); } /* finally, switch on all voices */ for (i = 0; i < voices; i++) { voice[i]->reg_misc = (voice[i]->reg_misc & 0x1f) | OPL4_KEY_ON_BIT; snd_opl4_write(opl4, OPL4_REG_MISC + voice[i]->number, voice[i]->reg_misc); } spin_unlock_irqrestore(&opl4->reg_lock, flags); } static void snd_opl4_voice_off(struct snd_opl4 *opl4, struct opl4_voice *voice) { list_move_tail(&voice->list, &opl4->off_voices); voice->reg_misc &= ~OPL4_KEY_ON_BIT; snd_opl4_write(opl4, OPL4_REG_MISC + voice->number, voice->reg_misc); } void snd_opl4_note_off(void *private_data, int note, int vel, struct snd_midi_channel *chan) { struct snd_opl4 *opl4 = private_data; snd_opl4_do_for_note(opl4, note, chan, snd_opl4_voice_off); } static void snd_opl4_terminate_voice(struct snd_opl4 *opl4, struct opl4_voice *voice) { list_move_tail(&voice->list, &opl4->off_voices); voice->reg_misc = (voice->reg_misc & ~OPL4_KEY_ON_BIT) | OPL4_DAMP_BIT; snd_opl4_write(opl4, OPL4_REG_MISC + voice->number, voice->reg_misc); } void snd_opl4_terminate_note(void *private_data, int note, struct snd_midi_channel *chan) { struct snd_opl4 *opl4 = private_data; snd_opl4_do_for_note(opl4, note, chan, snd_opl4_terminate_voice); } void snd_opl4_control(void *private_data, int type, struct snd_midi_channel *chan) { struct snd_opl4 *opl4 = private_data; switch (type) { case MIDI_CTL_MSB_MODWHEEL: chan->control[MIDI_CTL_VIBRATO_DEPTH] = chan->control[MIDI_CTL_MSB_MODWHEEL]; snd_opl4_do_for_channel(opl4, chan, snd_opl4_update_vibrato_depth); break; case MIDI_CTL_MSB_MAIN_VOLUME: snd_opl4_do_for_channel(opl4, chan, snd_opl4_update_volume); break; case MIDI_CTL_MSB_PAN: snd_opl4_do_for_channel(opl4, chan, snd_opl4_update_pan); break; case MIDI_CTL_MSB_EXPRESSION: snd_opl4_do_for_channel(opl4, chan, snd_opl4_update_volume); break; case MIDI_CTL_VIBRATO_RATE: /* not yet supported */ break; case MIDI_CTL_VIBRATO_DEPTH: snd_opl4_do_for_channel(opl4, chan, snd_opl4_update_vibrato_depth); break; case 
MIDI_CTL_VIBRATO_DELAY: /* not yet supported */ break; case MIDI_CTL_E1_REVERB_DEPTH: /* * Each OPL4 voice has a bit called "Pseudo-Reverb", but * IMHO _not_ using it enhances the listening experience. */ break; case MIDI_CTL_PITCHBEND: snd_opl4_do_for_channel(opl4, chan, snd_opl4_update_pitch); break; } } void snd_opl4_sysex(void *private_data, unsigned char *buf, int len, int parsed, struct snd_midi_channel_set *chset) { struct snd_opl4 *opl4 = private_data; if (parsed == SNDRV_MIDI_SYSEX_GS_MASTER_VOLUME) snd_opl4_do_for_all(opl4, snd_opl4_update_volume); }
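/*
 * Editorial sketch, not part of the original ALSA driver: the
 * snd_opl4_volume_table[] above follows the formula quoted in its
 * comment, 40 * log(v / 127) / -0.375, truncated to an integer.
 * A user-space helper that regenerates the table could look like the
 * code below (the function name is hypothetical; the block is kept
 * under #if 0 because log10() and <math.h> are not available in the
 * kernel).
 */
#if 0
static unsigned char gm_volume_to_attenuation(int v)
{
	double att;

	if (v <= 0)
		return 255;	/* MIDI volume 0: maximum attenuation */
	att = 40.0 * log10((double)v / 127.0) / -0.375;
	if (att > 255.0)
		att = 255.0;
	return (unsigned char)att;	/* e.g. v = 127 -> 0, v = 64 -> 31 */
}
#endif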
gpl-2.0
XperianPro/android_kernel_xiaomi_aries-port
drivers/net/can/usb/peak_usb/pcan_usb_pro.c
3368
26640
/* * CAN driver for PEAK System PCAN-USB Pro adapter * Derived from the PCAN project file driver/src/pcan_usbpro.c * * Copyright (C) 2003-2011 PEAK System-Technik GmbH * Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published * by the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/netdevice.h> #include <linux/usb.h> #include <linux/module.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include "pcan_usb_core.h" #include "pcan_usb_pro.h" MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro adapter"); /* PCAN-USB Pro Endpoints */ #define PCAN_USBPRO_EP_CMDOUT 1 #define PCAN_USBPRO_EP_CMDIN (PCAN_USBPRO_EP_CMDOUT | USB_DIR_IN) #define PCAN_USBPRO_EP_MSGOUT_0 2 #define PCAN_USBPRO_EP_MSGIN (PCAN_USBPRO_EP_MSGOUT_0 | USB_DIR_IN) #define PCAN_USBPRO_EP_MSGOUT_1 3 #define PCAN_USBPRO_EP_UNUSED (PCAN_USBPRO_EP_MSGOUT_1 | USB_DIR_IN) #define PCAN_USBPRO_CHANNEL_COUNT 2 /* PCAN-USB Pro adapter internal clock (MHz) */ #define PCAN_USBPRO_CRYSTAL_HZ 56000000 /* PCAN-USB Pro command timeout (ms.) */ #define PCAN_USBPRO_COMMAND_TIMEOUT 1000 /* PCAN-USB Pro rx/tx buffers size */ #define PCAN_USBPRO_RX_BUFFER_SIZE 1024 #define PCAN_USBPRO_TX_BUFFER_SIZE 64 #define PCAN_USBPRO_MSG_HEADER_LEN 4 /* some commands responses need to be re-submitted */ #define PCAN_USBPRO_RSP_SUBMIT_MAX 2 #define PCAN_USBPRO_RTR 0x01 #define PCAN_USBPRO_EXT 0x02 #define PCAN_USBPRO_CMD_BUFFER_SIZE 512 /* handle device specific info used by the netdevices */ struct pcan_usb_pro_interface { struct peak_usb_device *dev[PCAN_USBPRO_CHANNEL_COUNT]; struct peak_time_ref time_ref; int cm_ignore_count; int dev_opened_count; }; /* device information */ struct pcan_usb_pro_device { struct peak_usb_device dev; struct pcan_usb_pro_interface *usb_if; u32 cached_ccbt; }; /* internal structure used to handle messages sent to bulk urb */ struct pcan_usb_pro_msg { u8 *rec_ptr; int rec_buffer_size; int rec_buffer_len; union { u16 *rec_cnt_rd; u32 *rec_cnt; u8 *rec_buffer; } u; }; /* records sizes table indexed on message id. 
(8-bits value) */ static u16 pcan_usb_pro_sizeof_rec[256] = { [PCAN_USBPRO_SETBTR] = sizeof(struct pcan_usb_pro_btr), [PCAN_USBPRO_SETBUSACT] = sizeof(struct pcan_usb_pro_busact), [PCAN_USBPRO_SETSILENT] = sizeof(struct pcan_usb_pro_silent), [PCAN_USBPRO_SETFILTR] = sizeof(struct pcan_usb_pro_filter), [PCAN_USBPRO_SETTS] = sizeof(struct pcan_usb_pro_setts), [PCAN_USBPRO_GETDEVID] = sizeof(struct pcan_usb_pro_devid), [PCAN_USBPRO_SETLED] = sizeof(struct pcan_usb_pro_setled), [PCAN_USBPRO_RXMSG8] = sizeof(struct pcan_usb_pro_rxmsg), [PCAN_USBPRO_RXMSG4] = sizeof(struct pcan_usb_pro_rxmsg) - 4, [PCAN_USBPRO_RXMSG0] = sizeof(struct pcan_usb_pro_rxmsg) - 8, [PCAN_USBPRO_RXRTR] = sizeof(struct pcan_usb_pro_rxmsg) - 8, [PCAN_USBPRO_RXSTATUS] = sizeof(struct pcan_usb_pro_rxstatus), [PCAN_USBPRO_RXTS] = sizeof(struct pcan_usb_pro_rxts), [PCAN_USBPRO_TXMSG8] = sizeof(struct pcan_usb_pro_txmsg), [PCAN_USBPRO_TXMSG4] = sizeof(struct pcan_usb_pro_txmsg) - 4, [PCAN_USBPRO_TXMSG0] = sizeof(struct pcan_usb_pro_txmsg) - 8, }; /* * initialize PCAN-USB Pro message data structure */ static u8 *pcan_msg_init(struct pcan_usb_pro_msg *pm, void *buffer_addr, int buffer_size) { if (buffer_size < PCAN_USBPRO_MSG_HEADER_LEN) return NULL; pm->u.rec_buffer = (u8 *)buffer_addr; pm->rec_buffer_size = pm->rec_buffer_len = buffer_size; pm->rec_ptr = pm->u.rec_buffer + PCAN_USBPRO_MSG_HEADER_LEN; return pm->rec_ptr; } static u8 *pcan_msg_init_empty(struct pcan_usb_pro_msg *pm, void *buffer_addr, int buffer_size) { u8 *pr = pcan_msg_init(pm, buffer_addr, buffer_size); if (pr) { pm->rec_buffer_len = PCAN_USBPRO_MSG_HEADER_LEN; *pm->u.rec_cnt = 0; } return pr; } /* * add one record to a message being built */ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, u8 id, ...) { int len, i; u8 *pc; va_list ap; va_start(ap, id); pc = pm->rec_ptr + 1; i = 0; switch (id) { case PCAN_USBPRO_TXMSG8: i += 4; case PCAN_USBPRO_TXMSG4: i += 4; case PCAN_USBPRO_TXMSG0: *pc++ = va_arg(ap, int); *pc++ = va_arg(ap, int); *pc++ = va_arg(ap, int); *(u32 *)pc = cpu_to_le32(va_arg(ap, u32)); pc += 4; memcpy(pc, va_arg(ap, int *), i); pc += i; break; case PCAN_USBPRO_SETBTR: case PCAN_USBPRO_GETDEVID: *pc++ = va_arg(ap, int); pc += 2; *(u32 *)pc = cpu_to_le32(va_arg(ap, u32)); pc += 4; break; case PCAN_USBPRO_SETFILTR: case PCAN_USBPRO_SETBUSACT: case PCAN_USBPRO_SETSILENT: *pc++ = va_arg(ap, int); *(u16 *)pc = cpu_to_le16(va_arg(ap, int)); pc += 2; break; case PCAN_USBPRO_SETLED: *pc++ = va_arg(ap, int); *(u16 *)pc = cpu_to_le16(va_arg(ap, int)); pc += 2; *(u32 *)pc = cpu_to_le32(va_arg(ap, u32)); pc += 4; break; case PCAN_USBPRO_SETTS: pc++; *(u16 *)pc = cpu_to_le16(va_arg(ap, int)); pc += 2; break; default: pr_err("%s: %s(): unknown data type %02Xh (%d)\n", PCAN_USB_DRIVER_NAME, __func__, id, id); pc--; break; } len = pc - pm->rec_ptr; if (len > 0) { *pm->u.rec_cnt = cpu_to_le32(*pm->u.rec_cnt+1); *pm->rec_ptr = id; pm->rec_ptr = pc; pm->rec_buffer_len += len; } va_end(ap); return len; } /* * send PCAN-USB Pro command synchronously */ static int pcan_usb_pro_send_cmd(struct peak_usb_device *dev, struct pcan_usb_pro_msg *pum) { int actual_length; int err; /* usb device unregistered? 
*/ if (!(dev->state & PCAN_USB_STATE_CONNECTED)) return 0; err = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, PCAN_USBPRO_EP_CMDOUT), pum->u.rec_buffer, pum->rec_buffer_len, &actual_length, PCAN_USBPRO_COMMAND_TIMEOUT); if (err) netdev_err(dev->netdev, "sending command failure: %d\n", err); return err; } /* * wait for PCAN-USB Pro command response */ static int pcan_usb_pro_wait_rsp(struct peak_usb_device *dev, struct pcan_usb_pro_msg *pum) { u8 req_data_type, req_channel; int actual_length; int i, err = 0; /* usb device unregistered? */ if (!(dev->state & PCAN_USB_STATE_CONNECTED)) return 0; req_data_type = pum->u.rec_buffer[4]; req_channel = pum->u.rec_buffer[5]; *pum->u.rec_cnt = 0; for (i = 0; !err && i < PCAN_USBPRO_RSP_SUBMIT_MAX; i++) { struct pcan_usb_pro_msg rsp; union pcan_usb_pro_rec *pr; u32 r, rec_cnt; u16 rec_len; u8 *pc; err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, PCAN_USBPRO_EP_CMDIN), pum->u.rec_buffer, pum->rec_buffer_len, &actual_length, PCAN_USBPRO_COMMAND_TIMEOUT); if (err) { netdev_err(dev->netdev, "waiting rsp error %d\n", err); break; } if (actual_length == 0) continue; err = -EBADMSG; if (actual_length < PCAN_USBPRO_MSG_HEADER_LEN) { netdev_err(dev->netdev, "got abnormal too small rsp (len=%d)\n", actual_length); break; } pc = pcan_msg_init(&rsp, pum->u.rec_buffer, actual_length); rec_cnt = le32_to_cpu(*rsp.u.rec_cnt); /* loop on records stored into message */ for (r = 0; r < rec_cnt; r++) { pr = (union pcan_usb_pro_rec *)pc; rec_len = pcan_usb_pro_sizeof_rec[pr->data_type]; if (!rec_len) { netdev_err(dev->netdev, "got unprocessed record in msg\n"); dump_mem("rcvd rsp msg", pum->u.rec_buffer, actual_length); break; } /* check if response corresponds to request */ if (pr->data_type != req_data_type) netdev_err(dev->netdev, "got unwanted rsp %xh: ignored\n", pr->data_type); /* check if channel in response corresponds too */ else if ((req_channel != 0xff) && \ (pr->bus_act.channel != req_channel)) netdev_err(dev->netdev, "got rsp %xh but on chan%u: ignored\n", req_data_type, pr->bus_act.channel); /* got the response */ else return 0; /* otherwise, go on with next record in message */ pc += rec_len; } } return (i >= PCAN_USBPRO_RSP_SUBMIT_MAX) ? -ERANGE : err; } static int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id, int req_value, void *req_addr, int req_size) { int err; u8 req_type; unsigned int p; /* usb device unregistered? 
*/ if (!(dev->state & PCAN_USB_STATE_CONNECTED)) return 0; memset(req_addr, '\0', req_size); req_type = USB_TYPE_VENDOR | USB_RECIP_OTHER; switch (req_id) { case PCAN_USBPRO_REQ_FCT: p = usb_sndctrlpipe(dev->udev, 0); break; default: p = usb_rcvctrlpipe(dev->udev, 0); req_type |= USB_DIR_IN; break; } err = usb_control_msg(dev->udev, p, req_id, req_type, req_value, 0, req_addr, req_size, 2 * USB_CTRL_GET_TIMEOUT); if (err < 0) { netdev_info(dev->netdev, "unable to request usb[type=%d value=%d] err=%d\n", req_id, req_value, err); return err; } return 0; } static int pcan_usb_pro_set_ts(struct peak_usb_device *dev, u16 onoff) { struct pcan_usb_pro_msg um; pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN); pcan_msg_add_rec(&um, PCAN_USBPRO_SETTS, onoff); return pcan_usb_pro_send_cmd(dev, &um); } static int pcan_usb_pro_set_bitrate(struct peak_usb_device *dev, u32 ccbt) { struct pcan_usb_pro_device *pdev = container_of(dev, struct pcan_usb_pro_device, dev); struct pcan_usb_pro_msg um; pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN); pcan_msg_add_rec(&um, PCAN_USBPRO_SETBTR, dev->ctrl_idx, ccbt); /* cache the CCBT value to reuse it before next buson */ pdev->cached_ccbt = ccbt; return pcan_usb_pro_send_cmd(dev, &um); } static int pcan_usb_pro_set_bus(struct peak_usb_device *dev, u8 onoff) { struct pcan_usb_pro_msg um; /* if bus=on, be sure the bitrate being set before! */ if (onoff) { struct pcan_usb_pro_device *pdev = container_of(dev, struct pcan_usb_pro_device, dev); pcan_usb_pro_set_bitrate(dev, pdev->cached_ccbt); } pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN); pcan_msg_add_rec(&um, PCAN_USBPRO_SETBUSACT, dev->ctrl_idx, onoff); return pcan_usb_pro_send_cmd(dev, &um); } static int pcan_usb_pro_set_silent(struct peak_usb_device *dev, u8 onoff) { struct pcan_usb_pro_msg um; pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN); pcan_msg_add_rec(&um, PCAN_USBPRO_SETSILENT, dev->ctrl_idx, onoff); return pcan_usb_pro_send_cmd(dev, &um); } static int pcan_usb_pro_set_filter(struct peak_usb_device *dev, u16 filter_mode) { struct pcan_usb_pro_msg um; pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN); pcan_msg_add_rec(&um, PCAN_USBPRO_SETFILTR, dev->ctrl_idx, filter_mode); return pcan_usb_pro_send_cmd(dev, &um); } static int pcan_usb_pro_set_led(struct peak_usb_device *dev, u8 mode, u32 timeout) { struct pcan_usb_pro_msg um; pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN); pcan_msg_add_rec(&um, PCAN_USBPRO_SETLED, dev->ctrl_idx, mode, timeout); return pcan_usb_pro_send_cmd(dev, &um); } static int pcan_usb_pro_get_device_id(struct peak_usb_device *dev, u32 *device_id) { struct pcan_usb_pro_devid *pdn; struct pcan_usb_pro_msg um; int err; u8 *pc; pc = pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN); pcan_msg_add_rec(&um, PCAN_USBPRO_GETDEVID, dev->ctrl_idx); err = pcan_usb_pro_send_cmd(dev, &um); if (err) return err; err = pcan_usb_pro_wait_rsp(dev, &um); if (err) return err; pdn = (struct pcan_usb_pro_devid *)pc; if (device_id) *device_id = le32_to_cpu(pdn->serial_num); return err; } static int pcan_usb_pro_set_bittiming(struct peak_usb_device *dev, struct can_bittiming *bt) { u32 ccbt; ccbt = (dev->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 
0x00800000 : 0; ccbt |= (bt->sjw - 1) << 24; ccbt |= (bt->phase_seg2 - 1) << 20; ccbt |= (bt->prop_seg + bt->phase_seg1 - 1) << 16; /* = tseg1 */ ccbt |= bt->brp - 1; netdev_info(dev->netdev, "setting ccbt=0x%08x\n", ccbt); return pcan_usb_pro_set_bitrate(dev, ccbt); } static void pcan_usb_pro_restart_complete(struct urb *urb) { /* can delete usb resources */ peak_usb_async_complete(urb); /* notify candev and netdev */ peak_usb_restart_complete(urb->context); } /* * handle restart but in asynchronously way */ static int pcan_usb_pro_restart_async(struct peak_usb_device *dev, struct urb *urb, u8 *buf) { struct pcan_usb_pro_msg um; pcan_msg_init_empty(&um, buf, PCAN_USB_MAX_CMD_LEN); pcan_msg_add_rec(&um, PCAN_USBPRO_SETBUSACT, dev->ctrl_idx, 1); usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, PCAN_USBPRO_EP_CMDOUT), buf, PCAN_USB_MAX_CMD_LEN, pcan_usb_pro_restart_complete, dev); return usb_submit_urb(urb, GFP_ATOMIC); } static void pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded) { u8 buffer[16]; buffer[0] = 0; buffer[1] = !!loaded; pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_FCT, PCAN_USBPRO_FCT_DRVLD, buffer, sizeof(buffer)); } static inline struct pcan_usb_pro_interface *pcan_usb_pro_dev_if(struct peak_usb_device *dev) { struct pcan_usb_pro_device *pdev = container_of(dev, struct pcan_usb_pro_device, dev); return pdev->usb_if; } static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if, struct pcan_usb_pro_rxmsg *rx) { const unsigned int ctrl_idx = (rx->len >> 4) & 0x0f; struct peak_usb_device *dev = usb_if->dev[ctrl_idx]; struct net_device *netdev = dev->netdev; struct can_frame *can_frame; struct sk_buff *skb; struct timeval tv; skb = alloc_can_skb(netdev, &can_frame); if (!skb) return -ENOMEM; can_frame->can_id = le32_to_cpu(rx->id); can_frame->can_dlc = rx->len & 0x0f; if (rx->flags & PCAN_USBPRO_EXT) can_frame->can_id |= CAN_EFF_FLAG; if (rx->flags & PCAN_USBPRO_RTR) can_frame->can_id |= CAN_RTR_FLAG; else memcpy(can_frame->data, rx->data, can_frame->can_dlc); peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(rx->ts32), &tv); skb->tstamp = timeval_to_ktime(tv); netif_rx(skb); netdev->stats.rx_packets++; netdev->stats.rx_bytes += can_frame->can_dlc; return 0; } static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if, struct pcan_usb_pro_rxstatus *er) { const u32 raw_status = le32_to_cpu(er->status); const unsigned int ctrl_idx = (er->channel >> 4) & 0x0f; struct peak_usb_device *dev = usb_if->dev[ctrl_idx]; struct net_device *netdev = dev->netdev; struct can_frame *can_frame; enum can_state new_state = CAN_STATE_ERROR_ACTIVE; u8 err_mask = 0; struct sk_buff *skb; struct timeval tv; /* nothing should be sent while in BUS_OFF state */ if (dev->can.state == CAN_STATE_BUS_OFF) return 0; if (!raw_status) { /* no error bit (back to active state) */ dev->can.state = CAN_STATE_ERROR_ACTIVE; return 0; } if (raw_status & (PCAN_USBPRO_STATUS_OVERRUN | PCAN_USBPRO_STATUS_QOVERRUN)) { /* trick to bypass next comparison and process other errors */ new_state = CAN_STATE_MAX; } if (raw_status & PCAN_USBPRO_STATUS_BUS) { new_state = CAN_STATE_BUS_OFF; } else if (raw_status & PCAN_USBPRO_STATUS_ERROR) { u32 rx_err_cnt = (le32_to_cpu(er->err_frm) & 0x00ff0000) >> 16; u32 tx_err_cnt = (le32_to_cpu(er->err_frm) & 0xff000000) >> 24; if (rx_err_cnt > 127) err_mask |= CAN_ERR_CRTL_RX_PASSIVE; else if (rx_err_cnt > 96) err_mask |= CAN_ERR_CRTL_RX_WARNING; if (tx_err_cnt > 127) err_mask |= CAN_ERR_CRTL_TX_PASSIVE; else if (tx_err_cnt > 96) 
err_mask |= CAN_ERR_CRTL_TX_WARNING; if (err_mask & (CAN_ERR_CRTL_RX_WARNING | CAN_ERR_CRTL_TX_WARNING)) new_state = CAN_STATE_ERROR_WARNING; else if (err_mask & (CAN_ERR_CRTL_RX_PASSIVE | CAN_ERR_CRTL_TX_PASSIVE)) new_state = CAN_STATE_ERROR_PASSIVE; } /* donot post any error if current state didn't change */ if (dev->can.state == new_state) return 0; /* allocate an skb to store the error frame */ skb = alloc_can_err_skb(netdev, &can_frame); if (!skb) return -ENOMEM; switch (new_state) { case CAN_STATE_BUS_OFF: can_frame->can_id |= CAN_ERR_BUSOFF; can_bus_off(netdev); break; case CAN_STATE_ERROR_PASSIVE: can_frame->can_id |= CAN_ERR_CRTL; can_frame->data[1] |= err_mask; dev->can.can_stats.error_passive++; break; case CAN_STATE_ERROR_WARNING: can_frame->can_id |= CAN_ERR_CRTL; can_frame->data[1] |= err_mask; dev->can.can_stats.error_warning++; break; case CAN_STATE_ERROR_ACTIVE: break; default: /* CAN_STATE_MAX (trick to handle other errors) */ if (raw_status & PCAN_USBPRO_STATUS_OVERRUN) { can_frame->can_id |= CAN_ERR_PROT; can_frame->data[2] |= CAN_ERR_PROT_OVERLOAD; netdev->stats.rx_over_errors++; netdev->stats.rx_errors++; } if (raw_status & PCAN_USBPRO_STATUS_QOVERRUN) { can_frame->can_id |= CAN_ERR_CRTL; can_frame->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; netdev->stats.rx_over_errors++; netdev->stats.rx_errors++; } new_state = CAN_STATE_ERROR_ACTIVE; break; } dev->can.state = new_state; peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv); skb->tstamp = timeval_to_ktime(tv); netif_rx(skb); netdev->stats.rx_packets++; netdev->stats.rx_bytes += can_frame->can_dlc; return 0; } static void pcan_usb_pro_handle_ts(struct pcan_usb_pro_interface *usb_if, struct pcan_usb_pro_rxts *ts) { /* should wait until clock is stabilized */ if (usb_if->cm_ignore_count > 0) usb_if->cm_ignore_count--; else peak_usb_set_ts_now(&usb_if->time_ref, le32_to_cpu(ts->ts64[1])); } /* * callback for bulk IN urb */ static int pcan_usb_pro_decode_buf(struct peak_usb_device *dev, struct urb *urb) { struct pcan_usb_pro_interface *usb_if = pcan_usb_pro_dev_if(dev); struct net_device *netdev = dev->netdev; struct pcan_usb_pro_msg usb_msg; u8 *rec_ptr, *msg_end; u16 rec_cnt; int err = 0; rec_ptr = pcan_msg_init(&usb_msg, urb->transfer_buffer, urb->actual_length); if (!rec_ptr) { netdev_err(netdev, "bad msg hdr len %d\n", urb->actual_length); return -EINVAL; } /* loop reading all the records from the incoming message */ msg_end = urb->transfer_buffer + urb->actual_length; rec_cnt = le16_to_cpu(*usb_msg.u.rec_cnt_rd); for (; rec_cnt > 0; rec_cnt--) { union pcan_usb_pro_rec *pr = (union pcan_usb_pro_rec *)rec_ptr; u16 sizeof_rec = pcan_usb_pro_sizeof_rec[pr->data_type]; if (!sizeof_rec) { netdev_err(netdev, "got unsupported rec in usb msg:\n"); err = -ENOTSUPP; break; } /* check if the record goes out of current packet */ if (rec_ptr + sizeof_rec > msg_end) { netdev_err(netdev, "got frag rec: should inc usb rx buf size\n"); err = -EBADMSG; break; } switch (pr->data_type) { case PCAN_USBPRO_RXMSG8: case PCAN_USBPRO_RXMSG4: case PCAN_USBPRO_RXMSG0: case PCAN_USBPRO_RXRTR: err = pcan_usb_pro_handle_canmsg(usb_if, &pr->rx_msg); if (err < 0) goto fail; break; case PCAN_USBPRO_RXSTATUS: err = pcan_usb_pro_handle_error(usb_if, &pr->rx_status); if (err < 0) goto fail; break; case PCAN_USBPRO_RXTS: pcan_usb_pro_handle_ts(usb_if, &pr->rx_ts); break; default: netdev_err(netdev, "unhandled rec type 0x%02x (%d): ignored\n", pr->data_type, pr->data_type); break; } rec_ptr += sizeof_rec; } fail: if (err) dump_mem("received msg", 
urb->transfer_buffer, urb->actual_length); return err; } static int pcan_usb_pro_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb, u8 *obuf, size_t *size) { struct can_frame *cf = (struct can_frame *)skb->data; u8 data_type, len, flags; struct pcan_usb_pro_msg usb_msg; pcan_msg_init_empty(&usb_msg, obuf, *size); if ((cf->can_id & CAN_RTR_FLAG) || (cf->can_dlc == 0)) data_type = PCAN_USBPRO_TXMSG0; else if (cf->can_dlc <= 4) data_type = PCAN_USBPRO_TXMSG4; else data_type = PCAN_USBPRO_TXMSG8; len = (dev->ctrl_idx << 4) | (cf->can_dlc & 0x0f); flags = 0; if (cf->can_id & CAN_EFF_FLAG) flags |= 0x02; if (cf->can_id & CAN_RTR_FLAG) flags |= 0x01; pcan_msg_add_rec(&usb_msg, data_type, 0, flags, len, cf->can_id, cf->data); *size = usb_msg.rec_buffer_len; return 0; } static int pcan_usb_pro_start(struct peak_usb_device *dev) { struct pcan_usb_pro_device *pdev = container_of(dev, struct pcan_usb_pro_device, dev); int err; err = pcan_usb_pro_set_silent(dev, dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY); if (err) return err; /* filter mode: 0-> All OFF; 1->bypass */ err = pcan_usb_pro_set_filter(dev, 1); if (err) return err; /* opening first device: */ if (pdev->usb_if->dev_opened_count == 0) { /* reset time_ref */ peak_usb_init_time_ref(&pdev->usb_if->time_ref, &pcan_usb_pro); /* ask device to send ts messages */ err = pcan_usb_pro_set_ts(dev, 1); } pdev->usb_if->dev_opened_count++; return err; } /* * stop interface * (last chance before set bus off) */ static int pcan_usb_pro_stop(struct peak_usb_device *dev) { struct pcan_usb_pro_device *pdev = container_of(dev, struct pcan_usb_pro_device, dev); /* turn off ts msgs for that interface if no other dev opened */ if (pdev->usb_if->dev_opened_count == 1) pcan_usb_pro_set_ts(dev, 0); pdev->usb_if->dev_opened_count--; return 0; } /* * called when probing to initialize a device object. */ static int pcan_usb_pro_init(struct peak_usb_device *dev) { struct pcan_usb_pro_interface *usb_if; struct pcan_usb_pro_device *pdev = container_of(dev, struct pcan_usb_pro_device, dev); /* do this for 1st channel only */ if (!dev->prev_siblings) { struct pcan_usb_pro_fwinfo fi; struct pcan_usb_pro_blinfo bi; int err; /* allocate netdevices common structure attached to first one */ usb_if = kzalloc(sizeof(struct pcan_usb_pro_interface), GFP_KERNEL); if (!usb_if) return -ENOMEM; /* number of ts msgs to ignore before taking one into account */ usb_if->cm_ignore_count = 5; /* * explicit use of dev_xxx() instead of netdev_xxx() here: * information displayed are related to the device itself, not * to the canx netdevices. 
*/ err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO, PCAN_USBPRO_INFO_FW, &fi, sizeof(fi)); if (err) { kfree(usb_if); dev_err(dev->netdev->dev.parent, "unable to read %s firmware info (err %d)\n", pcan_usb_pro.name, err); return err; } err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO, PCAN_USBPRO_INFO_BL, &bi, sizeof(bi)); if (err) { kfree(usb_if); dev_err(dev->netdev->dev.parent, "unable to read %s bootloader info (err %d)\n", pcan_usb_pro.name, err); return err; } dev_info(dev->netdev->dev.parent, "PEAK-System %s hwrev %u serial %08X.%08X (%u channels)\n", pcan_usb_pro.name, bi.hw_rev, bi.serial_num_hi, bi.serial_num_lo, pcan_usb_pro.ctrl_count); /* tell the device the can driver is running */ pcan_usb_pro_drv_loaded(dev, 1); } else { usb_if = pcan_usb_pro_dev_if(dev->prev_siblings); } pdev->usb_if = usb_if; usb_if->dev[dev->ctrl_idx] = dev; /* set LED in default state (end of init phase) */ pcan_usb_pro_set_led(dev, 0, 1); return 0; } static void pcan_usb_pro_exit(struct peak_usb_device *dev) { struct pcan_usb_pro_device *pdev = container_of(dev, struct pcan_usb_pro_device, dev); /* * when rmmod is called before unplug and the interface is down, * things should be reset before leaving */ if (dev->can.state != CAN_STATE_STOPPED) { /* set bus off on the corresponding channel */ pcan_usb_pro_set_bus(dev, 0); } /* if channel #0 (only) */ if (dev->ctrl_idx == 0) { /* turn off calibration messages if any device was opened */ if (pdev->usb_if->dev_opened_count > 0) pcan_usb_pro_set_ts(dev, 0); /* tell the PCAN-USB Pro device the driver is being unloaded */ pcan_usb_pro_drv_loaded(dev, 0); } } /* * called when PCAN-USB Pro adapter is unplugged */ static void pcan_usb_pro_free(struct peak_usb_device *dev) { /* last device: can free pcan_usb_pro_interface object now */ if (!dev->prev_siblings && !dev->next_siblings) kfree(pcan_usb_pro_dev_if(dev)); } /* * probe function for new PCAN-USB Pro usb interface */ static int pcan_usb_pro_probe(struct usb_interface *intf) { struct usb_host_interface *if_desc; int i; if_desc = intf->altsetting; /* check interface endpoint addresses */ for (i = 0; i < if_desc->desc.bNumEndpoints; i++) { struct usb_endpoint_descriptor *ep = &if_desc->endpoint[i].desc; /* * below is the list of valid ep addresses. Any other ep address * is considered a non-CAN interface address => no dev created */ switch (ep->bEndpointAddress) { case PCAN_USBPRO_EP_CMDOUT: case PCAN_USBPRO_EP_CMDIN: case PCAN_USBPRO_EP_MSGOUT_0: case PCAN_USBPRO_EP_MSGOUT_1: case PCAN_USBPRO_EP_MSGIN: case PCAN_USBPRO_EP_UNUSED: break; default: return -ENODEV; } } return 0; } /* * describe the PCAN-USB Pro adapter */ struct peak_usb_adapter pcan_usb_pro = { .name = "PCAN-USB Pro", .device_id = PCAN_USBPRO_PRODUCT_ID, .ctrl_count = PCAN_USBPRO_CHANNEL_COUNT, .clock = { .freq = PCAN_USBPRO_CRYSTAL_HZ, }, .bittiming_const = { .name = "pcan_usb_pro", .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 1024, .brp_inc = 1, }, /* size of device private data */ .sizeof_dev_private = sizeof(struct pcan_usb_pro_device), /* timestamps usage */ .ts_used_bits = 32, .ts_period = 1000000, /* calibration period in ts.
*/ .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ .us_per_ts_shift = 0, /* give here messages in/out endpoints */ .ep_msg_in = PCAN_USBPRO_EP_MSGIN, .ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0, PCAN_USBPRO_EP_MSGOUT_1}, /* size of rx/tx usb buffers */ .rx_buffer_size = PCAN_USBPRO_RX_BUFFER_SIZE, .tx_buffer_size = PCAN_USBPRO_TX_BUFFER_SIZE, /* device callbacks */ .intf_probe = pcan_usb_pro_probe, .dev_init = pcan_usb_pro_init, .dev_exit = pcan_usb_pro_exit, .dev_free = pcan_usb_pro_free, .dev_set_bus = pcan_usb_pro_set_bus, .dev_set_bittiming = pcan_usb_pro_set_bittiming, .dev_get_device_id = pcan_usb_pro_get_device_id, .dev_decode_buf = pcan_usb_pro_decode_buf, .dev_encode_msg = pcan_usb_pro_encode_msg, .dev_start = pcan_usb_pro_start, .dev_stop = pcan_usb_pro_stop, .dev_restart_async = pcan_usb_pro_restart_async, };
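/*
 * Editorial sketch, not part of the original driver: the CCBT word
 * built in pcan_usb_pro_set_bittiming() above packs the CAN bit-timing
 * parameters as
 *
 *   bits 24..25  sjw - 1
 *   bit  23      triple sampling (CAN_CTRLMODE_3_SAMPLES)
 *   bits 20..22  phase_seg2 - 1
 *   bits 16..19  prop_seg + phase_seg1 - 1   (tseg1)
 *   bits  0..15  brp - 1  (only the low 10 bits are used with brp_max = 1024)
 *
 * The layout is inferred from that function; the exact field widths on
 * the device side are an assumption.  A hypothetical debugging helper
 * decoding such a word could look like this:
 */
#if 0
static void pcan_usb_pro_dump_ccbt(u32 ccbt)
{
	pr_debug("ccbt=0x%08x: brp=%u tseg1=%u tseg2=%u sjw=%u%s\n",
		 ccbt,
		 (ccbt & 0xffff) + 1,
		 ((ccbt >> 16) & 0xf) + 1,
		 ((ccbt >> 20) & 0x7) + 1,
		 ((ccbt >> 24) & 0x3) + 1,
		 (ccbt & 0x00800000) ? " (triple sampling)" : "");
}
#endif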
gpl-2.0
allenway/PIS-kernel
mm/failslab.c
3624
1681
#include <linux/fault-inject.h> #include <linux/slab.h> static struct { struct fault_attr attr; u32 ignore_gfp_wait; int cache_filter; #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS struct dentry *ignore_gfp_wait_file; struct dentry *cache_filter_file; #endif } failslab = { .attr = FAULT_ATTR_INITIALIZER, .ignore_gfp_wait = 1, .cache_filter = 0, }; bool should_failslab(size_t size, gfp_t gfpflags, unsigned long cache_flags) { if (gfpflags & __GFP_NOFAIL) return false; if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT)) return false; if (failslab.cache_filter && !(cache_flags & SLAB_FAILSLAB)) return false; return should_fail(&failslab.attr, size); } static int __init setup_failslab(char *str) { return setup_fault_attr(&failslab.attr, str); } __setup("failslab=", setup_failslab); #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS static int __init failslab_debugfs_init(void) { mode_t mode = S_IFREG | S_IRUSR | S_IWUSR; struct dentry *dir; int err; err = init_fault_attr_dentries(&failslab.attr, "failslab"); if (err) return err; dir = failslab.attr.dentries.dir; failslab.ignore_gfp_wait_file = debugfs_create_bool("ignore-gfp-wait", mode, dir, &failslab.ignore_gfp_wait); failslab.cache_filter_file = debugfs_create_bool("cache-filter", mode, dir, &failslab.cache_filter); if (!failslab.ignore_gfp_wait_file || !failslab.cache_filter_file) { err = -ENOMEM; debugfs_remove(failslab.cache_filter_file); debugfs_remove(failslab.ignore_gfp_wait_file); cleanup_fault_attr_dentries(&failslab.attr); } return err; } late_initcall(failslab_debugfs_init); #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
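/*
 * Editorial sketch, not part of the original file: when the
 * cache-filter knob (the "cache-filter" debugfs file created above) is
 * enabled, should_failslab() only injects failures into caches created
 * with SLAB_FAILSLAB.  A cache opting in could be created as below; the
 * cache name and struct example_obj are hypothetical.
 */
#if 0
static struct kmem_cache *example_cachep;

static int __init example_cache_init(void)
{
	example_cachep = kmem_cache_create("example_cache",
					   sizeof(struct example_obj), 0,
					   SLAB_FAILSLAB | SLAB_HWCACHE_ALIGN,
					   NULL);
	return example_cachep ? 0 : -ENOMEM;
}
#endif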
gpl-2.0
aeipDev/deka-kernel-msm7x30-3.4
arch/sh/mm/fault_32.c
3880
8649
/* * Page fault handler for SH with an MMU. * * Copyright (C) 1999 Niibe Yutaka * Copyright (C) 2003 - 2009 Paul Mundt * * Based on linux/arch/i386/mm/fault.c: * Copyright (C) 1995 Linus Torvalds * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/hardirq.h> #include <linux/kprobes.h> #include <linux/perf_event.h> #include <asm/io_trapped.h> #include <asm/mmu_context.h> #include <asm/tlbflush.h> #include <asm/traps.h> static inline int notify_page_fault(struct pt_regs *regs, int trap) { int ret = 0; if (kprobes_built_in() && !user_mode(regs)) { preempt_disable(); if (kprobe_running() && kprobe_fault_handler(regs, trap)) ret = 1; preempt_enable(); } return ret; } static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address) { unsigned index = pgd_index(address); pgd_t *pgd_k; pud_t *pud, *pud_k; pmd_t *pmd, *pmd_k; pgd += index; pgd_k = init_mm.pgd + index; if (!pgd_present(*pgd_k)) return NULL; pud = pud_offset(pgd, address); pud_k = pud_offset(pgd_k, address); if (!pud_present(*pud_k)) return NULL; if (!pud_present(*pud)) set_pud(pud, *pud_k); pmd = pmd_offset(pud, address); pmd_k = pmd_offset(pud_k, address); if (!pmd_present(*pmd_k)) return NULL; if (!pmd_present(*pmd)) set_pmd(pmd, *pmd_k); else { /* * The page tables are fully synchronised so there must * be another reason for the fault. Return NULL here to * signal that we have not taken care of the fault. */ BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k)); return NULL; } return pmd_k; } /* * Handle a fault on the vmalloc or module mapping area */ static noinline int vmalloc_fault(unsigned long address) { pgd_t *pgd_k; pmd_t *pmd_k; pte_t *pte_k; /* Make sure we are in vmalloc/module/P3 area: */ if (!(address >= P3SEG && address < P3_ADDR_MAX)) return -1; /* * Synchronize this task's top level page-table * with the 'reference' page table. * * Do _not_ use "current" here. We might be inside * an interrupt in the middle of a task switch.. */ pgd_k = get_TTB(); pmd_k = vmalloc_sync_one(pgd_k, address); if (!pmd_k) return -1; pte_k = pte_offset_kernel(pmd_k, address); if (!pte_present(*pte_k)) return -1; return 0; } static int fault_in_kernel_space(unsigned long address) { return address >= TASK_SIZE; } /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate * routines. */ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writeaccess, unsigned long address) { unsigned long vec; struct task_struct *tsk; struct mm_struct *mm; struct vm_area_struct * vma; int si_code; int fault; siginfo_t info; tsk = current; mm = tsk->mm; si_code = SEGV_MAPERR; vec = lookup_exception_vector(); /* * We fault-in kernel-space virtual memory on-demand. The * 'reference' page table is init_mm.pgd. * * NOTE! We MUST NOT take any locks for this case. We may * be in an interrupt or a critical region, and should * only copy the information from the master page table, * nothing more. 
*/ if (unlikely(fault_in_kernel_space(address))) { if (vmalloc_fault(address) >= 0) return; if (notify_page_fault(regs, vec)) return; goto bad_area_nosemaphore; } if (unlikely(notify_page_fault(regs, vec))) return; /* Only enable interrupts if they were on before the fault */ if ((regs->sr & SR_IMASK) != SR_IMASK) local_irq_enable(); perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); /* * If we're in an interrupt, have no user context or are running * in an atomic region then we must not take the fault: */ if (in_atomic() || !mm) goto no_context; down_read(&mm->mmap_sem); vma = find_vma(mm, address); if (!vma) goto bad_area; if (vma->vm_start <= address) goto good_area; if (!(vma->vm_flags & VM_GROWSDOWN)) goto bad_area; if (expand_stack(vma, address)) goto bad_area; /* * Ok, we have a good vm_area for this memory access, so * we can handle it.. */ good_area: si_code = SEGV_ACCERR; if (writeaccess) { if (!(vma->vm_flags & VM_WRITE)) goto bad_area; } else { if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) goto bad_area; } /* * If for any reason at all we couldn't handle the fault, * make sure we exit gracefully rather than endlessly redo * the fault. */ fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; else if (fault & VM_FAULT_SIGBUS) goto do_sigbus; BUG(); } if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); } else { tsk->min_flt++; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); } up_read(&mm->mmap_sem); return; /* * Something tried to access memory that isn't in our memory map.. * Fix it, but check if it's kernel or user first.. */ bad_area: up_read(&mm->mmap_sem); bad_area_nosemaphore: if (user_mode(regs)) { info.si_signo = SIGSEGV; info.si_errno = 0; info.si_code = si_code; info.si_addr = (void *) address; force_sig_info(SIGSEGV, &info, tsk); return; } no_context: /* Are we prepared to handle this kernel fault? */ if (fixup_exception(regs)) return; if (handle_trapped_io(regs, address)) return; /* * Oops. The kernel tried to access some bad page. We'll have to * terminate things with extreme prejudice. * */ bust_spinlocks(1); if (oops_may_print()) { unsigned long page; if (address < PAGE_SIZE) printk(KERN_ALERT "Unable to handle kernel NULL " "pointer dereference"); else printk(KERN_ALERT "Unable to handle kernel paging " "request"); printk(" at virtual address %08lx\n", address); printk(KERN_ALERT "pc = %08lx\n", regs->pc); page = (unsigned long)get_TTB(); if (page) { page = ((__typeof__(page) *)page)[address >> PGDIR_SHIFT]; printk(KERN_ALERT "*pde = %08lx\n", page); if (page & _PAGE_PRESENT) { page &= PAGE_MASK; address &= 0x003ff000; page = ((__typeof__(page) *) __va(page))[address >> PAGE_SHIFT]; printk(KERN_ALERT "*pte = %08lx\n", page); } } } die("Oops", regs, writeaccess); bust_spinlocks(0); do_exit(SIGKILL); /* * We ran out of memory, or some other thing happened to us that made * us unable to handle the page fault gracefully. */ out_of_memory: up_read(&mm->mmap_sem); if (!user_mode(regs)) goto no_context; pagefault_out_of_memory(); return; do_sigbus: up_read(&mm->mmap_sem); /* * Send a sigbus, regardless of whether we were in kernel * or user mode. */ info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRERR; info.si_addr = (void *)address; force_sig_info(SIGBUS, &info, tsk); /* Kernel mode? 
Handle exceptions or die */ if (!user_mode(regs)) goto no_context; } /* * Called with interrupts disabled. */ asmlinkage int __kprobes handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess, unsigned long address) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; pte_t entry; /* * We don't take page faults for P1, P2, and parts of P4, these * are always mapped, whether it be due to legacy behaviour in * 29-bit mode, or due to PMB configuration in 32-bit mode. */ if (address >= P3SEG && address < P3_ADDR_MAX) { pgd = pgd_offset_k(address); } else { if (unlikely(address >= TASK_SIZE || !current->mm)) return 1; pgd = pgd_offset(current->mm, address); } pud = pud_offset(pgd, address); if (pud_none_or_clear_bad(pud)) return 1; pmd = pmd_offset(pud, address); if (pmd_none_or_clear_bad(pmd)) return 1; pte = pte_offset_kernel(pmd, address); entry = *pte; if (unlikely(pte_none(entry) || pte_not_present(entry))) return 1; if (unlikely(writeaccess && !pte_write(entry))) return 1; if (writeaccess) entry = pte_mkdirty(entry); entry = pte_mkyoung(entry); set_pte(pte, entry); #if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP) /* * SH-4 does not set MMUCR.RC to the corresponding TLB entry in * the case of an initial page write exception, so we need to * flush it in order to avoid potential TLB entry duplication. */ if (writeaccess == 2) local_flush_tlb_one(get_asid(), address & PAGE_MASK); #endif update_mmu_cache(NULL, address, pte); return 0; }
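/*
 * Editorial sketch, not part of the original file: the oops path in
 * do_page_fault() above walks the two-level page table by hand, using
 * address >> PGDIR_SHIFT as the directory index and
 * (address & 0x003ff000) >> PAGE_SHIFT as the table index.  Assuming
 * 4 KiB pages and PGDIR_SHIFT == 22 (consistent with the 0x003ff000
 * mask), a hypothetical helper printing that split would be:
 */
#if 0
static void show_fault_indices(unsigned long address)
{
	unsigned long pgd_idx = address >> PGDIR_SHIFT;
	unsigned long pte_idx = (address & 0x003ff000) >> PAGE_SHIFT;

	pr_info("addr %08lx -> pgd index %lu, pte index %lu\n",
		address, pgd_idx, pte_idx);
}
#endif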
gpl-2.0
aopp/android_kernel_lge_hammerhead
drivers/media/radio/radio-mr800.c
4904
17852
/* * A driver for the AverMedia MR 800 USB FM radio. This device plugs * into both the USB and an analog audio input, so this thing * only deals with initialization and frequency setting; the * audio data has to be handled by a sound driver. * * Copyright (c) 2008 Alexey Klimov <klimov.linux@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * Big thanks to the authors and contributors of dsbr100.c and radio-si470x.c * * When the work was looking pretty good, I discovered this: * http://av-usbradio.sourceforge.net/index.php * http://sourceforge.net/projects/av-usbradio/ * The latest release of their project was in 2005. * This driver could probably be improved by using their * achievements (specifications given). * Also, Faidon Liambotis <paravoid@debian.org> wrote a nice driver for this radio * in 2007. He allowed his driver to be used to improve the current mr800 radio driver. * http://kerneltrap.org/mailarchive/linux-usb-devel/2007/10/11/342492 * * Version 0.01: First working version. * It's required to blacklist the AverMedia USB Radio * in usbhid/hid-quirks.c * Version 0.10: A lot of cleanups and fixes: unplugging the device, * a few mutex locks were added, coding style issues, etc. * Added stereo support. Thanks to * Douglas Schilling Landgraf <dougsland@gmail.com> and * David Ellingsworth <david@identd.dyndns.org> * for discussion, help and support. * Version 0.11: Converted to v4l2_device. * * Many things to do: * - Correct power management of device (suspend & resume) * - Add code for scanning and smooth tuning * - Add code for sensitivity value * - Correct mistakes * - In Japan another FREQ_MIN and FREQ_MAX */ /* kernel includes */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <linux/usb.h> #include <linux/mutex.h> /* driver and module definitions */ #define DRIVER_AUTHOR "Alexey Klimov <klimov.linux@gmail.com>" #define DRIVER_DESC "AverMedia MR 800 USB FM radio driver" #define DRIVER_VERSION "0.1.2" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); MODULE_VERSION(DRIVER_VERSION); #define USB_AMRADIO_VENDOR 0x07ca #define USB_AMRADIO_PRODUCT 0xb800 /* dev_warn macro with driver name */ #define MR800_DRIVER_NAME "radio-mr800" #define amradio_dev_warn(dev, fmt, arg...) \ dev_warn(dev, MR800_DRIVER_NAME " - " fmt, ##arg) #define amradio_dev_err(dev, fmt, arg...) \ dev_err(dev, MR800_DRIVER_NAME " - " fmt, ##arg) /* Probably USB_TIMEOUT should be modifiable via a module parameter */ #define BUFFER_LENGTH 8 #define USB_TIMEOUT 500 /* Frequency limits in MHz -- these are European values. For Japanese devices, that would be 76 and 91.
*/ #define FREQ_MIN 87.5 #define FREQ_MAX 108.0 #define FREQ_MUL 16000 /* * Commands that device should understand * List isn't full and will be updated with implementation of new functions */ #define AMRADIO_SET_FREQ 0xa4 #define AMRADIO_SET_MUTE 0xab #define AMRADIO_SET_MONO 0xae /* Comfortable defines for amradio_set_mute */ #define AMRADIO_START 0x00 #define AMRADIO_STOP 0x01 /* Comfortable defines for amradio_set_stereo */ #define WANT_STEREO 0x00 #define WANT_MONO 0x01 /* module parameter */ static int radio_nr = -1; module_param(radio_nr, int, 0); MODULE_PARM_DESC(radio_nr, "Radio Nr"); static int usb_amradio_probe(struct usb_interface *intf, const struct usb_device_id *id); static void usb_amradio_disconnect(struct usb_interface *intf); static int usb_amradio_open(struct file *file); static int usb_amradio_close(struct file *file); static int usb_amradio_suspend(struct usb_interface *intf, pm_message_t message); static int usb_amradio_resume(struct usb_interface *intf); /* Data for one (physical) device */ struct amradio_device { /* reference to USB and video device */ struct usb_device *usbdev; struct usb_interface *intf; struct video_device videodev; struct v4l2_device v4l2_dev; unsigned char *buffer; struct mutex lock; /* buffer locking */ int curfreq; int stereo; int muted; int initialized; }; static inline struct amradio_device *to_amradio_dev(struct v4l2_device *v4l2_dev) { return container_of(v4l2_dev, struct amradio_device, v4l2_dev); } /* USB Device ID List */ static struct usb_device_id usb_amradio_device_table[] = { {USB_DEVICE_AND_INTERFACE_INFO(USB_AMRADIO_VENDOR, USB_AMRADIO_PRODUCT, USB_CLASS_HID, 0, 0) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, usb_amradio_device_table); /* USB subsystem interface */ static struct usb_driver usb_amradio_driver = { .name = MR800_DRIVER_NAME, .probe = usb_amradio_probe, .disconnect = usb_amradio_disconnect, .suspend = usb_amradio_suspend, .resume = usb_amradio_resume, .reset_resume = usb_amradio_resume, .id_table = usb_amradio_device_table, .supports_autosuspend = 1, }; /* switch on/off the radio. Send 8 bytes to device */ static int amradio_set_mute(struct amradio_device *radio, char argument) { int retval; int size; radio->buffer[0] = 0x00; radio->buffer[1] = 0x55; radio->buffer[2] = 0xaa; radio->buffer[3] = 0x00; radio->buffer[4] = AMRADIO_SET_MUTE; radio->buffer[5] = argument; radio->buffer[6] = 0x00; radio->buffer[7] = 0x00; retval = usb_bulk_msg(radio->usbdev, usb_sndintpipe(radio->usbdev, 2), (void *) (radio->buffer), BUFFER_LENGTH, &size, USB_TIMEOUT); if (retval < 0 || size != BUFFER_LENGTH) { amradio_dev_warn(&radio->videodev.dev, "set mute failed\n"); return retval; } radio->muted = argument; return retval; } /* set a frequency, freq is defined by v4l's TUNER_LOW, i.e. 
1/16th kHz */ static int amradio_setfreq(struct amradio_device *radio, int freq) { int retval; int size; unsigned short freq_send = 0x10 + (freq >> 3) / 25; radio->buffer[0] = 0x00; radio->buffer[1] = 0x55; radio->buffer[2] = 0xaa; radio->buffer[3] = 0x03; radio->buffer[4] = AMRADIO_SET_FREQ; radio->buffer[5] = 0x00; radio->buffer[6] = 0x00; radio->buffer[7] = 0x08; retval = usb_bulk_msg(radio->usbdev, usb_sndintpipe(radio->usbdev, 2), (void *) (radio->buffer), BUFFER_LENGTH, &size, USB_TIMEOUT); if (retval < 0 || size != BUFFER_LENGTH) goto out_err; /* frequency is calculated from freq_send and placed in first 2 bytes */ radio->buffer[0] = (freq_send >> 8) & 0xff; radio->buffer[1] = freq_send & 0xff; radio->buffer[2] = 0x01; radio->buffer[3] = 0x00; radio->buffer[4] = 0x00; /* 5 and 6 bytes of buffer already = 0x00 */ radio->buffer[7] = 0x00; retval = usb_bulk_msg(radio->usbdev, usb_sndintpipe(radio->usbdev, 2), (void *) (radio->buffer), BUFFER_LENGTH, &size, USB_TIMEOUT); if (retval < 0 || size != BUFFER_LENGTH) goto out_err; radio->curfreq = freq; goto out; out_err: amradio_dev_warn(&radio->videodev.dev, "set frequency failed\n"); out: return retval; } static int amradio_set_stereo(struct amradio_device *radio, char argument) { int retval; int size; radio->buffer[0] = 0x00; radio->buffer[1] = 0x55; radio->buffer[2] = 0xaa; radio->buffer[3] = 0x00; radio->buffer[4] = AMRADIO_SET_MONO; radio->buffer[5] = argument; radio->buffer[6] = 0x00; radio->buffer[7] = 0x00; retval = usb_bulk_msg(radio->usbdev, usb_sndintpipe(radio->usbdev, 2), (void *) (radio->buffer), BUFFER_LENGTH, &size, USB_TIMEOUT); if (retval < 0 || size != BUFFER_LENGTH) { amradio_dev_warn(&radio->videodev.dev, "set stereo failed\n"); return retval; } if (argument == WANT_STEREO) radio->stereo = 1; else radio->stereo = 0; return retval; } /* Handle unplugging the device. * We call video_unregister_device in any case. * The last function called in this procedure is * usb_amradio_device_release. 
*/ static void usb_amradio_disconnect(struct usb_interface *intf) { struct amradio_device *radio = to_amradio_dev(usb_get_intfdata(intf)); mutex_lock(&radio->lock); /* increase the device node's refcount */ get_device(&radio->videodev.dev); v4l2_device_disconnect(&radio->v4l2_dev); video_unregister_device(&radio->videodev); mutex_unlock(&radio->lock); /* decrease the device node's refcount, allowing it to be released */ put_device(&radio->videodev.dev); } /* vidioc_querycap - query device capabilities */ static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *v) { struct amradio_device *radio = file->private_data; strlcpy(v->driver, "radio-mr800", sizeof(v->driver)); strlcpy(v->card, "AverMedia MR 800 USB FM Radio", sizeof(v->card)); usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info)); v->capabilities = V4L2_CAP_TUNER; return 0; } /* vidioc_g_tuner - get tuner attributes */ static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *v) { struct amradio_device *radio = file->private_data; int retval; if (v->index > 0) return -EINVAL; /* TODO: Add a function which checks whether the signal is stereo or not: * amradio_getstat(radio); */ /* we call amradio_set_stereo to set radio->stereo * Honestly, amradio_getstat should cover this in the future and * amradio_set_stereo shouldn't be here */ retval = amradio_set_stereo(radio, WANT_STEREO); strcpy(v->name, "FM"); v->type = V4L2_TUNER_RADIO; v->rangelow = FREQ_MIN * FREQ_MUL; v->rangehigh = FREQ_MAX * FREQ_MUL; v->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO; v->capability = V4L2_TUNER_CAP_LOW; if (radio->stereo) v->audmode = V4L2_TUNER_MODE_STEREO; else v->audmode = V4L2_TUNER_MODE_MONO; v->signal = 0xffff; /* Can't get the signal strength, sad.. */ v->afc = 0; /* Don't know what this is */ return retval; } /* vidioc_s_tuner - set tuner attributes */ static int vidioc_s_tuner(struct file *file, void *priv, struct v4l2_tuner *v) { struct amradio_device *radio = file->private_data; int retval = -EINVAL; if (v->index > 0) return -EINVAL; /* mono/stereo selector */ switch (v->audmode) { case V4L2_TUNER_MODE_MONO: retval = amradio_set_stereo(radio, WANT_MONO); break; case V4L2_TUNER_MODE_STEREO: retval = amradio_set_stereo(radio, WANT_STEREO); break; } return retval; } /* vidioc_s_frequency - set tuner radio frequency */ static int vidioc_s_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct amradio_device *radio = file->private_data; if (f->tuner != 0 || f->type != V4L2_TUNER_RADIO) return -EINVAL; return amradio_setfreq(radio, f->frequency); } /* vidioc_g_frequency - get tuner radio frequency */ static int vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct amradio_device *radio = file->private_data; if (f->tuner != 0) return -EINVAL; f->type = V4L2_TUNER_RADIO; f->frequency = radio->curfreq; return 0; } /* vidioc_queryctrl - enumerate control items */ static int vidioc_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *qc) { switch (qc->id) { case V4L2_CID_AUDIO_MUTE: return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1); } return -EINVAL; } /* vidioc_g_ctrl - get the value of a control */ static int vidioc_g_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct amradio_device *radio = file->private_data; switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: ctrl->value = radio->muted; return 0; } return -EINVAL; } /* vidioc_s_ctrl - set the value of a control */ static int vidioc_s_ctrl(struct file *file, void *priv, struct
v4l2_control *ctrl) { struct amradio_device *radio = file->private_data; int retval = -EINVAL; switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: if (ctrl->value) retval = amradio_set_mute(radio, AMRADIO_STOP); else retval = amradio_set_mute(radio, AMRADIO_START); break; } return retval; } /* vidioc_g_audio - get audio attributes */ static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a) { if (a->index > 1) return -EINVAL; strcpy(a->name, "Radio"); a->capability = V4L2_AUDCAP_STEREO; return 0; } /* vidioc_s_audio - set audio attributes */ static int vidioc_s_audio(struct file *file, void *priv, struct v4l2_audio *a) { if (a->index != 0) return -EINVAL; return 0; } /* vidioc_g_input - get input */ static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i) { *i = 0; return 0; } /* vidioc_s_input - set input */ static int vidioc_s_input(struct file *filp, void *priv, unsigned int i) { if (i != 0) return -EINVAL; return 0; } static int usb_amradio_init(struct amradio_device *radio) { int retval; retval = amradio_set_mute(radio, AMRADIO_STOP); if (retval) goto out_err; retval = amradio_set_stereo(radio, WANT_STEREO); if (retval) goto out_err; radio->initialized = 1; goto out; out_err: amradio_dev_err(&radio->videodev.dev, "initialization failed\n"); out: return retval; } /* open device - amradio_start() and amradio_setfreq() */ static int usb_amradio_open(struct file *file) { struct amradio_device *radio = video_drvdata(file); int retval; file->private_data = radio; retval = usb_autopm_get_interface(radio->intf); if (retval) return retval; if (unlikely(!radio->initialized)) { retval = usb_amradio_init(radio); if (retval) usb_autopm_put_interface(radio->intf); } return retval; } /*close device */ static int usb_amradio_close(struct file *file) { struct amradio_device *radio = file->private_data; if (video_is_registered(&radio->videodev)) usb_autopm_put_interface(radio->intf); return 0; } /* Suspend device - stop device. Need to be checked and fixed */ static int usb_amradio_suspend(struct usb_interface *intf, pm_message_t message) { struct amradio_device *radio = to_amradio_dev(usb_get_intfdata(intf)); mutex_lock(&radio->lock); if (!radio->muted && radio->initialized) { amradio_set_mute(radio, AMRADIO_STOP); radio->muted = 0; } mutex_unlock(&radio->lock); dev_info(&intf->dev, "going into suspend..\n"); return 0; } /* Resume device - start device. 
Need to be checked and fixed */ static int usb_amradio_resume(struct usb_interface *intf) { struct amradio_device *radio = to_amradio_dev(usb_get_intfdata(intf)); mutex_lock(&radio->lock); if (unlikely(!radio->initialized)) goto unlock; if (radio->stereo) amradio_set_stereo(radio, WANT_STEREO); else amradio_set_stereo(radio, WANT_MONO); amradio_setfreq(radio, radio->curfreq); if (!radio->muted) amradio_set_mute(radio, AMRADIO_START); unlock: mutex_unlock(&radio->lock); dev_info(&intf->dev, "coming out of suspend..\n"); return 0; } /* File system interface */ static const struct v4l2_file_operations usb_amradio_fops = { .owner = THIS_MODULE, .open = usb_amradio_open, .release = usb_amradio_close, .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops usb_amradio_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_g_tuner = vidioc_g_tuner, .vidioc_s_tuner = vidioc_s_tuner, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, .vidioc_queryctrl = vidioc_queryctrl, .vidioc_g_ctrl = vidioc_g_ctrl, .vidioc_s_ctrl = vidioc_s_ctrl, .vidioc_g_audio = vidioc_g_audio, .vidioc_s_audio = vidioc_s_audio, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, }; static void usb_amradio_video_device_release(struct video_device *videodev) { struct amradio_device *radio = video_get_drvdata(videodev); /* free rest memory */ kfree(radio->buffer); kfree(radio); } /* check if the device is present and register with v4l and usb if it is */ static int usb_amradio_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct amradio_device *radio; int retval = 0; radio = kzalloc(sizeof(struct amradio_device), GFP_KERNEL); if (!radio) { dev_err(&intf->dev, "kmalloc for amradio_device failed\n"); retval = -ENOMEM; goto err; } radio->buffer = kmalloc(BUFFER_LENGTH, GFP_KERNEL); if (!radio->buffer) { dev_err(&intf->dev, "kmalloc for radio->buffer failed\n"); retval = -ENOMEM; goto err_nobuf; } retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev); if (retval < 0) { dev_err(&intf->dev, "couldn't register v4l2_device\n"); goto err_v4l2; } mutex_init(&radio->lock); strlcpy(radio->videodev.name, radio->v4l2_dev.name, sizeof(radio->videodev.name)); radio->videodev.v4l2_dev = &radio->v4l2_dev; radio->videodev.fops = &usb_amradio_fops; radio->videodev.ioctl_ops = &usb_amradio_ioctl_ops; radio->videodev.release = usb_amradio_video_device_release; radio->videodev.lock = &radio->lock; radio->usbdev = interface_to_usbdev(intf); radio->intf = intf; radio->curfreq = 95.16 * FREQ_MUL; video_set_drvdata(&radio->videodev, radio); retval = video_register_device(&radio->videodev, VFL_TYPE_RADIO, radio_nr); if (retval < 0) { dev_err(&intf->dev, "could not register video device\n"); goto err_vdev; } return 0; err_vdev: v4l2_device_unregister(&radio->v4l2_dev); err_v4l2: kfree(radio->buffer); err_nobuf: kfree(radio); err: return retval; } module_usb_driver(usb_amradio_driver);
gpl-2.0
tobetter/hardkernel-linux
drivers/ide/triflex.c
5160
3927
/*
 * IDE Chipset driver for the Compaq TriFlex IDE controller.
 *
 * Known to work with the Compaq Workstation 5x00 series.
 *
 * Copyright (C) 2002 Hewlett-Packard Development Group, L.P.
 * Author: Torben Mathiasen <torben.mathiasen@hp.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Loosely based on the piix & svwks drivers.
 *
 * Documentation:
 *	Not publicly available.
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/ide.h>
#include <linux/init.h>

#define DRV_NAME "triflex"

static void triflex_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
        struct pci_dev *dev = to_pci_dev(hwif->dev);
        u32 triflex_timings = 0;
        u16 timing = 0;
        u8 channel_offset = hwif->channel ? 0x74 : 0x70, unit = drive->dn & 1;

        pci_read_config_dword(dev, channel_offset, &triflex_timings);

        switch (drive->dma_mode) {
        case XFER_MW_DMA_2:
                timing = 0x0103;
                break;
        case XFER_MW_DMA_1:
                timing = 0x0203;
                break;
        case XFER_MW_DMA_0:
                timing = 0x0808;
                break;
        case XFER_SW_DMA_2:
        case XFER_SW_DMA_1:
        case XFER_SW_DMA_0:
                timing = 0x0f0f;
                break;
        case XFER_PIO_4:
                timing = 0x0202;
                break;
        case XFER_PIO_3:
                timing = 0x0204;
                break;
        case XFER_PIO_2:
                timing = 0x0404;
                break;
        case XFER_PIO_1:
                timing = 0x0508;
                break;
        case XFER_PIO_0:
                timing = 0x0808;
                break;
        }

        triflex_timings &= ~(0xFFFF << (16 * unit));
        triflex_timings |= (timing << (16 * unit));

        pci_write_config_dword(dev, channel_offset, triflex_timings);
}

static void triflex_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
        drive->dma_mode = drive->pio_mode;
        triflex_set_mode(hwif, drive);
}

static const struct ide_port_ops triflex_port_ops = {
        .set_pio_mode = triflex_set_pio_mode,
        .set_dma_mode = triflex_set_mode,
};

static const struct ide_port_info triflex_device __devinitdata = {
        .name = DRV_NAME,
        .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
        .port_ops = &triflex_port_ops,
        .pio_mask = ATA_PIO4,
        .swdma_mask = ATA_SWDMA2,
        .mwdma_mask = ATA_MWDMA2,
};

static int __devinit triflex_init_one(struct pci_dev *dev,
                                      const struct pci_device_id *id)
{
        return ide_pci_init_one(dev, &triflex_device, NULL);
}

static const struct pci_device_id triflex_pci_tbl[] = {
        { PCI_VDEVICE(COMPAQ, PCI_DEVICE_ID_COMPAQ_TRIFLEX_IDE), 0 },
        { 0, },
};
MODULE_DEVICE_TABLE(pci, triflex_pci_tbl);

#ifdef CONFIG_PM
static int triflex_ide_pci_suspend(struct pci_dev *dev, pm_message_t state)
{
        /*
         * We must not disable or powerdown the device.
         * APM bios refuses to suspend if IDE is not accessible.
         */
        pci_save_state(dev);
        return 0;
}
#else
#define triflex_ide_pci_suspend NULL
#endif

static struct pci_driver triflex_pci_driver = {
        .name = "TRIFLEX_IDE",
        .id_table = triflex_pci_tbl,
        .probe = triflex_init_one,
        .remove = ide_pci_remove,
        .suspend = triflex_ide_pci_suspend,
        .resume = ide_pci_resume,
};

static int __init triflex_ide_init(void)
{
        return ide_pci_register_driver(&triflex_pci_driver);
}

static void __exit triflex_ide_exit(void)
{
        pci_unregister_driver(&triflex_pci_driver);
}

module_init(triflex_ide_init);
module_exit(triflex_ide_exit);

MODULE_AUTHOR("Torben Mathiasen");
MODULE_DESCRIPTION("PCI driver module for Compaq Triflex IDE");
MODULE_LICENSE("GPL");
gpl-2.0
Apology11/test
drivers/ide/piix.c
5160
14392
/* * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org> * Copyright (C) 2003 Red Hat * Copyright (C) 2006-2007 MontaVista Software, Inc. <source@mvista.com> * * May be copied or modified under the terms of the GNU General Public License * * Documentation: * * Publicly available from Intel web site. Errata documentation * is also publicly available. As an aide to anyone hacking on this * driver the list of errata that are relevant is below.going back to * PIIX4. Older device documentation is now a bit tricky to find. * * Errata of note: * * Unfixable * PIIX4 errata #9 - Only on ultra obscure hw * ICH3 errata #13 - Not observed to affect real hw * by Intel * * Things we must deal with * PIIX4 errata #10 - BM IDE hang with non UDMA * (must stop/start dma to recover) * 440MX errata #15 - As PIIX4 errata #10 * PIIX4 errata #15 - Must not read control registers * during a PIO transfer * 440MX errata #13 - As PIIX4 errata #15 * ICH2 errata #21 - DMA mode 0 doesn't work right * ICH0/1 errata #55 - As ICH2 errata #21 * ICH2 spec c #9 - Extra operations needed to handle * drive hotswap [NOT YET SUPPORTED] * ICH2 spec c #20 - IDE PRD must not cross a 64K boundary * and must be dword aligned * ICH2 spec c #24 - UDMA mode 4,5 t85/86 should be 6ns not 3.3 * * Should have been BIOS fixed: * 450NX: errata #19 - DMA hangs on old 450NX * 450NX: errata #20 - DMA hangs on old 450NX * 450NX: errata #25 - Corruption with DMA on old 450NX * ICH3 errata #15 - IDE deadlock under high load * (BIOS must set dev 31 fn 0 bit 23) * ICH3 errata #18 - Don't use native mode */ #include <linux/types.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/ide.h> #include <linux/init.h> #include <asm/io.h> #define DRV_NAME "piix" static int no_piix_dma; /** * piix_set_pio_mode - set host controller for PIO mode * @port: port * @drive: drive * * Set the interface PIO mode based upon the settings done by AMI BIOS. */ static void piix_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { struct pci_dev *dev = to_pci_dev(hwif->dev); int is_slave = drive->dn & 1; int master_port = hwif->channel ? 0x42 : 0x40; int slave_port = 0x44; unsigned long flags; u16 master_data; u8 slave_data; static DEFINE_SPINLOCK(tune_lock); int control = 0; const u8 pio = drive->pio_mode - XFER_PIO_0; /* ISP RTC */ static const u8 timings[][2]= { { 0, 0 }, { 0, 0 }, { 1, 0 }, { 2, 1 }, { 2, 3 }, }; /* * Master vs slave is synchronized above us but the slave register is * shared by the two hwifs so the corner case of two slave timeouts in * parallel must be locked. */ spin_lock_irqsave(&tune_lock, flags); pci_read_config_word(dev, master_port, &master_data); if (pio > 1) control |= 1; /* Programmable timing on */ if (drive->media == ide_disk) control |= 4; /* Prefetch, post write */ if (ide_pio_need_iordy(drive, pio)) control |= 2; /* IORDY */ if (is_slave) { master_data |= 0x4000; master_data &= ~0x0070; if (pio > 1) { /* Set PPE, IE and TIME */ master_data |= control << 4; } pci_read_config_byte(dev, slave_port, &slave_data); slave_data &= hwif->channel ? 0x0f : 0xf0; slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (hwif->channel ? 
4 : 0); } else { master_data &= ~0x3307; if (pio > 1) { /* enable PPE, IE and TIME */ master_data |= control; } master_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8); } pci_write_config_word(dev, master_port, master_data); if (is_slave) pci_write_config_byte(dev, slave_port, slave_data); spin_unlock_irqrestore(&tune_lock, flags); } /** * piix_set_dma_mode - set host controller for DMA mode * @hwif: port * @drive: drive * * Set a PIIX host controller to the desired DMA mode. This involves * programming the right timing data into the PCI configuration space. */ static void piix_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { struct pci_dev *dev = to_pci_dev(hwif->dev); u8 maslave = hwif->channel ? 0x42 : 0x40; int a_speed = 3 << (drive->dn * 4); int u_flag = 1 << drive->dn; int v_flag = 0x01 << drive->dn; int w_flag = 0x10 << drive->dn; int u_speed = 0; int sitre; u16 reg4042, reg4a; u8 reg48, reg54, reg55; const u8 speed = drive->dma_mode; pci_read_config_word(dev, maslave, &reg4042); sitre = (reg4042 & 0x4000) ? 1 : 0; pci_read_config_byte(dev, 0x48, &reg48); pci_read_config_word(dev, 0x4a, &reg4a); pci_read_config_byte(dev, 0x54, &reg54); pci_read_config_byte(dev, 0x55, &reg55); if (speed >= XFER_UDMA_0) { u8 udma = speed - XFER_UDMA_0; u_speed = min_t(u8, 2 - (udma & 1), udma) << (drive->dn * 4); if (!(reg48 & u_flag)) pci_write_config_byte(dev, 0x48, reg48 | u_flag); if (speed == XFER_UDMA_5) { pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag); } else { pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); } if ((reg4a & a_speed) != u_speed) pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed); if (speed > XFER_UDMA_2) { if (!(reg54 & v_flag)) pci_write_config_byte(dev, 0x54, reg54 | v_flag); } else pci_write_config_byte(dev, 0x54, reg54 & ~v_flag); } else { const u8 mwdma_to_pio[] = { 0, 3, 4 }; if (reg48 & u_flag) pci_write_config_byte(dev, 0x48, reg48 & ~u_flag); if (reg4a & a_speed) pci_write_config_word(dev, 0x4a, reg4a & ~a_speed); if (reg54 & v_flag) pci_write_config_byte(dev, 0x54, reg54 & ~v_flag); if (reg55 & w_flag) pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); if (speed >= XFER_MW_DMA_0) drive->pio_mode = mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0; else drive->pio_mode = XFER_PIO_2; /* for SWDMA2 */ piix_set_pio_mode(hwif, drive); } } /** * init_chipset_ich - set up the ICH chipset * @dev: PCI device to set up * * Initialize the PCI device as required. For the ICH this turns * out to be nice and simple. */ static int init_chipset_ich(struct pci_dev *dev) { u32 extra = 0; pci_read_config_dword(dev, 0x54, &extra); pci_write_config_dword(dev, 0x54, extra | 0x400); return 0; } /** * ich_clear_irq - clear BMDMA status * @drive: IDE drive * * ICHx contollers set DMA INTR no matter DMA or PIO. * BMDMA status might need to be cleared even for * PIO interrupts to prevent spurious/lost IRQ. */ static void ich_clear_irq(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; u8 dma_stat; /* * ide_dma_end() needs BMDMA status for error checking. * So, skip clearing BMDMA status here and leave it * to ide_dma_end() if this is DMA interrupt. */ if (drive->waiting_for_dma || hwif->dma_base == 0) return; /* clear the INTR & ERROR bits */ dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS); /* Should we force the bit as well ? 
*/ outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS); } struct ich_laptop { u16 device; u16 subvendor; u16 subdevice; }; /* * List of laptops that use short cables rather than 80 wire */ static const struct ich_laptop ich_laptop[] = { /* devid, subvendor, subdev */ { 0x27DF, 0x1025, 0x0102 }, /* ICH7 on Acer 5602aWLMi */ { 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */ { 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */ { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */ { 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */ { 0x27DF, 0x1071, 0xD221 }, /* ICH7 on Hercules EC-900 */ { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on Acer Aspire 2023WLMi */ { 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */ { 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */ { 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */ { 0x27df, 0x104d, 0x900e }, /* ICH7 on Sony TZ-90 */ /* end marker */ { 0, } }; static u8 piix_cable_detect(ide_hwif_t *hwif) { struct pci_dev *pdev = to_pci_dev(hwif->dev); const struct ich_laptop *lap = &ich_laptop[0]; u8 reg54h = 0, mask = hwif->channel ? 0xc0 : 0x30; /* check for specials */ while (lap->device) { if (lap->device == pdev->device && lap->subvendor == pdev->subsystem_vendor && lap->subdevice == pdev->subsystem_device) { return ATA_CBL_PATA40_SHORT; } lap++; } pci_read_config_byte(pdev, 0x54, &reg54h); return (reg54h & mask) ? ATA_CBL_PATA80 : ATA_CBL_PATA40; } /** * init_hwif_piix - fill in the hwif for the PIIX * @hwif: IDE interface * * Set up the ide_hwif_t for the PIIX interface according to the * capabilities of the hardware. */ static void __devinit init_hwif_piix(ide_hwif_t *hwif) { if (!hwif->dma_base) return; if (no_piix_dma) hwif->ultra_mask = hwif->mwdma_mask = hwif->swdma_mask = 0; } static const struct ide_port_ops piix_port_ops = { .set_pio_mode = piix_set_pio_mode, .set_dma_mode = piix_set_dma_mode, .cable_detect = piix_cable_detect, }; static const struct ide_port_ops ich_port_ops = { .set_pio_mode = piix_set_pio_mode, .set_dma_mode = piix_set_dma_mode, .clear_irq = ich_clear_irq, .cable_detect = piix_cable_detect, }; #define DECLARE_PIIX_DEV(udma) \ { \ .name = DRV_NAME, \ .init_hwif = init_hwif_piix, \ .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \ .port_ops = &piix_port_ops, \ .pio_mask = ATA_PIO4, \ .swdma_mask = ATA_SWDMA2_ONLY, \ .mwdma_mask = ATA_MWDMA12_ONLY, \ .udma_mask = udma, \ } #define DECLARE_ICH_DEV(mwdma, udma) \ { \ .name = DRV_NAME, \ .init_chipset = init_chipset_ich, \ .init_hwif = init_hwif_piix, \ .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \ .port_ops = &ich_port_ops, \ .pio_mask = ATA_PIO4, \ .swdma_mask = ATA_SWDMA2_ONLY, \ .mwdma_mask = mwdma, \ .udma_mask = udma, \ } static const struct ide_port_info piix_pci_info[] __devinitdata = { /* 0: MPIIX */ { /* * MPIIX actually has only a single IDE channel mapped to * the primary or secondary ports depending on the value * of the bit 14 of the IDETIM register at offset 0x6c */ .name = DRV_NAME, .enablebits = {{0x6d,0xc0,0x80}, {0x6d,0xc0,0xc0}}, .host_flags = IDE_HFLAG_ISA_PORTS | IDE_HFLAG_NO_DMA, .pio_mask = ATA_PIO4, /* This is a painful system best to let it self tune for now */ }, /* 1: PIIXa/PIIXb/PIIX3 */ DECLARE_PIIX_DEV(0x00), /* no udma */ /* 2: PIIX4 */ DECLARE_PIIX_DEV(ATA_UDMA2), /* 3: ICH0 */ DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA2), /* 4: ICH */ DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA4), /* 5: PIIX4 */ DECLARE_PIIX_DEV(ATA_UDMA4), /* 6: ICH[2-6]/ICH[2-3]M/C-ICH/ICH5-SATA/ESB2/ICH8M */ DECLARE_ICH_DEV(ATA_MWDMA12_ONLY, ATA_UDMA5), 
/* 7: ICH7/7-R, no MWDMA1 */ DECLARE_ICH_DEV(ATA_MWDMA2_ONLY, ATA_UDMA5), }; /** * piix_init_one - called when a PIIX is found * @dev: the piix device * @id: the matching pci id * * Called when the PCI registration layer (or the IDE initialization) * finds a device matching our IDE device tables. */ static int __devinit piix_init_one(struct pci_dev *dev, const struct pci_device_id *id) { return ide_pci_init_one(dev, &piix_pci_info[id->driver_data], NULL); } /** * piix_check_450nx - Check for problem 450NX setup * * Check for the present of 450NX errata #19 and errata #25. If * they are found, disable use of DMA IDE */ static void __devinit piix_check_450nx(void) { struct pci_dev *pdev = NULL; u16 cfg; while((pdev=pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev))!=NULL) { /* Look for 450NX PXB. Check for problem configurations A PCI quirk checks bit 6 already */ pci_read_config_word(pdev, 0x41, &cfg); /* Only on the original revision: IDE DMA can hang */ if (pdev->revision == 0x00) no_piix_dma = 1; /* On all revisions below 5 PXB bus lock must be disabled for IDE */ else if (cfg & (1<<14) && pdev->revision < 5) no_piix_dma = 2; } if(no_piix_dma) printk(KERN_WARNING DRV_NAME ": 450NX errata present, disabling IDE DMA.\n"); if(no_piix_dma == 2) printk(KERN_WARNING DRV_NAME ": A BIOS update may resolve this.\n"); } static const struct pci_device_id piix_pci_tbl[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371FB_0), 1 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371FB_1), 1 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371MX), 0 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371SB_1), 1 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371AB), 2 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801AB_1), 3 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82443MX_1), 2 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801AA_1), 4 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82372FB_1), 5 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82451NX), 2 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801BA_9), 6 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801BA_8), 6 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801CA_10), 6 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801CA_11), 6 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_11), 6 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801EB_11), 6 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801E_11), 6 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_10), 6 }, #ifdef CONFIG_BLK_DEV_IDE_SATA { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801EB_1), 6 }, #endif { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB_2), 6 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH6_19), 6 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH7_21), 7 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_1), 6 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB2_18), 7 }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH8_6), 6 }, { 0, }, }; MODULE_DEVICE_TABLE(pci, piix_pci_tbl); static struct pci_driver piix_pci_driver = { .name = "PIIX_IDE", .id_table = piix_pci_tbl, .probe = piix_init_one, .remove = ide_pci_remove, .suspend = ide_pci_suspend, .resume = ide_pci_resume, }; static int __init piix_ide_init(void) { piix_check_450nx(); return ide_pci_register_driver(&piix_pci_driver); } static void __exit piix_ide_exit(void) { pci_unregister_driver(&piix_pci_driver); } module_init(piix_ide_init); module_exit(piix_ide_exit); MODULE_AUTHOR("Andre Hedrick, Andrzej Krzysztofowicz"); MODULE_DESCRIPTION("PCI driver module for Intel PIIX IDE"); MODULE_LICENSE("GPL");
gpl-2.0
emwno/android_kernel_konaxx
drivers/input/apm-power.c
8232
2634
/*
 * Input Power Event -> APM Bridge
 *
 * Copyright (c) 2007 Richard Purdie
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/apm-emulation.h>

static void system_power_event(unsigned int keycode)
{
        switch (keycode) {
        case KEY_SUSPEND:
                apm_queue_event(APM_USER_SUSPEND);
                pr_info("Requesting system suspend...\n");
                break;
        default:
                break;
        }
}

static void apmpower_event(struct input_handle *handle, unsigned int type,
                           unsigned int code, int value)
{
        /* only react on key down events */
        if (value != 1)
                return;

        switch (type) {
        case EV_PWR:
                system_power_event(code);
                break;
        default:
                break;
        }
}

static int apmpower_connect(struct input_handler *handler,
                            struct input_dev *dev,
                            const struct input_device_id *id)
{
        struct input_handle *handle;
        int error;

        handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
        if (!handle)
                return -ENOMEM;

        handle->dev = dev;
        handle->handler = handler;
        handle->name = "apm-power";

        error = input_register_handle(handle);
        if (error) {
                pr_err("Failed to register input power handler, error %d\n",
                       error);
                kfree(handle);
                return error;
        }

        error = input_open_device(handle);
        if (error) {
                pr_err("Failed to open input power device, error %d\n",
                       error);
                input_unregister_handle(handle);
                kfree(handle);
                return error;
        }

        return 0;
}

static void apmpower_disconnect(struct input_handle *handle)
{
        input_close_device(handle);
        input_unregister_handle(handle);
        kfree(handle);
}

static const struct input_device_id apmpower_ids[] = {
        {
                .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
                .evbit = { BIT_MASK(EV_PWR) },
        },
        { },
};
MODULE_DEVICE_TABLE(input, apmpower_ids);

static struct input_handler apmpower_handler = {
        .event = apmpower_event,
        .connect = apmpower_connect,
        .disconnect = apmpower_disconnect,
        .name = "apm-power",
        .id_table = apmpower_ids,
};

static int __init apmpower_init(void)
{
        return input_register_handler(&apmpower_handler);
}

static void __exit apmpower_exit(void)
{
        input_unregister_handler(&apmpower_handler);
}

module_init(apmpower_init);
module_exit(apmpower_exit);

MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
MODULE_DESCRIPTION("Input Power Event -> APM Bridge");
MODULE_LICENSE("GPL");
gpl-2.0
JamesAng/lx-std
drivers/staging/ft1000/ft1000-usb/ft1000_download.c
8232
34720
//===================================================== // CopyRight (C) 2007 Qualcomm Inc. All Rights Reserved. // // // This file is part of Express Card USB Driver // // $Id: //==================================================== // 20090926; aelias; removed compiler warnings; ubuntu 9.04; 2.6.28-15-generic #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/usb.h> #include <linux/vmalloc.h> #include "ft1000_usb.h" #define DWNLD_HANDSHAKE_LOC 0x02 #define DWNLD_TYPE_LOC 0x04 #define DWNLD_SIZE_MSW_LOC 0x06 #define DWNLD_SIZE_LSW_LOC 0x08 #define DWNLD_PS_HDR_LOC 0x0A #define MAX_DSP_WAIT_LOOPS 40 #define DSP_WAIT_SLEEP_TIME 1000 /* 1 millisecond */ #define DSP_WAIT_DISPATCH_LVL 50 /* 50 usec */ #define HANDSHAKE_TIMEOUT_VALUE 0xF1F1 #define HANDSHAKE_RESET_VALUE 0xFEFE /* When DSP requests startover */ #define HANDSHAKE_RESET_VALUE_USB 0xFE7E /* When DSP requests startover */ #define HANDSHAKE_DSP_BL_READY 0xFEFE /* At start DSP writes this when bootloader ready */ #define HANDSHAKE_DSP_BL_READY_USB 0xFE7E /* At start DSP writes this when bootloader ready */ #define HANDSHAKE_DRIVER_READY 0xFFFF /* Driver writes after receiving 0xFEFE */ #define HANDSHAKE_SEND_DATA 0x0000 /* DSP writes this when ready for more data */ #define HANDSHAKE_REQUEST 0x0001 /* Request from DSP */ #define HANDSHAKE_RESPONSE 0x0000 /* Satisfied DSP request */ #define REQUEST_CODE_LENGTH 0x0000 #define REQUEST_RUN_ADDRESS 0x0001 #define REQUEST_CODE_SEGMENT 0x0002 /* In WORD count */ #define REQUEST_DONE_BL 0x0003 #define REQUEST_DONE_CL 0x0004 #define REQUEST_VERSION_INFO 0x0005 #define REQUEST_CODE_BY_VERSION 0x0006 #define REQUEST_MAILBOX_DATA 0x0007 #define REQUEST_FILE_CHECKSUM 0x0008 #define STATE_START_DWNLD 0x01 #define STATE_BOOT_DWNLD 0x02 #define STATE_CODE_DWNLD 0x03 #define STATE_DONE_DWNLD 0x04 #define STATE_SECTION_PROV 0x05 #define STATE_DONE_PROV 0x06 #define STATE_DONE_FILE 0x07 #define MAX_LENGTH 0x7f0 // Temporary download mechanism for Magnemite #define DWNLD_MAG_TYPE_LOC 0x00 #define DWNLD_MAG_LEN_LOC 0x01 #define DWNLD_MAG_ADDR_LOC 0x02 #define DWNLD_MAG_CHKSUM_LOC 0x03 #define DWNLD_MAG_VAL_LOC 0x04 #define HANDSHAKE_MAG_DSP_BL_READY 0xFEFE0000 /* At start DSP writes this when bootloader ready */ #define HANDSHAKE_MAG_DSP_ENTRY 0x01000000 /* Dsp writes this to request for entry address */ #define HANDSHAKE_MAG_DSP_DATA 0x02000000 /* Dsp writes this to request for data block */ #define HANDSHAKE_MAG_DSP_DONE 0x03000000 /* Dsp writes this to indicate download done */ #define HANDSHAKE_MAG_DRV_READY 0xFFFF0000 /* Driver writes this to indicate ready to download */ #define HANDSHAKE_MAG_DRV_DATA 0x02FECDAB /* Driver writes this to indicate data available to DSP */ #define HANDSHAKE_MAG_DRV_ENTRY 0x01FECDAB /* Driver writes this to indicate entry point to DSP */ #define HANDSHAKE_MAG_TIMEOUT_VALUE 0xF1F1 // New Magnemite downloader #define DWNLD_MAG1_HANDSHAKE_LOC 0x00 #define DWNLD_MAG1_TYPE_LOC 0x01 #define DWNLD_MAG1_SIZE_LOC 0x02 #define DWNLD_MAG1_PS_HDR_LOC 0x03 struct dsp_file_hdr { long version_id; // Version ID of this image format. long package_id; // Package ID of code release. long build_date; // Date/time stamp when file was built. long commands_offset; // Offset to attached commands in Pseudo Hdr format. long loader_offset; // Offset to bootloader code. long loader_code_address; // Start address of bootloader. long loader_code_end; // Where bootloader code ends. 
long loader_code_size; long version_data_offset; // Offset were scrambled version data begins. long version_data_size; // Size, in words, of scrambled version data. long nDspImages; // Number of DSP images in file. }; #pragma pack(1) struct dsp_image_info { long coff_date; // Date/time when DSP Coff image was built. long begin_offset; // Offset in file where image begins. long end_offset; // Offset in file where image begins. long run_address; // On chip Start address of DSP code. long image_size; // Size of image. long version; // Embedded version # of DSP code. unsigned short checksum; // DSP File checksum unsigned short pad1; }; //--------------------------------------------------------------------------- // Function: check_usb_db // // Parameters: struct ft1000_device - device structure // // Returns: 0 - success // // Description: This function checks if the doorbell register is cleared // // Notes: // //--------------------------------------------------------------------------- static u32 check_usb_db (struct ft1000_device *ft1000dev) { int loopcnt; u16 temp; u32 status; loopcnt = 0; while (loopcnt < 10) { status = ft1000_read_register(ft1000dev, &temp, FT1000_REG_DOORBELL); DEBUG("check_usb_db: read FT1000_REG_DOORBELL value is %x\n", temp); if (temp & 0x0080) { DEBUG("FT1000:Got checkusb doorbell\n"); status = ft1000_write_register(ft1000dev, 0x0080, FT1000_REG_DOORBELL); status = ft1000_write_register(ft1000dev, 0x0100, FT1000_REG_DOORBELL); status = ft1000_write_register(ft1000dev, 0x8000, FT1000_REG_DOORBELL); break; } else { loopcnt++; msleep(10); } } loopcnt = 0; while (loopcnt < 20) { status = ft1000_read_register(ft1000dev, &temp, FT1000_REG_DOORBELL); DEBUG("FT1000:check_usb_db:Doorbell = 0x%x\n", temp); if (temp & 0x8000) { loopcnt++; msleep(10); } else { DEBUG("check_usb_db: door bell is cleared, return 0\n"); return 0; } } return HANDSHAKE_MAG_TIMEOUT_VALUE; } //--------------------------------------------------------------------------- // Function: get_handshake // // Parameters: struct ft1000_device - device structure // u16 expected_value - the handshake value expected // // Returns: handshakevalue - success // HANDSHAKE_TIMEOUT_VALUE - failure // // Description: This function gets the handshake and compare with the expected value // // Notes: // //--------------------------------------------------------------------------- static u16 get_handshake(struct ft1000_device *ft1000dev, u16 expected_value) { u16 handshake; int loopcnt; u32 status = 0; struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net); loopcnt = 0; while (loopcnt < 100) { /* Need to clear downloader doorbell if Hartley ASIC */ status = ft1000_write_register(ft1000dev, FT1000_DB_DNLD_RX, FT1000_REG_DOORBELL); if (pft1000info->fcodeldr) { DEBUG(" get_handshake: fcodeldr is %d\n", pft1000info->fcodeldr); pft1000info->fcodeldr = 0; status = check_usb_db(ft1000dev); if (status != STATUS_SUCCESS) { DEBUG("get_handshake: check_usb_db failed\n"); status = STATUS_FAILURE; break; } status = ft1000_write_register(ft1000dev, FT1000_DB_DNLD_RX, FT1000_REG_DOORBELL); } status = ft1000_read_dpram16(ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC, (u8 *)&handshake, 1); handshake = ntohs(handshake); if (status) return HANDSHAKE_TIMEOUT_VALUE; if ((handshake == expected_value) || (handshake == HANDSHAKE_RESET_VALUE_USB)) { return handshake; } else { loopcnt++; msleep(10); } } return HANDSHAKE_TIMEOUT_VALUE; } //--------------------------------------------------------------------------- // Function: put_handshake // // 
Parameters: struct ft1000_device - device structure // u16 handshake_value - handshake to be written // // Returns: none // // Description: This function write the handshake value to the handshake location // in DPRAM // // Notes: // //--------------------------------------------------------------------------- static void put_handshake(struct ft1000_device *ft1000dev,u16 handshake_value) { u32 tempx; u16 tempword; u32 status; tempx = (u32)handshake_value; tempx = ntohl(tempx); tempword = (u16)(tempx & 0xffff); status = ft1000_write_dpram16(ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC, tempword, 0); tempword = (u16)(tempx >> 16); status = ft1000_write_dpram16(ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC, tempword, 1); status = ft1000_write_register(ft1000dev, FT1000_DB_DNLD_TX, FT1000_REG_DOORBELL); } static u16 get_handshake_usb(struct ft1000_device *ft1000dev, u16 expected_value) { u16 handshake; int loopcnt; u16 temp; u32 status = 0; struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net); loopcnt = 0; handshake = 0; while (loopcnt < 100) { if (pft1000info->usbboot == 2) { status = ft1000_read_dpram32(ft1000dev, 0, (u8 *)&(pft1000info->tempbuf[0]), 64); for (temp = 0; temp < 16; temp++) { DEBUG("tempbuf %d = 0x%x\n", temp, pft1000info->tempbuf[temp]); } status = ft1000_read_dpram16(ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC, (u8 *)&handshake, 1); DEBUG("handshake from read_dpram16 = 0x%x\n", handshake); if (pft1000info->dspalive == pft1000info->tempbuf[6]) { handshake = 0; } else { handshake = pft1000info->tempbuf[1]; pft1000info->dspalive = pft1000info->tempbuf[6]; } } else { status = ft1000_read_dpram16(ft1000dev, DWNLD_MAG1_HANDSHAKE_LOC, (u8 *)&handshake, 1); } loopcnt++; msleep(10); handshake = ntohs(handshake); if ((handshake == expected_value) || (handshake == HANDSHAKE_RESET_VALUE_USB)) return handshake; } return HANDSHAKE_TIMEOUT_VALUE; } static void put_handshake_usb(struct ft1000_device *ft1000dev,u16 handshake_value) { int i; for (i=0; i<1000; i++); } //--------------------------------------------------------------------------- // Function: get_request_type // // Parameters: struct ft1000_device - device structure // // Returns: request type - success // // Description: This function returns the request type // // Notes: // //--------------------------------------------------------------------------- static u16 get_request_type(struct ft1000_device *ft1000dev) { u16 request_type; u32 status; u16 tempword; u32 tempx; struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net); if (pft1000info->bootmode == 1) { status = fix_ft1000_read_dpram32(ft1000dev, DWNLD_MAG1_TYPE_LOC, (u8 *)&tempx); tempx = ntohl(tempx); } else { tempx = 0; status = ft1000_read_dpram16(ft1000dev, DWNLD_MAG1_TYPE_LOC, (u8 *)&tempword, 1); tempx |= (tempword << 16); tempx = ntohl(tempx); } request_type = (u16)tempx; return request_type; } static u16 get_request_type_usb(struct ft1000_device *ft1000dev) { u16 request_type; u32 status; u16 tempword; u32 tempx; struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net); if (pft1000info->bootmode == 1) { status = fix_ft1000_read_dpram32(ft1000dev, DWNLD_MAG1_TYPE_LOC, (u8 *)&tempx); tempx = ntohl(tempx); } else { if (pft1000info->usbboot == 2) { tempx = pft1000info->tempbuf[2]; tempword = pft1000info->tempbuf[3]; } else { tempx = 0; status = ft1000_read_dpram16(ft1000dev, DWNLD_MAG1_TYPE_LOC, (u8 *)&tempword, 1); } tempx |= (tempword << 16); tempx = ntohl(tempx); } request_type = (u16)tempx; return request_type; } 
//--------------------------------------------------------------------------- // Function: get_request_value // // Parameters: struct ft1000_device - device structure // // Returns: request value - success // // Description: This function returns the request value // // Notes: // //--------------------------------------------------------------------------- static long get_request_value(struct ft1000_device *ft1000dev) { u32 value; u16 tempword; u32 status; struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net); if (pft1000info->bootmode == 1) { status = fix_ft1000_read_dpram32(ft1000dev, DWNLD_MAG1_SIZE_LOC, (u8 *)&value); value = ntohl(value); } else { status = ft1000_read_dpram16(ft1000dev, DWNLD_MAG1_SIZE_LOC, (u8 *)&tempword, 0); value = tempword; status = ft1000_read_dpram16(ft1000dev, DWNLD_MAG1_SIZE_LOC, (u8 *)&tempword, 1); value |= (tempword << 16); value = ntohl(value); } return value; } //--------------------------------------------------------------------------- // Function: put_request_value // // Parameters: struct ft1000_device - device structure // long lvalue - value to be put into DPRAM location DWNLD_MAG1_SIZE_LOC // // Returns: none // // Description: This function writes a value to DWNLD_MAG1_SIZE_LOC // // Notes: // //--------------------------------------------------------------------------- static void put_request_value(struct ft1000_device *ft1000dev, long lvalue) { u32 tempx; u32 status; tempx = ntohl(lvalue); status = fix_ft1000_write_dpram32(ft1000dev, DWNLD_MAG1_SIZE_LOC, (u8 *)&tempx); } //--------------------------------------------------------------------------- // Function: hdr_checksum // // Parameters: struct pseudo_hdr *pHdr - Pseudo header pointer // // Returns: checksum - success // // Description: This function returns the checksum of the pseudo header // // Notes: // //--------------------------------------------------------------------------- static u16 hdr_checksum(struct pseudo_hdr *pHdr) { u16 *usPtr = (u16 *)pHdr; u16 chksum; chksum = ((((((usPtr[0] ^ usPtr[1]) ^ usPtr[2]) ^ usPtr[3]) ^ usPtr[4]) ^ usPtr[5]) ^ usPtr[6]); return chksum; } static int check_buffers(u16 *buff_w, u16 *buff_r, int len, int offset) { int i; for (i = 0; i < len; i++) { if (buff_w[i] != buff_r[i + offset]) return -1; } return 0; } //--------------------------------------------------------------------------- // Function: write_blk // // Parameters: struct ft1000_device - device structure // u16 **pUsFile - DSP image file pointer in u16 // u8 **pUcFile - DSP image file pointer in u8 // long word_length - length of the buffer to be written // to DPRAM // // Returns: STATUS_SUCCESS - success // STATUS_FAILURE - failure // // Description: This function writes a block of DSP image to DPRAM // // Notes: // //--------------------------------------------------------------------------- static u32 write_blk (struct ft1000_device *ft1000dev, u16 **pUsFile, u8 **pUcFile, long word_length) { u32 Status = STATUS_SUCCESS; u16 dpram; int loopcnt, i, j; u16 tempword; u16 tempbuffer[64]; u16 resultbuffer[64]; struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net); //DEBUG("FT1000:download:start word_length = %d\n",(int)word_length); dpram = (u16)DWNLD_MAG1_PS_HDR_LOC; tempword = *(*pUsFile); (*pUsFile)++; Status = ft1000_write_dpram16(ft1000dev, dpram, tempword, 0); tempword = *(*pUsFile); (*pUsFile)++; Status = ft1000_write_dpram16(ft1000dev, dpram++, tempword, 1); *pUcFile = *pUcFile + 4; word_length--; tempword = (u16)word_length; word_length = (word_length / 16) + 1; for 
(; word_length > 0; word_length--) /* In words */ { loopcnt = 0; for (i=0; i<32; i++) { if (tempword != 0) { tempbuffer[i++] = *(*pUsFile); (*pUsFile)++; tempbuffer[i] = *(*pUsFile); (*pUsFile)++; *pUcFile = *pUcFile + 4; loopcnt++; tempword--; } else { tempbuffer[i++] = 0; tempbuffer[i] = 0; } } //DEBUG("write_blk: loopcnt is %d\n", loopcnt); //DEBUG("write_blk: bootmode = %d\n", bootmode); //DEBUG("write_blk: dpram = %x\n", dpram); if (pft1000info->bootmode == 0) { if (dpram >= 0x3F4) Status = ft1000_write_dpram32 (ft1000dev, dpram, (u8 *)&tempbuffer[0], 8); else Status = ft1000_write_dpram32 (ft1000dev, dpram, (u8 *)&tempbuffer[0], 64); } else { for (j=0; j<10; j++) { Status = ft1000_write_dpram32 (ft1000dev, dpram, (u8 *)&tempbuffer[0], 64); if (Status == STATUS_SUCCESS) { // Work around for ASIC bit stuffing problem. if ( (tempbuffer[31] & 0xfe00) == 0xfe00) { Status = ft1000_write_dpram32(ft1000dev, dpram+12, (u8 *)&tempbuffer[24], 64); } // Let's check the data written Status = ft1000_read_dpram32 (ft1000dev, dpram, (u8 *)&resultbuffer[0], 64); if ( (tempbuffer[31] & 0xfe00) == 0xfe00) { if (check_buffers(tempbuffer, resultbuffer, 28, 0)) { DEBUG("FT1000:download:DPRAM write failed 1 during bootloading\n"); msleep(10); Status = STATUS_FAILURE; break; } Status = ft1000_read_dpram32 (ft1000dev, dpram+12, (u8 *)&resultbuffer[0], 64); if (check_buffers(tempbuffer, resultbuffer, 16, 24)) { DEBUG("FT1000:download:DPRAM write failed 2 during bootloading\n"); msleep(10); Status = STATUS_FAILURE; break; } } else { if (check_buffers(tempbuffer, resultbuffer, 32, 0)) { DEBUG("FT1000:download:DPRAM write failed 3 during bootloading\n"); msleep(10); Status = STATUS_FAILURE; break; } } if (Status == STATUS_SUCCESS) break; } } if (Status != STATUS_SUCCESS) { DEBUG("FT1000:download:Write failed tempbuffer[31] = 0x%x\n", tempbuffer[31]); break; } } dpram = dpram + loopcnt; } return Status; } static void usb_dnld_complete (struct urb *urb) { //DEBUG("****** usb_dnld_complete\n"); } //--------------------------------------------------------------------------- // Function: write_blk_fifo // // Parameters: struct ft1000_device - device structure // u16 **pUsFile - DSP image file pointer in u16 // u8 **pUcFile - DSP image file pointer in u8 // long word_length - length of the buffer to be written // to DPRAM // // Returns: STATUS_SUCCESS - success // STATUS_FAILURE - failure // // Description: This function writes a block of DSP image to DPRAM // // Notes: // //--------------------------------------------------------------------------- static u32 write_blk_fifo(struct ft1000_device *ft1000dev, u16 **pUsFile, u8 **pUcFile, long word_length) { u32 Status = STATUS_SUCCESS; int byte_length; byte_length = word_length * 4; if (byte_length && ((byte_length % 64) == 0)) byte_length += 4; if (byte_length < 64) byte_length = 68; usb_init_urb(ft1000dev->tx_urb); memcpy(ft1000dev->tx_buf, *pUcFile, byte_length); usb_fill_bulk_urb(ft1000dev->tx_urb, ft1000dev->dev, usb_sndbulkpipe(ft1000dev->dev, ft1000dev->bulk_out_endpointAddr), ft1000dev->tx_buf, byte_length, usb_dnld_complete, (void *)ft1000dev); usb_submit_urb(ft1000dev->tx_urb, GFP_ATOMIC); *pUsFile = *pUsFile + (word_length << 1); *pUcFile = *pUcFile + (word_length << 2); return Status; } //--------------------------------------------------------------------------- // // Function: scram_dnldr // // Synopsis: Scramble downloader for Harley based ASIC via USB interface // // Arguments: pFileStart - pointer to start of file // FileLength - file length // // 
Returns: status - return code //--------------------------------------------------------------------------- u16 scram_dnldr(struct ft1000_device *ft1000dev, void *pFileStart, u32 FileLength) { u16 status = STATUS_SUCCESS; u32 state; u16 handshake; struct pseudo_hdr *pseudo_header; u16 pseudo_header_len; long word_length; u16 request; u16 temp; u16 tempword; struct dsp_file_hdr *file_hdr; struct dsp_image_info *dsp_img_info = NULL; long requested_version; bool correct_version; struct drv_msg *mailbox_data; u16 *data = NULL; u16 *s_file = NULL; u8 *c_file = NULL; u8 *boot_end = NULL, *code_end = NULL; int image; long loader_code_address, loader_code_size = 0; long run_address = 0, run_size = 0; u32 templong; u32 image_chksum = 0; u16 dpram = 0; u8 *pbuffer; struct prov_record *pprov_record; struct ft1000_info *pft1000info = netdev_priv(ft1000dev->net); DEBUG("Entered scram_dnldr...\n"); pft1000info->fcodeldr = 0; pft1000info->usbboot = 0; pft1000info->dspalive = 0xffff; // // Get version id of file, at first 4 bytes of file, for newer files. // state = STATE_START_DWNLD; file_hdr = (struct dsp_file_hdr *)pFileStart; ft1000_write_register(ft1000dev, 0x800, FT1000_REG_MAG_WATERMARK); s_file = (u16 *) (pFileStart + file_hdr->loader_offset); c_file = (u8 *) (pFileStart + file_hdr->loader_offset); boot_end = (u8 *) (pFileStart + file_hdr->loader_code_end); loader_code_address = file_hdr->loader_code_address; loader_code_size = file_hdr->loader_code_size; correct_version = FALSE; while ((status == STATUS_SUCCESS) && (state != STATE_DONE_FILE)) { switch (state) { case STATE_START_DWNLD: DEBUG("FT1000:STATE_START_DWNLD\n"); if (pft1000info->usbboot) handshake = get_handshake_usb(ft1000dev, HANDSHAKE_DSP_BL_READY); else handshake = get_handshake(ft1000dev, HANDSHAKE_DSP_BL_READY); if (handshake == HANDSHAKE_DSP_BL_READY) { DEBUG ("scram_dnldr: handshake is HANDSHAKE_DSP_BL_READY, call put_handshake(HANDSHAKE_DRIVER_READY)\n"); put_handshake(ft1000dev, HANDSHAKE_DRIVER_READY); } else { DEBUG ("FT1000:download:Download error: Handshake failed\n"); status = STATUS_FAILURE; } state = STATE_BOOT_DWNLD; break; case STATE_BOOT_DWNLD: DEBUG("FT1000:STATE_BOOT_DWNLD\n"); pft1000info->bootmode = 1; handshake = get_handshake(ft1000dev, HANDSHAKE_REQUEST); if (handshake == HANDSHAKE_REQUEST) { /* * Get type associated with the request. */ request = get_request_type(ft1000dev); switch (request) { case REQUEST_RUN_ADDRESS: DEBUG("FT1000:REQUEST_RUN_ADDRESS\n"); put_request_value(ft1000dev, loader_code_address); break; case REQUEST_CODE_LENGTH: DEBUG("FT1000:REQUEST_CODE_LENGTH\n"); put_request_value(ft1000dev, loader_code_size); break; case REQUEST_DONE_BL: DEBUG("FT1000:REQUEST_DONE_BL\n"); /* Reposition ptrs to beginning of code section */ s_file = (u16 *) (boot_end); c_file = (u8 *) (boot_end); //DEBUG("FT1000:download:s_file = 0x%8x\n", (int)s_file); //DEBUG("FT1000:download:c_file = 0x%8x\n", (int)c_file); state = STATE_CODE_DWNLD; pft1000info->fcodeldr = 1; break; case REQUEST_CODE_SEGMENT: //DEBUG("FT1000:REQUEST_CODE_SEGMENT\n"); word_length = get_request_value(ft1000dev); //DEBUG("FT1000:word_length = 0x%x\n", (int)word_length); //NdisMSleep (100); if (word_length > MAX_LENGTH) { DEBUG ("FT1000:download:Download error: Max length exceeded\n"); status = STATUS_FAILURE; break; } if ((word_length * 2 + c_file) > boot_end) { /* * Error, beyond boot code range. 
*/ DEBUG ("FT1000:download:Download error: Requested len=%d exceeds BOOT code boundary.\n", (int)word_length); status = STATUS_FAILURE; break; } /* * Position ASIC DPRAM auto-increment pointer. */ dpram = (u16) DWNLD_MAG1_PS_HDR_LOC; if (word_length & 0x1) word_length++; word_length = word_length / 2; status = write_blk(ft1000dev, &s_file, &c_file, word_length); //DEBUG("write_blk returned %d\n", status); break; default: DEBUG ("FT1000:download:Download error: Bad request type=%d in BOOT download state.\n", request); status = STATUS_FAILURE; break; } if (pft1000info->usbboot) put_handshake_usb(ft1000dev, HANDSHAKE_RESPONSE); else put_handshake(ft1000dev, HANDSHAKE_RESPONSE); } else { DEBUG ("FT1000:download:Download error: Handshake failed\n"); status = STATUS_FAILURE; } break; case STATE_CODE_DWNLD: //DEBUG("FT1000:STATE_CODE_DWNLD\n"); pft1000info->bootmode = 0; if (pft1000info->usbboot) handshake = get_handshake_usb(ft1000dev, HANDSHAKE_REQUEST); else handshake = get_handshake(ft1000dev, HANDSHAKE_REQUEST); if (handshake == HANDSHAKE_REQUEST) { /* * Get type associated with the request. */ if (pft1000info->usbboot) request = get_request_type_usb(ft1000dev); else request = get_request_type(ft1000dev); switch (request) { case REQUEST_FILE_CHECKSUM: DEBUG ("FT1000:download:image_chksum = 0x%8x\n", image_chksum); put_request_value(ft1000dev, image_chksum); break; case REQUEST_RUN_ADDRESS: DEBUG ("FT1000:download: REQUEST_RUN_ADDRESS\n"); if (correct_version) { DEBUG ("FT1000:download:run_address = 0x%8x\n", (int)run_address); put_request_value(ft1000dev, run_address); } else { DEBUG ("FT1000:download:Download error: Got Run address request before image offset request.\n"); status = STATUS_FAILURE; break; } break; case REQUEST_CODE_LENGTH: DEBUG ("FT1000:download:REQUEST_CODE_LENGTH\n"); if (correct_version) { DEBUG ("FT1000:download:run_size = 0x%8x\n", (int)run_size); put_request_value(ft1000dev, run_size); } else { DEBUG ("FT1000:download:Download error: Got Size request before image offset request.\n"); status = STATUS_FAILURE; break; } break; case REQUEST_DONE_CL: pft1000info->usbboot = 3; /* Reposition ptrs to beginning of provisioning section */ s_file = (u16 *) (pFileStart + file_hdr->commands_offset); c_file = (u8 *) (pFileStart + file_hdr->commands_offset); state = STATE_DONE_DWNLD; break; case REQUEST_CODE_SEGMENT: //DEBUG("FT1000:download: REQUEST_CODE_SEGMENT - CODELOADER\n"); if (!correct_version) { DEBUG ("FT1000:download:Download error: Got Code Segment request before image offset request.\n"); status = STATUS_FAILURE; break; } word_length = get_request_value(ft1000dev); //DEBUG("FT1000:download:word_length = %d\n", (int)word_length); if (word_length > MAX_LENGTH) { DEBUG ("FT1000:download:Download error: Max length exceeded\n"); status = STATUS_FAILURE; break; } if ((word_length * 2 + c_file) > code_end) { /* * Error, beyond boot code range. */ DEBUG ("FT1000:download:Download error: Requested len=%d exceeds DSP code boundary.\n", (int)word_length); status = STATUS_FAILURE; break; } /* * Position ASIC DPRAM auto-increment pointer. 
*/ dpram = (u16) DWNLD_MAG1_PS_HDR_LOC; if (word_length & 0x1) word_length++; word_length = word_length / 2; write_blk_fifo(ft1000dev, &s_file, &c_file, word_length); if (pft1000info->usbboot == 0) pft1000info->usbboot++; if (pft1000info->usbboot == 1) { tempword = 0; ft1000_write_dpram16(ft1000dev, DWNLD_MAG1_PS_HDR_LOC, tempword, 0); } break; case REQUEST_MAILBOX_DATA: DEBUG ("FT1000:download: REQUEST_MAILBOX_DATA\n"); // Convert length from byte count to word count. Make sure we round up. word_length = (long)(pft1000info->DSPInfoBlklen + 1) / 2; put_request_value(ft1000dev, word_length); mailbox_data = (struct drv_msg *)&(pft1000info-> DSPInfoBlk[0]); /* * Position ASIC DPRAM auto-increment pointer. */ data = (u16 *) & mailbox_data->data[0]; dpram = (u16) DWNLD_MAG1_PS_HDR_LOC; if (word_length & 0x1) word_length++; word_length = (word_length / 2); for (; word_length > 0; word_length--) { /* In words */ templong = *data++; templong |= (*data++ << 16); status = fix_ft1000_write_dpram32 (ft1000dev, dpram++, (u8 *) & templong); } break; case REQUEST_VERSION_INFO: DEBUG ("FT1000:download:REQUEST_VERSION_INFO\n"); word_length = file_hdr->version_data_size; put_request_value(ft1000dev, word_length); /* * Position ASIC DPRAM auto-increment pointer. */ s_file = (u16 *) (pFileStart + file_hdr-> version_data_offset); dpram = (u16) DWNLD_MAG1_PS_HDR_LOC; if (word_length & 0x1) word_length++; word_length = (word_length / 2); for (; word_length > 0; word_length--) { /* In words */ templong = ntohs(*s_file++); temp = ntohs(*s_file++); templong |= (temp << 16); status = fix_ft1000_write_dpram32 (ft1000dev, dpram++, (u8 *) & templong); } break; case REQUEST_CODE_BY_VERSION: DEBUG ("FT1000:download:REQUEST_CODE_BY_VERSION\n"); correct_version = FALSE; requested_version = get_request_value(ft1000dev); dsp_img_info = (struct dsp_image_info *)(pFileStart + sizeof (struct dsp_file_hdr)); for (image = 0; image < file_hdr->nDspImages; image++) { if (dsp_img_info->version == requested_version) { correct_version = TRUE; DEBUG ("FT1000:download: correct_version is TRUE\n"); s_file = (u16 *) (pFileStart + dsp_img_info-> begin_offset); c_file = (u8 *) (pFileStart + dsp_img_info-> begin_offset); code_end = (u8 *) (pFileStart + dsp_img_info-> end_offset); run_address = dsp_img_info-> run_address; run_size = dsp_img_info-> image_size; image_chksum = (u32) dsp_img_info-> checksum; break; } dsp_img_info++; } //end of for if (!correct_version) { /* * Error, beyond boot code range. 
*/ DEBUG ("FT1000:download:Download error: Bad Version Request = 0x%x.\n", (int)requested_version); status = STATUS_FAILURE; break; } break; default: DEBUG ("FT1000:download:Download error: Bad request type=%d in CODE download state.\n", request); status = STATUS_FAILURE; break; } if (pft1000info->usbboot) put_handshake_usb(ft1000dev, HANDSHAKE_RESPONSE); else put_handshake(ft1000dev, HANDSHAKE_RESPONSE); } else { DEBUG ("FT1000:download:Download error: Handshake failed\n"); status = STATUS_FAILURE; } break; case STATE_DONE_DWNLD: DEBUG("FT1000:download:Code loader is done...\n"); state = STATE_SECTION_PROV; break; case STATE_SECTION_PROV: DEBUG("FT1000:download:STATE_SECTION_PROV\n"); pseudo_header = (struct pseudo_hdr *)c_file; if (pseudo_header->checksum == hdr_checksum(pseudo_header)) { if (pseudo_header->portdest != 0x80 /* Dsp OAM */ ) { state = STATE_DONE_PROV; break; } pseudo_header_len = ntohs(pseudo_header->length); /* Byte length for PROV records */ // Get buffer for provisioning data pbuffer = kmalloc((pseudo_header_len + sizeof(struct pseudo_hdr)), GFP_ATOMIC); if (pbuffer) { memcpy(pbuffer, (void *)c_file, (u32) (pseudo_header_len + sizeof(struct pseudo_hdr))); // link provisioning data pprov_record = kmalloc(sizeof(struct prov_record), GFP_ATOMIC); if (pprov_record) { pprov_record->pprov_data = pbuffer; list_add_tail(&pprov_record-> list, &pft1000info-> prov_list); // Move to next entry if available c_file = (u8 *) ((unsigned long) c_file + (u32) ((pseudo_header_len + 1) & 0xFFFFFFFE) + sizeof(struct pseudo_hdr)); if ((unsigned long)(c_file) - (unsigned long)(pFileStart) >= (unsigned long)FileLength) { state = STATE_DONE_FILE; } } else { kfree(pbuffer); status = STATUS_FAILURE; } } else { status = STATUS_FAILURE; } } else { /* Checksum did not compute */ status = STATUS_FAILURE; } DEBUG ("ft1000:download: after STATE_SECTION_PROV, state = %d, status= %d\n", state, status); break; case STATE_DONE_PROV: DEBUG("FT1000:download:STATE_DONE_PROV\n"); state = STATE_DONE_FILE; break; default: status = STATUS_FAILURE; break; } /* End Switch */ if (status != STATUS_SUCCESS) { break; } /**** // Check if Card is present status = Harley_Read_Register(&temp, FT1000_REG_SUP_IMASK); if ( (status != NDIS_STATUS_SUCCESS) || (temp == 0x0000) ) { break; } status = Harley_Read_Register(&temp, FT1000_REG_ASIC_ID); if ( (status != NDIS_STATUS_SUCCESS) || (temp == 0xffff) ) { break; } ****/ } /* End while */ DEBUG("Download exiting with status = 0x%8x\n", status); ft1000_write_register(ft1000dev, FT1000_DB_DNLD_TX, FT1000_REG_DOORBELL); return status; }
gpl-2.0
GenetICS/lge_kernel_msm7x27
drivers/infiniband/hw/ipath/ipath_iba6110.c
9000
63667
/* * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ /* * This file contains all of the code that is specific to the InfiniPath * HT chip. */ #include <linux/vmalloc.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/htirq.h> #include <rdma/ib_verbs.h> #include "ipath_kernel.h" #include "ipath_registers.h" static void ipath_setup_ht_setextled(struct ipath_devdata *, u64, u64); /* * This lists the InfiniPath registers, in the actual chip layout. * This structure should never be directly accessed. * * The names are in InterCap form because they're taken straight from * the chip specification. Since they're only used in this file, they * don't pollute the rest of the source. 
*/ struct _infinipath_do_not_use_kernel_regs { unsigned long long Revision; unsigned long long Control; unsigned long long PageAlign; unsigned long long PortCnt; unsigned long long DebugPortSelect; unsigned long long DebugPort; unsigned long long SendRegBase; unsigned long long UserRegBase; unsigned long long CounterRegBase; unsigned long long Scratch; unsigned long long ReservedMisc1; unsigned long long InterruptConfig; unsigned long long IntBlocked; unsigned long long IntMask; unsigned long long IntStatus; unsigned long long IntClear; unsigned long long ErrorMask; unsigned long long ErrorStatus; unsigned long long ErrorClear; unsigned long long HwErrMask; unsigned long long HwErrStatus; unsigned long long HwErrClear; unsigned long long HwDiagCtrl; unsigned long long MDIO; unsigned long long IBCStatus; unsigned long long IBCCtrl; unsigned long long ExtStatus; unsigned long long ExtCtrl; unsigned long long GPIOOut; unsigned long long GPIOMask; unsigned long long GPIOStatus; unsigned long long GPIOClear; unsigned long long RcvCtrl; unsigned long long RcvBTHQP; unsigned long long RcvHdrSize; unsigned long long RcvHdrCnt; unsigned long long RcvHdrEntSize; unsigned long long RcvTIDBase; unsigned long long RcvTIDCnt; unsigned long long RcvEgrBase; unsigned long long RcvEgrCnt; unsigned long long RcvBufBase; unsigned long long RcvBufSize; unsigned long long RxIntMemBase; unsigned long long RxIntMemSize; unsigned long long RcvPartitionKey; unsigned long long ReservedRcv[10]; unsigned long long SendCtrl; unsigned long long SendPIOBufBase; unsigned long long SendPIOSize; unsigned long long SendPIOBufCnt; unsigned long long SendPIOAvailAddr; unsigned long long TxIntMemBase; unsigned long long TxIntMemSize; unsigned long long ReservedSend[9]; unsigned long long SendBufferError; unsigned long long SendBufferErrorCONT1; unsigned long long SendBufferErrorCONT2; unsigned long long SendBufferErrorCONT3; unsigned long long ReservedSBE[4]; unsigned long long RcvHdrAddr0; unsigned long long RcvHdrAddr1; unsigned long long RcvHdrAddr2; unsigned long long RcvHdrAddr3; unsigned long long RcvHdrAddr4; unsigned long long RcvHdrAddr5; unsigned long long RcvHdrAddr6; unsigned long long RcvHdrAddr7; unsigned long long RcvHdrAddr8; unsigned long long ReservedRHA[7]; unsigned long long RcvHdrTailAddr0; unsigned long long RcvHdrTailAddr1; unsigned long long RcvHdrTailAddr2; unsigned long long RcvHdrTailAddr3; unsigned long long RcvHdrTailAddr4; unsigned long long RcvHdrTailAddr5; unsigned long long RcvHdrTailAddr6; unsigned long long RcvHdrTailAddr7; unsigned long long RcvHdrTailAddr8; unsigned long long ReservedRHTA[7]; unsigned long long Sync; /* Software only */ unsigned long long Dump; /* Software only */ unsigned long long SimVer; /* Software only */ unsigned long long ReservedSW[5]; unsigned long long SerdesConfig0; unsigned long long SerdesConfig1; unsigned long long SerdesStatus; unsigned long long XGXSConfig; unsigned long long ReservedSW2[4]; }; struct _infinipath_do_not_use_counters { __u64 LBIntCnt; __u64 LBFlowStallCnt; __u64 Reserved1; __u64 TxUnsupVLErrCnt; __u64 TxDataPktCnt; __u64 TxFlowPktCnt; __u64 TxDwordCnt; __u64 TxLenErrCnt; __u64 TxMaxMinLenErrCnt; __u64 TxUnderrunCnt; __u64 TxFlowStallCnt; __u64 TxDroppedPktCnt; __u64 RxDroppedPktCnt; __u64 RxDataPktCnt; __u64 RxFlowPktCnt; __u64 RxDwordCnt; __u64 RxLenErrCnt; __u64 RxMaxMinLenErrCnt; __u64 RxICRCErrCnt; __u64 RxVCRCErrCnt; __u64 RxFlowCtrlErrCnt; __u64 RxBadFormatCnt; __u64 RxLinkProblemCnt; __u64 RxEBPCnt; __u64 RxLPCRCErrCnt; __u64 
RxBufOvflCnt; __u64 RxTIDFullErrCnt; __u64 RxTIDValidErrCnt; __u64 RxPKeyMismatchCnt; __u64 RxP0HdrEgrOvflCnt; __u64 RxP1HdrEgrOvflCnt; __u64 RxP2HdrEgrOvflCnt; __u64 RxP3HdrEgrOvflCnt; __u64 RxP4HdrEgrOvflCnt; __u64 RxP5HdrEgrOvflCnt; __u64 RxP6HdrEgrOvflCnt; __u64 RxP7HdrEgrOvflCnt; __u64 RxP8HdrEgrOvflCnt; __u64 Reserved6; __u64 Reserved7; __u64 IBStatusChangeCnt; __u64 IBLinkErrRecoveryCnt; __u64 IBLinkDownedCnt; __u64 IBSymbolErrCnt; }; #define IPATH_KREG_OFFSET(field) (offsetof( \ struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64)) #define IPATH_CREG_OFFSET(field) (offsetof( \ struct _infinipath_do_not_use_counters, field) / sizeof(u64)) static const struct ipath_kregs ipath_ht_kregs = { .kr_control = IPATH_KREG_OFFSET(Control), .kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase), .kr_debugport = IPATH_KREG_OFFSET(DebugPort), .kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect), .kr_errorclear = IPATH_KREG_OFFSET(ErrorClear), .kr_errormask = IPATH_KREG_OFFSET(ErrorMask), .kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus), .kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl), .kr_extstatus = IPATH_KREG_OFFSET(ExtStatus), .kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear), .kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask), .kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut), .kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus), .kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl), .kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear), .kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask), .kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus), .kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl), .kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus), .kr_intblocked = IPATH_KREG_OFFSET(IntBlocked), .kr_intclear = IPATH_KREG_OFFSET(IntClear), .kr_interruptconfig = IPATH_KREG_OFFSET(InterruptConfig), .kr_intmask = IPATH_KREG_OFFSET(IntMask), .kr_intstatus = IPATH_KREG_OFFSET(IntStatus), .kr_mdio = IPATH_KREG_OFFSET(MDIO), .kr_pagealign = IPATH_KREG_OFFSET(PageAlign), .kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey), .kr_portcnt = IPATH_KREG_OFFSET(PortCnt), .kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP), .kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase), .kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize), .kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl), .kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase), .kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt), .kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt), .kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize), .kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize), .kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase), .kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize), .kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase), .kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt), .kr_revision = IPATH_KREG_OFFSET(Revision), .kr_scratch = IPATH_KREG_OFFSET(Scratch), .kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError), .kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl), .kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendPIOAvailAddr), .kr_sendpiobufbase = IPATH_KREG_OFFSET(SendPIOBufBase), .kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendPIOBufCnt), .kr_sendpiosize = IPATH_KREG_OFFSET(SendPIOSize), .kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase), .kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase), .kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize), .kr_userregbase = IPATH_KREG_OFFSET(UserRegBase), .kr_serdesconfig0 = IPATH_KREG_OFFSET(SerdesConfig0), .kr_serdesconfig1 = IPATH_KREG_OFFSET(SerdesConfig1), .kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus), .kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig), /* * These should not be used directly via ipath_write_kreg64(), * use them 
with ipath_write_kreg64_port(), */ .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0), .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0) }; static const struct ipath_cregs ipath_ht_cregs = { .cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt), .cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt), .cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt), .cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt), .cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt), .cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt), .cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt), .cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt), .cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt), .cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt), .cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt), .cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt), /* calc from Reg_CounterRegBase + offset */ .cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt), .cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt), .cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt), .cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt), .cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt), .cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt), .cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt), .cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt), .cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt), .cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt), .cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt), .cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt), .cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt), .cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt), .cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt), .cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt), .cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt), .cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt), .cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt), .cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt), .cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt) }; /* kr_intstatus, kr_intclear, kr_intmask bits */ #define INFINIPATH_I_RCVURG_MASK ((1U<<9)-1) #define INFINIPATH_I_RCVURG_SHIFT 0 #define INFINIPATH_I_RCVAVAIL_MASK ((1U<<9)-1) #define INFINIPATH_I_RCVAVAIL_SHIFT 12 /* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */ #define INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT 0 #define INFINIPATH_HWE_HTCMEMPARITYERR_MASK 0x3FFFFFULL #define INFINIPATH_HWE_HTCLNKABYTE0CRCERR 0x0000000000800000ULL #define INFINIPATH_HWE_HTCLNKABYTE1CRCERR 0x0000000001000000ULL #define INFINIPATH_HWE_HTCLNKBBYTE0CRCERR 0x0000000002000000ULL #define INFINIPATH_HWE_HTCLNKBBYTE1CRCERR 0x0000000004000000ULL #define INFINIPATH_HWE_HTCMISCERR4 0x0000000008000000ULL #define INFINIPATH_HWE_HTCMISCERR5 0x0000000010000000ULL #define INFINIPATH_HWE_HTCMISCERR6 0x0000000020000000ULL #define INFINIPATH_HWE_HTCMISCERR7 0x0000000040000000ULL #define INFINIPATH_HWE_HTCBUSTREQPARITYERR 0x0000000080000000ULL #define INFINIPATH_HWE_HTCBUSTRESPPARITYERR 0x0000000100000000ULL #define INFINIPATH_HWE_HTCBUSIREQPARITYERR 0x0000000200000000ULL #define INFINIPATH_HWE_COREPLL_FBSLIP 0x0080000000000000ULL #define INFINIPATH_HWE_COREPLL_RFSLIP 0x0100000000000000ULL #define INFINIPATH_HWE_HTBPLL_FBSLIP 0x0200000000000000ULL #define INFINIPATH_HWE_HTBPLL_RFSLIP 0x0400000000000000ULL #define INFINIPATH_HWE_HTAPLL_FBSLIP 0x0800000000000000ULL #define INFINIPATH_HWE_HTAPLL_RFSLIP 0x1000000000000000ULL #define INFINIPATH_HWE_SERDESPLLFAILED 0x2000000000000000ULL #define IBA6110_IBCS_LINKTRAININGSTATE_MASK 0xf #define 
IBA6110_IBCS_LINKSTATE_SHIFT 4 /* kr_extstatus bits */ #define INFINIPATH_EXTS_FREQSEL 0x2 #define INFINIPATH_EXTS_SERDESSEL 0x4 #define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000 #define INFINIPATH_EXTS_MEMBIST_CORRECT 0x0000000000008000 /* TID entries (memory), HT-only */ #define INFINIPATH_RT_ADDR_MASK 0xFFFFFFFFFFULL /* 40 bits valid */ #define INFINIPATH_RT_VALID 0x8000000000000000ULL #define INFINIPATH_RT_ADDR_SHIFT 0 #define INFINIPATH_RT_BUFSIZE_MASK 0x3FFFULL #define INFINIPATH_RT_BUFSIZE_SHIFT 48 #define INFINIPATH_R_INTRAVAIL_SHIFT 16 #define INFINIPATH_R_TAILUPD_SHIFT 31 /* kr_xgxsconfig bits */ #define INFINIPATH_XGXS_RESET 0x7ULL /* * masks and bits that are different in different chips, or present only * in one */ static const ipath_err_t infinipath_hwe_htcmemparityerr_mask = INFINIPATH_HWE_HTCMEMPARITYERR_MASK; static const ipath_err_t infinipath_hwe_htcmemparityerr_shift = INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT; static const ipath_err_t infinipath_hwe_htclnkabyte0crcerr = INFINIPATH_HWE_HTCLNKABYTE0CRCERR; static const ipath_err_t infinipath_hwe_htclnkabyte1crcerr = INFINIPATH_HWE_HTCLNKABYTE1CRCERR; static const ipath_err_t infinipath_hwe_htclnkbbyte0crcerr = INFINIPATH_HWE_HTCLNKBBYTE0CRCERR; static const ipath_err_t infinipath_hwe_htclnkbbyte1crcerr = INFINIPATH_HWE_HTCLNKBBYTE1CRCERR; #define _IPATH_GPIO_SDA_NUM 1 #define _IPATH_GPIO_SCL_NUM 0 #define IPATH_GPIO_SDA \ (1ULL << (_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT)) #define IPATH_GPIO_SCL \ (1ULL << (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT)) /* keep the code below somewhat more readable; not used elsewhere */ #define _IPATH_HTLINK0_CRCBITS (infinipath_hwe_htclnkabyte0crcerr | \ infinipath_hwe_htclnkabyte1crcerr) #define _IPATH_HTLINK1_CRCBITS (infinipath_hwe_htclnkbbyte0crcerr | \ infinipath_hwe_htclnkbbyte1crcerr) #define _IPATH_HTLANE0_CRCBITS (infinipath_hwe_htclnkabyte0crcerr | \ infinipath_hwe_htclnkbbyte0crcerr) #define _IPATH_HTLANE1_CRCBITS (infinipath_hwe_htclnkabyte1crcerr | \ infinipath_hwe_htclnkbbyte1crcerr) static void hwerr_crcbits(struct ipath_devdata *dd, ipath_err_t hwerrs, char *msg, size_t msgl) { char bitsmsg[64]; ipath_err_t crcbits = hwerrs & (_IPATH_HTLINK0_CRCBITS | _IPATH_HTLINK1_CRCBITS); /* don't check if 8bit HT */ if (dd->ipath_flags & IPATH_8BIT_IN_HT0) crcbits &= ~infinipath_hwe_htclnkabyte1crcerr; /* don't check if 8bit HT */ if (dd->ipath_flags & IPATH_8BIT_IN_HT1) crcbits &= ~infinipath_hwe_htclnkbbyte1crcerr; /* * we'll want to ignore link errors on link that is * not in use, if any. For now, complain about both */ if (crcbits) { u16 ctrl0, ctrl1; snprintf(bitsmsg, sizeof bitsmsg, "[HT%s lane %s CRC (%llx); powercycle to completely clear]", !(crcbits & _IPATH_HTLINK1_CRCBITS) ? "0 (A)" : (!(crcbits & _IPATH_HTLINK0_CRCBITS) ? "1 (B)" : "0+1 (A+B)"), !(crcbits & _IPATH_HTLANE1_CRCBITS) ? "0" : (!(crcbits & _IPATH_HTLANE0_CRCBITS) ? "1" : "0+1"), (unsigned long long) crcbits); strlcat(msg, bitsmsg, msgl); /* * print extra info for debugging. slave/primary * config word 4, 8 (link control 0, 1) */ if (pci_read_config_word(dd->pcidev, dd->ipath_ht_slave_off + 0x4, &ctrl0)) dev_info(&dd->pcidev->dev, "Couldn't read " "linkctrl0 of slave/primary " "config block\n"); else if (!(ctrl0 & 1 << 6)) /* not if EOC bit set */ ipath_dbg("HT linkctrl0 0x%x%s%s\n", ctrl0, ((ctrl0 >> 8) & 7) ? " CRC" : "", ((ctrl0 >> 4) & 1) ? 
"linkfail" : ""); if (pci_read_config_word(dd->pcidev, dd->ipath_ht_slave_off + 0x8, &ctrl1)) dev_info(&dd->pcidev->dev, "Couldn't read " "linkctrl1 of slave/primary " "config block\n"); else if (!(ctrl1 & 1 << 6)) /* not if EOC bit set */ ipath_dbg("HT linkctrl1 0x%x%s%s\n", ctrl1, ((ctrl1 >> 8) & 7) ? " CRC" : "", ((ctrl1 >> 4) & 1) ? "linkfail" : ""); /* disable until driver reloaded */ dd->ipath_hwerrmask &= ~crcbits; ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, dd->ipath_hwerrmask); ipath_dbg("HT crc errs: %s\n", msg); } else ipath_dbg("ignoring HT crc errors 0x%llx, " "not in use\n", (unsigned long long) (hwerrs & (_IPATH_HTLINK0_CRCBITS | _IPATH_HTLINK1_CRCBITS))); } /* 6110 specific hardware errors... */ static const struct ipath_hwerror_msgs ipath_6110_hwerror_msgs[] = { INFINIPATH_HWE_MSG(HTCBUSIREQPARITYERR, "HTC Ireq Parity"), INFINIPATH_HWE_MSG(HTCBUSTREQPARITYERR, "HTC Treq Parity"), INFINIPATH_HWE_MSG(HTCBUSTRESPPARITYERR, "HTC Tresp Parity"), INFINIPATH_HWE_MSG(HTCMISCERR5, "HT core Misc5"), INFINIPATH_HWE_MSG(HTCMISCERR6, "HT core Misc6"), INFINIPATH_HWE_MSG(HTCMISCERR7, "HT core Misc7"), INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"), INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"), }; #define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \ INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \ << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) #define RXE_EAGER_PARITY (INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID \ << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) static void ipath_ht_txe_recover(struct ipath_devdata *dd) { ++ipath_stats.sps_txeparity; dev_info(&dd->pcidev->dev, "Recovering from TXE PIO parity error\n"); } /** * ipath_ht_handle_hwerrors - display hardware errors. * @dd: the infinipath device * @msg: the output buffer * @msgl: the size of the output buffer * * Use same msg buffer as regular errors to avoid excessive stack * use. Most hardware errors are catastrophic, but for right now, * we'll print them and continue. We reuse the same message buffer as * ipath_handle_errors() to avoid excessive stack usage. */ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg, size_t msgl) { ipath_err_t hwerrs; u32 bits, ctrl; int isfatal = 0; char bitsmsg[64]; int log_idx; hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus); if (!hwerrs) { ipath_cdbg(VERBOSE, "Called but no hardware errors set\n"); /* * better than printing cofusing messages * This seems to be related to clearing the crc error, or * the pll error during init. */ goto bail; } else if (hwerrs == -1LL) { ipath_dev_err(dd, "Read of hardware error status failed " "(all bits set); ignoring\n"); goto bail; } ipath_stats.sps_hwerrs++; /* Always clear the error status register, except MEMBISTFAIL, * regardless of whether we continue or stop using the chip. * We want that set so we know it failed, even across driver reload. * We'll still ignore it in the hwerrmask. We do this partly for * diagnostics, but also for support */ ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, hwerrs&~INFINIPATH_HWE_MEMBISTFAILED); hwerrs &= dd->ipath_hwerrmask; /* We log some errors to EEPROM, check if we have any of those. 
*/ for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx) if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log) ipath_inc_eeprom_err(dd, log_idx, 1); /* * make sure we get this much out, unless told to be quiet, * it's a parity error we may recover from, * or it's occurred within the last 5 seconds */ if ((hwerrs & ~(dd->ipath_lasthwerror | TXE_PIO_PARITY | RXE_EAGER_PARITY)) || (ipath_debug & __IPATH_VERBDBG)) dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx " "(cleared)\n", (unsigned long long) hwerrs); dd->ipath_lasthwerror |= hwerrs; if (hwerrs & ~dd->ipath_hwe_bitsextant) ipath_dev_err(dd, "hwerror interrupt with unknown errors " "%llx set\n", (unsigned long long) (hwerrs & ~dd->ipath_hwe_bitsextant)); ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control); if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) { /* * parity errors in send memory are recoverable, * just cancel the send (if indicated in * sendbuffererror), * count the occurrence, unfreeze (if no other handled * hardware error bits are set), and continue. They can * occur if a processor speculative read is done to the PIO * buffer while we are sending a packet, for example. */ if (hwerrs & TXE_PIO_PARITY) { ipath_ht_txe_recover(dd); hwerrs &= ~TXE_PIO_PARITY; } if (!hwerrs) { ipath_dbg("Clearing freezemode on ignored or " "recovered hardware error\n"); ipath_clear_freeze(dd); } } *msg = '\0'; /* * may someday want to decode into which bits are which * functional area for parity errors, etc. */ if (hwerrs & (infinipath_hwe_htcmemparityerr_mask << INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT)) { bits = (u32) ((hwerrs >> INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT) & INFINIPATH_HWE_HTCMEMPARITYERR_MASK); snprintf(bitsmsg, sizeof bitsmsg, "[HTC Parity Errs %x] ", bits); strlcat(msg, bitsmsg, msgl); } ipath_format_hwerrors(hwerrs, ipath_6110_hwerror_msgs, sizeof(ipath_6110_hwerror_msgs) / sizeof(ipath_6110_hwerror_msgs[0]), msg, msgl); if (hwerrs & (_IPATH_HTLINK0_CRCBITS | _IPATH_HTLINK1_CRCBITS)) hwerr_crcbits(dd, hwerrs, msg, msgl); if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) { strlcat(msg, "[Memory BIST test failed, InfiniPath hardware unusable]", msgl); /* ignore from now on, so disable until driver reloaded */ dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED; ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, dd->ipath_hwerrmask); } #define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP | \ INFINIPATH_HWE_COREPLL_RFSLIP | \ INFINIPATH_HWE_HTBPLL_FBSLIP | \ INFINIPATH_HWE_HTBPLL_RFSLIP | \ INFINIPATH_HWE_HTAPLL_FBSLIP | \ INFINIPATH_HWE_HTAPLL_RFSLIP) if (hwerrs & _IPATH_PLL_FAIL) { snprintf(bitsmsg, sizeof bitsmsg, "[PLL failed (%llx), InfiniPath hardware unusable]", (unsigned long long) (hwerrs & _IPATH_PLL_FAIL)); strlcat(msg, bitsmsg, msgl); /* ignore from now on, so disable until driver reloaded */ dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL); ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, dd->ipath_hwerrmask); } if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) { /* * If it occurs, it is left masked since the eternal * interface is unused */ dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED; ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, dd->ipath_hwerrmask); } if (hwerrs) { /* * if any set that we aren't ignoring; only * make the complaint once, in case it's stuck * or recurring, and we get here multiple * times. 
* force link down, so switch knows, and * LEDs are turned off */ if (dd->ipath_flags & IPATH_INITTED) { ipath_set_linkstate(dd, IPATH_IB_LINKDOWN); ipath_setup_ht_setextled(dd, INFINIPATH_IBCS_L_STATE_DOWN, INFINIPATH_IBCS_LT_STATE_DISABLED); ipath_dev_err(dd, "Fatal Hardware Error (freeze " "mode), no longer usable, SN %.16s\n", dd->ipath_serial); isfatal = 1; } *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY; /* mark as having had error */ *dd->ipath_statusp |= IPATH_STATUS_HWERROR; /* * mark as not usable, at a minimum until driver * is reloaded, probably until reboot, since no * other reset is possible. */ dd->ipath_flags &= ~IPATH_INITTED; } else *msg = 0; /* recovered from all of them */ if (*msg) ipath_dev_err(dd, "%s hardware error\n", msg); if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg) /* * for status file; if no trailing brace is copied, * we'll know it was truncated. */ snprintf(dd->ipath_freezemsg, dd->ipath_freezelen, "{%s}", msg); bail:; } /** * ipath_ht_boardname - fill in the board name * @dd: the infinipath device * @name: the output buffer * @namelen: the size of the output buffer * * fill in the board name, based on the board revision register */ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name, size_t namelen) { char *n = NULL; u8 boardrev = dd->ipath_boardrev; int ret = 0; switch (boardrev) { case 5: /* * original production board; two production levels, with * different serial number ranges. See ipath_ht_early_init() for * case where we enable IPATH_GPIO_INTR for later serial # range. * Original 112* serial number is no longer supported. */ n = "InfiniPath_QHT7040"; break; case 7: /* small form factor production board */ n = "InfiniPath_QHT7140"; break; default: /* don't know, just print the number */ ipath_dev_err(dd, "Don't yet know about board " "with ID %u\n", boardrev); snprintf(name, namelen, "Unknown_InfiniPath_QHT7xxx_%u", boardrev); break; } if (n) snprintf(name, namelen, "%s", n); if (ret) { ipath_dev_err(dd, "Unsupported InfiniPath board %s!\n", name); goto bail; } if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 || dd->ipath_minrev > 4)) { /* * This version of the driver only supports Rev 3.2 - 3.4 */ ipath_dev_err(dd, "Unsupported InfiniPath hardware revision %u.%u!\n", dd->ipath_majrev, dd->ipath_minrev); ret = 1; goto bail; } /* * pkt/word counters are 32 bit, and therefore wrap fast enough * that we snapshot them from a timer, and maintain 64 bit shadow * copies */ dd->ipath_flags |= IPATH_32BITCOUNTERS; dd->ipath_flags |= IPATH_GPIO_INTR; if (dd->ipath_lbus_speed != 800) ipath_dev_err(dd, "Incorrectly configured for HT @ %uMHz\n", dd->ipath_lbus_speed); /* * set here, not in ipath_init_*_funcs because we have to do * it after we can read chip registers. 
*/ dd->ipath_ureg_align = ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign); bail: return ret; } static void ipath_check_htlink(struct ipath_devdata *dd) { u8 linkerr, link_off, i; for (i = 0; i < 2; i++) { link_off = dd->ipath_ht_slave_off + i * 4 + 0xd; if (pci_read_config_byte(dd->pcidev, link_off, &linkerr)) dev_info(&dd->pcidev->dev, "Couldn't read " "linkerror%d of HT slave/primary block\n", i); else if (linkerr & 0xf0) { ipath_cdbg(VERBOSE, "HT linkerr%d bits 0x%x set, " "clearing\n", linkerr >> 4, i); /* * writing the linkerr bits that are set should * clear them */ if (pci_write_config_byte(dd->pcidev, link_off, linkerr)) ipath_dbg("Failed write to clear HT " "linkerror%d\n", i); if (pci_read_config_byte(dd->pcidev, link_off, &linkerr)) dev_info(&dd->pcidev->dev, "Couldn't reread linkerror%d of " "HT slave/primary block\n", i); else if (linkerr & 0xf0) dev_info(&dd->pcidev->dev, "HT linkerror%d bits 0x%x " "couldn't be cleared\n", i, linkerr >> 4); } } } static int ipath_setup_ht_reset(struct ipath_devdata *dd) { ipath_dbg("No reset possible for this InfiniPath hardware\n"); return 0; } #define HT_INTR_DISC_CONFIG 0x80 /* HT interrupt and discovery cap */ #define HT_INTR_REG_INDEX 2 /* intconfig requires indirect accesses */ /* * Bits 13-15 of command==0 is slave/primary block. Clear any HT CRC * errors. We only bother to do this at load time, because it's OK if * it happened before we were loaded (first time after boot/reset), * but any time after that, it's fatal anyway. Also need to not check * for upper byte errors if we are in 8 bit mode, so figure out * our width. For now, at least, also complain if it's 8 bit. */ static void slave_or_pri_blk(struct ipath_devdata *dd, struct pci_dev *pdev, int pos, u8 cap_type) { u8 linkwidth = 0, linkerr, link_a_b_off, link_off; u16 linkctrl = 0; int i; dd->ipath_ht_slave_off = pos; /* command word, master_host bit */ /* master host || slave */ if ((cap_type >> 2) & 1) link_a_b_off = 4; else link_a_b_off = 0; ipath_cdbg(VERBOSE, "HT%u (Link %c) connected to processor\n", link_a_b_off ? 1 : 0, link_a_b_off ? 'B' : 'A'); link_a_b_off += pos; /* * check both link control registers; clear both HT CRC sets if * necessary. */ for (i = 0; i < 2; i++) { link_off = pos + i * 4 + 0x4; if (pci_read_config_word(pdev, link_off, &linkctrl)) ipath_dev_err(dd, "Couldn't read HT link control%d " "register\n", i); else if (linkctrl & (0xf << 8)) { ipath_cdbg(VERBOSE, "Clear linkctrl%d CRC Error " "bits %x\n", i, linkctrl & (0xf << 8)); /* * now write them back to clear the error. */ pci_write_config_word(pdev, link_off, linkctrl & (0xf << 8)); } } /* * As with HT CRC bits, same for protocol errors that might occur * during boot. */ for (i = 0; i < 2; i++) { link_off = pos + i * 4 + 0xd; if (pci_read_config_byte(pdev, link_off, &linkerr)) dev_info(&pdev->dev, "Couldn't read linkerror%d " "of HT slave/primary block\n", i); else if (linkerr & 0xf0) { ipath_cdbg(VERBOSE, "HT linkerr%d bits 0x%x set, " "clearing\n", linkerr >> 4, i); /* * writing the linkerr bits that are set will clear * them */ if (pci_write_config_byte (pdev, link_off, linkerr)) ipath_dbg("Failed write to clear HT " "linkerror%d\n", i); if (pci_read_config_byte(pdev, link_off, &linkerr)) dev_info(&pdev->dev, "Couldn't reread " "linkerror%d of HT slave/primary " "block\n", i); else if (linkerr & 0xf0) dev_info(&pdev->dev, "HT linkerror%d bits " "0x%x couldn't be cleared\n", i, linkerr >> 4); } } /* * this is just for our link to the host, not devices connected * through tunnel. 
*/ if (pci_read_config_byte(pdev, link_a_b_off + 7, &linkwidth)) ipath_dev_err(dd, "Couldn't read HT link width " "config register\n"); else { u32 width; switch (linkwidth & 7) { case 5: width = 4; break; case 4: width = 2; break; case 3: width = 32; break; case 1: width = 16; break; case 0: default: /* if wrong, assume 8 bit */ width = 8; break; } dd->ipath_lbus_width = width; if (linkwidth != 0x11) { ipath_dev_err(dd, "Not configured for 16 bit HT " "(%x)\n", linkwidth); if (!(linkwidth & 0xf)) { ipath_dbg("Will ignore HT lane1 errors\n"); dd->ipath_flags |= IPATH_8BIT_IN_HT0; } } } /* * this is just for our link to the host, not devices connected * through tunnel. */ if (pci_read_config_byte(pdev, link_a_b_off + 0xd, &linkwidth)) ipath_dev_err(dd, "Couldn't read HT link frequency " "config register\n"); else { u32 speed; switch (linkwidth & 0xf) { case 6: speed = 1000; break; case 5: speed = 800; break; case 4: speed = 600; break; case 3: speed = 500; break; case 2: speed = 400; break; case 1: speed = 300; break; default: /* * assume reserved and vendor-specific are 200... */ case 0: speed = 200; break; } dd->ipath_lbus_speed = speed; } snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info), "HyperTransport,%uMHz,x%u\n", dd->ipath_lbus_speed, dd->ipath_lbus_width); } static int ipath_ht_intconfig(struct ipath_devdata *dd) { int ret; if (dd->ipath_intconfig) { ipath_write_kreg(dd, dd->ipath_kregs->kr_interruptconfig, dd->ipath_intconfig); /* interrupt address */ ret = 0; } else { ipath_dev_err(dd, "No interrupts enabled, couldn't setup " "interrupt address\n"); ret = -EINVAL; } return ret; } static void ipath_ht_irq_update(struct pci_dev *dev, int irq, struct ht_irq_msg *msg) { struct ipath_devdata *dd = pci_get_drvdata(dev); u64 prev_intconfig = dd->ipath_intconfig; dd->ipath_intconfig = msg->address_lo; dd->ipath_intconfig |= ((u64) msg->address_hi) << 32; /* * If the previous value of dd->ipath_intconfig is zero, we're * getting configured for the first time, and must not program the * intconfig register here (it will be programmed later, when the * hardware is ready). Otherwise, we should. */ if (prev_intconfig) ipath_ht_intconfig(dd); } /** * ipath_setup_ht_config - setup the interruptconfig register * @dd: the infinipath device * @pdev: the PCI device * * setup the interruptconfig register from the HT config info. * Also clear CRC errors in HT linkcontrol, if necessary. * This is done only for the real hardware. It is done before * chip address space is initted, so can't touch infinipath registers */ static int ipath_setup_ht_config(struct ipath_devdata *dd, struct pci_dev *pdev) { int pos, ret; ret = __ht_create_irq(pdev, 0, ipath_ht_irq_update); if (ret < 0) { ipath_dev_err(dd, "Couldn't create interrupt handler: " "err %d\n", ret); goto bail; } dd->ipath_irq = ret; ret = 0; /* * Handle clearing CRC errors in linkctrl register if necessary. We * do this early, before we ever enable errors or hardware errors, * mostly to avoid causing the chip to enter freeze mode. */ pos = pci_find_capability(pdev, PCI_CAP_ID_HT); if (!pos) { ipath_dev_err(dd, "Couldn't find HyperTransport " "capability; no interrupts\n"); ret = -ENODEV; goto bail; } do { u8 cap_type; /* * The HT capability type byte is 3 bytes after the * capability byte. 
*/ if (pci_read_config_byte(pdev, pos + 3, &cap_type)) { dev_info(&pdev->dev, "Couldn't read config " "command @ %d\n", pos); continue; } if (!(cap_type & 0xE0)) slave_or_pri_blk(dd, pdev, pos, cap_type); } while ((pos = pci_find_next_capability(pdev, pos, PCI_CAP_ID_HT))); dd->ipath_flags |= IPATH_SWAP_PIOBUFS; bail: return ret; } /** * ipath_setup_ht_cleanup - clean up any per-chip chip-specific stuff * @dd: the infinipath device * * Called during driver unload. * This is currently a nop for the HT chip, not for all chips */ static void ipath_setup_ht_cleanup(struct ipath_devdata *dd) { } /** * ipath_setup_ht_setextled - set the state of the two external LEDs * @dd: the infinipath device * @lst: the L state * @ltst: the LT state * * Set the state of the two external LEDs, to indicate physical and * logical state of IB link. For this chip (at least with recommended * board pinouts), LED1 is Green (physical state), and LED2 is Yellow * (logical state) * * Note: We try to match the Mellanox HCA LED behavior as best * we can. Green indicates physical link state is OK (something is * plugged in, and we can train). * Amber indicates the link is logically up (ACTIVE). * Mellanox further blinks the amber LED to indicate data packet * activity, but we have no hardware support for that, so it would * require waking up every 10-20 msecs and checking the counters * on the chip, and then turning the LED off if appropriate. That's * visible overhead, so not something we will do. * */ static void ipath_setup_ht_setextled(struct ipath_devdata *dd, u64 lst, u64 ltst) { u64 extctl; unsigned long flags = 0; /* the diags use the LED to indicate diag info, so we leave * the external LED alone when the diags are running */ if (ipath_diag_inuse) return; /* Allow override of LED display for, e.g. Locating system in rack */ if (dd->ipath_led_override) { ltst = (dd->ipath_led_override & IPATH_LED_PHYS) ? INFINIPATH_IBCS_LT_STATE_LINKUP : INFINIPATH_IBCS_LT_STATE_DISABLED; lst = (dd->ipath_led_override & IPATH_LED_LOG) ? INFINIPATH_IBCS_L_STATE_ACTIVE : INFINIPATH_IBCS_L_STATE_DOWN; } spin_lock_irqsave(&dd->ipath_gpio_lock, flags); /* * start by setting both LED control bits to off, then turn * on the appropriate bit(s). 
*/ if (dd->ipath_boardrev == 8) { /* LS/X-1 uses different pins */ /* * major difference is that INFINIPATH_EXTC_LEDGBLERR_OFF * is inverted, because it is normally used to indicate * a hardware fault at reset, if there were errors */ extctl = (dd->ipath_extctrl & ~INFINIPATH_EXTC_LEDGBLOK_ON) | INFINIPATH_EXTC_LEDGBLERR_OFF; if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP) extctl &= ~INFINIPATH_EXTC_LEDGBLERR_OFF; if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE) extctl |= INFINIPATH_EXTC_LEDGBLOK_ON; } else { extctl = dd->ipath_extctrl & ~(INFINIPATH_EXTC_LED1PRIPORT_ON | INFINIPATH_EXTC_LED2PRIPORT_ON); if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP) extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON; if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE) extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON; } dd->ipath_extctrl = extctl; ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl); spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags); } static void ipath_init_ht_variables(struct ipath_devdata *dd) { /* * setup the register offsets, since they are different for each * chip */ dd->ipath_kregs = &ipath_ht_kregs; dd->ipath_cregs = &ipath_ht_cregs; dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM; dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM; dd->ipath_gpio_sda = IPATH_GPIO_SDA; dd->ipath_gpio_scl = IPATH_GPIO_SCL; /* * Fill in data for field-values that change in newer chips. * We dynamically specify only the mask for LINKTRAININGSTATE * and only the shift for LINKSTATE, as they are the only ones * that change. Also precalculate the 3 link states of interest * and the combined mask. */ dd->ibcs_ls_shift = IBA6110_IBCS_LINKSTATE_SHIFT; dd->ibcs_lts_mask = IBA6110_IBCS_LINKTRAININGSTATE_MASK; dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK << dd->ibcs_ls_shift) | dd->ibcs_lts_mask; dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP << INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) | (INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift); dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP << INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) | (INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift); dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP << INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) | (INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift); /* * Fill in data for ibcc field-values that change in newer chips. * We dynamically specify only the mask for LINKINITCMD * and only the shift for LINKCMD and MAXPKTLEN, as they are * the only ones that change. */ dd->ibcc_lic_mask = INFINIPATH_IBCC_LINKINITCMD_MASK; dd->ibcc_lc_shift = INFINIPATH_IBCC_LINKCMD_SHIFT; dd->ibcc_mpl_shift = INFINIPATH_IBCC_MAXPKTLEN_SHIFT; /* Fill in shifts for RcvCtrl. 
*/ dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT; dd->ipath_r_intravail_shift = INFINIPATH_R_INTRAVAIL_SHIFT; dd->ipath_r_tailupd_shift = INFINIPATH_R_TAILUPD_SHIFT; dd->ipath_r_portcfg_shift = 0; /* Not on IBA6110 */ dd->ipath_i_bitsextant = (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) | (INFINIPATH_I_RCVAVAIL_MASK << INFINIPATH_I_RCVAVAIL_SHIFT) | INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT | INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO; dd->ipath_e_bitsextant = INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC | INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN | INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN | INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR | INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP | INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION | INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN | INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK | INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SUNDERRUN | INFINIPATH_E_SPKTLEN | INFINIPATH_E_SDROPPEDSMPPKT | INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM | INFINIPATH_E_SUNSUPVL | INFINIPATH_E_IBSTATUSCHANGED | INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET | INFINIPATH_E_HARDWARE; dd->ipath_hwe_bitsextant = (INFINIPATH_HWE_HTCMEMPARITYERR_MASK << INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT) | (INFINIPATH_HWE_TXEMEMPARITYERR_MASK << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) | (INFINIPATH_HWE_RXEMEMPARITYERR_MASK << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) | INFINIPATH_HWE_HTCLNKABYTE0CRCERR | INFINIPATH_HWE_HTCLNKABYTE1CRCERR | INFINIPATH_HWE_HTCLNKBBYTE0CRCERR | INFINIPATH_HWE_HTCLNKBBYTE1CRCERR | INFINIPATH_HWE_HTCMISCERR4 | INFINIPATH_HWE_HTCMISCERR5 | INFINIPATH_HWE_HTCMISCERR6 | INFINIPATH_HWE_HTCMISCERR7 | INFINIPATH_HWE_HTCBUSTREQPARITYERR | INFINIPATH_HWE_HTCBUSTRESPPARITYERR | INFINIPATH_HWE_HTCBUSIREQPARITYERR | INFINIPATH_HWE_RXDSYNCMEMPARITYERR | INFINIPATH_HWE_MEMBISTFAILED | INFINIPATH_HWE_COREPLL_FBSLIP | INFINIPATH_HWE_COREPLL_RFSLIP | INFINIPATH_HWE_HTBPLL_FBSLIP | INFINIPATH_HWE_HTBPLL_RFSLIP | INFINIPATH_HWE_HTAPLL_FBSLIP | INFINIPATH_HWE_HTAPLL_RFSLIP | INFINIPATH_HWE_SERDESPLLFAILED | INFINIPATH_HWE_IBCBUSTOSPCPARITYERR | INFINIPATH_HWE_IBCBUSFRSPCPARITYERR; dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK; dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK; dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT; dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT; /* * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity. * 2 is Some Misc, 3 is reserved for future. 
*/ dd->ipath_eep_st_masks[0].hwerrs_to_log = INFINIPATH_HWE_TXEMEMPARITYERR_MASK << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT; dd->ipath_eep_st_masks[1].hwerrs_to_log = INFINIPATH_HWE_RXEMEMPARITYERR_MASK << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT; dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET; dd->delay_mult = 2; /* SDR, 4X, can't change */ dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X; dd->ipath_link_speed_supported = IPATH_IB_SDR; dd->ipath_link_width_enabled = IB_WIDTH_4X; dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported; /* these can't change for this chip, so set once */ dd->ipath_link_width_active = dd->ipath_link_width_enabled; dd->ipath_link_speed_active = dd->ipath_link_speed_enabled; } /** * ipath_ht_init_hwerrors - enable hardware errors * @dd: the infinipath device * * now that we have finished initializing everything that might reasonably * cause a hardware error, and cleared those errors bits as they occur, * we can enable hardware errors in the mask (potentially enabling * freeze mode), and enable hardware errors as errors (along with * everything else) in errormask */ static void ipath_ht_init_hwerrors(struct ipath_devdata *dd) { ipath_err_t val; u64 extsval; extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus); if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST)) ipath_dev_err(dd, "MemBIST did not complete!\n"); if (extsval & INFINIPATH_EXTS_MEMBIST_CORRECT) ipath_dbg("MemBIST corrected\n"); ipath_check_htlink(dd); /* barring bugs, all hwerrors become interrupts, which can */ val = -1LL; /* don't look at crc lane1 if 8 bit */ if (dd->ipath_flags & IPATH_8BIT_IN_HT0) val &= ~infinipath_hwe_htclnkabyte1crcerr; /* don't look at crc lane1 if 8 bit */ if (dd->ipath_flags & IPATH_8BIT_IN_HT1) val &= ~infinipath_hwe_htclnkbbyte1crcerr; /* * disable RXDSYNCMEMPARITY because external serdes is unused, * and therefore the logic will never be used or initialized, * and uninitialized state will normally result in this error * being asserted. Similarly for the external serdess pll * lock signal. */ val &= ~(INFINIPATH_HWE_SERDESPLLFAILED | INFINIPATH_HWE_RXDSYNCMEMPARITYERR); /* * Disable MISCERR4 because of an inversion in the HT core * logic checking for errors that cause this bit to be set. * The errata can also cause the protocol error bit to be set * in the HT config space linkerror register(s). 
*/ val &= ~INFINIPATH_HWE_HTCMISCERR4; /* * PLL ignored because unused MDIO interface has a logic problem */ if (dd->ipath_boardrev == 4 || dd->ipath_boardrev == 9) val &= ~INFINIPATH_HWE_SERDESPLLFAILED; dd->ipath_hwerrmask = val; } /** * ipath_ht_bringup_serdes - bring up the serdes * @dd: the infinipath device */ static int ipath_ht_bringup_serdes(struct ipath_devdata *dd) { u64 val, config1; int ret = 0, change = 0; ipath_dbg("Trying to bringup serdes\n"); if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) & INFINIPATH_HWE_SERDESPLLFAILED) { ipath_dbg("At start, serdes PLL failed bit set in " "hwerrstatus, clearing and continuing\n"); ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, INFINIPATH_HWE_SERDESPLLFAILED); } val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0); config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1); ipath_cdbg(VERBOSE, "Initial serdes status is config0=%llx " "config1=%llx, sstatus=%llx xgxs %llx\n", (unsigned long long) val, (unsigned long long) config1, (unsigned long long) ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus), (unsigned long long) ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig)); /* force reset on */ val |= INFINIPATH_SERDC0_RESET_PLL /* | INFINIPATH_SERDC0_RESET_MASK */ ; ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); udelay(15); /* need pll reset set at least for a bit */ if (val & INFINIPATH_SERDC0_RESET_PLL) { u64 val2 = val &= ~INFINIPATH_SERDC0_RESET_PLL; /* set lane resets, and tx idle, during pll reset */ val2 |= INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE; ipath_cdbg(VERBOSE, "Clearing serdes PLL reset (writing " "%llx)\n", (unsigned long long) val2); ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val2); /* * be sure chip saw it */ val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch); /* * need pll reset clear at least 11 usec before lane * resets cleared; give it a few more */ udelay(15); val = val2; /* for check below */ } if (val & (INFINIPATH_SERDC0_RESET_PLL | INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE)) { val &= ~(INFINIPATH_SERDC0_RESET_PLL | INFINIPATH_SERDC0_RESET_MASK | INFINIPATH_SERDC0_TXIDLE); /* clear them */ ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); } val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig); if (val & INFINIPATH_XGXS_RESET) { /* normally true after boot */ val &= ~INFINIPATH_XGXS_RESET; change = 1; } if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) & INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) { /* need to compensate for Tx inversion in partner */ val &= ~(INFINIPATH_XGXS_RX_POL_MASK << INFINIPATH_XGXS_RX_POL_SHIFT); val |= dd->ipath_rx_pol_inv << INFINIPATH_XGXS_RX_POL_SHIFT; change = 1; } if (change) ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0); /* clear current and de-emphasis bits */ config1 &= ~0x0ffffffff00ULL; /* set current to 20ma */ config1 |= 0x00000000000ULL; /* set de-emphasis to -5.68dB */ config1 |= 0x0cccc000000ULL; ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig1, config1); ipath_cdbg(VERBOSE, "After setup: serdes status is config0=%llx " "config1=%llx, sstatus=%llx xgxs %llx\n", (unsigned long long) val, (unsigned long long) config1, (unsigned long long) ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus), (unsigned long long) ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig)); return ret; /* for now, say we always succeeded */ } /** * ipath_ht_quiet_serdes - set serdes to 
txidle * @dd: the infinipath device * driver is being unloaded */ static void ipath_ht_quiet_serdes(struct ipath_devdata *dd) { u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0); val |= INFINIPATH_SERDC0_TXIDLE; ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n", (unsigned long long) val); ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val); } /** * ipath_pe_put_tid - write a TID in chip * @dd: the infinipath device * @tidptr: pointer to the expected TID (in chip) to update * @tidtype: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing * * This exists as a separate routine to allow for special locking etc. * It's used for both the full cleanup on exit, as well as the normal * setup and teardown. */ static void ipath_ht_put_tid(struct ipath_devdata *dd, u64 __iomem *tidptr, u32 type, unsigned long pa) { if (!dd->ipath_kregbase) return; if (pa != dd->ipath_tidinvalid) { if (unlikely((pa & ~INFINIPATH_RT_ADDR_MASK))) { dev_info(&dd->pcidev->dev, "physaddr %lx has more than " "40 bits, using only 40!!!\n", pa); pa &= INFINIPATH_RT_ADDR_MASK; } if (type == RCVHQ_RCV_TYPE_EAGER) pa |= dd->ipath_tidtemplate; else { /* in words (fixed, full page). */ u64 lenvalid = PAGE_SIZE >> 2; lenvalid <<= INFINIPATH_RT_BUFSIZE_SHIFT; pa |= lenvalid | INFINIPATH_RT_VALID; } } writeq(pa, tidptr); } /** * ipath_ht_clear_tid - clear all TID entries for a port, expected and eager * @dd: the infinipath device * @port: the port * * Used from ipath_close(), and at chip initialization. */ static void ipath_ht_clear_tids(struct ipath_devdata *dd, unsigned port) { u64 __iomem *tidbase; int i; if (!dd->ipath_kregbase) return; ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port); /* * need to invalidate all of the expected TID entries for this * port, so we don't have valid entries that might somehow get * used (early in next use of this port, or through some bug) */ tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) + dd->ipath_rcvtidbase + port * dd->ipath_rcvtidcnt * sizeof(*tidbase)); for (i = 0; i < dd->ipath_rcvtidcnt; i++) ipath_ht_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED, dd->ipath_tidinvalid); tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) + dd->ipath_rcvegrbase + port * dd->ipath_rcvegrcnt * sizeof(*tidbase)); for (i = 0; i < dd->ipath_rcvegrcnt; i++) ipath_ht_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER, dd->ipath_tidinvalid); } /** * ipath_ht_tidtemplate - setup constants for TID updates * @dd: the infinipath device * * We setup stuff that we use a lot, to avoid calculating each time */ static void ipath_ht_tidtemplate(struct ipath_devdata *dd) { dd->ipath_tidtemplate = dd->ipath_ibmaxlen >> 2; dd->ipath_tidtemplate <<= INFINIPATH_RT_BUFSIZE_SHIFT; dd->ipath_tidtemplate |= INFINIPATH_RT_VALID; /* * work around chip errata bug 7358, by marking invalid tids * as having max length */ dd->ipath_tidinvalid = (-1LL & INFINIPATH_RT_BUFSIZE_MASK) << INFINIPATH_RT_BUFSIZE_SHIFT; } static int ipath_ht_early_init(struct ipath_devdata *dd) { u32 __iomem *piobuf; u32 pioincr, val32; int i; /* * one cache line; long IB headers will spill over into received * buffer */ dd->ipath_rcvhdrentsize = 16; dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE; /* * For HT, we allocate a somewhat overly large eager buffer, * such that we can guarantee that we can receive the largest * packet that we can send out. 
To truly support a 4KB MTU, * we need to bump this to a large value. To date, other than * testing, we have never encountered an HCA that can really * send 4KB MTU packets, so we do not handle that (we'll get * errors interrupts if we ever see one). */ dd->ipath_rcvegrbufsize = dd->ipath_piosize2k; /* * the min() check here is currently a nop, but it may not * always be, depending on just how we do ipath_rcvegrbufsize */ dd->ipath_ibmaxlen = min(dd->ipath_piosize2k, dd->ipath_rcvegrbufsize); dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen; ipath_ht_tidtemplate(dd); /* * zero all the TID entries at startup. We do this for sanity, * in case of a previous driver crash of some kind, and also * because the chip powers up with these memories in an unknown * state. Use portcnt, not cfgports, since this is for the * full chip, not for current (possibly different) configuration * value. * Chip Errata bug 6447 */ for (val32 = 0; val32 < dd->ipath_portcnt; val32++) ipath_ht_clear_tids(dd, val32); /* * write the pbc of each buffer, to be sure it's initialized, then * cancel all the buffers, and also abort any packets that might * have been in flight for some reason (the latter is for driver * unload/reload, but isn't a bad idea at first init). PIO send * isn't enabled at this point, so there is no danger of sending * these out on the wire. * Chip Errata bug 6610 */ piobuf = (u32 __iomem *) (((char __iomem *)(dd->ipath_kregbase)) + dd->ipath_piobufbase); pioincr = dd->ipath_palign / sizeof(*piobuf); for (i = 0; i < dd->ipath_piobcnt2k; i++) { /* * reasonable word count, just to init pbc */ writel(16, piobuf); piobuf += pioincr; } ipath_get_eeprom_info(dd); if (dd->ipath_boardrev == 5) { /* * Later production QHT7040 has same changes as QHT7140, so * can use GPIO interrupts. They have serial #'s starting * with 128, rather than 112. */ if (dd->ipath_serial[0] == '1' && dd->ipath_serial[1] == '2' && dd->ipath_serial[2] == '8') dd->ipath_flags |= IPATH_GPIO_INTR; else { ipath_dev_err(dd, "Unsupported InfiniPath board " "(serial number %.16s)!\n", dd->ipath_serial); return 1; } } if (dd->ipath_minrev >= 4) { /* Rev4+ reports extra errors via internal GPIO pins */ dd->ipath_flags |= IPATH_GPIO_ERRINTRS; dd->ipath_gpio_mask |= IPATH_GPIO_ERRINTR_MASK; ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask, dd->ipath_gpio_mask); } return 0; } /** * ipath_init_ht_get_base_info - set chip-specific flags for user code * @dd: the infinipath device * @kbase: ipath_base_info pointer * * We set the PCIE flag because the lower bandwidth on PCIe vs * HyperTransport can affect some user packet algorithms. 
*/ static int ipath_ht_get_base_info(struct ipath_portdata *pd, void *kbase) { struct ipath_base_info *kinfo = kbase; kinfo->spi_runtime_flags |= IPATH_RUNTIME_HT | IPATH_RUNTIME_PIO_REGSWAPPED; if (pd->port_dd->ipath_minrev < 4) kinfo->spi_runtime_flags |= IPATH_RUNTIME_RCVHDR_COPY; return 0; } static void ipath_ht_free_irq(struct ipath_devdata *dd) { free_irq(dd->ipath_irq, dd); ht_destroy_irq(dd->ipath_irq); dd->ipath_irq = 0; dd->ipath_intconfig = 0; } static struct ipath_message_header * ipath_ht_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr) { return (struct ipath_message_header *) &rhf_addr[sizeof(u64) / sizeof(u32)]; } static void ipath_ht_config_ports(struct ipath_devdata *dd, ushort cfgports) { dd->ipath_portcnt = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt); dd->ipath_p0_rcvegrcnt = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt); } static void ipath_ht_read_counters(struct ipath_devdata *dd, struct infinipath_counters *cntrs) { cntrs->LBIntCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBIntCnt)); cntrs->LBFlowStallCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBFlowStallCnt)); cntrs->TxSDmaDescCnt = 0; cntrs->TxUnsupVLErrCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnsupVLErrCnt)); cntrs->TxDataPktCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDataPktCnt)); cntrs->TxFlowPktCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowPktCnt)); cntrs->TxDwordCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDwordCnt)); cntrs->TxLenErrCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxLenErrCnt)); cntrs->TxMaxMinLenErrCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxMaxMinLenErrCnt)); cntrs->TxUnderrunCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnderrunCnt)); cntrs->TxFlowStallCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowStallCnt)); cntrs->TxDroppedPktCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDroppedPktCnt)); cntrs->RxDroppedPktCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDroppedPktCnt)); cntrs->RxDataPktCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDataPktCnt)); cntrs->RxFlowPktCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowPktCnt)); cntrs->RxDwordCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDwordCnt)); cntrs->RxLenErrCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLenErrCnt)); cntrs->RxMaxMinLenErrCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxMaxMinLenErrCnt)); cntrs->RxICRCErrCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxICRCErrCnt)); cntrs->RxVCRCErrCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxVCRCErrCnt)); cntrs->RxFlowCtrlErrCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowCtrlErrCnt)); cntrs->RxBadFormatCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBadFormatCnt)); cntrs->RxLinkProblemCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLinkProblemCnt)); cntrs->RxEBPCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxEBPCnt)); cntrs->RxLPCRCErrCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLPCRCErrCnt)); cntrs->RxBufOvflCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBufOvflCnt)); cntrs->RxTIDFullErrCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDFullErrCnt)); cntrs->RxTIDValidErrCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDValidErrCnt)); cntrs->RxPKeyMismatchCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxPKeyMismatchCnt)); cntrs->RxP0HdrEgrOvflCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt)); cntrs->RxP1HdrEgrOvflCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP1HdrEgrOvflCnt)); cntrs->RxP2HdrEgrOvflCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP2HdrEgrOvflCnt)); cntrs->RxP3HdrEgrOvflCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP3HdrEgrOvflCnt)); 
cntrs->RxP4HdrEgrOvflCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP4HdrEgrOvflCnt)); cntrs->RxP5HdrEgrOvflCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP5HdrEgrOvflCnt)); cntrs->RxP6HdrEgrOvflCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP6HdrEgrOvflCnt)); cntrs->RxP7HdrEgrOvflCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP7HdrEgrOvflCnt)); cntrs->RxP8HdrEgrOvflCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP8HdrEgrOvflCnt)); cntrs->RxP9HdrEgrOvflCnt = 0; cntrs->RxP10HdrEgrOvflCnt = 0; cntrs->RxP11HdrEgrOvflCnt = 0; cntrs->RxP12HdrEgrOvflCnt = 0; cntrs->RxP13HdrEgrOvflCnt = 0; cntrs->RxP14HdrEgrOvflCnt = 0; cntrs->RxP15HdrEgrOvflCnt = 0; cntrs->RxP16HdrEgrOvflCnt = 0; cntrs->IBStatusChangeCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBStatusChangeCnt)); cntrs->IBLinkErrRecoveryCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt)); cntrs->IBLinkDownedCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkDownedCnt)); cntrs->IBSymbolErrCnt = ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBSymbolErrCnt)); cntrs->RxVL15DroppedPktCnt = 0; cntrs->RxOtherLocalPhyErrCnt = 0; cntrs->PcieRetryBufDiagQwordCnt = 0; cntrs->ExcessBufferOvflCnt = dd->ipath_overrun_thresh_errs; cntrs->LocalLinkIntegrityErrCnt = (dd->ipath_flags & IPATH_GPIO_ERRINTRS) ? dd->ipath_lli_errs : dd->ipath_lli_errors; cntrs->RxVlErrCnt = 0; cntrs->RxDlidFltrCnt = 0; } /* no interrupt fallback for these chips */ static int ipath_ht_nointr_fallback(struct ipath_devdata *dd) { return 0; } /* * reset the XGXS (between serdes and IBC). Slightly less intrusive * than resetting the IBC or external link state, and useful in some * cases to cause some retraining. To do this right, we reset IBC * as well. */ static void ipath_ht_xgxs_reset(struct ipath_devdata *dd) { u64 val, prev_val; prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig); val = prev_val | INFINIPATH_XGXS_RESET; prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */ ipath_write_kreg(dd, dd->ipath_kregs->kr_control, dd->ipath_control & ~INFINIPATH_C_LINKENABLE); ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val); ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch); ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val); ipath_write_kreg(dd, dd->ipath_kregs->kr_control, dd->ipath_control); } static int ipath_ht_get_ib_cfg(struct ipath_devdata *dd, int which) { int ret; switch (which) { case IPATH_IB_CFG_LWID: ret = dd->ipath_link_width_active; break; case IPATH_IB_CFG_SPD: ret = dd->ipath_link_speed_active; break; case IPATH_IB_CFG_LWID_ENB: ret = dd->ipath_link_width_enabled; break; case IPATH_IB_CFG_SPD_ENB: ret = dd->ipath_link_speed_enabled; break; default: ret = -ENOTSUPP; break; } return ret; } /* we assume range checking is already done, if needed */ static int ipath_ht_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val) { int ret = 0; if (which == IPATH_IB_CFG_LWID_ENB) dd->ipath_link_width_enabled = val; else if (which == IPATH_IB_CFG_SPD_ENB) dd->ipath_link_speed_enabled = val; else ret = -ENOTSUPP; return ret; } static void ipath_ht_config_jint(struct ipath_devdata *dd, u16 a, u16 b) { } static int ipath_ht_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs) { ipath_setup_ht_setextled(dd, ipath_ib_linkstate(dd, ibcs), ipath_ib_linktrstate(dd, ibcs)); return 0; } /** * ipath_init_iba6110_funcs - set up the chip-specific function pointers * @dd: the infinipath device * * This is global, and is called directly at init to set up the * chip-specific function pointers for later use. 
*/ void ipath_init_iba6110_funcs(struct ipath_devdata *dd) { dd->ipath_f_intrsetup = ipath_ht_intconfig; dd->ipath_f_bus = ipath_setup_ht_config; dd->ipath_f_reset = ipath_setup_ht_reset; dd->ipath_f_get_boardname = ipath_ht_boardname; dd->ipath_f_init_hwerrors = ipath_ht_init_hwerrors; dd->ipath_f_early_init = ipath_ht_early_init; dd->ipath_f_handle_hwerrors = ipath_ht_handle_hwerrors; dd->ipath_f_quiet_serdes = ipath_ht_quiet_serdes; dd->ipath_f_bringup_serdes = ipath_ht_bringup_serdes; dd->ipath_f_clear_tids = ipath_ht_clear_tids; dd->ipath_f_put_tid = ipath_ht_put_tid; dd->ipath_f_cleanup = ipath_setup_ht_cleanup; dd->ipath_f_setextled = ipath_setup_ht_setextled; dd->ipath_f_get_base_info = ipath_ht_get_base_info; dd->ipath_f_free_irq = ipath_ht_free_irq; dd->ipath_f_tidtemplate = ipath_ht_tidtemplate; dd->ipath_f_intr_fallback = ipath_ht_nointr_fallback; dd->ipath_f_get_msgheader = ipath_ht_get_msgheader; dd->ipath_f_config_ports = ipath_ht_config_ports; dd->ipath_f_read_counters = ipath_ht_read_counters; dd->ipath_f_xgxs_reset = ipath_ht_xgxs_reset; dd->ipath_f_get_ib_cfg = ipath_ht_get_ib_cfg; dd->ipath_f_set_ib_cfg = ipath_ht_set_ib_cfg; dd->ipath_f_config_jint = ipath_ht_config_jint; dd->ipath_f_ib_updown = ipath_ht_ib_updown; /* * initialize chip-specific variables */ ipath_init_ht_variables(dd); }
gpl-2.0
high1/android_kernel_htc_golfu_wifi
drivers/video/vesafb.c
10280
15256
/* * framebuffer driver for VBE 2.0 compliant graphic boards * * switching to graphics mode happens at boot time (while * running in real mode, see arch/i386/boot/video.S). * * (c) 1998 Gerd Knorr <kraxel@goldbach.in-berlin.de> * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/delay.h> #include <linux/fb.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/screen_info.h> #include <video/vga.h> #include <asm/io.h> #include <asm/mtrr.h> #define dac_reg (0x3c8) #define dac_val (0x3c9) /* --------------------------------------------------------------------- */ static struct fb_var_screeninfo vesafb_defined __initdata = { .activate = FB_ACTIVATE_NOW, .height = -1, .width = -1, .right_margin = 32, .upper_margin = 16, .lower_margin = 4, .vsync_len = 4, .vmode = FB_VMODE_NONINTERLACED, }; static struct fb_fix_screeninfo vesafb_fix __initdata = { .id = "VESA VGA", .type = FB_TYPE_PACKED_PIXELS, .accel = FB_ACCEL_NONE, }; static int inverse __read_mostly; static int mtrr __read_mostly; /* disable mtrr */ static int vram_remap __initdata; /* Set amount of memory to be used */ static int vram_total __initdata; /* Set total amount of memory */ static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */ static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */ static void (*pmi_start)(void) __read_mostly; static void (*pmi_pal) (void) __read_mostly; static int depth __read_mostly; static int vga_compat __read_mostly; /* --------------------------------------------------------------------- */ static int vesafb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { #ifdef __i386__ int offset; offset = (var->yoffset * info->fix.line_length + var->xoffset) / 4; __asm__ __volatile__( "call *(%%edi)" : /* no return value */ : "a" (0x4f07), /* EAX */ "b" (0), /* EBX */ "c" (offset), /* ECX */ "d" (offset >> 16), /* EDX */ "D" (&pmi_start)); /* EDI */ #endif return 0; } static int vesa_setpalette(int regno, unsigned red, unsigned green, unsigned blue) { int shift = 16 - depth; int err = -EINVAL; /* * Try VGA registers first... */ if (vga_compat) { outb_p(regno, dac_reg); outb_p(red >> shift, dac_val); outb_p(green >> shift, dac_val); outb_p(blue >> shift, dac_val); err = 0; } #ifdef __i386__ /* * Fallback to the PMI.... */ if (err && pmi_setpal) { struct { u_char blue, green, red, pad; } entry; entry.red = red >> shift; entry.green = green >> shift; entry.blue = blue >> shift; entry.pad = 0; __asm__ __volatile__( "call *(%%esi)" : /* no return value */ : "a" (0x4f09), /* EAX */ "b" (0), /* EBX */ "c" (1), /* ECX */ "d" (regno), /* EDX */ "D" (&entry), /* EDI */ "S" (&pmi_pal)); /* ESI */ err = 0; } #endif return err; } static int vesafb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info) { int err = 0; /* * Set a single color register. The values supplied are * already rounded down to the hardware's capabilities * (according to the entries in the `var' structure). Return * != 0 for invalid regno. 
*/ if (regno >= info->cmap.len) return 1; if (info->var.bits_per_pixel == 8) err = vesa_setpalette(regno,red,green,blue); else if (regno < 16) { switch (info->var.bits_per_pixel) { case 16: if (info->var.red.offset == 10) { /* 1:5:5:5 */ ((u32*) (info->pseudo_palette))[regno] = ((red & 0xf800) >> 1) | ((green & 0xf800) >> 6) | ((blue & 0xf800) >> 11); } else { /* 0:5:6:5 */ ((u32*) (info->pseudo_palette))[regno] = ((red & 0xf800) ) | ((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11); } break; case 24: case 32: red >>= 8; green >>= 8; blue >>= 8; ((u32 *)(info->pseudo_palette))[regno] = (red << info->var.red.offset) | (green << info->var.green.offset) | (blue << info->var.blue.offset); break; } } return err; } static void vesafb_destroy(struct fb_info *info) { fb_dealloc_cmap(&info->cmap); if (info->screen_base) iounmap(info->screen_base); release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size); framebuffer_release(info); } static struct fb_ops vesafb_ops = { .owner = THIS_MODULE, .fb_destroy = vesafb_destroy, .fb_setcolreg = vesafb_setcolreg, .fb_pan_display = vesafb_pan_display, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, }; static int __init vesafb_setup(char *options) { char *this_opt; if (!options || !*options) return 0; while ((this_opt = strsep(&options, ",")) != NULL) { if (!*this_opt) continue; if (! strcmp(this_opt, "inverse")) inverse=1; else if (! strcmp(this_opt, "redraw")) ypan=0; else if (! strcmp(this_opt, "ypan")) ypan=1; else if (! strcmp(this_opt, "ywrap")) ypan=2; else if (! strcmp(this_opt, "vgapal")) pmi_setpal=0; else if (! strcmp(this_opt, "pmipal")) pmi_setpal=1; else if (! strncmp(this_opt, "mtrr:", 5)) mtrr = simple_strtoul(this_opt+5, NULL, 0); else if (! strcmp(this_opt, "nomtrr")) mtrr=0; else if (! strncmp(this_opt, "vtotal:", 7)) vram_total = simple_strtoul(this_opt+7, NULL, 0); else if (! strncmp(this_opt, "vremap:", 7)) vram_remap = simple_strtoul(this_opt+7, NULL, 0); } return 0; } static int __init vesafb_probe(struct platform_device *dev) { struct fb_info *info; int i, err; unsigned int size_vmode; unsigned int size_remap; unsigned int size_total; if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB) return -ENODEV; vga_compat = (screen_info.capabilities & 2) ? 0 : 1; vesafb_fix.smem_start = screen_info.lfb_base; vesafb_defined.bits_per_pixel = screen_info.lfb_depth; if (15 == vesafb_defined.bits_per_pixel) vesafb_defined.bits_per_pixel = 16; vesafb_defined.xres = screen_info.lfb_width; vesafb_defined.yres = screen_info.lfb_height; vesafb_fix.line_length = screen_info.lfb_linelength; vesafb_fix.visual = (vesafb_defined.bits_per_pixel == 8) ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; /* size_vmode -- that is the amount of memory needed for the * used video mode, i.e. the minimum amount of * memory we need. */ size_vmode = vesafb_defined.yres * vesafb_fix.line_length; /* size_total -- all video memory we have. Used for mtrr * entries, resource allocation and bounds * checking. */ size_total = screen_info.lfb_size * 65536; if (vram_total) size_total = vram_total * 1024 * 1024; if (size_total < size_vmode) size_total = size_vmode; /* size_remap -- the amount of video memory we are going to * use for vesafb. With modern cards it is no * option to simply use size_total as that * wastes plenty of kernel address space. 
*/ size_remap = size_vmode * 2; if (vram_remap) size_remap = vram_remap * 1024 * 1024; if (size_remap < size_vmode) size_remap = size_vmode; if (size_remap > size_total) size_remap = size_total; vesafb_fix.smem_len = size_remap; #ifndef __i386__ screen_info.vesapm_seg = 0; #endif if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) { printk(KERN_WARNING "vesafb: cannot reserve video memory at 0x%lx\n", vesafb_fix.smem_start); /* We cannot make this fatal. Sometimes this comes from magic spaces our resource handlers simply don't know about */ } info = framebuffer_alloc(sizeof(u32) * 256, &dev->dev); if (!info) { release_mem_region(vesafb_fix.smem_start, size_total); return -ENOMEM; } info->pseudo_palette = info->par; info->par = NULL; /* set vesafb aperture size for generic probing */ info->apertures = alloc_apertures(1); if (!info->apertures) { err = -ENOMEM; goto err; } info->apertures->ranges[0].base = screen_info.lfb_base; info->apertures->ranges[0].size = size_total; printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n", vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages); if (screen_info.vesapm_seg) { printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n", screen_info.vesapm_seg,screen_info.vesapm_off); } if (screen_info.vesapm_seg < 0xc000) ypan = pmi_setpal = 0; /* not available or some DOS TSR ... */ if (ypan || pmi_setpal) { unsigned short *pmi_base; pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off); pmi_start = (void*)((char*)pmi_base + pmi_base[1]); pmi_pal = (void*)((char*)pmi_base + pmi_base[2]); printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal); if (pmi_base[3]) { printk(KERN_INFO "vesafb: pmi: ports = "); for (i = pmi_base[3]/2; pmi_base[i] != 0xffff; i++) printk("%x ",pmi_base[i]); printk("\n"); if (pmi_base[i] != 0xffff) { /* * memory areas not supported (yet?) * * Rules are: we have to set up a descriptor for the requested * memory area and pass it in the ES register to the BIOS function. */ printk(KERN_INFO "vesafb: can't handle memory requests, pmi disabled\n"); ypan = pmi_setpal = 0; } } } if (vesafb_defined.bits_per_pixel == 8 && !pmi_setpal && !vga_compat) { printk(KERN_WARNING "vesafb: hardware palette is unchangeable,\n" " colors may be incorrect\n"); vesafb_fix.visual = FB_VISUAL_STATIC_PSEUDOCOLOR; } vesafb_defined.xres_virtual = vesafb_defined.xres; vesafb_defined.yres_virtual = vesafb_fix.smem_len / vesafb_fix.line_length; if (ypan && vesafb_defined.yres_virtual > vesafb_defined.yres) { printk(KERN_INFO "vesafb: scrolling: %s using protected mode interface, yres_virtual=%d\n", (ypan > 1) ? 
"ywrap" : "ypan",vesafb_defined.yres_virtual); } else { printk(KERN_INFO "vesafb: scrolling: redraw\n"); vesafb_defined.yres_virtual = vesafb_defined.yres; ypan = 0; } /* some dummy values for timing to make fbset happy */ vesafb_defined.pixclock = 10000000 / vesafb_defined.xres * 1000 / vesafb_defined.yres; vesafb_defined.left_margin = (vesafb_defined.xres / 8) & 0xf8; vesafb_defined.hsync_len = (vesafb_defined.xres / 8) & 0xf8; vesafb_defined.red.offset = screen_info.red_pos; vesafb_defined.red.length = screen_info.red_size; vesafb_defined.green.offset = screen_info.green_pos; vesafb_defined.green.length = screen_info.green_size; vesafb_defined.blue.offset = screen_info.blue_pos; vesafb_defined.blue.length = screen_info.blue_size; vesafb_defined.transp.offset = screen_info.rsvd_pos; vesafb_defined.transp.length = screen_info.rsvd_size; if (vesafb_defined.bits_per_pixel <= 8) { depth = vesafb_defined.green.length; vesafb_defined.red.length = vesafb_defined.green.length = vesafb_defined.blue.length = vesafb_defined.bits_per_pixel; } printk(KERN_INFO "vesafb: %s: " "size=%d:%d:%d:%d, shift=%d:%d:%d:%d\n", (vesafb_defined.bits_per_pixel > 8) ? "Truecolor" : (vga_compat || pmi_setpal) ? "Pseudocolor" : "Static Pseudocolor", screen_info.rsvd_size, screen_info.red_size, screen_info.green_size, screen_info.blue_size, screen_info.rsvd_pos, screen_info.red_pos, screen_info.green_pos, screen_info.blue_pos); vesafb_fix.ypanstep = ypan ? 1 : 0; vesafb_fix.ywrapstep = (ypan>1) ? 1 : 0; /* request failure does not faze us, as vgacon probably has this * region already (FIXME) */ request_region(0x3c0, 32, "vesafb"); #ifdef CONFIG_MTRR if (mtrr) { unsigned int temp_size = size_total; unsigned int type = 0; switch (mtrr) { case 1: type = MTRR_TYPE_UNCACHABLE; break; case 2: type = MTRR_TYPE_WRBACK; break; case 3: type = MTRR_TYPE_WRCOMB; break; case 4: type = MTRR_TYPE_WRTHROUGH; break; default: type = 0; break; } if (type) { int rc; /* Find the largest power-of-two */ temp_size = roundup_pow_of_two(temp_size); /* Try and find a power of two to add */ do { rc = mtrr_add(vesafb_fix.smem_start, temp_size, type, 1); temp_size >>= 1; } while (temp_size >= PAGE_SIZE && rc == -EINVAL); } } #endif switch (mtrr) { case 1: /* uncachable */ info->screen_base = ioremap_nocache(vesafb_fix.smem_start, vesafb_fix.smem_len); break; case 2: /* write-back */ info->screen_base = ioremap_cache(vesafb_fix.smem_start, vesafb_fix.smem_len); break; case 3: /* write-combining */ info->screen_base = ioremap_wc(vesafb_fix.smem_start, vesafb_fix.smem_len); break; case 4: /* write-through */ default: info->screen_base = ioremap(vesafb_fix.smem_start, vesafb_fix.smem_len); break; } if (!info->screen_base) { printk(KERN_ERR "vesafb: abort, cannot ioremap video memory 0x%x @ 0x%lx\n", vesafb_fix.smem_len, vesafb_fix.smem_start); err = -EIO; goto err; } printk(KERN_INFO "vesafb: framebuffer at 0x%lx, mapped to 0x%p, " "using %dk, total %dk\n", vesafb_fix.smem_start, info->screen_base, size_remap/1024, size_total/1024); info->fbops = &vesafb_ops; info->var = vesafb_defined; info->fix = vesafb_fix; info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE | (ypan ? 
FBINFO_HWACCEL_YPAN : 0); if (!ypan) info->fbops->fb_pan_display = NULL; if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) { err = -ENOMEM; goto err; } if (register_framebuffer(info)<0) { err = -EINVAL; fb_dealloc_cmap(&info->cmap); goto err; } printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, info->fix.id); return 0; err: if (info->screen_base) iounmap(info->screen_base); framebuffer_release(info); release_mem_region(vesafb_fix.smem_start, size_total); return err; } static struct platform_driver vesafb_driver = { .driver = { .name = "vesafb", }, }; static struct platform_device *vesafb_device; static int __init vesafb_init(void) { int ret; char *option = NULL; /* ignore error return of fb_get_options */ fb_get_options("vesafb", &option); vesafb_setup(option); vesafb_device = platform_device_alloc("vesafb", 0); if (!vesafb_device) return -ENOMEM; ret = platform_device_add(vesafb_device); if (!ret) { ret = platform_driver_probe(&vesafb_driver, vesafb_probe); if (ret) platform_device_del(vesafb_device); } if (ret) { platform_device_put(vesafb_device); vesafb_device = NULL; } return ret; } module_init(vesafb_init); MODULE_LICENSE("GPL");
gpl-2.0
malvira/lpc31xx
crypto/rmd320.c
10536
13371
/* * Cryptographic API. * * RIPEMD-320 - RACE Integrity Primitives Evaluation Message Digest. * * Based on the reference implementation by Antoon Bosselaers, ESAT-COSIC * * Copyright (c) 2008 Adrian-Ken Rueegsegger <ken@codelabs.ch> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/types.h> #include <asm/byteorder.h> #include "ripemd.h" struct rmd320_ctx { u64 byte_count; u32 state[10]; __le32 buffer[16]; }; #define K1 RMD_K1 #define K2 RMD_K2 #define K3 RMD_K3 #define K4 RMD_K4 #define K5 RMD_K5 #define KK1 RMD_K6 #define KK2 RMD_K7 #define KK3 RMD_K8 #define KK4 RMD_K9 #define KK5 RMD_K1 #define F1(x, y, z) (x ^ y ^ z) /* XOR */ #define F2(x, y, z) (z ^ (x & (y ^ z))) /* x ? y : z */ #define F3(x, y, z) ((x | ~y) ^ z) #define F4(x, y, z) (y ^ (z & (x ^ y))) /* z ? x : y */ #define F5(x, y, z) (x ^ (y | ~z)) #define ROUND(a, b, c, d, e, f, k, x, s) { \ (a) += f((b), (c), (d)) + le32_to_cpup(&(x)) + (k); \ (a) = rol32((a), (s)) + (e); \ (c) = rol32((c), 10); \ } static void rmd320_transform(u32 *state, const __le32 *in) { u32 aa, bb, cc, dd, ee, aaa, bbb, ccc, ddd, eee, tmp; /* Initialize left lane */ aa = state[0]; bb = state[1]; cc = state[2]; dd = state[3]; ee = state[4]; /* Initialize right lane */ aaa = state[5]; bbb = state[6]; ccc = state[7]; ddd = state[8]; eee = state[9]; /* round 1: left lane */ ROUND(aa, bb, cc, dd, ee, F1, K1, in[0], 11); ROUND(ee, aa, bb, cc, dd, F1, K1, in[1], 14); ROUND(dd, ee, aa, bb, cc, F1, K1, in[2], 15); ROUND(cc, dd, ee, aa, bb, F1, K1, in[3], 12); ROUND(bb, cc, dd, ee, aa, F1, K1, in[4], 5); ROUND(aa, bb, cc, dd, ee, F1, K1, in[5], 8); ROUND(ee, aa, bb, cc, dd, F1, K1, in[6], 7); ROUND(dd, ee, aa, bb, cc, F1, K1, in[7], 9); ROUND(cc, dd, ee, aa, bb, F1, K1, in[8], 11); ROUND(bb, cc, dd, ee, aa, F1, K1, in[9], 13); ROUND(aa, bb, cc, dd, ee, F1, K1, in[10], 14); ROUND(ee, aa, bb, cc, dd, F1, K1, in[11], 15); ROUND(dd, ee, aa, bb, cc, F1, K1, in[12], 6); ROUND(cc, dd, ee, aa, bb, F1, K1, in[13], 7); ROUND(bb, cc, dd, ee, aa, F1, K1, in[14], 9); ROUND(aa, bb, cc, dd, ee, F1, K1, in[15], 8); /* round 1: right lane */ ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[5], 8); ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[14], 9); ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[7], 9); ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[0], 11); ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[9], 13); ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[2], 15); ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[11], 15); ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[4], 5); ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[13], 7); ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[6], 7); ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[15], 8); ROUND(eee, aaa, bbb, ccc, ddd, F5, KK1, in[8], 11); ROUND(ddd, eee, aaa, bbb, ccc, F5, KK1, in[1], 14); ROUND(ccc, ddd, eee, aaa, bbb, F5, KK1, in[10], 14); ROUND(bbb, ccc, ddd, eee, aaa, F5, KK1, in[3], 12); ROUND(aaa, bbb, ccc, ddd, eee, F5, KK1, in[12], 6); /* Swap contents of "a" registers */ tmp = aa; aa = aaa; aaa = tmp; /* round 2: left lane" */ ROUND(ee, aa, bb, cc, dd, F2, K2, in[7], 7); ROUND(dd, ee, aa, bb, cc, F2, K2, in[4], 6); ROUND(cc, dd, ee, aa, bb, F2, K2, in[13], 8); ROUND(bb, cc, dd, ee, aa, F2, K2, in[1], 13); ROUND(aa, bb, cc, dd, ee, F2, K2, 
in[10], 11); ROUND(ee, aa, bb, cc, dd, F2, K2, in[6], 9); ROUND(dd, ee, aa, bb, cc, F2, K2, in[15], 7); ROUND(cc, dd, ee, aa, bb, F2, K2, in[3], 15); ROUND(bb, cc, dd, ee, aa, F2, K2, in[12], 7); ROUND(aa, bb, cc, dd, ee, F2, K2, in[0], 12); ROUND(ee, aa, bb, cc, dd, F2, K2, in[9], 15); ROUND(dd, ee, aa, bb, cc, F2, K2, in[5], 9); ROUND(cc, dd, ee, aa, bb, F2, K2, in[2], 11); ROUND(bb, cc, dd, ee, aa, F2, K2, in[14], 7); ROUND(aa, bb, cc, dd, ee, F2, K2, in[11], 13); ROUND(ee, aa, bb, cc, dd, F2, K2, in[8], 12); /* round 2: right lane */ ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[6], 9); ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[11], 13); ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[3], 15); ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[7], 7); ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[0], 12); ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[13], 8); ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[5], 9); ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[10], 11); ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[14], 7); ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[15], 7); ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[8], 12); ROUND(ddd, eee, aaa, bbb, ccc, F4, KK2, in[12], 7); ROUND(ccc, ddd, eee, aaa, bbb, F4, KK2, in[4], 6); ROUND(bbb, ccc, ddd, eee, aaa, F4, KK2, in[9], 15); ROUND(aaa, bbb, ccc, ddd, eee, F4, KK2, in[1], 13); ROUND(eee, aaa, bbb, ccc, ddd, F4, KK2, in[2], 11); /* Swap contents of "b" registers */ tmp = bb; bb = bbb; bbb = tmp; /* round 3: left lane" */ ROUND(dd, ee, aa, bb, cc, F3, K3, in[3], 11); ROUND(cc, dd, ee, aa, bb, F3, K3, in[10], 13); ROUND(bb, cc, dd, ee, aa, F3, K3, in[14], 6); ROUND(aa, bb, cc, dd, ee, F3, K3, in[4], 7); ROUND(ee, aa, bb, cc, dd, F3, K3, in[9], 14); ROUND(dd, ee, aa, bb, cc, F3, K3, in[15], 9); ROUND(cc, dd, ee, aa, bb, F3, K3, in[8], 13); ROUND(bb, cc, dd, ee, aa, F3, K3, in[1], 15); ROUND(aa, bb, cc, dd, ee, F3, K3, in[2], 14); ROUND(ee, aa, bb, cc, dd, F3, K3, in[7], 8); ROUND(dd, ee, aa, bb, cc, F3, K3, in[0], 13); ROUND(cc, dd, ee, aa, bb, F3, K3, in[6], 6); ROUND(bb, cc, dd, ee, aa, F3, K3, in[13], 5); ROUND(aa, bb, cc, dd, ee, F3, K3, in[11], 12); ROUND(ee, aa, bb, cc, dd, F3, K3, in[5], 7); ROUND(dd, ee, aa, bb, cc, F3, K3, in[12], 5); /* round 3: right lane */ ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[15], 9); ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[5], 7); ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[1], 15); ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[3], 11); ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[7], 8); ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[14], 6); ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[6], 6); ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[9], 14); ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[11], 12); ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[8], 13); ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[12], 5); ROUND(ccc, ddd, eee, aaa, bbb, F3, KK3, in[2], 14); ROUND(bbb, ccc, ddd, eee, aaa, F3, KK3, in[10], 13); ROUND(aaa, bbb, ccc, ddd, eee, F3, KK3, in[0], 13); ROUND(eee, aaa, bbb, ccc, ddd, F3, KK3, in[4], 7); ROUND(ddd, eee, aaa, bbb, ccc, F3, KK3, in[13], 5); /* Swap contents of "c" registers */ tmp = cc; cc = ccc; ccc = tmp; /* round 4: left lane" */ ROUND(cc, dd, ee, aa, bb, F4, K4, in[1], 11); ROUND(bb, cc, dd, ee, aa, F4, K4, in[9], 12); ROUND(aa, bb, cc, dd, ee, F4, K4, in[11], 14); ROUND(ee, aa, bb, cc, dd, F4, K4, in[10], 15); ROUND(dd, ee, aa, bb, cc, F4, K4, in[0], 14); ROUND(cc, dd, ee, aa, bb, F4, K4, in[8], 15); ROUND(bb, cc, dd, ee, aa, F4, K4, in[12], 9); ROUND(aa, bb, cc, dd, ee, F4, K4, in[4], 8); ROUND(ee, aa, bb, cc, dd, F4, K4, 
in[13], 9); ROUND(dd, ee, aa, bb, cc, F4, K4, in[3], 14); ROUND(cc, dd, ee, aa, bb, F4, K4, in[7], 5); ROUND(bb, cc, dd, ee, aa, F4, K4, in[15], 6); ROUND(aa, bb, cc, dd, ee, F4, K4, in[14], 8); ROUND(ee, aa, bb, cc, dd, F4, K4, in[5], 6); ROUND(dd, ee, aa, bb, cc, F4, K4, in[6], 5); ROUND(cc, dd, ee, aa, bb, F4, K4, in[2], 12); /* round 4: right lane */ ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[8], 15); ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[6], 5); ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[4], 8); ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[1], 11); ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[3], 14); ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[11], 14); ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[15], 6); ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[0], 14); ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[5], 6); ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[12], 9); ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[2], 12); ROUND(bbb, ccc, ddd, eee, aaa, F2, KK4, in[13], 9); ROUND(aaa, bbb, ccc, ddd, eee, F2, KK4, in[9], 12); ROUND(eee, aaa, bbb, ccc, ddd, F2, KK4, in[7], 5); ROUND(ddd, eee, aaa, bbb, ccc, F2, KK4, in[10], 15); ROUND(ccc, ddd, eee, aaa, bbb, F2, KK4, in[14], 8); /* Swap contents of "d" registers */ tmp = dd; dd = ddd; ddd = tmp; /* round 5: left lane" */ ROUND(bb, cc, dd, ee, aa, F5, K5, in[4], 9); ROUND(aa, bb, cc, dd, ee, F5, K5, in[0], 15); ROUND(ee, aa, bb, cc, dd, F5, K5, in[5], 5); ROUND(dd, ee, aa, bb, cc, F5, K5, in[9], 11); ROUND(cc, dd, ee, aa, bb, F5, K5, in[7], 6); ROUND(bb, cc, dd, ee, aa, F5, K5, in[12], 8); ROUND(aa, bb, cc, dd, ee, F5, K5, in[2], 13); ROUND(ee, aa, bb, cc, dd, F5, K5, in[10], 12); ROUND(dd, ee, aa, bb, cc, F5, K5, in[14], 5); ROUND(cc, dd, ee, aa, bb, F5, K5, in[1], 12); ROUND(bb, cc, dd, ee, aa, F5, K5, in[3], 13); ROUND(aa, bb, cc, dd, ee, F5, K5, in[8], 14); ROUND(ee, aa, bb, cc, dd, F5, K5, in[11], 11); ROUND(dd, ee, aa, bb, cc, F5, K5, in[6], 8); ROUND(cc, dd, ee, aa, bb, F5, K5, in[15], 5); ROUND(bb, cc, dd, ee, aa, F5, K5, in[13], 6); /* round 5: right lane */ ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[12], 8); ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[15], 5); ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[10], 12); ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[4], 9); ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[1], 12); ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[5], 5); ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[8], 14); ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[7], 6); ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[6], 8); ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[2], 13); ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[13], 6); ROUND(aaa, bbb, ccc, ddd, eee, F1, KK5, in[14], 5); ROUND(eee, aaa, bbb, ccc, ddd, F1, KK5, in[0], 15); ROUND(ddd, eee, aaa, bbb, ccc, F1, KK5, in[3], 13); ROUND(ccc, ddd, eee, aaa, bbb, F1, KK5, in[9], 11); ROUND(bbb, ccc, ddd, eee, aaa, F1, KK5, in[11], 11); /* Swap contents of "e" registers */ tmp = ee; ee = eee; eee = tmp; /* combine results */ state[0] += aa; state[1] += bb; state[2] += cc; state[3] += dd; state[4] += ee; state[5] += aaa; state[6] += bbb; state[7] += ccc; state[8] += ddd; state[9] += eee; return; } static int rmd320_init(struct shash_desc *desc) { struct rmd320_ctx *rctx = shash_desc_ctx(desc); rctx->byte_count = 0; rctx->state[0] = RMD_H0; rctx->state[1] = RMD_H1; rctx->state[2] = RMD_H2; rctx->state[3] = RMD_H3; rctx->state[4] = RMD_H4; rctx->state[5] = RMD_H5; rctx->state[6] = RMD_H6; rctx->state[7] = RMD_H7; rctx->state[8] = RMD_H8; rctx->state[9] = RMD_H9; memset(rctx->buffer, 0, sizeof(rctx->buffer)); 
return 0; } static int rmd320_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct rmd320_ctx *rctx = shash_desc_ctx(desc); const u32 avail = sizeof(rctx->buffer) - (rctx->byte_count & 0x3f); rctx->byte_count += len; /* Enough space in buffer? If so copy and we're done */ if (avail > len) { memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), data, len); goto out; } memcpy((char *)rctx->buffer + (sizeof(rctx->buffer) - avail), data, avail); rmd320_transform(rctx->state, rctx->buffer); data += avail; len -= avail; while (len >= sizeof(rctx->buffer)) { memcpy(rctx->buffer, data, sizeof(rctx->buffer)); rmd320_transform(rctx->state, rctx->buffer); data += sizeof(rctx->buffer); len -= sizeof(rctx->buffer); } memcpy(rctx->buffer, data, len); out: return 0; } /* Add padding and return the message digest. */ static int rmd320_final(struct shash_desc *desc, u8 *out) { struct rmd320_ctx *rctx = shash_desc_ctx(desc); u32 i, index, padlen; __le64 bits; __le32 *dst = (__le32 *)out; static const u8 padding[64] = { 0x80, }; bits = cpu_to_le64(rctx->byte_count << 3); /* Pad out to 56 mod 64 */ index = rctx->byte_count & 0x3f; padlen = (index < 56) ? (56 - index) : ((64+56) - index); rmd320_update(desc, padding, padlen); /* Append length */ rmd320_update(desc, (const u8 *)&bits, sizeof(bits)); /* Store state in digest */ for (i = 0; i < 10; i++) dst[i] = cpu_to_le32p(&rctx->state[i]); /* Wipe context */ memset(rctx, 0, sizeof(*rctx)); return 0; } static struct shash_alg alg = { .digestsize = RMD320_DIGEST_SIZE, .init = rmd320_init, .update = rmd320_update, .final = rmd320_final, .descsize = sizeof(struct rmd320_ctx), .base = { .cra_name = "rmd320", .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = RMD320_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; static int __init rmd320_mod_init(void) { return crypto_register_shash(&alg); } static void __exit rmd320_mod_fini(void) { crypto_unregister_shash(&alg); } module_init(rmd320_mod_init); module_exit(rmd320_mod_fini); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>"); MODULE_DESCRIPTION("RIPEMD-320 Message Digest");
gpl-2.0
ViciousAOSP/platform_kernel_ViciousKernel
arch/mn10300/mm/cache.c
12072
1615
/* MN10300 Cache flushing routines * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/module.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/threads.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/cacheflush.h> #include <asm/io.h> #include <asm/uaccess.h> #include <asm/smp.h> #include "cache-smp.h" EXPORT_SYMBOL(mn10300_icache_inv); EXPORT_SYMBOL(mn10300_icache_inv_range); EXPORT_SYMBOL(mn10300_icache_inv_range2); EXPORT_SYMBOL(mn10300_icache_inv_page); EXPORT_SYMBOL(mn10300_dcache_inv); EXPORT_SYMBOL(mn10300_dcache_inv_range); EXPORT_SYMBOL(mn10300_dcache_inv_range2); EXPORT_SYMBOL(mn10300_dcache_inv_page); #ifdef CONFIG_MN10300_CACHE_WBACK EXPORT_SYMBOL(mn10300_dcache_flush); EXPORT_SYMBOL(mn10300_dcache_flush_inv); EXPORT_SYMBOL(mn10300_dcache_flush_inv_range); EXPORT_SYMBOL(mn10300_dcache_flush_inv_range2); EXPORT_SYMBOL(mn10300_dcache_flush_inv_page); EXPORT_SYMBOL(mn10300_dcache_flush_range); EXPORT_SYMBOL(mn10300_dcache_flush_range2); EXPORT_SYMBOL(mn10300_dcache_flush_page); #endif /* * allow userspace to flush the instruction cache */ asmlinkage long sys_cacheflush(unsigned long start, unsigned long end) { if (end < start) return -EINVAL; flush_icache_range(start, end); return 0; }
gpl-2.0
floft/rpi-linux
scripts/dtc/libfdt/fdt_strerror.c
14888
3401
/* * libfdt - Flat Device Tree manipulation * Copyright (C) 2006 David Gibson, IBM Corporation. * * libfdt is dual licensed: you can use it either under the terms of * the GPL, or the BSD license, at your option. * * a) This library is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this library; if not, write to the Free * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, * MA 02110-1301 USA * * Alternatively, * * b) Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * 1. Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "libfdt_env.h" #include <fdt.h> #include <libfdt.h> #include "libfdt_internal.h" struct fdt_errtabent { const char *str; }; #define FDT_ERRTABENT(val) \ [(val)] = { .str = #val, } static struct fdt_errtabent fdt_errtable[] = { FDT_ERRTABENT(FDT_ERR_NOTFOUND), FDT_ERRTABENT(FDT_ERR_EXISTS), FDT_ERRTABENT(FDT_ERR_NOSPACE), FDT_ERRTABENT(FDT_ERR_BADOFFSET), FDT_ERRTABENT(FDT_ERR_BADPATH), FDT_ERRTABENT(FDT_ERR_BADSTATE), FDT_ERRTABENT(FDT_ERR_TRUNCATED), FDT_ERRTABENT(FDT_ERR_BADMAGIC), FDT_ERRTABENT(FDT_ERR_BADVERSION), FDT_ERRTABENT(FDT_ERR_BADSTRUCTURE), FDT_ERRTABENT(FDT_ERR_BADLAYOUT), }; #define FDT_ERRTABSIZE (sizeof(fdt_errtable) / sizeof(fdt_errtable[0])) const char *fdt_strerror(int errval) { if (errval > 0) return "<valid offset/length>"; else if (errval == 0) return "<no error>"; else if (errval > -FDT_ERRTABSIZE) { const char *s = fdt_errtable[-errval].str; if (s) return s; } return "<unknown error>"; }
gpl-2.0
mechkeys/qmk_firmware
lib/lufa/LUFA/Drivers/USB/Core/USBTask.c
41
2437
/* LUFA Library Copyright (C) Dean Camera, 2017. dean [at] fourwalledcubicle [dot] com www.lufa-lib.org */ /* Copyright 2017 Dean Camera (dean [at] fourwalledcubicle [dot] com) Permission to use, copy, modify, distribute, and sell this software and its documentation for any purpose is hereby granted without fee, provided that the above copyright notice appear in all copies and that both that the copyright notice and this permission notice and warranty disclaimer appear in supporting documentation, and that the name of the author not be used in advertising or publicity pertaining to distribution of the software without specific, written prior permission. The author disclaims all warranties with regard to this software, including all implied warranties of merchantability and fitness. In no event shall the author be liable for any special, indirect or consequential damages or any damages whatsoever resulting from loss of use, data or profits, whether in an action of contract, negligence or other tortious action, arising out of or in connection with the use or performance of this software. */ #define __INCLUDE_FROM_USBTASK_C #define __INCLUDE_FROM_USB_DRIVER #include "USBTask.h" volatile bool USB_IsInitialized; USB_Request_Header_t USB_ControlRequest; #if defined(USB_CAN_BE_HOST) && !defined(HOST_STATE_AS_GPIOR) volatile uint8_t USB_HostState; #endif #if defined(USB_CAN_BE_DEVICE) && !defined(DEVICE_STATE_AS_GPIOR) volatile uint8_t USB_DeviceState; #endif void USB_USBTask(void) { #if defined(USB_CAN_BE_BOTH) if (USB_CurrentMode == USB_MODE_Device) USB_DeviceTask(); else if (USB_CurrentMode == USB_MODE_Host) USB_HostTask(); #elif defined(USB_CAN_BE_HOST) USB_HostTask(); #elif defined(USB_CAN_BE_DEVICE) USB_DeviceTask(); #endif } #if defined(USB_CAN_BE_DEVICE) static void USB_DeviceTask(void) { if (USB_DeviceState == DEVICE_STATE_Unattached) return; uint8_t PrevEndpoint = Endpoint_GetCurrentEndpoint(); Endpoint_SelectEndpoint(ENDPOINT_CONTROLEP); if (Endpoint_IsSETUPReceived()) USB_Device_ProcessControlRequest(); Endpoint_SelectEndpoint(PrevEndpoint); } #endif #if defined(USB_CAN_BE_HOST) static void USB_HostTask(void) { uint8_t PrevPipe = Pipe_GetCurrentPipe(); Pipe_SelectPipe(PIPE_CONTROLPIPE); USB_Host_ProcessNextHostState(); Pipe_SelectPipe(PrevPipe); } #endif
gpl-2.0
manover/systemd
src/udev/udev-builtin-usb_id.c
41
17984
/* * USB device properties and persistent device path * * Copyright (c) 2005 SUSE Linux Products GmbH, Germany * Author: Hannes Reinecke <hare@suse.de> * * Copyright (C) 2005-2011 Kay Sievers <kay@vrfy.org> * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <ctype.h> #include <errno.h> #include <fcntl.h> #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include "alloc-util.h" #include "fd-util.h" #include "string-util.h" #include "udev.h" static void set_usb_iftype(char *to, int if_class_num, size_t len) { const char *type = "generic"; switch (if_class_num) { case 1: type = "audio"; break; case 2: /* CDC-Control */ break; case 3: type = "hid"; break; case 5: /* Physical */ break; case 6: type = "media"; break; case 7: type = "printer"; break; case 8: type = "storage"; break; case 9: type = "hub"; break; case 0x0a: /* CDC-Data */ break; case 0x0b: /* Chip/Smart Card */ break; case 0x0d: /* Content Security */ break; case 0x0e: type = "video"; break; case 0xdc: /* Diagnostic Device */ break; case 0xe0: /* Wireless Controller */ break; case 0xfe: /* Application-specific */ break; case 0xff: /* Vendor-specific */ break; default: break; } strncpy(to, type, len); to[len-1] = '\0'; } static int set_usb_mass_storage_ifsubtype(char *to, const char *from, size_t len) { int type_num = 0; char *eptr; const char *type = "generic"; type_num = strtoul(from, &eptr, 0); if (eptr != from) { switch (type_num) { case 1: /* RBC devices */ type = "rbc"; break; case 2: type = "atapi"; break; case 3: type = "tape"; break; case 4: /* UFI */ type = "floppy"; break; case 6: /* Transparent SPC-2 devices */ type = "scsi"; break; default: break; } } strscpy(to, len, type); return type_num; } static void set_scsi_type(char *to, const char *from, size_t len) { int type_num; char *eptr; const char *type = "generic"; type_num = strtoul(from, &eptr, 0); if (eptr != from) { switch (type_num) { case 0: case 0xe: type = "disk"; break; case 1: type = "tape"; break; case 4: case 7: case 0xf: type = "optical"; break; case 5: type = "cd"; break; default: break; } } strscpy(to, len, type); } #define USB_DT_DEVICE 0x01 #define USB_DT_INTERFACE 0x04 static int dev_if_packed_info(struct udev_device *dev, char *ifs_str, size_t len) { _cleanup_free_ char *filename = NULL; _cleanup_close_ int fd = -1; ssize_t size; unsigned char buf[18 + 65535]; size_t pos = 0; unsigned strpos = 0; struct usb_interface_descriptor { uint8_t bLength; uint8_t bDescriptorType; uint8_t bInterfaceNumber; uint8_t bAlternateSetting; uint8_t bNumEndpoints; uint8_t bInterfaceClass; uint8_t bInterfaceSubClass; uint8_t bInterfaceProtocol; uint8_t iInterface; } _packed_; if (asprintf(&filename, "%s/descriptors", udev_device_get_syspath(dev)) < 0) return log_oom(); fd = open(filename, O_RDONLY|O_CLOEXEC); if (fd < 0) return log_debug_errno(errno, "Error opening USB device 'descriptors' file: %m"); size = read(fd, buf, sizeof(buf)); if 
(size < 18 || size == sizeof(buf)) return -EIO; ifs_str[0] = '\0'; while (pos + sizeof(struct usb_interface_descriptor) < (size_t) size && strpos + 7 < len - 2) { struct usb_interface_descriptor *desc; char if_str[8]; desc = (struct usb_interface_descriptor *) &buf[pos]; if (desc->bLength < 3) break; pos += desc->bLength; if (desc->bDescriptorType != USB_DT_INTERFACE) continue; if (snprintf(if_str, 8, ":%02x%02x%02x", desc->bInterfaceClass, desc->bInterfaceSubClass, desc->bInterfaceProtocol) != 7) continue; if (strstr(ifs_str, if_str) != NULL) continue; memcpy(&ifs_str[strpos], if_str, 8), strpos += 7; } if (strpos > 0) { ifs_str[strpos++] = ':'; ifs_str[strpos++] = '\0'; } return 0; } /* * A unique USB identification is generated like this: * * 1.) Get the USB device type from InterfaceClass and InterfaceSubClass * 2.) If the device type is 'Mass-Storage/SPC-2' or 'Mass-Storage/RBC', * use the SCSI vendor and model as USB-Vendor and USB-model. * 3.) Otherwise, use the USB manufacturer and product as * USB-Vendor and USB-model. Any non-printable characters * in those strings will be skipped; a slash '/' will be converted * into a full stop '.'. * 4.) If that fails, too, we will use idVendor and idProduct * as USB-Vendor and USB-model. * 5.) The USB identification is the USB-vendor and USB-model * string concatenated with an underscore '_'. * 6.) If the device supplies a serial number, this number * is concatenated with the identification with an underscore '_'. */ static int builtin_usb_id(struct udev_device *dev, int argc, char *argv[], bool test) { char vendor_str[64] = ""; char vendor_str_enc[256]; const char *vendor_id; char model_str[64] = ""; char model_str_enc[256]; const char *product_id; char serial_str[UTIL_NAME_SIZE] = ""; char packed_if_str[UTIL_NAME_SIZE] = ""; char revision_str[64] = ""; char type_str[64] = ""; char instance_str[64] = ""; const char *ifnum = NULL; const char *driver = NULL; char serial[256]; struct udev_device *dev_interface = NULL; struct udev_device *dev_usb = NULL; const char *if_class, *if_subclass; int if_class_num; int protocol = 0; size_t l; char *s; assert(dev); /* shortcut, if we are called directly for a "usb_device" type */ if (udev_device_get_devtype(dev) != NULL && streq(udev_device_get_devtype(dev), "usb_device")) { dev_if_packed_info(dev, packed_if_str, sizeof(packed_if_str)); dev_usb = dev; goto fallback; } /* usb interface directory */ dev_interface = udev_device_get_parent_with_subsystem_devtype(dev, "usb", "usb_interface"); if (dev_interface == NULL) { log_debug("unable to access usb_interface device of '%s'", udev_device_get_syspath(dev)); return EXIT_FAILURE; } ifnum = udev_device_get_sysattr_value(dev_interface, "bInterfaceNumber"); driver = udev_device_get_sysattr_value(dev_interface, "driver"); if_class = udev_device_get_sysattr_value(dev_interface, "bInterfaceClass"); if (!if_class) { log_debug("%s: cannot get bInterfaceClass attribute", udev_device_get_sysname(dev)); return EXIT_FAILURE; } if_class_num = strtoul(if_class, NULL, 16); if (if_class_num == 8) { /* mass storage */ if_subclass = udev_device_get_sysattr_value(dev_interface, "bInterfaceSubClass"); if (if_subclass != NULL) protocol = set_usb_mass_storage_ifsubtype(type_str, if_subclass, sizeof(type_str)-1); } else { set_usb_iftype(type_str, if_class_num, sizeof(type_str)-1); } log_debug("%s: if_class %d protocol %d", udev_device_get_syspath(dev_interface), if_class_num, protocol); /* usb device directory */ dev_usb = 
udev_device_get_parent_with_subsystem_devtype(dev_interface, "usb", "usb_device"); if (!dev_usb) { log_debug("unable to find parent 'usb' device of '%s'", udev_device_get_syspath(dev)); return EXIT_FAILURE; } /* all interfaces of the device in a single string */ dev_if_packed_info(dev_usb, packed_if_str, sizeof(packed_if_str)); /* mass storage : SCSI or ATAPI */ if (protocol == 6 || protocol == 2) { struct udev_device *dev_scsi; const char *scsi_model, *scsi_vendor, *scsi_type, *scsi_rev; int host, bus, target, lun; /* get scsi device */ dev_scsi = udev_device_get_parent_with_subsystem_devtype(dev, "scsi", "scsi_device"); if (dev_scsi == NULL) { log_debug("unable to find parent 'scsi' device of '%s'", udev_device_get_syspath(dev)); goto fallback; } if (sscanf(udev_device_get_sysname(dev_scsi), "%d:%d:%d:%d", &host, &bus, &target, &lun) != 4) { log_debug("invalid scsi device '%s'", udev_device_get_sysname(dev_scsi)); goto fallback; } /* Generic SPC-2 device */ scsi_vendor = udev_device_get_sysattr_value(dev_scsi, "vendor"); if (!scsi_vendor) { log_debug("%s: cannot get SCSI vendor attribute", udev_device_get_sysname(dev_scsi)); goto fallback; } udev_util_encode_string(scsi_vendor, vendor_str_enc, sizeof(vendor_str_enc)); util_replace_whitespace(scsi_vendor, vendor_str, sizeof(vendor_str)-1); util_replace_chars(vendor_str, NULL); scsi_model = udev_device_get_sysattr_value(dev_scsi, "model"); if (!scsi_model) { log_debug("%s: cannot get SCSI model attribute", udev_device_get_sysname(dev_scsi)); goto fallback; } udev_util_encode_string(scsi_model, model_str_enc, sizeof(model_str_enc)); util_replace_whitespace(scsi_model, model_str, sizeof(model_str)-1); util_replace_chars(model_str, NULL); scsi_type = udev_device_get_sysattr_value(dev_scsi, "type"); if (!scsi_type) { log_debug("%s: cannot get SCSI type attribute", udev_device_get_sysname(dev_scsi)); goto fallback; } set_scsi_type(type_str, scsi_type, sizeof(type_str)-1); scsi_rev = udev_device_get_sysattr_value(dev_scsi, "rev"); if (!scsi_rev) { log_debug("%s: cannot get SCSI revision attribute", udev_device_get_sysname(dev_scsi)); goto fallback; } util_replace_whitespace(scsi_rev, revision_str, sizeof(revision_str)-1); util_replace_chars(revision_str, NULL); /* * some broken devices have the same identifiers * for all luns, export the target:lun number */ sprintf(instance_str, "%d:%d", target, lun); } fallback: vendor_id = udev_device_get_sysattr_value(dev_usb, "idVendor"); product_id = udev_device_get_sysattr_value(dev_usb, "idProduct"); /* fallback to USB vendor & device */ if (vendor_str[0] == '\0') { const char *usb_vendor = NULL; usb_vendor = udev_device_get_sysattr_value(dev_usb, "manufacturer"); if (!usb_vendor) usb_vendor = vendor_id; if (!usb_vendor) { log_debug("No USB vendor information available"); return EXIT_FAILURE; } udev_util_encode_string(usb_vendor, vendor_str_enc, sizeof(vendor_str_enc)); util_replace_whitespace(usb_vendor, vendor_str, sizeof(vendor_str)-1); util_replace_chars(vendor_str, NULL); } if (model_str[0] == '\0') { const char *usb_model = NULL; usb_model = udev_device_get_sysattr_value(dev_usb, "product"); if (!usb_model) usb_model = product_id; if (!usb_model) return EXIT_FAILURE; udev_util_encode_string(usb_model, model_str_enc, sizeof(model_str_enc)); util_replace_whitespace(usb_model, model_str, sizeof(model_str)-1); util_replace_chars(model_str, NULL); } if (revision_str[0] == '\0') { const char *usb_rev; usb_rev = udev_device_get_sysattr_value(dev_usb, "bcdDevice"); if (usb_rev) { 
util_replace_whitespace(usb_rev, revision_str, sizeof(revision_str)-1); util_replace_chars(revision_str, NULL); } } if (serial_str[0] == '\0') { const char *usb_serial; usb_serial = udev_device_get_sysattr_value(dev_usb, "serial"); if (usb_serial) { const unsigned char *p; /* http://msdn.microsoft.com/en-us/library/windows/hardware/gg487321.aspx */ for (p = (unsigned char *)usb_serial; *p != '\0'; p++) if (*p < 0x20 || *p > 0x7f || *p == ',') { usb_serial = NULL; break; } } if (usb_serial) { util_replace_whitespace(usb_serial, serial_str, sizeof(serial_str)-1); util_replace_chars(serial_str, NULL); } } s = serial; l = strpcpyl(&s, sizeof(serial), vendor_str, "_", model_str, NULL); if (!isempty(serial_str)) l = strpcpyl(&s, l, "_", serial_str, NULL); if (!isempty(instance_str)) strpcpyl(&s, l, "-", instance_str, NULL); udev_builtin_add_property(dev, test, "ID_VENDOR", vendor_str); udev_builtin_add_property(dev, test, "ID_VENDOR_ENC", vendor_str_enc); udev_builtin_add_property(dev, test, "ID_VENDOR_ID", vendor_id); udev_builtin_add_property(dev, test, "ID_MODEL", model_str); udev_builtin_add_property(dev, test, "ID_MODEL_ENC", model_str_enc); udev_builtin_add_property(dev, test, "ID_MODEL_ID", product_id); udev_builtin_add_property(dev, test, "ID_REVISION", revision_str); udev_builtin_add_property(dev, test, "ID_SERIAL", serial); if (!isempty(serial_str)) udev_builtin_add_property(dev, test, "ID_SERIAL_SHORT", serial_str); if (!isempty(type_str)) udev_builtin_add_property(dev, test, "ID_TYPE", type_str); if (!isempty(instance_str)) udev_builtin_add_property(dev, test, "ID_INSTANCE", instance_str); udev_builtin_add_property(dev, test, "ID_BUS", "usb"); if (!isempty(packed_if_str)) udev_builtin_add_property(dev, test, "ID_USB_INTERFACES", packed_if_str); if (ifnum != NULL) udev_builtin_add_property(dev, test, "ID_USB_INTERFACE_NUM", ifnum); if (driver != NULL) udev_builtin_add_property(dev, test, "ID_USB_DRIVER", driver); return EXIT_SUCCESS; } const struct udev_builtin udev_builtin_usb_id = { .name = "usb_id", .cmd = builtin_usb_id, .help = "USB device properties", .run_once = true, };
gpl-2.0
KMU-embedded/scalablelinux
drivers/infiniband/hw/usnic/usnic_ib_verbs.c
41
20196
/* * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/errno.h> #include <rdma/ib_user_verbs.h> #include <rdma/ib_addr.h> #include "usnic_abi.h" #include "usnic_ib.h" #include "usnic_common_util.h" #include "usnic_ib_qp_grp.h" #include "usnic_fwd.h" #include "usnic_log.h" #include "usnic_uiom.h" #include "usnic_transport.h" #define USNIC_DEFAULT_TRANSPORT USNIC_TRANSPORT_ROCE_CUSTOM static void usnic_ib_fw_string_to_u64(char *fw_ver_str, u64 *fw_ver) { *fw_ver = *((u64 *)fw_ver_str); } static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp, struct ib_udata *udata) { struct usnic_ib_dev *us_ibdev; struct usnic_ib_create_qp_resp resp; struct pci_dev *pdev; struct vnic_dev_bar *bar; struct usnic_vnic_res_chunk *chunk; struct usnic_ib_qp_grp_flow *default_flow; int i, err; memset(&resp, 0, sizeof(resp)); us_ibdev = qp_grp->vf->pf; pdev = usnic_vnic_get_pdev(qp_grp->vf->vnic); if (!pdev) { usnic_err("Failed to get pdev of qp_grp %d\n", qp_grp->grp_id); return -EFAULT; } bar = usnic_vnic_get_bar(qp_grp->vf->vnic, 0); if (!bar) { usnic_err("Failed to get bar0 of qp_grp %d vf %s", qp_grp->grp_id, pci_name(pdev)); return -EFAULT; } resp.vfid = usnic_vnic_get_index(qp_grp->vf->vnic); resp.bar_bus_addr = bar->bus_addr; resp.bar_len = bar->len; chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_RQ); if (IS_ERR_OR_NULL(chunk)) { usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n", usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_RQ), qp_grp->grp_id, PTR_ERR(chunk)); return chunk ? PTR_ERR(chunk) : -ENOMEM; } WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_RQ); resp.rq_cnt = chunk->cnt; for (i = 0; i < chunk->cnt; i++) resp.rq_idx[i] = chunk->res[i]->vnic_idx; chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_WQ); if (IS_ERR_OR_NULL(chunk)) { usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n", usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_WQ), qp_grp->grp_id, PTR_ERR(chunk)); return chunk ? 
PTR_ERR(chunk) : -ENOMEM; } WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_WQ); resp.wq_cnt = chunk->cnt; for (i = 0; i < chunk->cnt; i++) resp.wq_idx[i] = chunk->res[i]->vnic_idx; chunk = usnic_ib_qp_grp_get_chunk(qp_grp, USNIC_VNIC_RES_TYPE_CQ); if (IS_ERR_OR_NULL(chunk)) { usnic_err("Failed to get chunk %s for qp_grp %d with err %ld\n", usnic_vnic_res_type_to_str(USNIC_VNIC_RES_TYPE_CQ), qp_grp->grp_id, PTR_ERR(chunk)); return chunk ? PTR_ERR(chunk) : -ENOMEM; } WARN_ON(chunk->type != USNIC_VNIC_RES_TYPE_CQ); resp.cq_cnt = chunk->cnt; for (i = 0; i < chunk->cnt; i++) resp.cq_idx[i] = chunk->res[i]->vnic_idx; default_flow = list_first_entry(&qp_grp->flows_lst, struct usnic_ib_qp_grp_flow, link); resp.transport = default_flow->trans_type; err = ib_copy_to_udata(udata, &resp, sizeof(resp)); if (err) { usnic_err("Failed to copy udata for %s", us_ibdev->ib_dev.name); return err; } return 0; } static struct usnic_ib_qp_grp* find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev, struct usnic_ib_pd *pd, struct usnic_transport_spec *trans_spec, struct usnic_vnic_res_spec *res_spec) { struct usnic_ib_vf *vf; struct usnic_vnic *vnic; struct usnic_ib_qp_grp *qp_grp; struct device *dev, **dev_list; int i, found = 0; BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock)); if (list_empty(&us_ibdev->vf_dev_list)) { usnic_info("No vfs to allocate\n"); return NULL; } if (usnic_ib_share_vf) { /* Try to find resources on a used vf which is in pd */ dev_list = usnic_uiom_get_dev_list(pd->umem_pd); for (i = 0; dev_list[i]; i++) { dev = dev_list[i]; vf = pci_get_drvdata(to_pci_dev(dev)); spin_lock(&vf->lock); vnic = vf->vnic; if (!usnic_vnic_check_room(vnic, res_spec)) { usnic_dbg("Found used vnic %s from %s\n", us_ibdev->ib_dev.name, pci_name(usnic_vnic_get_pdev( vnic))); found = 1; break; } spin_unlock(&vf->lock); } usnic_uiom_free_dev_list(dev_list); } if (!found) { /* Try to find resources on an unused vf */ list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) { spin_lock(&vf->lock); vnic = vf->vnic; if (vf->qp_grp_ref_cnt == 0 && usnic_vnic_check_room(vnic, res_spec) == 0) { found = 1; break; } spin_unlock(&vf->lock); } } if (!found) { usnic_info("No free qp grp found on %s\n", us_ibdev->ib_dev.name); return ERR_PTR(-ENOMEM); } qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev, vf, pd, res_spec, trans_spec); spin_unlock(&vf->lock); if (IS_ERR_OR_NULL(qp_grp)) { usnic_err("Failed to allocate qp_grp\n"); return ERR_PTR(qp_grp ? 
PTR_ERR(qp_grp) : -ENOMEM); } return qp_grp; } static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp) { struct usnic_ib_vf *vf = qp_grp->vf; WARN_ON(qp_grp->state != IB_QPS_RESET); spin_lock(&vf->lock); usnic_ib_qp_grp_destroy(qp_grp); spin_unlock(&vf->lock); } static void eth_speed_to_ib_speed(int speed, u8 *active_speed, u8 *active_width) { if (speed <= 10000) { *active_width = IB_WIDTH_1X; *active_speed = IB_SPEED_FDR10; } else if (speed <= 20000) { *active_width = IB_WIDTH_4X; *active_speed = IB_SPEED_DDR; } else if (speed <= 30000) { *active_width = IB_WIDTH_4X; *active_speed = IB_SPEED_QDR; } else if (speed <= 40000) { *active_width = IB_WIDTH_4X; *active_speed = IB_SPEED_FDR10; } else { *active_width = IB_WIDTH_4X; *active_speed = IB_SPEED_EDR; } } static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd) { if (cmd.spec.trans_type <= USNIC_TRANSPORT_UNKNOWN || cmd.spec.trans_type >= USNIC_TRANSPORT_MAX) return -EINVAL; return 0; } /* Start of ib callback functions */ enum rdma_link_layer usnic_ib_port_link_layer(struct ib_device *device, u8 port_num) { return IB_LINK_LAYER_ETHERNET; } int usnic_ib_query_device(struct ib_device *ibdev, struct ib_device_attr *props, struct ib_udata *uhw) { struct usnic_ib_dev *us_ibdev = to_usdev(ibdev); union ib_gid gid; struct ethtool_drvinfo info; struct ethtool_cmd cmd; int qp_per_vf; usnic_dbg("\n"); if (uhw->inlen || uhw->outlen) return -EINVAL; mutex_lock(&us_ibdev->usdev_lock); us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info); us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd); memset(props, 0, sizeof(*props)); usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr, &gid.raw[0]); memcpy(&props->sys_image_guid, &gid.global.interface_id, sizeof(gid.global.interface_id)); usnic_ib_fw_string_to_u64(&info.fw_version[0], &props->fw_ver); props->max_mr_size = USNIC_UIOM_MAX_MR_SIZE; props->page_size_cap = USNIC_UIOM_PAGE_SIZE; props->vendor_id = PCI_VENDOR_ID_CISCO; props->vendor_part_id = PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC; props->hw_ver = us_ibdev->pdev->subsystem_device; qp_per_vf = max(us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_WQ], us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_RQ]); props->max_qp = qp_per_vf * atomic_read(&us_ibdev->vf_cnt.refcount); props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_BLOCK_MULTICAST_LOOPBACK; props->max_cq = us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ] * atomic_read(&us_ibdev->vf_cnt.refcount); props->max_pd = USNIC_UIOM_MAX_PD_CNT; props->max_mr = USNIC_UIOM_MAX_MR_CNT; props->local_ca_ack_delay = 0; props->max_pkeys = 0; props->atomic_cap = IB_ATOMIC_NONE; props->masked_atomic_cap = props->atomic_cap; props->max_qp_rd_atom = 0; props->max_qp_init_rd_atom = 0; props->max_res_rd_atom = 0; props->max_srq = 0; props->max_srq_wr = 0; props->max_srq_sge = 0; props->max_fast_reg_page_list_len = 0; props->max_mcast_grp = 0; props->max_mcast_qp_attach = 0; props->max_total_mcast_qp_attach = 0; props->max_map_per_fmr = 0; /* Owned by Userspace * max_qp_wr, max_sge, max_sge_rd, max_cqe */ mutex_unlock(&us_ibdev->usdev_lock); return 0; } int usnic_ib_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props) { struct usnic_ib_dev *us_ibdev = to_usdev(ibdev); struct ethtool_cmd cmd; usnic_dbg("\n"); mutex_lock(&us_ibdev->usdev_lock); us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd); memset(props, 0, sizeof(*props)); props->lid = 0; props->lmc = 1; props->sm_lid = 0; props->sm_sl = 0; if 
(!us_ibdev->ufdev->link_up) { props->state = IB_PORT_DOWN; props->phys_state = 3; } else if (!us_ibdev->ufdev->inaddr) { props->state = IB_PORT_INIT; props->phys_state = 4; } else { props->state = IB_PORT_ACTIVE; props->phys_state = 5; } props->port_cap_flags = 0; props->gid_tbl_len = 1; props->pkey_tbl_len = 1; props->bad_pkey_cntr = 0; props->qkey_viol_cntr = 0; eth_speed_to_ib_speed(cmd.speed, &props->active_speed, &props->active_width); props->max_mtu = IB_MTU_4096; props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu); /* Userspace will adjust for hdrs */ props->max_msg_sz = us_ibdev->ufdev->mtu; props->max_vl_num = 1; mutex_unlock(&us_ibdev->usdev_lock); return 0; } int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) { struct usnic_ib_qp_grp *qp_grp; struct usnic_ib_vf *vf; int err; usnic_dbg("\n"); memset(qp_attr, 0, sizeof(*qp_attr)); memset(qp_init_attr, 0, sizeof(*qp_init_attr)); qp_grp = to_uqp_grp(qp); vf = qp_grp->vf; mutex_lock(&vf->pf->usdev_lock); usnic_dbg("\n"); qp_attr->qp_state = qp_grp->state; qp_attr->cur_qp_state = qp_grp->state; switch (qp_grp->ibqp.qp_type) { case IB_QPT_UD: qp_attr->qkey = 0; break; default: usnic_err("Unexpected qp_type %d\n", qp_grp->ibqp.qp_type); err = -EINVAL; goto err_out; } mutex_unlock(&vf->pf->usdev_lock); return 0; err_out: mutex_unlock(&vf->pf->usdev_lock); return err; } int usnic_ib_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid) { struct usnic_ib_dev *us_ibdev = to_usdev(ibdev); usnic_dbg("\n"); if (index > 1) return -EINVAL; mutex_lock(&us_ibdev->usdev_lock); memset(&(gid->raw[0]), 0, sizeof(gid->raw)); usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr, &gid->raw[0]); mutex_unlock(&us_ibdev->usdev_lock); return 0; } int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) { if (index > 1) return -EINVAL; *pkey = 0xffff; return 0; } struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev, struct ib_ucontext *context, struct ib_udata *udata) { struct usnic_ib_pd *pd; void *umem_pd; usnic_dbg("\n"); pd = kzalloc(sizeof(*pd), GFP_KERNEL); if (!pd) return ERR_PTR(-ENOMEM); umem_pd = pd->umem_pd = usnic_uiom_alloc_pd(); if (IS_ERR_OR_NULL(umem_pd)) { kfree(pd); return ERR_PTR(umem_pd ? 
PTR_ERR(umem_pd) : -ENOMEM); } usnic_info("domain 0x%p allocated for context 0x%p and device %s\n", pd, context, ibdev->name); return &pd->ibpd; } int usnic_ib_dealloc_pd(struct ib_pd *pd) { usnic_info("freeing domain 0x%p\n", pd); usnic_uiom_dealloc_pd((to_upd(pd))->umem_pd); kfree(pd); return 0; } struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, struct ib_udata *udata) { int err; struct usnic_ib_dev *us_ibdev; struct usnic_ib_qp_grp *qp_grp; struct usnic_ib_ucontext *ucontext; int cq_cnt; struct usnic_vnic_res_spec res_spec; struct usnic_ib_create_qp_cmd cmd; struct usnic_transport_spec trans_spec; usnic_dbg("\n"); ucontext = to_uucontext(pd->uobject->context); us_ibdev = to_usdev(pd->device); if (init_attr->create_flags) return ERR_PTR(-EINVAL); err = ib_copy_from_udata(&cmd, udata, sizeof(cmd)); if (err) { usnic_err("%s: cannot copy udata for create_qp\n", us_ibdev->ib_dev.name); return ERR_PTR(-EINVAL); } err = create_qp_validate_user_data(cmd); if (err) { usnic_err("%s: Failed to validate user data\n", us_ibdev->ib_dev.name); return ERR_PTR(-EINVAL); } if (init_attr->qp_type != IB_QPT_UD) { usnic_err("%s asked to make a non-UD QP: %d\n", us_ibdev->ib_dev.name, init_attr->qp_type); return ERR_PTR(-EINVAL); } trans_spec = cmd.spec; mutex_lock(&us_ibdev->usdev_lock); cq_cnt = (init_attr->send_cq == init_attr->recv_cq) ? 1 : 2; res_spec = min_transport_spec[trans_spec.trans_type]; usnic_vnic_res_spec_update(&res_spec, USNIC_VNIC_RES_TYPE_CQ, cq_cnt); qp_grp = find_free_vf_and_create_qp_grp(us_ibdev, to_upd(pd), &trans_spec, &res_spec); if (IS_ERR_OR_NULL(qp_grp)) { err = qp_grp ? PTR_ERR(qp_grp) : -ENOMEM; goto out_release_mutex; } err = usnic_ib_fill_create_qp_resp(qp_grp, udata); if (err) { err = -EBUSY; goto out_release_qp_grp; } qp_grp->ctx = ucontext; list_add_tail(&qp_grp->link, &ucontext->qp_grp_list); usnic_ib_log_vf(qp_grp->vf); mutex_unlock(&us_ibdev->usdev_lock); return &qp_grp->ibqp; out_release_qp_grp: qp_grp_destroy(qp_grp); out_release_mutex: mutex_unlock(&us_ibdev->usdev_lock); return ERR_PTR(err); } int usnic_ib_destroy_qp(struct ib_qp *qp) { struct usnic_ib_qp_grp *qp_grp; struct usnic_ib_vf *vf; usnic_dbg("\n"); qp_grp = to_uqp_grp(qp); vf = qp_grp->vf; mutex_lock(&vf->pf->usdev_lock); if (usnic_ib_qp_grp_modify(qp_grp, IB_QPS_RESET, NULL)) { usnic_err("Failed to move qp grp %u to reset\n", qp_grp->grp_id); } list_del(&qp_grp->link); qp_grp_destroy(qp_grp); mutex_unlock(&vf->pf->usdev_lock); return 0; } int usnic_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, struct ib_udata *udata) { struct usnic_ib_qp_grp *qp_grp; int status; usnic_dbg("\n"); qp_grp = to_uqp_grp(ibqp); mutex_lock(&qp_grp->vf->pf->usdev_lock); if ((attr_mask & IB_QP_PORT) && attr->port_num != 1) { /* usnic devices only have one port */ status = -EINVAL; goto out_unlock; } if (attr_mask & IB_QP_STATE) { status = usnic_ib_qp_grp_modify(qp_grp, attr->qp_state, NULL); } else { usnic_err("Unhandled request, attr_mask=0x%x\n", attr_mask); status = -EINVAL; } out_unlock: mutex_unlock(&qp_grp->vf->pf->usdev_lock); return status; } struct ib_cq *usnic_ib_create_cq(struct ib_device *ibdev, const struct ib_cq_init_attr *attr, struct ib_ucontext *context, struct ib_udata *udata) { struct ib_cq *cq; usnic_dbg("\n"); if (attr->flags) return ERR_PTR(-EINVAL); cq = kzalloc(sizeof(*cq), GFP_KERNEL); if (!cq) return ERR_PTR(-EBUSY); return cq; } int usnic_ib_destroy_cq(struct ib_cq *cq) { usnic_dbg("\n"); kfree(cq); return 0; } struct ib_mr 
*usnic_ib_reg_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt_addr, int access_flags, struct ib_udata *udata) { struct usnic_ib_mr *mr; int err; usnic_dbg("start 0x%llx va 0x%llx length 0x%llx\n", start, virt_addr, length); mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); mr->umem = usnic_uiom_reg_get(to_upd(pd)->umem_pd, start, length, access_flags, 0); if (IS_ERR_OR_NULL(mr->umem)) { err = mr->umem ? PTR_ERR(mr->umem) : -EFAULT; goto err_free; } mr->ibmr.lkey = mr->ibmr.rkey = 0; return &mr->ibmr; err_free: kfree(mr); return ERR_PTR(err); } int usnic_ib_dereg_mr(struct ib_mr *ibmr) { struct usnic_ib_mr *mr = to_umr(ibmr); usnic_dbg("va 0x%lx length 0x%zx\n", mr->umem->va, mr->umem->length); usnic_uiom_reg_release(mr->umem, ibmr->pd->uobject->context->closing); kfree(mr); return 0; } struct ib_ucontext *usnic_ib_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata) { struct usnic_ib_ucontext *context; struct usnic_ib_dev *us_ibdev = to_usdev(ibdev); usnic_dbg("\n"); context = kmalloc(sizeof(*context), GFP_KERNEL); if (!context) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&context->qp_grp_list); mutex_lock(&us_ibdev->usdev_lock); list_add_tail(&context->link, &us_ibdev->ctx_list); mutex_unlock(&us_ibdev->usdev_lock); return &context->ibucontext; } int usnic_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) { struct usnic_ib_ucontext *context = to_uucontext(ibcontext); struct usnic_ib_dev *us_ibdev = to_usdev(ibcontext->device); usnic_dbg("\n"); mutex_lock(&us_ibdev->usdev_lock); BUG_ON(!list_empty(&context->qp_grp_list)); list_del(&context->link); mutex_unlock(&us_ibdev->usdev_lock); kfree(context); return 0; } int usnic_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) { struct usnic_ib_ucontext *uctx = to_ucontext(context); struct usnic_ib_dev *us_ibdev; struct usnic_ib_qp_grp *qp_grp; struct usnic_ib_vf *vf; struct vnic_dev_bar *bar; dma_addr_t bus_addr; unsigned int len; unsigned int vfid; usnic_dbg("\n"); us_ibdev = to_usdev(context->device); vma->vm_flags |= VM_IO; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); vfid = vma->vm_pgoff; usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n", vma->vm_pgoff, PAGE_SHIFT, vfid); mutex_lock(&us_ibdev->usdev_lock); list_for_each_entry(qp_grp, &uctx->qp_grp_list, link) { vf = qp_grp->vf; if (usnic_vnic_get_index(vf->vnic) == vfid) { bar = usnic_vnic_get_bar(vf->vnic, 0); if ((vma->vm_end - vma->vm_start) != bar->len) { usnic_err("Bar0 Len %lu - Request map %lu\n", bar->len, vma->vm_end - vma->vm_start); mutex_unlock(&us_ibdev->usdev_lock); return -EINVAL; } bus_addr = bar->bus_addr; len = bar->len; usnic_dbg("bus: %pa vaddr: %p size: %ld\n", &bus_addr, bar->vaddr, bar->len); mutex_unlock(&us_ibdev->usdev_lock); return remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT, len, vma->vm_page_prot); } } mutex_unlock(&us_ibdev->usdev_lock); usnic_err("No VF %u found\n", vfid); return -EINVAL; } /* In ib callbacks section - Start of stub funcs */ struct ib_ah *usnic_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) { usnic_dbg("\n"); return ERR_PTR(-EPERM); } int usnic_ib_destroy_ah(struct ib_ah *ah) { usnic_dbg("\n"); return -EINVAL; } int usnic_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr) { usnic_dbg("\n"); return -EINVAL; } int usnic_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr) { usnic_dbg("\n"); return -EINVAL; } int usnic_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) { 
usnic_dbg("\n"); return -EINVAL; } int usnic_ib_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags) { usnic_dbg("\n"); return -EINVAL; } struct ib_mr *usnic_ib_get_dma_mr(struct ib_pd *pd, int acc) { usnic_dbg("\n"); return ERR_PTR(-ENOMEM); } /* In ib callbacks section - End of stub funcs */ /* End of ib callbacks section */
gpl-2.0
hajuuk/asuswrt
release/src-rt-6.x.4708/linux/linux-2.6.36/arch/arm/mach-omap2/board-omap4panda.c
41
7441
/* * Board support file for OMAP4430 based PandaBoard. * * Copyright (C) 2010 Texas Instruments * * Author: David Anders <x0132446@ti.com> * * Based on mach-omap2/board-4430sdp.c * * Author: Santosh Shilimkar <santosh.shilimkar@ti.com> * * Based on mach-omap2/board-3430sdp.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/usb/otg.h> #include <linux/i2c/twl.h> #include <linux/regulator/machine.h> #include <mach/hardware.h> #include <mach/omap4-common.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <plat/board.h> #include <plat/common.h> #include <plat/control.h> #include <plat/timer-gp.h> #include <plat/usb.h> #include <plat/mmc.h> #include "hsmmc.h" static void __init omap4_panda_init_irq(void) { omap2_init_common_hw(NULL, NULL); gic_init_irq(); omap_gpio_init(); } static struct omap_musb_board_data musb_board_data = { .interface_type = MUSB_INTERFACE_UTMI, .mode = MUSB_PERIPHERAL, .power = 100, }; static struct omap2_hsmmc_info mmc[] = { { .mmc = 1, .wires = 8, .gpio_wp = -EINVAL, }, {} /* Terminator */ }; static struct regulator_consumer_supply omap4_panda_vmmc_supply[] = { { .supply = "vmmc", .dev_name = "mmci-omap-hs.0", }, { .supply = "vmmc", .dev_name = "mmci-omap-hs.1", }, }; static int omap4_twl6030_hsmmc_late_init(struct device *dev) { int ret = 0; struct platform_device *pdev = container_of(dev, struct platform_device, dev); struct omap_mmc_platform_data *pdata = dev->platform_data; /* Setting MMC1 Card detect Irq */ if (pdev->id == 0) pdata->slots[0].card_detect_irq = TWL6030_IRQ_BASE + MMCDETECT_INTR_OFFSET; return ret; } static __init void omap4_twl6030_hsmmc_set_late_init(struct device *dev) { struct omap_mmc_platform_data *pdata = dev->platform_data; pdata->init = omap4_twl6030_hsmmc_late_init; } static int __init omap4_twl6030_hsmmc_init(struct omap2_hsmmc_info *controllers) { struct omap2_hsmmc_info *c; omap2_hsmmc_init(controllers); for (c = controllers; c->mmc; c++) omap4_twl6030_hsmmc_set_late_init(c->dev); return 0; } static struct regulator_init_data omap4_panda_vaux1 = { .constraints = { .min_uV = 1000000, .max_uV = 3000000, .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, }; static struct regulator_init_data omap4_panda_vaux2 = { .constraints = { .min_uV = 1200000, .max_uV = 2800000, .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, }; static struct regulator_init_data omap4_panda_vaux3 = { .constraints = { .min_uV = 1000000, .max_uV = 3000000, .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, }; /* VMMC1 for MMC1 card */ static struct regulator_init_data omap4_panda_vmmc = { .constraints = { .min_uV = 1200000, .max_uV = 3000000, .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, .num_consumer_supplies = 2, 
.consumer_supplies = omap4_panda_vmmc_supply, }; static struct regulator_init_data omap4_panda_vpp = { .constraints = { .min_uV = 1800000, .max_uV = 2500000, .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, }; static struct regulator_init_data omap4_panda_vusim = { .constraints = { .min_uV = 1200000, .max_uV = 2900000, .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, }; static struct regulator_init_data omap4_panda_vana = { .constraints = { .min_uV = 2100000, .max_uV = 2100000, .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, }; static struct regulator_init_data omap4_panda_vcxio = { .constraints = { .min_uV = 1800000, .max_uV = 1800000, .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, }; static struct regulator_init_data omap4_panda_vdac = { .constraints = { .min_uV = 1800000, .max_uV = 1800000, .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, }; static struct regulator_init_data omap4_panda_vusb = { .constraints = { .min_uV = 3300000, .max_uV = 3300000, .apply_uV = true, .valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }, }; static struct twl4030_platform_data omap4_panda_twldata = { .irq_base = TWL6030_IRQ_BASE, .irq_end = TWL6030_IRQ_END, /* Regulators */ .vmmc = &omap4_panda_vmmc, .vpp = &omap4_panda_vpp, .vusim = &omap4_panda_vusim, .vana = &omap4_panda_vana, .vcxio = &omap4_panda_vcxio, .vdac = &omap4_panda_vdac, .vusb = &omap4_panda_vusb, .vaux1 = &omap4_panda_vaux1, .vaux2 = &omap4_panda_vaux2, .vaux3 = &omap4_panda_vaux3, }; static struct i2c_board_info __initdata omap4_panda_i2c_boardinfo[] = { { I2C_BOARD_INFO("twl6030", 0x48), .flags = I2C_CLIENT_WAKE, .irq = OMAP44XX_IRQ_SYS_1N, .platform_data = &omap4_panda_twldata, }, }; static int __init omap4_panda_i2c_init(void) { /* * Phoenix Audio IC needs I2C1 to * start with 400 KHz or less */ omap_register_i2c_bus(1, 400, omap4_panda_i2c_boardinfo, ARRAY_SIZE(omap4_panda_i2c_boardinfo)); omap_register_i2c_bus(2, 400, NULL, 0); omap_register_i2c_bus(3, 400, NULL, 0); omap_register_i2c_bus(4, 400, NULL, 0); return 0; } static void __init omap4_panda_init(void) { int status; omap4_panda_i2c_init(); omap_serial_init(); omap4_twl6030_hsmmc_init(mmc); /* OMAP4 Panda uses internal transceiver so register nop transceiver */ usb_nop_xceiv_register(); if (!cpu_is_omap44xx()) usb_musb_init(&musb_board_data); } static void __init omap4_panda_map_io(void) { omap2_set_globals_443x(); omap44xx_map_common_io(); } MACHINE_START(OMAP4_PANDA, "OMAP4 Panda board") /* Maintainer: David Anders - Texas Instruments Inc */ .phys_io = 0x48000000, .io_pg_offst = ((0xfa000000) >> 18) & 0xfffc, .boot_params = 0x80000100, .map_io = omap4_panda_map_io, .init_irq = omap4_panda_init_irq, .init_machine = omap4_panda_init, .timer = &omap_timer, MACHINE_END
gpl-2.0
wulsic/Hyper_CM11
drivers/char/broadcom/idle_profiler/idle_profiler.c
41
21725
/***************************************************************************** * Copyright 2003 - 2008 Broadcom Corporation. All rights reserved. * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2, available at * http://www.broadcom.com/licenses/GPLv2.php (the "GPL"). * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a * license other than the GPL, without Broadcom's express prior written * consent. *****************************************************************************/ #include <linux/version.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/spinlock.h> #include <linux/kthread.h> #include <linux/timer.h> #include <linux/delay.h> #include <linux/broadcom/knllog.h> #include <linux/module.h> #include <linux/threads.h> #include <linux/percpu.h> #include <linux/proc_fs.h> #include <linux/sysctl.h> #include <mach/profile_timer.h> DEFINE_PER_CPU(spinlock_t, idle_lock); DEFINE_PER_CPU(u32, idle_count); EXPORT_PER_CPU_SYMBOL_GPL(idle_count); #define DEFAULT_UPDATE_INTERVAL_MSEC 500 #define HISTORY_SIZE 10 typedef struct { unsigned int minCount; unsigned int maxCount; unsigned int avgCount; unsigned int currCount; unsigned int countHistory[HISTORY_SIZE]; } ICP_STATS; typedef struct { /* CPU monitor on or off. Initialized at startup, read-only by kernel * and write/read by user via proc entry */ unsigned int enable; /* Forced reset of stats, Initialized at startup, read-only by kernel * and write/read by user via proc entry */ unsigned int reset; /* Window size over which measurement is made. Initialized at startup, * readonly by kernel, write/read by user via proc entry */ unsigned int updateIntervalMsec; /* Weightage of current value to the average (in %). Initialized at * startup, readonly by kernel, write/read by user via proc entry */ unsigned int alphaFactor; /* Resets the stats after this interval expires . Initialized at * startup, readonly by kernel, write/read by user via proc entry */ unsigned int resetInterval; /* local timer to reset the stats, read/write by kernel, * read only by user via proc entry */ unsigned int resetTimer; /* read/write by kernel, read only by user via proc entry */ unsigned int iterations; /* Read/write by kernel, read only by user via proc entry */ ICP_STATS stats; } ICP_STATE; typedef struct { /* Load test on or off. Initialized at startup, read-only by kernel * and write/read by user via proc entry */ unsigned int enable; /* Period of when the load test is run, in mSec */ unsigned int loadPeriod; /* Load percentage (in %). */ unsigned int loadPercentage; /* Test load using thread context. 
*/ unsigned int useThread; /* Priority of load testing thread */ unsigned int threadPriority; /* Number of time slices to spread the load across */ unsigned int timeSlices; /* Flag indicating whether kernel timer used for CPU loading has * started */ unsigned int timerStarted; /* Load testing thread controls */ int threadPid; struct completion threadLoadWaitLock; struct completion threadCompletion; struct timer_list timer; int load; int slice; } ICP_LOAD_TEST; typedef struct { u32 last_smtclk; u32 last_idle_count; } idle_handle_t; typedef struct { idle_handle_t idle_handle; u32 idle; u32 total; } ICP_DATA; typedef struct { int cpu_enum; struct task_struct *task; ICP_STATE state; ICP_STATS stats; ICP_LOAD_TEST loadtest; ICP_DATA data; } ICP_OBJ; static ICP_OBJ obj[NR_CPUS]; /* sysctl */ static int hostIcpThread(void *data); static void init_idle_profile(idle_handle_t *handle); static int get_idle_profile(idle_handle_t *handle, u32 *idle, u32 *total); static struct ctl_table_header *gSysCtlHeader; static int proc_do_hosticp_intvec_clear0(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); static int proc_do_hosticp_intvec_clear1(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); static int proc_do_hosticp_loadtest_intvec_enable0(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); static int proc_do_hosticp_loadtest_intvec_enable1(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #define DEFINE_SYSCTL_ICPSTATS(id) { \ { \ .procname = "minCpu", \ .data = &obj[id].stats.minCount, \ .maxlen = sizeof( int ), \ .mode = 0644, \ .proc_handler = &proc_dointvec \ }, \ { \ .procname = "maxCpu", \ .data = &obj[id].stats.maxCount, \ .maxlen = sizeof( int ), \ .mode = 0644, \ .proc_handler = &proc_dointvec \ }, \ { \ .procname = "avgCpu", \ .data = &obj[id].stats.avgCount, \ .maxlen = sizeof( int ), \ .mode = 0644, \ .proc_handler = &proc_dointvec \ }, \ { \ .procname = "currCpu", \ .data = &obj[id].stats.currCount, \ .maxlen = sizeof( int ), \ .mode = 0644, \ .proc_handler = &proc_dointvec \ }, \ { \ .procname = "history", \ .data = &obj[id].stats.countHistory, \ .maxlen = HISTORY_SIZE * sizeof( int ), \ .mode = 0644, \ .proc_handler = &proc_dointvec \ }, \ { \ .procname = "summary", \ .data = &obj[id].stats, \ .maxlen = sizeof( ICP_STATS ), \ .mode = 0644, \ .proc_handler = &proc_dointvec \ }, \ {} \ } static struct ctl_table gSysCtlIcpStats0[] = DEFINE_SYSCTL_ICPSTATS(0); static struct ctl_table gSysCtlIcpStats1[] = DEFINE_SYSCTL_ICPSTATS(1); #define DEFINE_SYSCTL_ICPLOADTEST(id) { \ { \ .procname = "enable", \ .data = &obj[id].loadtest.enable, \ .maxlen = sizeof( int ), \ .mode = 0644, \ .proc_handler = &proc_do_hosticp_loadtest_intvec_enable##id \ }, \ { \ .procname = "loadPeriod", \ .data = &obj[id].loadtest.loadPeriod, \ .maxlen = sizeof( int ), \ .mode = 0644, \ .proc_handler = &proc_dointvec \ }, \ { \ .procname = "loadPercentage", \ .data = &obj[id].loadtest.loadPercentage, \ .maxlen = sizeof( int ), \ .mode = 0644, \ .proc_handler = &proc_dointvec \ }, \ { \ .procname = "useThread", \ .data = &obj[id].loadtest.useThread, \ .maxlen = sizeof( int ), \ .mode = 0644, \ .proc_handler = &proc_dointvec \ }, \ { \ .procname = "threadPriority", \ .data = &obj[id].loadtest.threadPriority, \ .maxlen = sizeof( int ), \ .mode = 0644, \ .proc_handler = &proc_dointvec \ }, \ { \ .procname = "timeSlices", \ .data = &obj[id].loadtest.timeSlices, \ .maxlen = sizeof( int ), \ .mode = 0644, \ 
.proc_handler = &proc_dointvec \ }, \ {} \ } static struct ctl_table gSysCtlIcpLoadTest0[] = DEFINE_SYSCTL_ICPLOADTEST(0); static struct ctl_table gSysCtlIcpLoadTest1[] = DEFINE_SYSCTL_ICPLOADTEST(1); #define DEFINE_SYSCTL_ICP(id) { \ { \ .procname = "enable", \ .data = &obj[id].state.enable, \ .maxlen = sizeof( int ), \ .mode = 0644, \ .proc_handler = &proc_dointvec \ }, \ { \ .procname = "reset", \ .data = &obj[id].state.reset, \ .maxlen = sizeof( int ), \ .mode = 0644, \ .proc_handler = &proc_do_hosticp_intvec_clear##id \ }, \ { \ .procname = "alpha-factor", \ .data = &obj[id].state.alphaFactor, \ .maxlen = sizeof( int ), \ .mode = 0644, \ .proc_handler = &proc_dointvec \ }, \ { \ .procname = "resetInterval", \ .data = &obj[id].state.resetInterval, \ .maxlen = sizeof( int ), \ .mode = 0644, \ .proc_handler = &proc_dointvec \ }, \ { \ .procname = "stats", \ .child = gSysCtlIcpStats##id, \ .mode = 0555, \ }, \ { \ .procname = "state", \ .data = &obj[id].state, \ .maxlen = sizeof( ICP_STATE ), \ .mode = 0644, \ .proc_handler = &proc_dointvec \ }, \ { \ .procname = "loadTest", \ .child = gSysCtlIcpLoadTest##id, \ .mode = 0555, \ }, \ {} \ } static struct ctl_table gSysCtlIcp0[] = DEFINE_SYSCTL_ICP(0); static struct ctl_table gSysCtlIcp1[] = DEFINE_SYSCTL_ICP(1); static ctl_table gSysCtl[] = { { .procname = "hostUsage0", .mode = 0555, .child = gSysCtlIcp0}, { .procname = "hostUsage1", .mode = 0555, .child = gSysCtlIcp1}, {} }; /* Actually create (and remove) the /proc file(s). */ static void icp_create_proc(void) { create_proc_entry("hostUsage0", 0, NULL); if (num_online_cpus() != 1) create_proc_entry("hostUsage1", 0, NULL); } static void icp_remove_proc(void) { /* no problem if it was not registered */ remove_proc_entry("hostUsage0", NULL); if (num_online_cpus() != 1) remove_proc_entry("hostUsage1", NULL); } /* If we are writing the clear field, we reset the stats and start logging */ static int intvec_clear(int cpu, ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int rc = 0; if (!table || !table->data) return -EINVAL; if (write) { ICP_STATS *statsp = &obj[cpu].stats; ICP_STATE *statep = &obj[cpu].state; memset(statsp, 0, sizeof(*statsp)); memset(statep, 0, sizeof(*statep)); statsp->minCount = 1000; statep->alphaFactor = (1 << 4); /* 1/16 in Q8 number */ } else { /* No special processing for read. */ rc = proc_dointvec(table, write, buffer, lenp, ppos); } return rc; } /* If we are writing the clear field, we reset the stats and start logging */ static int proc_do_hosticp_intvec_clear0(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return intvec_clear(0, table, write, buffer, lenp, ppos); } /* If we are writing the clear field, we reset the stats and start logging */ static int proc_do_hosticp_intvec_clear1(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return intvec_clear(1, table, write, buffer, lenp, ppos); } #define MAX_LOAD_PERIOD 1000 #define MAX_LOAD_PERCENTAGE 98 #define MAX_TIME_SLICES 50 static void ipc_load(int cpu) { ICP_LOAD_TEST *lt = &obj[cpu].loadtest; int load_this_slice; int loops_this_slice; /* * The test load is implemented in fixed length udelay loops. * The target load percentage is specifed by loadPercentage, * and can be spread across multiple time slots in the * following manner: * * 1 time slot, 60% load: * * | 60% | 60% | 60%| ... * * 5 time slots, 60% load: * * | 98% | 98% | 98% | 6% | 0% | 98% | 98% | 98% | 6% | 0%| ... 
* * The maximum load of 98% per slot is enforced to prevent system * deadlocks. * */ if (lt->slice == 0) { lt->slice = lt->timeSlices; lt->load = lt->loadPercentage << 4; } load_this_slice = (MAX_LOAD_PERCENTAGE << 4) / lt->timeSlices; if (lt->load < load_this_slice) load_this_slice = lt->load; lt->load -= load_this_slice; load_this_slice = (load_this_slice * lt->timeSlices) >> 4; /* * Convert load percentage for this slot into number of 10uSec loops, * which is conveniently just multiplying the load percentage to the * loop period in mSec. */ loops_this_slice = (load_this_slice * lt->loadPeriod); /*KNLLOG("loops_this_slice=%u load_this_slice=%u lt->loadPeriod=%u * lt->timeSlices=%u lt->loadPercentage=%u lt->load=%u", * loops_this_slice, load_this_slice, lt->loadPeriod, lt->timeSlices, * lt->loadPercentage, lt->load); */ while (loops_this_slice) { udelay(10); loops_this_slice--; } lt->slice--; } static int ipc_load_test_thread(void *data) { int cpu = *((int *)data); ICP_LOAD_TEST *lt = &obj[cpu].loadtest; char strg[20]; if (cpu > num_online_cpus()) { printk(KERN_ERR "%s: bad cpu number 0x%x\n", __func__, cpu); return -1; } sprintf(strg, "loadTest/%d", cpu); daemonize(strg); if (lt->threadPriority > 0) { struct sched_param param; param.sched_priority = (lt->threadPriority < MAX_RT_PRIO) ? lt->threadPriority : (MAX_RT_PRIO - 1); sched_setscheduler(current, SCHED_FIFO, &param); } allow_signal(SIGKILL); allow_signal(SIGTERM); /* Run until signal received */ while (1) { if (wait_for_completion_interruptible(&lt->threadLoadWaitLock) == 0) { ipc_load(cpu); } else break; } complete_and_exit(&lt->threadCompletion, 0); } static void ipc_load_timer_func(ulong data) { int cpu = (int)data; ICP_LOAD_TEST *lt = &obj[cpu].loadtest; struct timer_list *timer; timer = (struct timer_list *)&lt->timer; if (lt->timerStarted) { if (lt->loadPeriod > MAX_LOAD_PERIOD) lt->loadPeriod = MAX_LOAD_PERIOD; timer->expires += msecs_to_jiffies(lt->loadPeriod); add_timer(timer); if (lt->timeSlices < 1) lt->timeSlices = 1; if (lt->timeSlices > MAX_TIME_SLICES) lt->timeSlices = MAX_TIME_SLICES; if (lt->loadPercentage > MAX_LOAD_PERCENTAGE) lt->loadPercentage = MAX_LOAD_PERCENTAGE; if (lt->useThread) complete(&lt->threadLoadWaitLock); else ipc_load(cpu); } } /* If we are writing the enable field, we start/stop the kernel timer */ static int loadtest_intvec_enable(int cpu, ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int rc; ICP_OBJ *o = &obj[cpu]; ICP_LOAD_TEST *lt = &o->loadtest; init_timer(&lt->timer); lt->timer.function = ipc_load_timer_func; lt->timer.expires = 0; lt->timer.data = (ulong) cpu; if (!table || !table->data) return -EINVAL; if (write) { /* use generic int handler to get input value */ rc = proc_dointvec(table, write, buffer, lenp, ppos); if (rc < 0) return rc; if (!lt->timerStarted && lt->enable) { if (lt->useThread) { cpumask_t cpu_mask; init_completion(&lt->threadLoadWaitLock); init_completion(&lt->threadCompletion); cpumask_set_cpu(cpu, &cpu_mask); lt->threadPid = kernel_thread(ipc_load_test_thread, &o->cpu_enum, 0); sched_setaffinity(lt->threadPid, &cpu_mask); cpumask_clear_cpu(cpu, &cpu_mask); } if (lt->loadPeriod > MAX_LOAD_PERIOD) lt->loadPeriod = MAX_LOAD_PERIOD; lt->timer.expires = jiffies + msecs_to_jiffies(lt->loadPeriod); add_timer(&lt->timer); lt->timerStarted = 1; } else if (lt->timerStarted && !lt->enable) { lt->timerStarted = 0; /* Kill load testing thread */ if (lt->useThread) { if (lt->threadPid >= 0) { kill_proc_info(SIGTERM, SEND_SIG_PRIV, lt->threadPid); 
wait_for_completion (&lt->threadCompletion); } lt->threadPid = -1; } } } else { /* nothing special for read */ return proc_dointvec(table, write, buffer, lenp, ppos); } return rc; } /* If we are writing the enable field, we start/stop the kernel timer */ static int proc_do_hosticp_loadtest_intvec_enable0(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return loadtest_intvec_enable(0, table, write, buffer, lenp, ppos); } /* If we are writing the enable field, we start/stop the kernel timer */ static int proc_do_hosticp_loadtest_intvec_enable1(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return loadtest_intvec_enable(1, table, write, buffer, lenp, ppos); } /* * Initialize by setting up the sysctl and proc/knllog entries, allocating * default storage and resetting the variables. */ static int __init host_cpu_usage_init(void) { int i; gSysCtlHeader = register_sysctl_table(gSysCtl); icp_create_proc(); memset(&obj, 0, sizeof(obj)); /* zero all */ for (i = 0; i < num_online_cpus(); i++) { ICP_OBJ *o = &obj[i]; ICP_LOAD_TEST *lt = &o->loadtest; ICP_STATS *statsp = &o->stats; ICP_STATE *statep = &o->state; ICP_DATA *datap = &o->data; cpumask_t cpu_mask; spin_lock_init(&per_cpu(idle_lock, i)); o->cpu_enum = i; /* set affinity of each thread to force it to run on * separate CPU */ statsp->minCount = 1000; statep->enable = 1; statep->updateIntervalMsec = DEFAULT_UPDATE_INTERVAL_MSEC; statep->alphaFactor = (1 << 4); /* 1/16 in Q8 number */ /* Fixme move to loadtest thread startup? */ lt->enable = 0; lt->loadPeriod = 10; lt->loadPercentage = 0; lt->useThread = 1; lt->threadPriority = 99; lt->timeSlices = 1; lt->timerStarted = 0; lt->threadPid = -1; init_idle_profile(&datap->idle_handle); cpumask_set_cpu(i, &cpu_mask); /* Launch a kernel thread */ if ((o->task == NULL) || IS_ERR(o->task)) { o->task = kthread_run(hostIcpThread, &o->cpu_enum, "hosticp/%d", i); if (IS_ERR(o->task)) { printk(KERN_ERR "Init: failed to start host ICP thread: %ld\n", PTR_ERR(o->task)); return -1; } } sched_setaffinity(o->task->pid, &cpu_mask); cpumask_clear_cpu(i, &cpu_mask); printk(KERN_INFO "called host_cpu_usage_init for CPU%d\n", i); } return 0; } subsys_initcall(host_cpu_usage_init); /* Exit and cleanup (probably not done) */ int __exit host_cpu_usage_exit(void) { int i = 0; if (gSysCtlHeader != NULL) unregister_sysctl_table(gSysCtlHeader); icp_remove_proc(); for (i = 0; i < num_online_cpus(); i++) kthread_stop(obj[i].task); return 0; } /*subsys_exitcall(host_cpu_usage_exit); */ int hostIcpThread(void *data) { int cpu = *((int *)data); ICP_OBJ *o = &obj[cpu]; ICP_STATS *statsp = &o->stats; ICP_STATE *statep = &o->state; ICP_DATA *datap = &o->data; printk(KERN_INFO "*************Starting host ICP thread for CPU%d" "**************\n", cpu); while (1) { set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(((HZ * statep->updateIntervalMsec) / 1000) + 1); if (!(statep->enable)) continue; { unsigned int temp[HISTORY_SIZE - 1]; memcpy(&temp[0], &statsp->countHistory[0], HISTORY_SIZE * sizeof(unsigned int)); memcpy(&statsp->countHistory[1], &temp[0], (HISTORY_SIZE - 1) * sizeof(unsigned int)); statsp->countHistory[0] = statsp->currCount; } get_idle_profile(&datap->idle_handle, &datap->idle, &datap->total); if (datap->idle > datap->total) { printk(KERN_ERR "%s: idle=0x%x > total=0x%x\n", __func__, datap->idle, datap->total); datap->idle = datap->total; } if (datap->total < (0xFFFFFFFF / 1000)) { statsp->currCount = (1000 * datap->idle) / (datap->total); } else { 
statsp->currCount = (datap->idle) / (datap->total / 1000); } if (statsp->minCount > statsp->currCount) statsp->minCount = statsp->currCount; if (statsp->maxCount < statsp->currCount) statsp->maxCount = statsp->currCount; statsp->avgCount = ((statsp->currCount * statep->alphaFactor) + (statsp->avgCount * ((1 << 8) - statep->alphaFactor))) >> 8; statsp->currCount = 1000 - statsp->currCount; statsp->minCount = 1000 - statsp->maxCount; statsp->maxCount = 1000 - statsp->minCount; statsp->avgCount = 1000 - statsp->avgCount; if (statsp->minCount < 0) statsp->minCount = 0; if (statsp->maxCount > 1000) statsp->maxCount = 1000; statep->iterations++; statep->resetTimer += statep->updateIntervalMsec; if ((statep->resetTimer > statep->resetInterval) && (statep->resetInterval)) { memset(&statep->stats, 0, sizeof(ICP_STATS)); statsp->minCount = -1; statep->resetTimer = 0; } } return 0; } /* * init_idle_profile - initialize profiling handle * * parameters: * handle [in/out] - pointer to the profiling handle */ static void init_idle_profile(idle_handle_t *handle) { unsigned long flags; spinlock_t *lockp = &get_cpu_var(idle_lock); spin_lock_irqsave(lockp, flags); handle->last_smtclk = timer_get_tick_count(); handle->last_idle_count = idle_count; spin_unlock_irqrestore(lockp, flags); put_cpu_var(idle_lock); } /* * get_idle_profile - get the idle profiling results for the provided handle * * parameters: * handle [in] - pointer to the profiling handle, must be previously initialized * idle [out] - returns the number of idle cycles since last init or get call * total [out] - returns the number of total cycles since last init or get call * * return: * The number of cycles per second * * note: * To prevent overflowing the cycle counters, the get call must be made no * later than (2^32 / ticks_per_second) seconds from the last init or get call. * For 1024 ticks per second, the time is 4,194,304 seconds = 48.54 days; * For 812500 ticks per second, the time is 5286 seconds = 1 hour 28 minutes. */ static int get_idle_profile(idle_handle_t *handle, u32 *idle, u32 *total) { unsigned long flags; spinlock_t *lockp = &get_cpu_var(idle_lock); u32 now; u32 cur_idle_count; spin_lock_irqsave(lockp, flags); now = timer_get_tick_count(); cur_idle_count = get_cpu_var(idle_count); put_cpu_var(idle_count); *idle = cur_idle_count - handle->last_idle_count; *total = now - handle->last_smtclk; handle->last_idle_count = cur_idle_count; handle->last_smtclk = now; spin_unlock_irqrestore(lockp, flags); put_cpu_var(idle_lock); return timer_get_tick_rate(); }
gpl-2.0
alcobar/asuswrt-merlin
release/src-rt-7.14.114.x/src/linux/linux-2.6.36/drivers/pci/hotplug/rpaphp_slot.c
41
3979
/* * RPA Virtual I/O device functions * Copyright (C) 2004 Linda Xie <lxie@us.ibm.com> * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Send feedback to <lxie@us.ibm.com> * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/kobject.h> #include <linux/sysfs.h> #include <linux/pci.h> #include <linux/string.h> #include <linux/slab.h> #include <asm/rtas.h> #include "rpaphp.h" /* free up the memory used by a slot */ static void rpaphp_release_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = (struct slot *) hotplug_slot->private; dealloc_slot_struct(slot); } void dealloc_slot_struct(struct slot *slot) { kfree(slot->hotplug_slot->info); kfree(slot->name); kfree(slot->hotplug_slot); kfree(slot); } struct slot *alloc_slot_struct(struct device_node *dn, int drc_index, char *drc_name, int power_domain) { struct slot *slot; slot = kzalloc(sizeof(struct slot), GFP_KERNEL); if (!slot) goto error_nomem; slot->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL); if (!slot->hotplug_slot) goto error_slot; slot->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL); if (!slot->hotplug_slot->info) goto error_hpslot; slot->name = kstrdup(drc_name, GFP_KERNEL); if (!slot->name) goto error_info; slot->dn = dn; slot->index = drc_index; slot->power_domain = power_domain; slot->hotplug_slot->private = slot; slot->hotplug_slot->ops = &rpaphp_hotplug_slot_ops; slot->hotplug_slot->release = &rpaphp_release_slot; return (slot); error_info: kfree(slot->hotplug_slot->info); error_hpslot: kfree(slot->hotplug_slot); error_slot: kfree(slot); error_nomem: return NULL; } static int is_registered(struct slot *slot) { struct slot *tmp_slot; list_for_each_entry(tmp_slot, &rpaphp_slot_head, rpaphp_slot_list) { if (!strcmp(tmp_slot->name, slot->name)) return 1; } return 0; } int rpaphp_deregister_slot(struct slot *slot) { int retval = 0; struct hotplug_slot *php_slot = slot->hotplug_slot; dbg("%s - Entry: deregistering slot=%s\n", __func__, slot->name); list_del(&slot->rpaphp_slot_list); retval = pci_hp_deregister(php_slot); if (retval) err("Problem unregistering a slot %s\n", slot->name); dbg("%s - Exit: rc[%d]\n", __func__, retval); return retval; } EXPORT_SYMBOL_GPL(rpaphp_deregister_slot); int rpaphp_register_slot(struct slot *slot) { struct hotplug_slot *php_slot = slot->hotplug_slot; int retval; int slotno; dbg("%s registering slot:path[%s] index[%x], name[%s] pdomain[%x] type[%d]\n", __func__, slot->dn->full_name, slot->index, slot->name, slot->power_domain, slot->type); /* should not try to register the same slot twice */ if (is_registered(slot)) { err("rpaphp_register_slot: slot[%s] is already registered\n", slot->name); return -EAGAIN; } if (slot->dn->child) slotno = PCI_SLOT(PCI_DN(slot->dn->child)->devfn); else slotno = -1; retval = pci_hp_register(php_slot, slot->bus, 
slotno, slot->name); if (retval) { err("pci_hp_register failed with error %d\n", retval); return retval; } /* add slot to our internal list */ list_add(&slot->rpaphp_slot_list, &rpaphp_slot_head); info("Slot [%s] registered\n", slot->name); return 0; }
gpl-2.0
AndreyPopovNew/asuswrt-merlin-rt-n
release/src-rt-7.x.main/src/linux/linux-2.6.36/net/dccp/timer.c
41
7467
/* * net/dccp/timer.c * * An implementation of the DCCP protocol * Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/dccp.h> #include <linux/skbuff.h> #include "dccp.h" /* sysctl variables governing numbers of retransmission attempts */ int sysctl_dccp_request_retries __read_mostly = TCP_SYN_RETRIES; int sysctl_dccp_retries1 __read_mostly = TCP_RETR1; int sysctl_dccp_retries2 __read_mostly = TCP_RETR2; static void dccp_write_err(struct sock *sk) { sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT; sk->sk_error_report(sk); dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED); dccp_done(sk); DCCP_INC_STATS_BH(DCCP_MIB_ABORTONTIMEOUT); } /* A write timeout has occurred. Process the after effects. */ static int dccp_write_timeout(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); int retry_until; if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) { if (icsk->icsk_retransmits != 0) dst_negative_advice(sk); retry_until = icsk->icsk_syn_retries ? : sysctl_dccp_request_retries; } else { if (icsk->icsk_retransmits >= sysctl_dccp_retries1) { /* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu black hole detection. :-( It is place to make it. It is not made. I do not want to make it. It is disgusting. It does not work in any case. Let me to cite the same draft, which requires for us to implement this: "The one security concern raised by this memo is that ICMP black holes are often caused by over-zealous security administrators who block all ICMP messages. It is vitally important that those who design and deploy security systems understand the impact of strict filtering on upper-layer protocols. The safest web site in the world is worthless if most TCP implementations cannot transfer data from it. It would be far nicer to have all of the black holes fixed rather than fixing all of the TCP implementations." Golden words :-). */ dst_negative_advice(sk); } retry_until = sysctl_dccp_retries2; } if (icsk->icsk_retransmits >= retry_until) { /* Has it gone just too far? */ dccp_write_err(sk); return 1; } return 0; } /* * The DCCP retransmit timer. */ static void dccp_retransmit_timer(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); /* * More than 4MSL (8 minutes) has passed, a RESET(aborted) was * sent, no need to retransmit, this sock is dead. */ if (dccp_write_timeout(sk)) return; /* * We want to know the number of packets retransmitted, not the * total number of retransmissions of clones of original packets. */ if (icsk->icsk_retransmits == 0) DCCP_INC_STATS_BH(DCCP_MIB_TIMEOUTS); if (dccp_retransmit_skb(sk) != 0) { /* * Retransmission failed because of local congestion, * do not backoff. 
*/ if (--icsk->icsk_retransmits == 0) icsk->icsk_retransmits = 1; inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL), DCCP_RTO_MAX); return; } icsk->icsk_backoff++; icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX); inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, DCCP_RTO_MAX); if (icsk->icsk_retransmits > sysctl_dccp_retries1) __sk_dst_reset(sk); } static void dccp_write_timer(unsigned long data) { struct sock *sk = (struct sock *)data; struct inet_connection_sock *icsk = inet_csk(sk); int event = 0; bh_lock_sock(sk); if (sock_owned_by_user(sk)) { /* Try again later */ sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + (HZ / 20)); goto out; } if (sk->sk_state == DCCP_CLOSED || !icsk->icsk_pending) goto out; if (time_after(icsk->icsk_timeout, jiffies)) { sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); goto out; } event = icsk->icsk_pending; icsk->icsk_pending = 0; switch (event) { case ICSK_TIME_RETRANS: dccp_retransmit_timer(sk); break; } out: bh_unlock_sock(sk); sock_put(sk); } /* * Timer for listening sockets */ static void dccp_response_timer(struct sock *sk) { inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL, DCCP_TIMEOUT_INIT, DCCP_RTO_MAX); } static void dccp_keepalive_timer(unsigned long data) { struct sock *sk = (struct sock *)data; /* Only process if socket is not in use. */ bh_lock_sock(sk); if (sock_owned_by_user(sk)) { /* Try again later. */ inet_csk_reset_keepalive_timer(sk, HZ / 20); goto out; } if (sk->sk_state == DCCP_LISTEN) { dccp_response_timer(sk); goto out; } out: bh_unlock_sock(sk); sock_put(sk); } /* This is the same as tcp_delack_timer, sans prequeue & mem_reclaim stuff */ static void dccp_delack_timer(unsigned long data) { struct sock *sk = (struct sock *)data; struct inet_connection_sock *icsk = inet_csk(sk); bh_lock_sock(sk); if (sock_owned_by_user(sk)) { /* Try again later. */ icsk->icsk_ack.blocked = 1; NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED); sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN); goto out; } if (sk->sk_state == DCCP_CLOSED || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) goto out; if (time_after(icsk->icsk_ack.timeout, jiffies)) { sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout); goto out; } icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER; if (inet_csk_ack_scheduled(sk)) { if (!icsk->icsk_ack.pingpong) { /* Delayed ACK missed: inflate ATO. */ icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto); } else { /* Delayed ACK missed: leave pingpong mode and * deflate ATO. 
*/ icsk->icsk_ack.pingpong = 0; icsk->icsk_ack.ato = TCP_ATO_MIN; } dccp_send_ack(sk); NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS); } out: bh_unlock_sock(sk); sock_put(sk); } /* Transmit-delay timer: used by the CCIDs to delay actual send time */ static void dccp_write_xmit_timer(unsigned long data) { struct sock *sk = (struct sock *)data; struct dccp_sock *dp = dccp_sk(sk); bh_lock_sock(sk); if (sock_owned_by_user(sk)) sk_reset_timer(sk, &dp->dccps_xmit_timer, jiffies+1); else dccp_write_xmit(sk, 0); bh_unlock_sock(sk); sock_put(sk); } static void dccp_init_write_xmit_timer(struct sock *sk) { struct dccp_sock *dp = dccp_sk(sk); setup_timer(&dp->dccps_xmit_timer, dccp_write_xmit_timer, (unsigned long)sk); } void dccp_init_xmit_timers(struct sock *sk) { dccp_init_write_xmit_timer(sk); inet_csk_init_xmit_timers(sk, &dccp_write_timer, &dccp_delack_timer, &dccp_keepalive_timer); } static ktime_t dccp_timestamp_seed; /** * dccp_timestamp - 10s of microseconds time source * Returns the number of 10s of microseconds since loading DCCP. This is native * DCCP time difference format (RFC 4340, sec. 13). * Please note: This will wrap around about circa every 11.9 hours. */ u32 dccp_timestamp(void) { s64 delta = ktime_us_delta(ktime_get_real(), dccp_timestamp_seed); do_div(delta, 10); return delta; } EXPORT_SYMBOL_GPL(dccp_timestamp); void __init dccp_timestamping_init(void) { dccp_timestamp_seed = ktime_get_real(); }
gpl-2.0
wkritzinger/asuswrt-merlin
release/src-rt-7.x.main/src/linux/linux-2.6.36/fs/ncpfs/inode.c
41
26893
/* * inode.c * * Copyright (C) 1995, 1996 by Volker Lendecke * Modified for big endian by J.F. Chadima and David S. Miller * Modified 1997 Peter Waltenberg, Bill Hawes, David Woodhouse for 2.1 dcache * Modified 1998 Wolfram Pienkoss for NLS * Modified 2000 Ben Harris, University of Cambridge for NFS NS meta-info * */ #include <linux/module.h> #include <asm/system.h> #include <asm/uaccess.h> #include <asm/byteorder.h> #include <linux/time.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/stat.h> #include <linux/errno.h> #include <linux/file.h> #include <linux/fcntl.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/init.h> #include <linux/smp_lock.h> #include <linux/vfs.h> #include <linux/mount.h> #include <linux/seq_file.h> #include <linux/ncp_fs.h> #include <net/sock.h> #include "ncplib_kernel.h" #include "getopt.h" #define NCP_DEFAULT_FILE_MODE 0600 #define NCP_DEFAULT_DIR_MODE 0700 #define NCP_DEFAULT_TIME_OUT 10 #define NCP_DEFAULT_RETRY_COUNT 20 static void ncp_evict_inode(struct inode *); static void ncp_put_super(struct super_block *); static int ncp_statfs(struct dentry *, struct kstatfs *); static int ncp_show_options(struct seq_file *, struct vfsmount *); static struct kmem_cache * ncp_inode_cachep; static struct inode *ncp_alloc_inode(struct super_block *sb) { struct ncp_inode_info *ei; ei = (struct ncp_inode_info *)kmem_cache_alloc(ncp_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; } static void ncp_destroy_inode(struct inode *inode) { kmem_cache_free(ncp_inode_cachep, NCP_FINFO(inode)); } static void init_once(void *foo) { struct ncp_inode_info *ei = (struct ncp_inode_info *) foo; mutex_init(&ei->open_mutex); inode_init_once(&ei->vfs_inode); } static int init_inodecache(void) { ncp_inode_cachep = kmem_cache_create("ncp_inode_cache", sizeof(struct ncp_inode_info), 0, (SLAB_RECLAIM_ACCOUNT| SLAB_MEM_SPREAD), init_once); if (ncp_inode_cachep == NULL) return -ENOMEM; return 0; } static void destroy_inodecache(void) { kmem_cache_destroy(ncp_inode_cachep); } static int ncp_remount(struct super_block *sb, int *flags, char* data) { *flags |= MS_NODIRATIME; return 0; } static const struct super_operations ncp_sops = { .alloc_inode = ncp_alloc_inode, .destroy_inode = ncp_destroy_inode, .drop_inode = generic_delete_inode, .evict_inode = ncp_evict_inode, .put_super = ncp_put_super, .statfs = ncp_statfs, .remount_fs = ncp_remount, .show_options = ncp_show_options, }; /* * Fill in the ncpfs-specific information in the inode. */ static void ncp_update_dirent(struct inode *inode, struct ncp_entry_info *nwinfo) { NCP_FINFO(inode)->DosDirNum = nwinfo->i.DosDirNum; NCP_FINFO(inode)->dirEntNum = nwinfo->i.dirEntNum; NCP_FINFO(inode)->volNumber = nwinfo->volume; } void ncp_update_inode(struct inode *inode, struct ncp_entry_info *nwinfo) { ncp_update_dirent(inode, nwinfo); NCP_FINFO(inode)->nwattr = nwinfo->i.attributes; NCP_FINFO(inode)->access = nwinfo->access; memcpy(NCP_FINFO(inode)->file_handle, nwinfo->file_handle, sizeof(nwinfo->file_handle)); DPRINTK("ncp_update_inode: updated %s, volnum=%d, dirent=%u\n", nwinfo->i.entryName, NCP_FINFO(inode)->volNumber, NCP_FINFO(inode)->dirEntNum); } static void ncp_update_dates(struct inode *inode, struct nw_info_struct *nwi) { /* NFS namespace mode overrides others if it's set. 
*/ DPRINTK(KERN_DEBUG "ncp_update_dates_and_mode: (%s) nfs.mode=0%o\n", nwi->entryName, nwi->nfs.mode); if (nwi->nfs.mode) { inode->i_mode = nwi->nfs.mode; } inode->i_blocks = (inode->i_size + NCP_BLOCK_SIZE - 1) >> NCP_BLOCK_SHIFT; inode->i_mtime.tv_sec = ncp_date_dos2unix(nwi->modifyTime, nwi->modifyDate); inode->i_ctime.tv_sec = ncp_date_dos2unix(nwi->creationTime, nwi->creationDate); inode->i_atime.tv_sec = ncp_date_dos2unix(0, nwi->lastAccessDate); inode->i_atime.tv_nsec = 0; inode->i_mtime.tv_nsec = 0; inode->i_ctime.tv_nsec = 0; } static void ncp_update_attrs(struct inode *inode, struct ncp_entry_info *nwinfo) { struct nw_info_struct *nwi = &nwinfo->i; struct ncp_server *server = NCP_SERVER(inode); if (nwi->attributes & aDIR) { inode->i_mode = server->m.dir_mode; /* for directories dataStreamSize seems to be some Object ID ??? */ inode->i_size = NCP_BLOCK_SIZE; } else { inode->i_mode = server->m.file_mode; inode->i_size = le32_to_cpu(nwi->dataStreamSize); #ifdef CONFIG_NCPFS_EXTRAS if ((server->m.flags & (NCP_MOUNT_EXTRAS|NCP_MOUNT_SYMLINKS)) && (nwi->attributes & aSHARED)) { switch (nwi->attributes & (aHIDDEN|aSYSTEM)) { case aHIDDEN: if (server->m.flags & NCP_MOUNT_SYMLINKS) { if (/* (inode->i_size >= NCP_MIN_SYMLINK_SIZE) && */ (inode->i_size <= NCP_MAX_SYMLINK_SIZE)) { inode->i_mode = (inode->i_mode & ~S_IFMT) | S_IFLNK; NCP_FINFO(inode)->flags |= NCPI_KLUDGE_SYMLINK; break; } } /* FALLTHROUGH */ case 0: if (server->m.flags & NCP_MOUNT_EXTRAS) inode->i_mode |= S_IRUGO; break; case aSYSTEM: if (server->m.flags & NCP_MOUNT_EXTRAS) inode->i_mode |= (inode->i_mode >> 2) & S_IXUGO; break; /* case aSYSTEM|aHIDDEN: */ default: /* reserved combination */ break; } } #endif } if (nwi->attributes & aRONLY) inode->i_mode &= ~S_IWUGO; } void ncp_update_inode2(struct inode* inode, struct ncp_entry_info *nwinfo) { NCP_FINFO(inode)->flags = 0; if (!atomic_read(&NCP_FINFO(inode)->opened)) { NCP_FINFO(inode)->nwattr = nwinfo->i.attributes; ncp_update_attrs(inode, nwinfo); } ncp_update_dates(inode, &nwinfo->i); ncp_update_dirent(inode, nwinfo); } /* * Fill in the inode based on the ncp_entry_info structure. */ static void ncp_set_attr(struct inode *inode, struct ncp_entry_info *nwinfo) { struct ncp_server *server = NCP_SERVER(inode); NCP_FINFO(inode)->flags = 0; ncp_update_attrs(inode, nwinfo); DDPRINTK("ncp_read_inode: inode->i_mode = %u\n", inode->i_mode); inode->i_nlink = 1; inode->i_uid = server->m.uid; inode->i_gid = server->m.gid; ncp_update_dates(inode, &nwinfo->i); ncp_update_inode(inode, nwinfo); } #if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS) static const struct inode_operations ncp_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = page_follow_link_light, .put_link = page_put_link, .setattr = ncp_notify_change, }; #endif /* * Get a new inode. 
*/ struct inode * ncp_iget(struct super_block *sb, struct ncp_entry_info *info) { struct inode *inode; if (info == NULL) { printk(KERN_ERR "ncp_iget: info is NULL\n"); return NULL; } inode = new_inode(sb); if (inode) { atomic_set(&NCP_FINFO(inode)->opened, info->opened); inode->i_ino = info->ino; ncp_set_attr(inode, info); if (S_ISREG(inode->i_mode)) { inode->i_op = &ncp_file_inode_operations; inode->i_fop = &ncp_file_operations; } else if (S_ISDIR(inode->i_mode)) { inode->i_op = &ncp_dir_inode_operations; inode->i_fop = &ncp_dir_operations; #ifdef CONFIG_NCPFS_NFS_NS } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) || S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { init_special_inode(inode, inode->i_mode, new_decode_dev(info->i.nfs.rdev)); #endif #if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS) } else if (S_ISLNK(inode->i_mode)) { inode->i_op = &ncp_symlink_inode_operations; inode->i_data.a_ops = &ncp_symlink_aops; #endif } else { make_bad_inode(inode); } insert_inode_hash(inode); } else printk(KERN_ERR "ncp_iget: iget failed!\n"); return inode; } static void ncp_evict_inode(struct inode *inode) { truncate_inode_pages(&inode->i_data, 0); end_writeback(inode); if (S_ISDIR(inode->i_mode)) { DDPRINTK("ncp_evict_inode: put directory %ld\n", inode->i_ino); } if (ncp_make_closed(inode) != 0) { /* We can't do anything but complain. */ printk(KERN_ERR "ncp_evict_inode: could not close\n"); } } static void ncp_stop_tasks(struct ncp_server *server) { struct sock* sk = server->ncp_sock->sk; sk->sk_error_report = server->error_report; sk->sk_data_ready = server->data_ready; sk->sk_write_space = server->write_space; del_timer_sync(&server->timeout_tm); flush_scheduled_work(); } static int ncp_show_options(struct seq_file *seq, struct vfsmount *mnt) { struct ncp_server *server = NCP_SBP(mnt->mnt_sb); unsigned int tmp; if (server->m.uid != 0) seq_printf(seq, ",uid=%u", server->m.uid); if (server->m.gid != 0) seq_printf(seq, ",gid=%u", server->m.gid); if (server->m.mounted_uid != 0) seq_printf(seq, ",owner=%u", server->m.mounted_uid); tmp = server->m.file_mode & S_IALLUGO; if (tmp != NCP_DEFAULT_FILE_MODE) seq_printf(seq, ",mode=0%o", tmp); tmp = server->m.dir_mode & S_IALLUGO; if (tmp != NCP_DEFAULT_DIR_MODE) seq_printf(seq, ",dirmode=0%o", tmp); if (server->m.time_out != NCP_DEFAULT_TIME_OUT * HZ / 100) { tmp = server->m.time_out * 100 / HZ; seq_printf(seq, ",timeout=%u", tmp); } if (server->m.retry_count != NCP_DEFAULT_RETRY_COUNT) seq_printf(seq, ",retry=%u", server->m.retry_count); if (server->m.flags != 0) seq_printf(seq, ",flags=%lu", server->m.flags); if (server->m.wdog_pid != NULL) seq_printf(seq, ",wdogpid=%u", pid_vnr(server->m.wdog_pid)); return 0; } static const struct ncp_option ncp_opts[] = { { "uid", OPT_INT, 'u' }, { "gid", OPT_INT, 'g' }, { "owner", OPT_INT, 'o' }, { "mode", OPT_INT, 'm' }, { "dirmode", OPT_INT, 'd' }, { "timeout", OPT_INT, 't' }, { "retry", OPT_INT, 'r' }, { "flags", OPT_INT, 'f' }, { "wdogpid", OPT_INT, 'w' }, { "ncpfd", OPT_INT, 'n' }, { "infofd", OPT_INT, 'i' }, /* v5 */ { "version", OPT_INT, 'v' }, { NULL, 0, 0 } }; static int ncp_parse_options(struct ncp_mount_data_kernel *data, char *options) { int optval; char *optarg; unsigned long optint; int version = 0; int ret; data->flags = 0; data->int_flags = 0; data->mounted_uid = 0; data->wdog_pid = NULL; data->ncp_fd = ~0; data->time_out = NCP_DEFAULT_TIME_OUT; data->retry_count = NCP_DEFAULT_RETRY_COUNT; data->uid = 0; data->gid = 0; data->file_mode = NCP_DEFAULT_FILE_MODE; 
data->dir_mode = NCP_DEFAULT_DIR_MODE; data->info_fd = -1; data->mounted_vol[0] = 0; while ((optval = ncp_getopt("ncpfs", &options, ncp_opts, NULL, &optarg, &optint)) != 0) { ret = optval; if (ret < 0) goto err; switch (optval) { case 'u': data->uid = optint; break; case 'g': data->gid = optint; break; case 'o': data->mounted_uid = optint; break; case 'm': data->file_mode = optint; break; case 'd': data->dir_mode = optint; break; case 't': data->time_out = optint; break; case 'r': data->retry_count = optint; break; case 'f': data->flags = optint; break; case 'w': data->wdog_pid = find_get_pid(optint); break; case 'n': data->ncp_fd = optint; break; case 'i': data->info_fd = optint; break; case 'v': ret = -ECHRNG; if (optint < NCP_MOUNT_VERSION_V4) goto err; if (optint > NCP_MOUNT_VERSION_V5) goto err; version = optint; break; } } return 0; err: put_pid(data->wdog_pid); data->wdog_pid = NULL; return ret; } static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent) { struct ncp_mount_data_kernel data; struct ncp_server *server; struct file *ncp_filp; struct inode *root_inode; struct inode *sock_inode; struct socket *sock; int error; int default_bufsize; #ifdef CONFIG_NCPFS_PACKET_SIGNING int options; #endif struct ncp_entry_info finfo; data.wdog_pid = NULL; server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL); if (!server) return -ENOMEM; sb->s_fs_info = server; error = -EFAULT; if (raw_data == NULL) goto out; switch (*(int*)raw_data) { case NCP_MOUNT_VERSION: { struct ncp_mount_data* md = (struct ncp_mount_data*)raw_data; data.flags = md->flags; data.int_flags = NCP_IMOUNT_LOGGEDIN_POSSIBLE; data.mounted_uid = md->mounted_uid; data.wdog_pid = find_get_pid(md->wdog_pid); data.ncp_fd = md->ncp_fd; data.time_out = md->time_out; data.retry_count = md->retry_count; data.uid = md->uid; data.gid = md->gid; data.file_mode = md->file_mode; data.dir_mode = md->dir_mode; data.info_fd = -1; memcpy(data.mounted_vol, md->mounted_vol, NCP_VOLNAME_LEN+1); } break; case NCP_MOUNT_VERSION_V4: { struct ncp_mount_data_v4* md = (struct ncp_mount_data_v4*)raw_data; data.flags = md->flags; data.int_flags = 0; data.mounted_uid = md->mounted_uid; data.wdog_pid = find_get_pid(md->wdog_pid); data.ncp_fd = md->ncp_fd; data.time_out = md->time_out; data.retry_count = md->retry_count; data.uid = md->uid; data.gid = md->gid; data.file_mode = md->file_mode; data.dir_mode = md->dir_mode; data.info_fd = -1; data.mounted_vol[0] = 0; } break; default: error = -ECHRNG; if (memcmp(raw_data, "vers", 4) == 0) { error = ncp_parse_options(&data, raw_data); } if (error) goto out; break; } error = -EBADF; ncp_filp = fget(data.ncp_fd); if (!ncp_filp) goto out; error = -ENOTSOCK; sock_inode = ncp_filp->f_path.dentry->d_inode; if (!S_ISSOCK(sock_inode->i_mode)) goto out_fput; sock = SOCKET_I(sock_inode); if (!sock) goto out_fput; if (sock->type == SOCK_STREAM) default_bufsize = 0xF000; else default_bufsize = 1024; sb->s_flags |= MS_NODIRATIME; /* probably even noatime */ sb->s_maxbytes = 0xFFFFFFFFU; sb->s_blocksize = 1024; /* Eh... Is this correct? 
*/ sb->s_blocksize_bits = 10; sb->s_magic = NCP_SUPER_MAGIC; sb->s_op = &ncp_sops; sb->s_bdi = &server->bdi; server = NCP_SBP(sb); memset(server, 0, sizeof(*server)); error = bdi_setup_and_register(&server->bdi, "ncpfs", BDI_CAP_MAP_COPY); if (error) goto out_bdi; server->ncp_filp = ncp_filp; server->ncp_sock = sock; if (data.info_fd != -1) { struct socket *info_sock; error = -EBADF; server->info_filp = fget(data.info_fd); if (!server->info_filp) goto out_fput; error = -ENOTSOCK; sock_inode = server->info_filp->f_path.dentry->d_inode; if (!S_ISSOCK(sock_inode->i_mode)) goto out_fput2; info_sock = SOCKET_I(sock_inode); if (!info_sock) goto out_fput2; error = -EBADFD; if (info_sock->type != SOCK_STREAM) goto out_fput2; server->info_sock = info_sock; } /* server->lock = 0; */ mutex_init(&server->mutex); server->packet = NULL; /* server->buffer_size = 0; */ /* server->conn_status = 0; */ /* server->root_dentry = NULL; */ /* server->root_setuped = 0; */ #ifdef CONFIG_NCPFS_PACKET_SIGNING /* server->sign_wanted = 0; */ /* server->sign_active = 0; */ #endif server->auth.auth_type = NCP_AUTH_NONE; /* server->auth.object_name_len = 0; */ /* server->auth.object_name = NULL; */ /* server->auth.object_type = 0; */ /* server->priv.len = 0; */ /* server->priv.data = NULL; */ server->m = data; /* Althought anything producing this is buggy, it happens now because of PATH_MAX changes.. */ if (server->m.time_out < 1) { server->m.time_out = 10; printk(KERN_INFO "You need to recompile your ncpfs utils..\n"); } server->m.time_out = server->m.time_out * HZ / 100; server->m.file_mode = (server->m.file_mode & S_IRWXUGO) | S_IFREG; server->m.dir_mode = (server->m.dir_mode & S_IRWXUGO) | S_IFDIR; #ifdef CONFIG_NCPFS_NLS /* load the default NLS charsets */ server->nls_vol = load_nls_default(); server->nls_io = load_nls_default(); #endif /* CONFIG_NCPFS_NLS */ server->dentry_ttl = 0; /* no caching */ INIT_LIST_HEAD(&server->tx.requests); mutex_init(&server->rcv.creq_mutex); server->tx.creq = NULL; server->rcv.creq = NULL; server->data_ready = sock->sk->sk_data_ready; server->write_space = sock->sk->sk_write_space; server->error_report = sock->sk->sk_error_report; sock->sk->sk_user_data = server; init_timer(&server->timeout_tm); #undef NCP_PACKET_SIZE #define NCP_PACKET_SIZE 131072 error = -ENOMEM; server->packet_size = NCP_PACKET_SIZE; server->packet = vmalloc(NCP_PACKET_SIZE); if (server->packet == NULL) goto out_nls; server->txbuf = vmalloc(NCP_PACKET_SIZE); if (server->txbuf == NULL) goto out_packet; server->rxbuf = vmalloc(NCP_PACKET_SIZE); if (server->rxbuf == NULL) goto out_txbuf; sock->sk->sk_data_ready = ncp_tcp_data_ready; sock->sk->sk_error_report = ncp_tcp_error_report; if (sock->type == SOCK_STREAM) { server->rcv.ptr = (unsigned char*)&server->rcv.buf; server->rcv.len = 10; server->rcv.state = 0; INIT_WORK(&server->rcv.tq, ncp_tcp_rcv_proc); INIT_WORK(&server->tx.tq, ncp_tcp_tx_proc); sock->sk->sk_write_space = ncp_tcp_write_space; } else { INIT_WORK(&server->rcv.tq, ncpdgram_rcv_proc); INIT_WORK(&server->timeout_tq, ncpdgram_timeout_proc); server->timeout_tm.data = (unsigned long)server; server->timeout_tm.function = ncpdgram_timeout_call; } ncp_lock_server(server); error = ncp_connect(server); ncp_unlock_server(server); if (error < 0) goto out_rxbuf; DPRINTK("ncp_fill_super: NCP_SBP(sb) = %x\n", (int) NCP_SBP(sb)); error = -EMSGSIZE; /* -EREMOTESIDEINCOMPATIBLE */ #ifdef CONFIG_NCPFS_PACKET_SIGNING if (ncp_negotiate_size_and_options(server, default_bufsize, NCP_DEFAULT_OPTIONS, &(server->buffer_size), 
&options) == 0) { if (options != NCP_DEFAULT_OPTIONS) { if (ncp_negotiate_size_and_options(server, default_bufsize, options & 2, &(server->buffer_size), &options) != 0) { goto out_disconnect; } } if (options & 2) server->sign_wanted = 1; } else #endif /* CONFIG_NCPFS_PACKET_SIGNING */ if (ncp_negotiate_buffersize(server, default_bufsize, &(server->buffer_size)) != 0) goto out_disconnect; DPRINTK("ncpfs: bufsize = %d\n", server->buffer_size); memset(&finfo, 0, sizeof(finfo)); finfo.i.attributes = aDIR; finfo.i.dataStreamSize = 0; /* ignored */ finfo.i.dirEntNum = 0; finfo.i.DosDirNum = 0; #ifdef CONFIG_NCPFS_SMALLDOS finfo.i.NSCreator = NW_NS_DOS; #endif finfo.volume = NCP_NUMBER_OF_VOLUMES; /* set dates of mountpoint to Jan 1, 1986; 00:00 */ finfo.i.creationTime = finfo.i.modifyTime = cpu_to_le16(0x0000); finfo.i.creationDate = finfo.i.modifyDate = finfo.i.lastAccessDate = cpu_to_le16(0x0C21); finfo.i.nameLen = 0; finfo.i.entryName[0] = '\0'; finfo.opened = 0; finfo.ino = 2; /* tradition */ server->name_space[finfo.volume] = NW_NS_DOS; error = -ENOMEM; root_inode = ncp_iget(sb, &finfo); if (!root_inode) goto out_disconnect; DPRINTK("ncp_fill_super: root vol=%d\n", NCP_FINFO(root_inode)->volNumber); sb->s_root = d_alloc_root(root_inode); if (!sb->s_root) goto out_no_root; sb->s_root->d_op = &ncp_root_dentry_operations; return 0; out_no_root: iput(root_inode); out_disconnect: ncp_lock_server(server); ncp_disconnect(server); ncp_unlock_server(server); out_rxbuf: ncp_stop_tasks(server); vfree(server->rxbuf); out_txbuf: vfree(server->txbuf); out_packet: vfree(server->packet); out_nls: #ifdef CONFIG_NCPFS_NLS unload_nls(server->nls_io); unload_nls(server->nls_vol); #endif out_fput2: if (server->info_filp) fput(server->info_filp); out_fput: bdi_destroy(&server->bdi); out_bdi: /* 23/12/1998 Marcin Dalecki <dalecki@cs.net.pl>: * * The previously used put_filp(ncp_filp); was bogus, since * it doesn't perform proper unlocking. 
*/ fput(ncp_filp); out: put_pid(data.wdog_pid); sb->s_fs_info = NULL; kfree(server); return error; } static void ncp_put_super(struct super_block *sb) { struct ncp_server *server = NCP_SBP(sb); lock_kernel(); ncp_lock_server(server); ncp_disconnect(server); ncp_unlock_server(server); ncp_stop_tasks(server); #ifdef CONFIG_NCPFS_NLS /* unload the NLS charsets */ unload_nls(server->nls_vol); unload_nls(server->nls_io); #endif /* CONFIG_NCPFS_NLS */ if (server->info_filp) fput(server->info_filp); fput(server->ncp_filp); kill_pid(server->m.wdog_pid, SIGTERM, 1); put_pid(server->m.wdog_pid); bdi_destroy(&server->bdi); kfree(server->priv.data); kfree(server->auth.object_name); vfree(server->rxbuf); vfree(server->txbuf); vfree(server->packet); sb->s_fs_info = NULL; kfree(server); unlock_kernel(); } static int ncp_statfs(struct dentry *dentry, struct kstatfs *buf) { struct dentry* d; struct inode* i; struct ncp_inode_info* ni; struct ncp_server* s; struct ncp_volume_info vi; struct super_block *sb = dentry->d_sb; int err; __u8 dh; d = sb->s_root; if (!d) { goto dflt; } i = d->d_inode; if (!i) { goto dflt; } ni = NCP_FINFO(i); if (!ni) { goto dflt; } s = NCP_SBP(sb); if (!s) { goto dflt; } if (!s->m.mounted_vol[0]) { goto dflt; } err = ncp_dirhandle_alloc(s, ni->volNumber, ni->DosDirNum, &dh); if (err) { goto dflt; } err = ncp_get_directory_info(s, dh, &vi); ncp_dirhandle_free(s, dh); if (err) { goto dflt; } buf->f_type = NCP_SUPER_MAGIC; buf->f_bsize = vi.sectors_per_block * 512; buf->f_blocks = vi.total_blocks; buf->f_bfree = vi.free_blocks; buf->f_bavail = vi.free_blocks; buf->f_files = vi.total_dir_entries; buf->f_ffree = vi.available_dir_entries; buf->f_namelen = 12; return 0; /* We cannot say how much disk space is left on a mounted NetWare Server, because free space is distributed over volumes, and the current user might have disk quotas. So free space is not that simple to determine. Our decision here is to err conservatively. 
*/ dflt:; buf->f_type = NCP_SUPER_MAGIC; buf->f_bsize = NCP_BLOCK_SIZE; buf->f_blocks = 0; buf->f_bfree = 0; buf->f_bavail = 0; buf->f_namelen = 12; return 0; } int ncp_notify_change(struct dentry *dentry, struct iattr *attr) { struct inode *inode = dentry->d_inode; int result = 0; __le32 info_mask; struct nw_modify_dos_info info; struct ncp_server *server; result = -EIO; lock_kernel(); server = NCP_SERVER(inode); if ((!server) || !ncp_conn_valid(server)) goto out; /* ageing the dentry to force validation */ ncp_age_dentry(server, dentry); result = inode_change_ok(inode, attr); if (result < 0) goto out; result = -EPERM; if (((attr->ia_valid & ATTR_UID) && (attr->ia_uid != server->m.uid))) goto out; if (((attr->ia_valid & ATTR_GID) && (attr->ia_gid != server->m.gid))) goto out; if (((attr->ia_valid & ATTR_MODE) && (attr->ia_mode & ~(S_IFREG | S_IFDIR | S_IRWXUGO)))) goto out; info_mask = 0; memset(&info, 0, sizeof(info)); if ((attr->ia_valid & ATTR_MODE) != 0) { umode_t newmode = attr->ia_mode; info_mask |= DM_ATTRIBUTES; if (S_ISDIR(inode->i_mode)) { newmode &= server->m.dir_mode; } else { #ifdef CONFIG_NCPFS_EXTRAS if (server->m.flags & NCP_MOUNT_EXTRAS) { /* any non-default execute bit set */ if (newmode & ~server->m.file_mode & S_IXUGO) info.attributes |= aSHARED | aSYSTEM; /* read for group/world and not in default file_mode */ else if (newmode & ~server->m.file_mode & S_IRUGO) info.attributes |= aSHARED; } else #endif newmode &= server->m.file_mode; } if (newmode & S_IWUGO) info.attributes &= ~(aRONLY|aRENAMEINHIBIT|aDELETEINHIBIT); else info.attributes |= (aRONLY|aRENAMEINHIBIT|aDELETEINHIBIT); #ifdef CONFIG_NCPFS_NFS_NS if (ncp_is_nfs_extras(server, NCP_FINFO(inode)->volNumber)) { result = ncp_modify_nfs_info(server, NCP_FINFO(inode)->volNumber, NCP_FINFO(inode)->dirEntNum, attr->ia_mode, 0); if (result != 0) goto out; info.attributes &= ~(aSHARED | aSYSTEM); { /* mark partial success */ struct iattr tmpattr; tmpattr.ia_valid = ATTR_MODE; tmpattr.ia_mode = attr->ia_mode; setattr_copy(inode, &tmpattr); mark_inode_dirty(inode); } } #endif } /* Do SIZE before attributes, otherwise mtime together with size does not work... */ if ((attr->ia_valid & ATTR_SIZE) != 0) { int written; DPRINTK("ncpfs: trying to change size to %ld\n", attr->ia_size); if ((result = ncp_make_open(inode, O_WRONLY)) < 0) { result = -EACCES; goto out; } ncp_write_kernel(NCP_SERVER(inode), NCP_FINFO(inode)->file_handle, attr->ia_size, 0, "", &written); /* According to ndir, the changes only take effect after closing the file */ ncp_inode_close(inode); result = ncp_make_closed(inode); if (result) goto out; if (attr->ia_size != i_size_read(inode)) { result = vmtruncate(inode, attr->ia_size); if (result) goto out; mark_inode_dirty(inode); } } if ((attr->ia_valid & ATTR_CTIME) != 0) { info_mask |= (DM_CREATE_TIME | DM_CREATE_DATE); ncp_date_unix2dos(attr->ia_ctime.tv_sec, &info.creationTime, &info.creationDate); } if ((attr->ia_valid & ATTR_MTIME) != 0) { info_mask |= (DM_MODIFY_TIME | DM_MODIFY_DATE); ncp_date_unix2dos(attr->ia_mtime.tv_sec, &info.modifyTime, &info.modifyDate); } if ((attr->ia_valid & ATTR_ATIME) != 0) { __le16 dummy; info_mask |= (DM_LAST_ACCESS_DATE); ncp_date_unix2dos(attr->ia_atime.tv_sec, &dummy, &info.lastAccessDate); } if (info_mask != 0) { result = ncp_modify_file_or_subdir_dos_info(NCP_SERVER(inode), inode, info_mask, &info); if (result != 0) { result = -EACCES; if (info_mask == (DM_CREATE_TIME | DM_CREATE_DATE)) { /* NetWare seems not to allow this. I do not know why. 
So, just tell the user everything went fine. This is a terrible hack, but I do not know how to do this correctly. */ result = 0; } else goto out; } #ifdef CONFIG_NCPFS_STRONG if ((!result) && (info_mask & DM_ATTRIBUTES)) NCP_FINFO(inode)->nwattr = info.attributes; #endif } if (result) goto out; setattr_copy(inode, attr); mark_inode_dirty(inode); out: unlock_kernel(); return result; } static int ncp_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, struct vfsmount *mnt) { return get_sb_nodev(fs_type, flags, data, ncp_fill_super, mnt); } static struct file_system_type ncp_fs_type = { .owner = THIS_MODULE, .name = "ncpfs", .get_sb = ncp_get_sb, .kill_sb = kill_anon_super, .fs_flags = FS_BINARY_MOUNTDATA, }; static int __init init_ncp_fs(void) { int err; DPRINTK("ncpfs: init_ncp_fs called\n"); err = init_inodecache(); if (err) goto out1; err = register_filesystem(&ncp_fs_type); if (err) goto out; return 0; out: destroy_inodecache(); out1: return err; } static void __exit exit_ncp_fs(void) { DPRINTK("ncpfs: exit_ncp_fs called\n"); unregister_filesystem(&ncp_fs_type); destroy_inodecache(); } module_init(init_ncp_fs) module_exit(exit_ncp_fs) MODULE_LICENSE("GPL");
gpl-2.0
CrawX/linux-fslc
sound/soc/jz4740/jz4740-i2s.c
297
11579
/* * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/initval.h> #include <sound/dmaengine_pcm.h> #include <asm/mach-jz4740/dma.h> #include "jz4740-i2s.h" #define JZ_REG_AIC_CONF 0x00 #define JZ_REG_AIC_CTRL 0x04 #define JZ_REG_AIC_I2S_FMT 0x10 #define JZ_REG_AIC_FIFO_STATUS 0x14 #define JZ_REG_AIC_I2S_STATUS 0x1c #define JZ_REG_AIC_CLK_DIV 0x30 #define JZ_REG_AIC_FIFO 0x34 #define JZ_AIC_CONF_FIFO_RX_THRESHOLD_MASK (0xf << 12) #define JZ_AIC_CONF_FIFO_TX_THRESHOLD_MASK (0xf << 8) #define JZ_AIC_CONF_OVERFLOW_PLAY_LAST BIT(6) #define JZ_AIC_CONF_INTERNAL_CODEC BIT(5) #define JZ_AIC_CONF_I2S BIT(4) #define JZ_AIC_CONF_RESET BIT(3) #define JZ_AIC_CONF_BIT_CLK_MASTER BIT(2) #define JZ_AIC_CONF_SYNC_CLK_MASTER BIT(1) #define JZ_AIC_CONF_ENABLE BIT(0) #define JZ_AIC_CONF_FIFO_RX_THRESHOLD_OFFSET 12 #define JZ_AIC_CONF_FIFO_TX_THRESHOLD_OFFSET 8 #define JZ_AIC_CTRL_OUTPUT_SAMPLE_SIZE_MASK (0x7 << 19) #define JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_MASK (0x7 << 16) #define JZ_AIC_CTRL_ENABLE_RX_DMA BIT(15) #define JZ_AIC_CTRL_ENABLE_TX_DMA BIT(14) #define JZ_AIC_CTRL_MONO_TO_STEREO BIT(11) #define JZ_AIC_CTRL_SWITCH_ENDIANNESS BIT(10) #define JZ_AIC_CTRL_SIGNED_TO_UNSIGNED BIT(9) #define JZ_AIC_CTRL_FLUSH BIT(8) #define JZ_AIC_CTRL_ENABLE_ROR_INT BIT(6) #define JZ_AIC_CTRL_ENABLE_TUR_INT BIT(5) #define JZ_AIC_CTRL_ENABLE_RFS_INT BIT(4) #define JZ_AIC_CTRL_ENABLE_TFS_INT BIT(3) #define JZ_AIC_CTRL_ENABLE_LOOPBACK BIT(2) #define JZ_AIC_CTRL_ENABLE_PLAYBACK BIT(1) #define JZ_AIC_CTRL_ENABLE_CAPTURE BIT(0) #define JZ_AIC_CTRL_OUTPUT_SAMPLE_SIZE_OFFSET 19 #define JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_OFFSET 16 #define JZ_AIC_I2S_FMT_DISABLE_BIT_CLK BIT(12) #define JZ_AIC_I2S_FMT_ENABLE_SYS_CLK BIT(4) #define JZ_AIC_I2S_FMT_MSB BIT(0) #define JZ_AIC_I2S_STATUS_BUSY BIT(2) #define JZ_AIC_CLK_DIV_MASK 0xf struct jz4740_i2s { struct resource *mem; void __iomem *base; dma_addr_t phys_base; struct clk *clk_aic; struct clk *clk_i2s; struct snd_dmaengine_dai_dma_data playback_dma_data; struct snd_dmaengine_dai_dma_data capture_dma_data; }; static inline uint32_t jz4740_i2s_read(const struct jz4740_i2s *i2s, unsigned int reg) { return readl(i2s->base + reg); } static inline void jz4740_i2s_write(const struct jz4740_i2s *i2s, unsigned int reg, uint32_t value) { writel(value, i2s->base + reg); } static int jz4740_i2s_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); uint32_t conf, ctrl; if (dai->active) return 0; ctrl = jz4740_i2s_read(i2s, JZ_REG_AIC_CTRL); ctrl |= JZ_AIC_CTRL_FLUSH; jz4740_i2s_write(i2s, JZ_REG_AIC_CTRL, ctrl); clk_prepare_enable(i2s->clk_i2s); conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF); conf |= JZ_AIC_CONF_ENABLE; jz4740_i2s_write(i2s, 
JZ_REG_AIC_CONF, conf); return 0; } static void jz4740_i2s_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); uint32_t conf; if (dai->active) return; conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF); conf &= ~JZ_AIC_CONF_ENABLE; jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf); clk_disable_unprepare(i2s->clk_i2s); } static int jz4740_i2s_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); uint32_t ctrl; uint32_t mask; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) mask = JZ_AIC_CTRL_ENABLE_PLAYBACK | JZ_AIC_CTRL_ENABLE_TX_DMA; else mask = JZ_AIC_CTRL_ENABLE_CAPTURE | JZ_AIC_CTRL_ENABLE_RX_DMA; ctrl = jz4740_i2s_read(i2s, JZ_REG_AIC_CTRL); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: ctrl |= mask; break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: ctrl &= ~mask; break; default: return -EINVAL; } jz4740_i2s_write(i2s, JZ_REG_AIC_CTRL, ctrl); return 0; } static int jz4740_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); uint32_t format = 0; uint32_t conf; conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF); conf &= ~(JZ_AIC_CONF_BIT_CLK_MASTER | JZ_AIC_CONF_SYNC_CLK_MASTER); switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBS_CFS: conf |= JZ_AIC_CONF_BIT_CLK_MASTER | JZ_AIC_CONF_SYNC_CLK_MASTER; format |= JZ_AIC_I2S_FMT_ENABLE_SYS_CLK; break; case SND_SOC_DAIFMT_CBM_CFS: conf |= JZ_AIC_CONF_SYNC_CLK_MASTER; break; case SND_SOC_DAIFMT_CBS_CFM: conf |= JZ_AIC_CONF_BIT_CLK_MASTER; break; case SND_SOC_DAIFMT_CBM_CFM: break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_MSB: format |= JZ_AIC_I2S_FMT_MSB; break; case SND_SOC_DAIFMT_I2S: break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; default: return -EINVAL; } jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf); jz4740_i2s_write(i2s, JZ_REG_AIC_I2S_FMT, format); return 0; } static int jz4740_i2s_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); unsigned int sample_size; uint32_t ctrl; ctrl = jz4740_i2s_read(i2s, JZ_REG_AIC_CTRL); switch (params_format(params)) { case SNDRV_PCM_FORMAT_S8: sample_size = 0; break; case SNDRV_PCM_FORMAT_S16: sample_size = 1; break; default: return -EINVAL; } if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { ctrl &= ~JZ_AIC_CTRL_OUTPUT_SAMPLE_SIZE_MASK; ctrl |= sample_size << JZ_AIC_CTRL_OUTPUT_SAMPLE_SIZE_OFFSET; if (params_channels(params) == 1) ctrl |= JZ_AIC_CTRL_MONO_TO_STEREO; else ctrl &= ~JZ_AIC_CTRL_MONO_TO_STEREO; } else { ctrl &= ~JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_MASK; ctrl |= sample_size << JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_OFFSET; } jz4740_i2s_write(i2s, JZ_REG_AIC_CTRL, ctrl); return 0; } static int jz4740_i2s_set_sysclk(struct snd_soc_dai *dai, int clk_id, unsigned int freq, int dir) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); struct clk *parent; int ret = 0; switch (clk_id) { case JZ4740_I2S_CLKSRC_EXT: parent = clk_get(NULL, "ext"); clk_set_parent(i2s->clk_i2s, parent); break; case JZ4740_I2S_CLKSRC_PLL: parent = clk_get(NULL, "pll half"); clk_set_parent(i2s->clk_i2s, parent); ret = clk_set_rate(i2s->clk_i2s, freq); break; default: return 
-EINVAL; } clk_put(parent); return ret; } static int jz4740_i2s_suspend(struct snd_soc_dai *dai) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); uint32_t conf; if (dai->active) { conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF); conf &= ~JZ_AIC_CONF_ENABLE; jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf); clk_disable_unprepare(i2s->clk_i2s); } clk_disable_unprepare(i2s->clk_aic); return 0; } static int jz4740_i2s_resume(struct snd_soc_dai *dai) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); uint32_t conf; clk_prepare_enable(i2s->clk_aic); if (dai->active) { clk_prepare_enable(i2s->clk_i2s); conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF); conf |= JZ_AIC_CONF_ENABLE; jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf); } return 0; } static void jz4740_i2c_init_pcm_config(struct jz4740_i2s *i2s) { struct snd_dmaengine_dai_dma_data *dma_data; /* Playback */ dma_data = &i2s->playback_dma_data; dma_data->maxburst = 16; dma_data->slave_id = JZ4740_DMA_TYPE_AIC_TRANSMIT; dma_data->addr = i2s->phys_base + JZ_REG_AIC_FIFO; /* Capture */ dma_data = &i2s->capture_dma_data; dma_data->maxburst = 16; dma_data->slave_id = JZ4740_DMA_TYPE_AIC_RECEIVE; dma_data->addr = i2s->phys_base + JZ_REG_AIC_FIFO; } static int jz4740_i2s_dai_probe(struct snd_soc_dai *dai) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); uint32_t conf; clk_prepare_enable(i2s->clk_aic); jz4740_i2c_init_pcm_config(i2s); snd_soc_dai_init_dma_data(dai, &i2s->playback_dma_data, &i2s->capture_dma_data); conf = (7 << JZ_AIC_CONF_FIFO_RX_THRESHOLD_OFFSET) | (8 << JZ_AIC_CONF_FIFO_TX_THRESHOLD_OFFSET) | JZ_AIC_CONF_OVERFLOW_PLAY_LAST | JZ_AIC_CONF_I2S | JZ_AIC_CONF_INTERNAL_CODEC; jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, JZ_AIC_CONF_RESET); jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf); return 0; } static int jz4740_i2s_dai_remove(struct snd_soc_dai *dai) { struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai); clk_disable_unprepare(i2s->clk_aic); return 0; } static const struct snd_soc_dai_ops jz4740_i2s_dai_ops = { .startup = jz4740_i2s_startup, .shutdown = jz4740_i2s_shutdown, .trigger = jz4740_i2s_trigger, .hw_params = jz4740_i2s_hw_params, .set_fmt = jz4740_i2s_set_fmt, .set_sysclk = jz4740_i2s_set_sysclk, }; #define JZ4740_I2S_FMTS (SNDRV_PCM_FMTBIT_S8 | \ SNDRV_PCM_FMTBIT_S16_LE) static struct snd_soc_dai_driver jz4740_i2s_dai = { .probe = jz4740_i2s_dai_probe, .remove = jz4740_i2s_dai_remove, .playback = { .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = JZ4740_I2S_FMTS, }, .capture = { .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_48000, .formats = JZ4740_I2S_FMTS, }, .symmetric_rates = 1, .ops = &jz4740_i2s_dai_ops, .suspend = jz4740_i2s_suspend, .resume = jz4740_i2s_resume, }; static const struct snd_soc_component_driver jz4740_i2s_component = { .name = "jz4740-i2s", }; static int jz4740_i2s_dev_probe(struct platform_device *pdev) { struct jz4740_i2s *i2s; struct resource *mem; int ret; i2s = devm_kzalloc(&pdev->dev, sizeof(*i2s), GFP_KERNEL); if (!i2s) return -ENOMEM; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); i2s->base = devm_ioremap_resource(&pdev->dev, mem); if (IS_ERR(i2s->base)) return PTR_ERR(i2s->base); i2s->phys_base = mem->start; i2s->clk_aic = devm_clk_get(&pdev->dev, "aic"); if (IS_ERR(i2s->clk_aic)) return PTR_ERR(i2s->clk_aic); i2s->clk_i2s = devm_clk_get(&pdev->dev, "i2s"); if (IS_ERR(i2s->clk_i2s)) return PTR_ERR(i2s->clk_i2s); platform_set_drvdata(pdev, i2s); ret = devm_snd_soc_register_component(&pdev->dev, &jz4740_i2s_component, 
&jz4740_i2s_dai, 1); if (ret) return ret; return devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, SND_DMAENGINE_PCM_FLAG_COMPAT); } static struct platform_driver jz4740_i2s_driver = { .probe = jz4740_i2s_dev_probe, .driver = { .name = "jz4740-i2s", .owner = THIS_MODULE, }, }; module_platform_driver(jz4740_i2s_driver); MODULE_AUTHOR("Lars-Peter Clausen, <lars@metafoo.de>"); MODULE_DESCRIPTION("Ingenic JZ4740 SoC I2S driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:jz4740-i2s");
gpl-2.0
gauravdatir/linux
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gf110.c
553
1587
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"
#include "fuc/gf110.fuc4.h"

struct nvkm_oclass *
gf110_pmu_oclass = &(struct nvkm_pmu_impl) {
	.base.handle = NV_SUBDEV(PMU, 0xd0),
	.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = _nvkm_pmu_ctor,
		.dtor = _nvkm_pmu_dtor,
		.init = _nvkm_pmu_init,
		.fini = _nvkm_pmu_fini,
	},
	.code.data = gf110_pmu_code,
	.code.size = sizeof(gf110_pmu_code),
	.data.data = gf110_pmu_data,
	.data.size = sizeof(gf110_pmu_data),
}.base;
gpl-2.0
cwyy/kernel
arch/mips/bcm63xx/gpio.c
553
2825
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 * Copyright (C) 2008 Florian Fainelli <florian@openwrt.org>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>

#include <bcm63xx_cpu.h>
#include <bcm63xx_gpio.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>

static DEFINE_SPINLOCK(bcm63xx_gpio_lock);
static u32 gpio_out_low, gpio_out_high;

static void bcm63xx_gpio_set(struct gpio_chip *chip, unsigned gpio, int val)
{
	u32 reg;
	u32 mask;
	u32 *v;
	unsigned long flags;

	if (gpio >= chip->ngpio)
		BUG();

	if (gpio < 32) {
		reg = GPIO_DATA_LO_REG;
		mask = 1 << gpio;
		v = &gpio_out_low;
	} else {
		reg = GPIO_DATA_HI_REG;
		mask = 1 << (gpio - 32);
		v = &gpio_out_high;
	}

	spin_lock_irqsave(&bcm63xx_gpio_lock, flags);
	if (val)
		*v |= mask;
	else
		*v &= ~mask;
	bcm_gpio_writel(*v, reg);
	spin_unlock_irqrestore(&bcm63xx_gpio_lock, flags);
}

static int bcm63xx_gpio_get(struct gpio_chip *chip, unsigned gpio)
{
	u32 reg;
	u32 mask;

	if (gpio >= chip->ngpio)
		BUG();

	if (gpio < 32) {
		reg = GPIO_DATA_LO_REG;
		mask = 1 << gpio;
	} else {
		reg = GPIO_DATA_HI_REG;
		mask = 1 << (gpio - 32);
	}

	return !!(bcm_gpio_readl(reg) & mask);
}

static int bcm63xx_gpio_set_direction(struct gpio_chip *chip,
				      unsigned gpio, int dir)
{
	u32 reg;
	u32 mask;
	u32 tmp;
	unsigned long flags;

	if (gpio >= chip->ngpio)
		BUG();

	if (gpio < 32) {
		reg = GPIO_CTL_LO_REG;
		mask = 1 << gpio;
	} else {
		reg = GPIO_CTL_HI_REG;
		mask = 1 << (gpio - 32);
	}

	spin_lock_irqsave(&bcm63xx_gpio_lock, flags);
	tmp = bcm_gpio_readl(reg);
	if (dir == GPIO_DIR_IN)
		tmp &= ~mask;
	else
		tmp |= mask;
	bcm_gpio_writel(tmp, reg);
	spin_unlock_irqrestore(&bcm63xx_gpio_lock, flags);

	return 0;
}

static int bcm63xx_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
{
	return bcm63xx_gpio_set_direction(chip, gpio, GPIO_DIR_IN);
}

static int bcm63xx_gpio_direction_output(struct gpio_chip *chip,
					 unsigned gpio, int value)
{
	bcm63xx_gpio_set(chip, gpio, value);
	return bcm63xx_gpio_set_direction(chip, gpio, GPIO_DIR_OUT);
}

static struct gpio_chip bcm63xx_gpio_chip = {
	.label			= "bcm63xx-gpio",
	.direction_input	= bcm63xx_gpio_direction_input,
	.direction_output	= bcm63xx_gpio_direction_output,
	.get			= bcm63xx_gpio_get,
	.set			= bcm63xx_gpio_set,
	.base			= 0,
};

int __init bcm63xx_gpio_init(void)
{
	bcm63xx_gpio_chip.ngpio = bcm63xx_gpio_count();
	pr_info("registering %d GPIOs\n", bcm63xx_gpio_chip.ngpio);

	return gpiochip_add(&bcm63xx_gpio_chip);
}

arch_initcall(bcm63xx_gpio_init);
gpl-2.0
jakieu/linux-2.6-imx
drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv4c.c
553
1438
/*
 * Copyright 2014 Ilia Mirkin
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ilia Mirkin
 */
#include "nv04.h"

struct nvkm_oclass *
nv4c_mc_oclass = &(struct nvkm_mc_oclass) {
	.base.handle = NV_SUBDEV(MC, 0x4c),
	.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_mc_ctor,
		.dtor = _nvkm_mc_dtor,
		.init = nv44_mc_init,
		.fini = _nvkm_mc_fini,
	},
	.intr = nv04_mc_intr,
}.base;
gpl-2.0
ISTweak/android_kernel_sharp_msm7x30
drivers/video/pxa168fb.c
809
20154
/* * linux/drivers/video/pxa168fb.c -- Marvell PXA168 LCD Controller * * Copyright (C) 2008 Marvell International Ltd. * All rights reserved. * * 2009-02-16 adapted from original version for PXA168/910 * Jun Nie <njun@marvell.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/fb.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/uaccess.h> #include <video/pxa168fb.h> #include "pxa168fb.h" #define DEFAULT_REFRESH 60 /* Hz */ static int determine_best_pix_fmt(struct fb_var_screeninfo *var) { /* * Pseudocolor mode? */ if (var->bits_per_pixel == 8) return PIX_FMT_PSEUDOCOLOR; /* * Check for 565/1555. */ if (var->bits_per_pixel == 16 && var->red.length <= 5 && var->green.length <= 6 && var->blue.length <= 5) { if (var->transp.length == 0) { if (var->red.offset >= var->blue.offset) return PIX_FMT_RGB565; else return PIX_FMT_BGR565; } if (var->transp.length == 1 && var->green.length <= 5) { if (var->red.offset >= var->blue.offset) return PIX_FMT_RGB1555; else return PIX_FMT_BGR1555; } /* fall through */ } /* * Check for 888/A888. */ if (var->bits_per_pixel <= 32 && var->red.length <= 8 && var->green.length <= 8 && var->blue.length <= 8) { if (var->bits_per_pixel == 24 && var->transp.length == 0) { if (var->red.offset >= var->blue.offset) return PIX_FMT_RGB888PACK; else return PIX_FMT_BGR888PACK; } if (var->bits_per_pixel == 32 && var->transp.length == 8) { if (var->red.offset >= var->blue.offset) return PIX_FMT_RGBA888; else return PIX_FMT_BGRA888; } else { if (var->red.offset >= var->blue.offset) return PIX_FMT_RGB888UNPACK; else return PIX_FMT_BGR888UNPACK; } /* fall through */ } return -EINVAL; } static void set_pix_fmt(struct fb_var_screeninfo *var, int pix_fmt) { switch (pix_fmt) { case PIX_FMT_RGB565: var->bits_per_pixel = 16; var->red.offset = 11; var->red.length = 5; var->green.offset = 5; var->green.length = 6; var->blue.offset = 0; var->blue.length = 5; var->transp.offset = 0; var->transp.length = 0; break; case PIX_FMT_BGR565: var->bits_per_pixel = 16; var->red.offset = 0; var->red.length = 5; var->green.offset = 5; var->green.length = 6; var->blue.offset = 11; var->blue.length = 5; var->transp.offset = 0; var->transp.length = 0; break; case PIX_FMT_RGB1555: var->bits_per_pixel = 16; var->red.offset = 10; var->red.length = 5; var->green.offset = 5; var->green.length = 5; var->blue.offset = 0; var->blue.length = 5; var->transp.offset = 15; var->transp.length = 1; break; case PIX_FMT_BGR1555: var->bits_per_pixel = 16; var->red.offset = 0; var->red.length = 5; var->green.offset = 5; var->green.length = 5; var->blue.offset = 10; var->blue.length = 5; var->transp.offset = 15; var->transp.length = 1; break; case PIX_FMT_RGB888PACK: var->bits_per_pixel = 24; var->red.offset = 16; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; var->transp.offset = 0; var->transp.length = 0; break; case PIX_FMT_BGR888PACK: var->bits_per_pixel = 24; var->red.offset = 0; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 16; var->blue.length = 8; var->transp.offset = 0; 
var->transp.length = 0; break; case PIX_FMT_RGBA888: var->bits_per_pixel = 32; var->red.offset = 16; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; var->transp.offset = 24; var->transp.length = 8; break; case PIX_FMT_BGRA888: var->bits_per_pixel = 32; var->red.offset = 0; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 16; var->blue.length = 8; var->transp.offset = 24; var->transp.length = 8; break; case PIX_FMT_PSEUDOCOLOR: var->bits_per_pixel = 8; var->red.offset = 0; var->red.length = 8; var->green.offset = 0; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; var->transp.offset = 0; var->transp.length = 0; break; } } static void set_mode(struct pxa168fb_info *fbi, struct fb_var_screeninfo *var, struct fb_videomode *mode, int pix_fmt, int ystretch) { struct fb_info *info = fbi->info; set_pix_fmt(var, pix_fmt); var->xres = mode->xres; var->yres = mode->yres; var->xres_virtual = max(var->xres, var->xres_virtual); if (ystretch) var->yres_virtual = info->fix.smem_len / (var->xres_virtual * (var->bits_per_pixel >> 3)); else var->yres_virtual = max(var->yres, var->yres_virtual); var->grayscale = 0; var->accel_flags = FB_ACCEL_NONE; var->pixclock = mode->pixclock; var->left_margin = mode->left_margin; var->right_margin = mode->right_margin; var->upper_margin = mode->upper_margin; var->lower_margin = mode->lower_margin; var->hsync_len = mode->hsync_len; var->vsync_len = mode->vsync_len; var->sync = mode->sync; var->vmode = FB_VMODE_NONINTERLACED; var->rotate = FB_ROTATE_UR; } static int pxa168fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct pxa168fb_info *fbi = info->par; int pix_fmt; /* * Determine which pixel format we're going to use. */ pix_fmt = determine_best_pix_fmt(var); if (pix_fmt < 0) return pix_fmt; set_pix_fmt(var, pix_fmt); fbi->pix_fmt = pix_fmt; /* * Basic geometry sanity checks. */ if (var->xoffset + var->xres > var->xres_virtual) return -EINVAL; if (var->yoffset + var->yres > var->yres_virtual) return -EINVAL; if (var->xres + var->right_margin + var->hsync_len + var->left_margin > 2048) return -EINVAL; if (var->yres + var->lower_margin + var->vsync_len + var->upper_margin > 2048) return -EINVAL; /* * Check size of framebuffer. */ if (var->xres_virtual * var->yres_virtual * (var->bits_per_pixel >> 3) > info->fix.smem_len) return -EINVAL; return 0; } /* * The hardware clock divider has an integer and a fractional * stage: * * clk2 = clk_in / integer_divider * clk_out = clk2 * (1 - (fractional_divider >> 12)) * * Calculate integer and fractional divider for given clk_in * and clk_out. */ static void set_clock_divider(struct pxa168fb_info *fbi, const struct fb_videomode *m) { int divider_int; int needed_pixclk; u64 div_result; u32 x = 0; /* * Notice: The field pixclock is used by linux fb * is in pixel second. E.g. struct fb_videomode & * struct fb_var_screeninfo */ /* * Check input values. */ if (!m || !m->pixclock || !m->refresh) { dev_err(fbi->dev, "Input refresh or pixclock is wrong.\n"); return; } /* * Using PLL/AXI clock. */ x = 0x80000000; /* * Calc divider according to refresh rate. */ div_result = 1000000000000ll; do_div(div_result, m->pixclock); needed_pixclk = (u32)div_result; divider_int = clk_get_rate(fbi->clk) / needed_pixclk; /* check whether divisor is too small. */ if (divider_int < 2) { dev_warn(fbi->dev, "Warning: clock source is too slow." "Try smaller resolution\n"); divider_int = 2; } /* * Set setting to reg. 
*/ x |= divider_int; writel(x, fbi->reg_base + LCD_CFG_SCLK_DIV); } static void set_dma_control0(struct pxa168fb_info *fbi) { u32 x; /* * Set bit to enable graphics DMA. */ x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0); x |= fbi->active ? 0x00000100 : 0; fbi->active = 0; /* * If we are in a pseudo-color mode, we need to enable * palette lookup. */ if (fbi->pix_fmt == PIX_FMT_PSEUDOCOLOR) x |= 0x10000000; /* * Configure hardware pixel format. */ x &= ~(0xF << 16); x |= (fbi->pix_fmt >> 1) << 16; /* * Check red and blue pixel swap. * 1. source data swap * 2. panel output data swap */ x &= ~(1 << 12); x |= ((fbi->pix_fmt & 1) ^ (fbi->panel_rbswap)) << 12; writel(x, fbi->reg_base + LCD_SPU_DMA_CTRL0); } static void set_dma_control1(struct pxa168fb_info *fbi, int sync) { u32 x; /* * Configure default bits: vsync triggers DMA, gated clock * enable, power save enable, configure alpha registers to * display 100% graphics, and set pixel command. */ x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL1); x |= 0x2032ff81; /* * We trigger DMA on the falling edge of vsync if vsync is * active low, or on the rising edge if vsync is active high. */ if (!(sync & FB_SYNC_VERT_HIGH_ACT)) x |= 0x08000000; writel(x, fbi->reg_base + LCD_SPU_DMA_CTRL1); } static void set_graphics_start(struct fb_info *info, int xoffset, int yoffset) { struct pxa168fb_info *fbi = info->par; struct fb_var_screeninfo *var = &info->var; int pixel_offset; unsigned long addr; pixel_offset = (yoffset * var->xres_virtual) + xoffset; addr = fbi->fb_start_dma + (pixel_offset * (var->bits_per_pixel >> 3)); writel(addr, fbi->reg_base + LCD_CFG_GRA_START_ADDR0); } static void set_dumb_panel_control(struct fb_info *info) { struct pxa168fb_info *fbi = info->par; struct pxa168fb_mach_info *mi = fbi->dev->platform_data; u32 x; /* * Preserve enable flag. */ x = readl(fbi->reg_base + LCD_SPU_DUMB_CTRL) & 0x00000001; x |= (fbi->is_blanked ? 0x7 : mi->dumb_mode) << 28; x |= mi->gpio_output_data << 20; x |= mi->gpio_output_mask << 12; x |= mi->panel_rgb_reverse_lanes ? 0x00000080 : 0; x |= mi->invert_composite_blank ? 0x00000040 : 0; x |= (info->var.sync & FB_SYNC_COMP_HIGH_ACT) ? 0x00000020 : 0; x |= mi->invert_pix_val_ena ? 0x00000010 : 0; x |= (info->var.sync & FB_SYNC_VERT_HIGH_ACT) ? 0 : 0x00000008; x |= (info->var.sync & FB_SYNC_HOR_HIGH_ACT) ? 0 : 0x00000004; x |= mi->invert_pixclock ? 0x00000002 : 0; writel(x, fbi->reg_base + LCD_SPU_DUMB_CTRL); } static void set_dumb_screen_dimensions(struct fb_info *info) { struct pxa168fb_info *fbi = info->par; struct fb_var_screeninfo *v = &info->var; int x; int y; x = v->xres + v->right_margin + v->hsync_len + v->left_margin; y = v->yres + v->lower_margin + v->vsync_len + v->upper_margin; writel((y << 16) | x, fbi->reg_base + LCD_SPUT_V_H_TOTAL); } static int pxa168fb_set_par(struct fb_info *info) { struct pxa168fb_info *fbi = info->par; struct fb_var_screeninfo *var = &info->var; struct fb_videomode mode; u32 x; struct pxa168fb_mach_info *mi; mi = fbi->dev->platform_data; /* * Set additional mode info. */ if (fbi->pix_fmt == PIX_FMT_PSEUDOCOLOR) info->fix.visual = FB_VISUAL_PSEUDOCOLOR; else info->fix.visual = FB_VISUAL_TRUECOLOR; info->fix.line_length = var->xres_virtual * var->bits_per_pixel / 8; info->fix.ypanstep = var->yres; /* * Disable panel output while we setup the display. */ x = readl(fbi->reg_base + LCD_SPU_DUMB_CTRL); writel(x & ~1, fbi->reg_base + LCD_SPU_DUMB_CTRL); /* * Configure global panel parameters. 
*/ writel((var->yres << 16) | var->xres, fbi->reg_base + LCD_SPU_V_H_ACTIVE); /* * convet var to video mode */ fb_var_to_videomode(&mode, &info->var); /* Calculate clock divisor. */ set_clock_divider(fbi, &mode); /* Configure dma ctrl regs. */ set_dma_control0(fbi); set_dma_control1(fbi, info->var.sync); /* * Configure graphics DMA parameters. */ x = readl(fbi->reg_base + LCD_CFG_GRA_PITCH); x = (x & ~0xFFFF) | ((var->xres_virtual * var->bits_per_pixel) >> 3); writel(x, fbi->reg_base + LCD_CFG_GRA_PITCH); writel((var->yres << 16) | var->xres, fbi->reg_base + LCD_SPU_GRA_HPXL_VLN); writel((var->yres << 16) | var->xres, fbi->reg_base + LCD_SPU_GZM_HPXL_VLN); /* * Configure dumb panel ctrl regs & timings. */ set_dumb_panel_control(info); set_dumb_screen_dimensions(info); writel((var->left_margin << 16) | var->right_margin, fbi->reg_base + LCD_SPU_H_PORCH); writel((var->upper_margin << 16) | var->lower_margin, fbi->reg_base + LCD_SPU_V_PORCH); /* * Re-enable panel output. */ x = readl(fbi->reg_base + LCD_SPU_DUMB_CTRL); writel(x | 1, fbi->reg_base + LCD_SPU_DUMB_CTRL); return 0; } static unsigned int chan_to_field(unsigned int chan, struct fb_bitfield *bf) { return ((chan & 0xffff) >> (16 - bf->length)) << bf->offset; } static u32 to_rgb(u16 red, u16 green, u16 blue) { red >>= 8; green >>= 8; blue >>= 8; return (red << 16) | (green << 8) | blue; } static int pxa168fb_setcolreg(unsigned int regno, unsigned int red, unsigned int green, unsigned int blue, unsigned int trans, struct fb_info *info) { struct pxa168fb_info *fbi = info->par; u32 val; if (info->var.grayscale) red = green = blue = (19595 * red + 38470 * green + 7471 * blue) >> 16; if (info->fix.visual == FB_VISUAL_TRUECOLOR && regno < 16) { val = chan_to_field(red, &info->var.red); val |= chan_to_field(green, &info->var.green); val |= chan_to_field(blue , &info->var.blue); fbi->pseudo_palette[regno] = val; } if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR && regno < 256) { val = to_rgb(red, green, blue); writel(val, fbi->reg_base + LCD_SPU_SRAM_WRDAT); writel(0x8300 | regno, fbi->reg_base + LCD_SPU_SRAM_CTRL); } return 0; } static int pxa168fb_blank(int blank, struct fb_info *info) { struct pxa168fb_info *fbi = info->par; fbi->is_blanked = (blank == FB_BLANK_UNBLANK) ? 0 : 1; set_dumb_panel_control(info); return 0; } static int pxa168fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { set_graphics_start(info, var->xoffset, var->yoffset); return 0; } static irqreturn_t pxa168fb_handle_irq(int irq, void *dev_id) { struct pxa168fb_info *fbi = dev_id; u32 isr = readl(fbi->reg_base + SPU_IRQ_ISR); if ((isr & GRA_FRAME_IRQ0_ENA_MASK)) { writel(isr & (~GRA_FRAME_IRQ0_ENA_MASK), fbi->reg_base + SPU_IRQ_ISR); return IRQ_HANDLED; } return IRQ_NONE; } static struct fb_ops pxa168fb_ops = { .owner = THIS_MODULE, .fb_check_var = pxa168fb_check_var, .fb_set_par = pxa168fb_set_par, .fb_setcolreg = pxa168fb_setcolreg, .fb_blank = pxa168fb_blank, .fb_pan_display = pxa168fb_pan_display, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, }; static int __init pxa168fb_init_mode(struct fb_info *info, struct pxa168fb_mach_info *mi) { struct pxa168fb_info *fbi = info->par; struct fb_var_screeninfo *var = &info->var; int ret = 0; u32 total_w, total_h, refresh; u64 div_result; const struct fb_videomode *m; /* * Set default value */ refresh = DEFAULT_REFRESH; /* try to find best video mode. */ m = fb_find_best_mode(&info->var, &info->modelist); if (m) fb_videomode_to_var(&info->var, m); /* Init settings. 
*/ var->xres_virtual = var->xres; var->yres_virtual = info->fix.smem_len / (var->xres_virtual * (var->bits_per_pixel >> 3)); dev_dbg(fbi->dev, "pxa168fb: find best mode: res = %dx%d\n", var->xres, var->yres); /* correct pixclock. */ total_w = var->xres + var->left_margin + var->right_margin + var->hsync_len; total_h = var->yres + var->upper_margin + var->lower_margin + var->vsync_len; div_result = 1000000000000ll; do_div(div_result, total_w * total_h * refresh); var->pixclock = (u32)div_result; return ret; } static int __init pxa168fb_probe(struct platform_device *pdev) { struct pxa168fb_mach_info *mi; struct fb_info *info = 0; struct pxa168fb_info *fbi = 0; struct resource *res; struct clk *clk; int irq, ret; mi = pdev->dev.platform_data; if (mi == NULL) { dev_err(&pdev->dev, "no platform data defined\n"); return -EINVAL; } clk = clk_get(&pdev->dev, "LCDCLK"); if (IS_ERR(clk)) { dev_err(&pdev->dev, "unable to get LCDCLK"); return PTR_ERR(clk); } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "no IO memory defined\n"); return -ENOENT; } irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "no IRQ defined\n"); return -ENOENT; } info = framebuffer_alloc(sizeof(struct pxa168fb_info), &pdev->dev); if (info == NULL) { clk_put(clk); return -ENOMEM; } /* Initialize private data */ fbi = info->par; fbi->info = info; fbi->clk = clk; fbi->dev = info->dev = &pdev->dev; fbi->panel_rbswap = mi->panel_rbswap; fbi->is_blanked = 0; fbi->active = mi->active; /* * Initialise static fb parameters. */ info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK | FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN; info->node = -1; strlcpy(info->fix.id, mi->id, 16); info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.type_aux = 0; info->fix.xpanstep = 0; info->fix.ypanstep = 0; info->fix.ywrapstep = 0; info->fix.mmio_start = res->start; info->fix.mmio_len = res->end - res->start + 1; info->fix.accel = FB_ACCEL_NONE; info->fbops = &pxa168fb_ops; info->pseudo_palette = fbi->pseudo_palette; /* * Map LCD controller registers. */ fbi->reg_base = ioremap_nocache(res->start, resource_size(res)); if (fbi->reg_base == NULL) { ret = -ENOMEM; goto failed; } /* * Allocate framebuffer memory. */ info->fix.smem_len = PAGE_ALIGN(DEFAULT_FB_SIZE); info->screen_base = dma_alloc_writecombine(fbi->dev, info->fix.smem_len, &fbi->fb_start_dma, GFP_KERNEL); if (info->screen_base == NULL) { ret = -ENOMEM; goto failed; } info->fix.smem_start = (unsigned long)fbi->fb_start_dma; set_graphics_start(info, 0, 0); /* * Set video mode according to platform data. */ set_mode(fbi, &info->var, mi->modes, mi->pix_fmt, 1); fb_videomode_to_modelist(mi->modes, mi->num_modes, &info->modelist); /* * init video mode data. */ pxa168fb_init_mode(info, mi); ret = pxa168fb_check_var(&info->var, info); if (ret) goto failed_free_fbmem; /* * Fill in sane defaults. */ ret = pxa168fb_check_var(&info->var, info); if (ret) goto failed; /* * enable controller clock */ clk_enable(fbi->clk); pxa168fb_set_par(info); /* * Configure default register values. */ writel(0, fbi->reg_base + LCD_SPU_BLANKCOLOR); writel(mi->io_pin_allocation_mode, fbi->reg_base + SPU_IOPAD_CONTROL); writel(0, fbi->reg_base + LCD_CFG_GRA_START_ADDR1); writel(0, fbi->reg_base + LCD_SPU_GRA_OVSA_HPXL_VLN); writel(0, fbi->reg_base + LCD_SPU_SRAM_PARA0); writel(CFG_CSB_256x32(0x1)|CFG_CSB_256x24(0x1)|CFG_CSB_256x8(0x1), fbi->reg_base + LCD_SPU_SRAM_PARA1); /* * Allocate color map. 
*/ if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) { ret = -ENOMEM; goto failed_free_clk; } /* * Register irq handler. */ ret = request_irq(irq, pxa168fb_handle_irq, IRQF_SHARED, info->fix.id, fbi); if (ret < 0) { dev_err(&pdev->dev, "unable to request IRQ\n"); ret = -ENXIO; goto failed_free_cmap; } /* * Enable GFX interrupt */ writel(GRA_FRAME_IRQ0_ENA(0x1), fbi->reg_base + SPU_IRQ_ENA); /* * Register framebuffer. */ ret = register_framebuffer(info); if (ret < 0) { dev_err(&pdev->dev, "Failed to register pxa168-fb: %d\n", ret); ret = -ENXIO; goto failed_free_irq; } platform_set_drvdata(pdev, fbi); return 0; failed_free_irq: free_irq(irq, fbi); failed_free_cmap: fb_dealloc_cmap(&info->cmap); failed_free_clk: clk_disable(fbi->clk); failed_free_fbmem: dma_free_coherent(fbi->dev, info->fix.smem_len, info->screen_base, fbi->fb_start_dma); failed: kfree(info); clk_put(clk); dev_err(&pdev->dev, "frame buffer device init failed with %d\n", ret); return ret; } static struct platform_driver pxa168fb_driver = { .driver = { .name = "pxa168-fb", .owner = THIS_MODULE, }, .probe = pxa168fb_probe, }; static int __devinit pxa168fb_init(void) { return platform_driver_register(&pxa168fb_driver); } module_init(pxa168fb_init); MODULE_AUTHOR("Lennert Buytenhek <buytenh@marvell.com> " "Green Wan <gwan@marvell.com>"); MODULE_DESCRIPTION("Framebuffer driver for PXA168/910"); MODULE_LICENSE("GPL");
gpl-2.0
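The grayscale branch of pxa168fb_setcolreg above folds a colour into a grey level with the fixed-point weights 19595, 38470 and 7471, which are the ITU-R BT.601 luma coefficients 0.299, 0.587 and 0.114 scaled by 65536. Below is a small standalone sketch of the same conversion; the function name rgb16_to_grey and the sample colour are illustrative only, not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* Same fixed-point grey conversion as the grayscale branch above:
 * 19595/65536 ~= 0.299, 38470/65536 ~= 0.587, 7471/65536 ~= 0.114,
 * i.e. the ITU-R BT.601 luma weights applied to 16-bit channels. */
static uint16_t rgb16_to_grey(uint16_t red, uint16_t green, uint16_t blue)
{
	return (uint16_t)((19595u * red + 38470u * green + 7471u * blue) >> 16);
}

int main(void)
{
	/* Full-scale green (0xffff) maps to roughly 58.7% grey. */
	printf("0x%04x\n", (unsigned)rgb16_to_grey(0, 0xffff, 0));	/* prints 0x9645 */
	return 0;
}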
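The pixclock fix-up in pxa168fb_init_mode above derives the pixel period in picoseconds from the total frame geometry: pixclock = 10^12 / (htotal * vtotal * refresh). Here is a standalone sketch of the same arithmetic; the helper name pixclock_ps and the 800x480 timing values are hypothetical, chosen only to give a worked number.

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the pixclock correction above: the period of one
 * pixel in picoseconds, given total horizontal/vertical timing and the
 * target refresh rate. */
static uint32_t pixclock_ps(uint32_t total_w, uint32_t total_h, uint32_t refresh)
{
	uint64_t div_result = 1000000000000ULL;	/* 10^12 ps per second */

	div_result /= (uint64_t)total_w * total_h * refresh;
	return (uint32_t)div_result;
}

int main(void)
{
	/* Hypothetical 800x480 panel with 1056x525 total timing at 60 Hz:
	 * pixel clock ~33.26 MHz, i.e. a period of 30062 ps. */
	printf("pixclock = %u ps\n", pixclock_ps(1056, 525, 60));
	return 0;
}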
FlorinPetriuc/linux-1-moved-to-OpenChannelSSD-linux-nba_target
drivers/hwmon/vexpress.c
1321
7283
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Copyright (C) 2012 ARM Limited */ #define DRVNAME "vexpress-hwmon" #define pr_fmt(fmt) DRVNAME ": " fmt #include <linux/device.h> #include <linux/err.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/vexpress.h> struct vexpress_hwmon_data { struct device *hwmon_dev; struct regmap *reg; }; static ssize_t vexpress_hwmon_label_show(struct device *dev, struct device_attribute *dev_attr, char *buffer) { const char *label = of_get_property(dev->of_node, "label", NULL); return snprintf(buffer, PAGE_SIZE, "%s\n", label); } static ssize_t vexpress_hwmon_u32_show(struct device *dev, struct device_attribute *dev_attr, char *buffer) { struct vexpress_hwmon_data *data = dev_get_drvdata(dev); int err; u32 value; err = regmap_read(data->reg, 0, &value); if (err) return err; return snprintf(buffer, PAGE_SIZE, "%u\n", value / to_sensor_dev_attr(dev_attr)->index); } static ssize_t vexpress_hwmon_u64_show(struct device *dev, struct device_attribute *dev_attr, char *buffer) { struct vexpress_hwmon_data *data = dev_get_drvdata(dev); int err; u32 value_hi, value_lo; err = regmap_read(data->reg, 0, &value_lo); if (err) return err; err = regmap_read(data->reg, 1, &value_hi); if (err) return err; return snprintf(buffer, PAGE_SIZE, "%llu\n", div_u64(((u64)value_hi << 32) | value_lo, to_sensor_dev_attr(dev_attr)->index)); } static umode_t vexpress_hwmon_attr_is_visible(struct kobject *kobj, struct attribute *attr, int index) { struct device *dev = kobj_to_dev(kobj); struct device_attribute *dev_attr = container_of(attr, struct device_attribute, attr); if (dev_attr->show == vexpress_hwmon_label_show && !of_get_property(dev->of_node, "label", NULL)) return 0; return attr->mode; } struct vexpress_hwmon_type { const char *name; const struct attribute_group **attr_groups; }; #if !defined(CONFIG_REGULATOR_VEXPRESS) static DEVICE_ATTR(in1_label, S_IRUGO, vexpress_hwmon_label_show, NULL); static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, vexpress_hwmon_u32_show, NULL, 1000); static struct attribute *vexpress_hwmon_attrs_volt[] = { &dev_attr_in1_label.attr, &sensor_dev_attr_in1_input.dev_attr.attr, NULL }; static struct attribute_group vexpress_hwmon_group_volt = { .is_visible = vexpress_hwmon_attr_is_visible, .attrs = vexpress_hwmon_attrs_volt, }; static struct vexpress_hwmon_type vexpress_hwmon_volt = { .name = "vexpress_volt", .attr_groups = (const struct attribute_group *[]) { &vexpress_hwmon_group_volt, NULL, }, }; #endif static DEVICE_ATTR(curr1_label, S_IRUGO, vexpress_hwmon_label_show, NULL); static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, vexpress_hwmon_u32_show, NULL, 1000); static struct attribute *vexpress_hwmon_attrs_amp[] = { &dev_attr_curr1_label.attr, &sensor_dev_attr_curr1_input.dev_attr.attr, NULL }; static struct attribute_group vexpress_hwmon_group_amp = { .is_visible = vexpress_hwmon_attr_is_visible, .attrs = vexpress_hwmon_attrs_amp, }; static struct vexpress_hwmon_type vexpress_hwmon_amp = { .name = "vexpress_amp", .attr_groups = (const struct 
attribute_group *[]) { &vexpress_hwmon_group_amp, NULL }, }; static DEVICE_ATTR(temp1_label, S_IRUGO, vexpress_hwmon_label_show, NULL); static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, vexpress_hwmon_u32_show, NULL, 1000); static struct attribute *vexpress_hwmon_attrs_temp[] = { &dev_attr_temp1_label.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, NULL }; static struct attribute_group vexpress_hwmon_group_temp = { .is_visible = vexpress_hwmon_attr_is_visible, .attrs = vexpress_hwmon_attrs_temp, }; static struct vexpress_hwmon_type vexpress_hwmon_temp = { .name = "vexpress_temp", .attr_groups = (const struct attribute_group *[]) { &vexpress_hwmon_group_temp, NULL }, }; static DEVICE_ATTR(power1_label, S_IRUGO, vexpress_hwmon_label_show, NULL); static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, vexpress_hwmon_u32_show, NULL, 1); static struct attribute *vexpress_hwmon_attrs_power[] = { &dev_attr_power1_label.attr, &sensor_dev_attr_power1_input.dev_attr.attr, NULL }; static struct attribute_group vexpress_hwmon_group_power = { .is_visible = vexpress_hwmon_attr_is_visible, .attrs = vexpress_hwmon_attrs_power, }; static struct vexpress_hwmon_type vexpress_hwmon_power = { .name = "vexpress_power", .attr_groups = (const struct attribute_group *[]) { &vexpress_hwmon_group_power, NULL }, }; static DEVICE_ATTR(energy1_label, S_IRUGO, vexpress_hwmon_label_show, NULL); static SENSOR_DEVICE_ATTR(energy1_input, S_IRUGO, vexpress_hwmon_u64_show, NULL, 1); static struct attribute *vexpress_hwmon_attrs_energy[] = { &dev_attr_energy1_label.attr, &sensor_dev_attr_energy1_input.dev_attr.attr, NULL }; static struct attribute_group vexpress_hwmon_group_energy = { .is_visible = vexpress_hwmon_attr_is_visible, .attrs = vexpress_hwmon_attrs_energy, }; static struct vexpress_hwmon_type vexpress_hwmon_energy = { .name = "vexpress_energy", .attr_groups = (const struct attribute_group *[]) { &vexpress_hwmon_group_energy, NULL }, }; static const struct of_device_id vexpress_hwmon_of_match[] = { #if !defined(CONFIG_REGULATOR_VEXPRESS) { .compatible = "arm,vexpress-volt", .data = &vexpress_hwmon_volt, }, #endif { .compatible = "arm,vexpress-amp", .data = &vexpress_hwmon_amp, }, { .compatible = "arm,vexpress-temp", .data = &vexpress_hwmon_temp, }, { .compatible = "arm,vexpress-power", .data = &vexpress_hwmon_power, }, { .compatible = "arm,vexpress-energy", .data = &vexpress_hwmon_energy, }, {} }; MODULE_DEVICE_TABLE(of, vexpress_hwmon_of_match); static int vexpress_hwmon_probe(struct platform_device *pdev) { const struct of_device_id *match; struct vexpress_hwmon_data *data; const struct vexpress_hwmon_type *type; data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; platform_set_drvdata(pdev, data); match = of_match_device(vexpress_hwmon_of_match, &pdev->dev); if (!match) return -ENODEV; type = match->data; data->reg = devm_regmap_init_vexpress_config(&pdev->dev); if (IS_ERR(data->reg)) return PTR_ERR(data->reg); data->hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev, type->name, data, type->attr_groups); return PTR_ERR_OR_ZERO(data->hwmon_dev); } static struct platform_driver vexpress_hwmon_driver = { .probe = vexpress_hwmon_probe, .driver = { .name = DRVNAME, .of_match_table = vexpress_hwmon_of_match, }, }; module_platform_driver(vexpress_hwmon_driver); MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>"); MODULE_DESCRIPTION("Versatile Express hwmon sensors driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:vexpress-hwmon");
gpl-2.0
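vexpress_hwmon_u64_show above builds a 64-bit energy count from a low/high pair of 32-bit registers (offset 0 holds the low word, offset 1 the high word) and divides it by the scaling factor stored in the sensor attribute's index field. The sketch below isolates just that combine-and-scale step; the function name and the register values are made up for illustration, and the driver itself reads the words through regmap and divides with div_u64 rather than plain C division.

#include <stdint.h>
#include <stdio.h>

/* Combine a low/high 32-bit register pair into one 64-bit counter and
 * apply a unit divisor, mirroring the ((u64)value_hi << 32) | value_lo
 * expression in vexpress_hwmon_u64_show. */
static uint64_t combine_and_scale(uint32_t lo, uint32_t hi, uint32_t divisor)
{
	uint64_t raw = ((uint64_t)hi << 32) | lo;

	return raw / divisor;
}

int main(void)
{
	/* hi = 2, lo = 0x80000000 -> raw = 0x280000000 = 10737418240 */
	printf("%llu\n", (unsigned long long)combine_and_scale(0x80000000u, 2, 1));
	return 0;
}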
sandeep1027/kernel-3.10
drivers/staging/comedi/drivers/dt2814.c
2089
7911
/* comedi/drivers/dt2814.c Hardware driver for Data Translation DT2814 COMEDI - Linux Control and Measurement Device Interface Copyright (C) 1998 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: dt2814 Description: Data Translation DT2814 Author: ds Status: complete Devices: [Data Translation] DT2814 (dt2814) Configuration options: [0] - I/O port base address [1] - IRQ This card has 16 analog inputs multiplexed onto a 12 bit ADC. There is a minimally useful onboard clock. The base frequency for the clock is selected by jumpers, and the clock divider can be selected via programmed I/O. Unfortunately, the clock divider can only be a power of 10, from 1 to 10^7, of which only 3 or 4 are useful. In addition, the clock does not seem to be very accurate. */ #include <linux/interrupt.h> #include "../comedidev.h" #include <linux/ioport.h> #include <linux/delay.h> #include "comedi_fc.h" #define DT2814_SIZE 2 #define DT2814_CSR 0 #define DT2814_DATA 1 /* * flags */ #define DT2814_FINISH 0x80 #define DT2814_ERR 0x40 #define DT2814_BUSY 0x20 #define DT2814_ENB 0x10 #define DT2814_CHANMASK 0x0f struct dt2814_private { int ntrig; int curadchan; }; #define DT2814_TIMEOUT 10 #define DT2814_MAX_SPEED 100000 /* Arbitrary 10 khz limit */ static int dt2814_ai_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int n, i, hi, lo; int chan; int status = 0; for (n = 0; n < insn->n; n++) { chan = CR_CHAN(insn->chanspec); outb(chan, dev->iobase + DT2814_CSR); for (i = 0; i < DT2814_TIMEOUT; i++) { status = inb(dev->iobase + DT2814_CSR); printk(KERN_INFO "dt2814: status: %02x\n", status); udelay(10); if (status & DT2814_FINISH) break; } if (i >= DT2814_TIMEOUT) { printk(KERN_INFO "dt2814: status: %02x\n", status); return -ETIMEDOUT; } hi = inb(dev->iobase + DT2814_DATA); lo = inb(dev->iobase + DT2814_DATA); data[n] = (hi << 4) | (lo >> 4); } return n; } static int dt2814_ns_to_timer(unsigned int *ns, unsigned int flags) { int i; unsigned int f; /* XXX ignores flags */ f = 10000; /* ns */ for (i = 0; i < 8; i++) { if ((2 * (*ns)) < (f * 11)) break; f *= 10; } *ns = f; return i; } static int dt2814_ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int err = 0; int tmp; /* Step 1 : check if triggers are trivially valid */ err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW); err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER); err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_NOW); err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT); err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE); if (err) return 1; /* Step 2a : make sure trigger sources are unique */ err |= cfc_check_trigger_is_unique(cmd->stop_src); /* Step 2b : and mutually compatible */ if (err) return 2; /* Step 3: check if arguments are trivially valid */ err |= 
cfc_check_trigger_arg_is(&cmd->start_arg, 0); err |= cfc_check_trigger_arg_max(&cmd->scan_begin_arg, 1000000000); err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg, DT2814_MAX_SPEED); err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len); if (cmd->stop_src == TRIG_COUNT) err |= cfc_check_trigger_arg_min(&cmd->stop_arg, 2); else /* TRIG_NONE */ err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0); if (err) return 3; /* step 4: fix up any arguments */ tmp = cmd->scan_begin_arg; dt2814_ns_to_timer(&cmd->scan_begin_arg, cmd->flags & TRIG_ROUND_MASK); if (tmp != cmd->scan_begin_arg) err++; if (err) return 4; return 0; } static int dt2814_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { struct dt2814_private *devpriv = dev->private; struct comedi_cmd *cmd = &s->async->cmd; int chan; int trigvar; trigvar = dt2814_ns_to_timer(&cmd->scan_begin_arg, cmd->flags & TRIG_ROUND_MASK); chan = CR_CHAN(cmd->chanlist[0]); devpriv->ntrig = cmd->stop_arg; outb(chan | DT2814_ENB | (trigvar << 5), dev->iobase + DT2814_CSR); return 0; } static irqreturn_t dt2814_interrupt(int irq, void *d) { int lo, hi; struct comedi_device *dev = d; struct dt2814_private *devpriv = dev->private; struct comedi_subdevice *s; int data; if (!dev->attached) { comedi_error(dev, "spurious interrupt"); return IRQ_HANDLED; } s = &dev->subdevices[0]; hi = inb(dev->iobase + DT2814_DATA); lo = inb(dev->iobase + DT2814_DATA); data = (hi << 4) | (lo >> 4); if (!(--devpriv->ntrig)) { int i; outb(0, dev->iobase + DT2814_CSR); /* note: turning off timed mode triggers another sample. */ for (i = 0; i < DT2814_TIMEOUT; i++) { if (inb(dev->iobase + DT2814_CSR) & DT2814_FINISH) break; } inb(dev->iobase + DT2814_DATA); inb(dev->iobase + DT2814_DATA); s->async->events |= COMEDI_CB_EOA; } comedi_event(dev, s); return IRQ_HANDLED; } static int dt2814_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct dt2814_private *devpriv; int i, irq; int ret; struct comedi_subdevice *s; ret = comedi_request_region(dev, it->options[0], DT2814_SIZE); if (ret) return ret; outb(0, dev->iobase + DT2814_CSR); udelay(100); if (inb(dev->iobase + DT2814_CSR) & DT2814_ERR) { printk(KERN_ERR "reset error (fatal)\n"); return -EIO; } i = inb(dev->iobase + DT2814_DATA); i = inb(dev->iobase + DT2814_DATA); irq = it->options[1]; #if 0 if (irq < 0) { save_flags(flags); sti(); irqs = probe_irq_on(); outb(0, dev->iobase + DT2814_CSR); udelay(100); irq = probe_irq_off(irqs); restore_flags(flags); if (inb(dev->iobase + DT2814_CSR) & DT2814_ERR) printk(KERN_DEBUG "error probing irq (bad)\n"); i = inb(dev->iobase + DT2814_DATA); i = inb(dev->iobase + DT2814_DATA); } #endif dev->irq = 0; if (irq > 0) { if (request_irq(irq, dt2814_interrupt, 0, "dt2814", dev)) { printk(KERN_WARNING "(irq %d unavailable)\n", irq); } else { printk(KERN_INFO "( irq = %d )\n", irq); dev->irq = irq; } } else if (irq == 0) { printk(KERN_WARNING "(no irq)\n"); } else { #if 0 printk(KERN_DEBUG "(probe returned multiple irqs--bad)\n"); #else printk(KERN_WARNING "(irq probe not implemented)\n"); #endif } ret = comedi_alloc_subdevices(dev, 1); if (ret) return ret; devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL); if (!devpriv) return -ENOMEM; dev->private = devpriv; s = &dev->subdevices[0]; dev->read_subdev = s; s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ; s->n_chan = 16; /* XXX */ s->len_chanlist = 1; s->insn_read = dt2814_ai_insn_read; s->do_cmd = dt2814_ai_cmd; s->do_cmdtest = dt2814_ai_cmdtest; s->maxdata = 0xfff; 
s->range_table = &range_unknown; /* XXX */ return 0; } static struct comedi_driver dt2814_driver = { .driver_name = "dt2814", .module = THIS_MODULE, .attach = dt2814_attach, .detach = comedi_legacy_detach, }; module_comedi_driver(dt2814_driver); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
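dt2814_ns_to_timer above snaps a requested scan period onto the board's power-of-ten clock divider: starting at 10 us, it multiplies the candidate decade by ten until the requested period drops below 5.5 times the candidate (the 2*ns < 11*f test), then reports that decade back through *ns and returns its index. A standalone sketch of the same rounding rule follows; the function name decade_timer and the sample periods are chosen only for illustration.

#include <stdio.h>

/* Same rounding rule as dt2814_ns_to_timer: the DT2814 divider is a power
 * of ten, so the requested scan period is snapped to the first decade f
 * (starting at 10 us) for which ns < 5.5 * f.  Returns the divider index. */
static int decade_timer(unsigned int *ns)
{
	unsigned int f = 10000;		/* ns, i.e. 10 us */
	int i;

	for (i = 0; i < 8; i++) {
		if (2 * (*ns) < f * 11)
			break;
		f *= 10;
	}
	*ns = f;
	return i;
}

int main(void)
{
	unsigned int a = 54000, b = 200000;
	int ia = decade_timer(&a);
	int ib = decade_timer(&b);

	printf("54000 ns  -> index %d, period %u ns\n", ia, a);	/* index 0, 10000 ns */
	printf("200000 ns -> index %d, period %u ns\n", ib, b);	/* index 1, 100000 ns */
	return 0;
}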
PennPanda/litmus-rt
drivers/block/drbd/drbd_receiver.c
2089
149489
/* drbd_receiver.c This file is part of DRBD by Philipp Reisner and Lars Ellenberg. Copyright (C) 2001-2008, LINBIT Information Technologies GmbH. Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>. Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>. drbd is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. drbd is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with drbd; see the file COPYING. If not, write to the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <asm/uaccess.h> #include <net/sock.h> #include <linux/drbd.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/in.h> #include <linux/mm.h> #include <linux/memcontrol.h> #include <linux/mm_inline.h> #include <linux/slab.h> #include <linux/pkt_sched.h> #define __KERNEL_SYSCALLS__ #include <linux/unistd.h> #include <linux/vmalloc.h> #include <linux/random.h> #include <linux/string.h> #include <linux/scatterlist.h> #include "drbd_int.h" #include "drbd_req.h" #include "drbd_vli.h" struct packet_info { enum drbd_packet cmd; unsigned int size; unsigned int vnr; void *data; }; enum finish_epoch { FE_STILL_LIVE, FE_DESTROYED, FE_RECYCLED, }; static int drbd_do_features(struct drbd_tconn *tconn); static int drbd_do_auth(struct drbd_tconn *tconn); static int drbd_disconnected(struct drbd_conf *mdev); static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *, struct drbd_epoch *, enum epoch_event); static int e_end_block(struct drbd_work *, int); #define GFP_TRY (__GFP_HIGHMEM | __GFP_NOWARN) /* * some helper functions to deal with single linked page lists, * page->private being our "next" pointer. */ /* If at least n pages are linked at head, get n pages off. * Otherwise, don't modify head, and return NULL. * Locking is the responsibility of the caller. */ static struct page *page_chain_del(struct page **head, int n) { struct page *page; struct page *tmp; BUG_ON(!n); BUG_ON(!head); page = *head; if (!page) return NULL; while (page) { tmp = page_chain_next(page); if (--n == 0) break; /* found sufficient pages */ if (tmp == NULL) /* insufficient pages, don't use any of them. */ return NULL; page = tmp; } /* add end of list marker for the returned list */ set_page_private(page, 0); /* actual return value, and adjustment of head */ page = *head; *head = tmp; return page; } /* may be used outside of locks to find the tail of a (usually short) * "private" page chain, before adding it back to a global chain head * with page_chain_add() under a spinlock. 
*/ static struct page *page_chain_tail(struct page *page, int *len) { struct page *tmp; int i = 1; while ((tmp = page_chain_next(page))) ++i, page = tmp; if (len) *len = i; return page; } static int page_chain_free(struct page *page) { struct page *tmp; int i = 0; page_chain_for_each_safe(page, tmp) { put_page(page); ++i; } return i; } static void page_chain_add(struct page **head, struct page *chain_first, struct page *chain_last) { #if 1 struct page *tmp; tmp = page_chain_tail(chain_first, NULL); BUG_ON(tmp != chain_last); #endif /* add chain to head */ set_page_private(chain_last, (unsigned long)*head); *head = chain_first; } static struct page *__drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number) { struct page *page = NULL; struct page *tmp = NULL; unsigned int i = 0; /* Yes, testing drbd_pp_vacant outside the lock is racy. * So what. It saves a spin_lock. */ if (drbd_pp_vacant >= number) { spin_lock(&drbd_pp_lock); page = page_chain_del(&drbd_pp_pool, number); if (page) drbd_pp_vacant -= number; spin_unlock(&drbd_pp_lock); if (page) return page; } /* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD * "criss-cross" setup, that might cause write-out on some other DRBD, * which in turn might block on the other node at this very place. */ for (i = 0; i < number; i++) { tmp = alloc_page(GFP_TRY); if (!tmp) break; set_page_private(tmp, (unsigned long)page); page = tmp; } if (i == number) return page; /* Not enough pages immediately available this time. * No need to jump around here, drbd_alloc_pages will retry this * function "soon". */ if (page) { tmp = page_chain_tail(page, NULL); spin_lock(&drbd_pp_lock); page_chain_add(&drbd_pp_pool, page, tmp); drbd_pp_vacant += i; spin_unlock(&drbd_pp_lock); } return NULL; } static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev, struct list_head *to_be_freed) { struct drbd_peer_request *peer_req; struct list_head *le, *tle; /* The EEs are always appended to the end of the list. Since they are sent in order over the wire, they have to finish in order. As soon as we see the first not finished we can stop to examine the list... */ list_for_each_safe(le, tle, &mdev->net_ee) { peer_req = list_entry(le, struct drbd_peer_request, w.list); if (drbd_peer_req_has_active_page(peer_req)) break; list_move(le, to_be_freed); } } static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev) { LIST_HEAD(reclaimed); struct drbd_peer_request *peer_req, *t; spin_lock_irq(&mdev->tconn->req_lock); reclaim_finished_net_peer_reqs(mdev, &reclaimed); spin_unlock_irq(&mdev->tconn->req_lock); list_for_each_entry_safe(peer_req, t, &reclaimed, w.list) drbd_free_net_peer_req(mdev, peer_req); } /** * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled) * @mdev: DRBD device. * @number: number of pages requested * @retry: whether to retry, if not enough pages are available right now * * Tries to allocate number pages, first from our own page pool, then from * the kernel, unless this allocation would exceed the max_buffers setting. * Possibly retry until DRBD frees sufficient pages somewhere else. * * Returns a page chain linked via page->private. */ struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number, bool retry) { struct page *page = NULL; struct net_conf *nc; DEFINE_WAIT(wait); int mxb; /* Yes, we may run up to @number over max_buffers. If we * follow it strictly, the admin will get it wrong anyways. */ rcu_read_lock(); nc = rcu_dereference(mdev->tconn->net_conf); mxb = nc ? 
nc->max_buffers : 1000000; rcu_read_unlock(); if (atomic_read(&mdev->pp_in_use) < mxb) page = __drbd_alloc_pages(mdev, number); while (page == NULL) { prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE); drbd_kick_lo_and_reclaim_net(mdev); if (atomic_read(&mdev->pp_in_use) < mxb) { page = __drbd_alloc_pages(mdev, number); if (page) break; } if (!retry) break; if (signal_pending(current)) { dev_warn(DEV, "drbd_alloc_pages interrupted!\n"); break; } schedule(); } finish_wait(&drbd_pp_wait, &wait); if (page) atomic_add(number, &mdev->pp_in_use); return page; } /* Must not be used from irq, as that may deadlock: see drbd_alloc_pages. * Is also used from inside an other spin_lock_irq(&mdev->tconn->req_lock); * Either links the page chain back to the global pool, * or returns all pages to the system. */ static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net) { atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use; int i; if (page == NULL) return; if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count) i = page_chain_free(page); else { struct page *tmp; tmp = page_chain_tail(page, &i); spin_lock(&drbd_pp_lock); page_chain_add(&drbd_pp_pool, page, tmp); drbd_pp_vacant += i; spin_unlock(&drbd_pp_lock); } i = atomic_sub_return(i, a); if (i < 0) dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n", is_net ? "pp_in_use_by_net" : "pp_in_use", i); wake_up(&drbd_pp_wait); } /* You need to hold the req_lock: _drbd_wait_ee_list_empty() You must not have the req_lock: drbd_free_peer_req() drbd_alloc_peer_req() drbd_free_peer_reqs() drbd_ee_fix_bhs() drbd_finish_peer_reqs() drbd_clear_done_ee() drbd_wait_ee_list_empty() */ struct drbd_peer_request * drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector, unsigned int data_size, gfp_t gfp_mask) __must_hold(local) { struct drbd_peer_request *peer_req; struct page *page = NULL; unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT; if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE)) return NULL; peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM); if (!peer_req) { if (!(gfp_mask & __GFP_NOWARN)) dev_err(DEV, "%s: allocation failed\n", __func__); return NULL; } if (data_size) { page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT)); if (!page) goto fail; } drbd_clear_interval(&peer_req->i); peer_req->i.size = data_size; peer_req->i.sector = sector; peer_req->i.local = false; peer_req->i.waiting = false; peer_req->epoch = NULL; peer_req->w.mdev = mdev; peer_req->pages = page; atomic_set(&peer_req->pending_bios, 0); peer_req->flags = 0; /* * The block_id is opaque to the receiver. It is not endianness * converted, and sent back to the sender unchanged. 
*/ peer_req->block_id = id; return peer_req; fail: mempool_free(peer_req, drbd_ee_mempool); return NULL; } void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req, int is_net) { if (peer_req->flags & EE_HAS_DIGEST) kfree(peer_req->digest); drbd_free_pages(mdev, peer_req->pages, is_net); D_ASSERT(atomic_read(&peer_req->pending_bios) == 0); D_ASSERT(drbd_interval_empty(&peer_req->i)); mempool_free(peer_req, drbd_ee_mempool); } int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list) { LIST_HEAD(work_list); struct drbd_peer_request *peer_req, *t; int count = 0; int is_net = list == &mdev->net_ee; spin_lock_irq(&mdev->tconn->req_lock); list_splice_init(list, &work_list); spin_unlock_irq(&mdev->tconn->req_lock); list_for_each_entry_safe(peer_req, t, &work_list, w.list) { __drbd_free_peer_req(mdev, peer_req, is_net); count++; } return count; } /* * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier. */ static int drbd_finish_peer_reqs(struct drbd_conf *mdev) { LIST_HEAD(work_list); LIST_HEAD(reclaimed); struct drbd_peer_request *peer_req, *t; int err = 0; spin_lock_irq(&mdev->tconn->req_lock); reclaim_finished_net_peer_reqs(mdev, &reclaimed); list_splice_init(&mdev->done_ee, &work_list); spin_unlock_irq(&mdev->tconn->req_lock); list_for_each_entry_safe(peer_req, t, &reclaimed, w.list) drbd_free_net_peer_req(mdev, peer_req); /* possible callbacks here: * e_end_block, and e_end_resync_block, e_send_superseded. * all ignore the last argument. */ list_for_each_entry_safe(peer_req, t, &work_list, w.list) { int err2; /* list_del not necessary, next/prev members not touched */ err2 = peer_req->w.cb(&peer_req->w, !!err); if (!err) err = err2; drbd_free_peer_req(mdev, peer_req); } wake_up(&mdev->ee_wait); return err; } static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head) { DEFINE_WAIT(wait); /* avoids spin_lock/unlock * and calling prepare_to_wait in the fast path */ while (!list_empty(head)) { prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE); spin_unlock_irq(&mdev->tconn->req_lock); io_schedule(); finish_wait(&mdev->ee_wait, &wait); spin_lock_irq(&mdev->tconn->req_lock); } } static void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head) { spin_lock_irq(&mdev->tconn->req_lock); _drbd_wait_ee_list_empty(mdev, head); spin_unlock_irq(&mdev->tconn->req_lock); } static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags) { mm_segment_t oldfs; struct kvec iov = { .iov_base = buf, .iov_len = size, }; struct msghdr msg = { .msg_iovlen = 1, .msg_iov = (struct iovec *)&iov, .msg_flags = (flags ? 
flags : MSG_WAITALL | MSG_NOSIGNAL) }; int rv; oldfs = get_fs(); set_fs(KERNEL_DS); rv = sock_recvmsg(sock, &msg, size, msg.msg_flags); set_fs(oldfs); return rv; } static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size) { int rv; rv = drbd_recv_short(tconn->data.socket, buf, size, 0); if (rv < 0) { if (rv == -ECONNRESET) conn_info(tconn, "sock was reset by peer\n"); else if (rv != -ERESTARTSYS) conn_err(tconn, "sock_recvmsg returned %d\n", rv); } else if (rv == 0) { if (test_bit(DISCONNECT_SENT, &tconn->flags)) { long t; rcu_read_lock(); t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10; rcu_read_unlock(); t = wait_event_timeout(tconn->ping_wait, tconn->cstate < C_WF_REPORT_PARAMS, t); if (t) goto out; } conn_info(tconn, "sock was shut down by peer\n"); } if (rv != size) conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD); out: return rv; } static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size) { int err; err = drbd_recv(tconn, buf, size); if (err != size) { if (err >= 0) err = -EIO; } else err = 0; return err; } static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size) { int err; err = drbd_recv_all(tconn, buf, size); if (err && !signal_pending(current)) conn_warn(tconn, "short read (expected size %d)\n", (int)size); return err; } /* quoting tcp(7): * On individual connections, the socket buffer size must be set prior to the * listen(2) or connect(2) calls in order to have it take effect. * This is our wrapper to do so. */ static void drbd_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv) { /* open coded SO_SNDBUF, SO_RCVBUF */ if (snd) { sock->sk->sk_sndbuf = snd; sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK; } if (rcv) { sock->sk->sk_rcvbuf = rcv; sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK; } } static struct socket *drbd_try_connect(struct drbd_tconn *tconn) { const char *what; struct socket *sock; struct sockaddr_in6 src_in6; struct sockaddr_in6 peer_in6; struct net_conf *nc; int err, peer_addr_len, my_addr_len; int sndbuf_size, rcvbuf_size, connect_int; int disconnect_on_error = 1; rcu_read_lock(); nc = rcu_dereference(tconn->net_conf); if (!nc) { rcu_read_unlock(); return NULL; } sndbuf_size = nc->sndbuf_size; rcvbuf_size = nc->rcvbuf_size; connect_int = nc->connect_int; rcu_read_unlock(); my_addr_len = min_t(int, tconn->my_addr_len, sizeof(src_in6)); memcpy(&src_in6, &tconn->my_addr, my_addr_len); if (((struct sockaddr *)&tconn->my_addr)->sa_family == AF_INET6) src_in6.sin6_port = 0; else ((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */ peer_addr_len = min_t(int, tconn->peer_addr_len, sizeof(src_in6)); memcpy(&peer_in6, &tconn->peer_addr, peer_addr_len); what = "sock_create_kern"; err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family, SOCK_STREAM, IPPROTO_TCP, &sock); if (err < 0) { sock = NULL; goto out; } sock->sk->sk_rcvtimeo = sock->sk->sk_sndtimeo = connect_int * HZ; drbd_setbufsize(sock, sndbuf_size, rcvbuf_size); /* explicitly bind to the configured IP as source IP * for the outgoing connections. * This is needed for multihomed hosts and to be * able to use lo: interfaces for drbd. * Make sure to use 0 as port number, so linux selects * a free one dynamically. */ what = "bind before connect"; err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len); if (err < 0) goto out; /* connect may fail, peer not yet available. * stay C_WF_CONNECTION, don't go Disconnecting! 
*/ disconnect_on_error = 0; what = "connect"; err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0); out: if (err < 0) { if (sock) { sock_release(sock); sock = NULL; } switch (-err) { /* timeout, busy, signal pending */ case ETIMEDOUT: case EAGAIN: case EINPROGRESS: case EINTR: case ERESTARTSYS: /* peer not (yet) available, network problem */ case ECONNREFUSED: case ENETUNREACH: case EHOSTDOWN: case EHOSTUNREACH: disconnect_on_error = 0; break; default: conn_err(tconn, "%s failed, err = %d\n", what, err); } if (disconnect_on_error) conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD); } return sock; } struct accept_wait_data { struct drbd_tconn *tconn; struct socket *s_listen; struct completion door_bell; void (*original_sk_state_change)(struct sock *sk); }; static void drbd_incoming_connection(struct sock *sk) { struct accept_wait_data *ad = sk->sk_user_data; void (*state_change)(struct sock *sk); state_change = ad->original_sk_state_change; if (sk->sk_state == TCP_ESTABLISHED) complete(&ad->door_bell); state_change(sk); } static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_data *ad) { int err, sndbuf_size, rcvbuf_size, my_addr_len; struct sockaddr_in6 my_addr; struct socket *s_listen; struct net_conf *nc; const char *what; rcu_read_lock(); nc = rcu_dereference(tconn->net_conf); if (!nc) { rcu_read_unlock(); return -EIO; } sndbuf_size = nc->sndbuf_size; rcvbuf_size = nc->rcvbuf_size; rcu_read_unlock(); my_addr_len = min_t(int, tconn->my_addr_len, sizeof(struct sockaddr_in6)); memcpy(&my_addr, &tconn->my_addr, my_addr_len); what = "sock_create_kern"; err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family, SOCK_STREAM, IPPROTO_TCP, &s_listen); if (err) { s_listen = NULL; goto out; } s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */ drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size); what = "bind before listen"; err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len); if (err < 0) goto out; ad->s_listen = s_listen; write_lock_bh(&s_listen->sk->sk_callback_lock); ad->original_sk_state_change = s_listen->sk->sk_state_change; s_listen->sk->sk_state_change = drbd_incoming_connection; s_listen->sk->sk_user_data = ad; write_unlock_bh(&s_listen->sk->sk_callback_lock); what = "listen"; err = s_listen->ops->listen(s_listen, 5); if (err < 0) goto out; return 0; out: if (s_listen) sock_release(s_listen); if (err < 0) { if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) { conn_err(tconn, "%s failed, err = %d\n", what, err); conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD); } } return -EIO; } static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad) { write_lock_bh(&sk->sk_callback_lock); sk->sk_state_change = ad->original_sk_state_change; sk->sk_user_data = NULL; write_unlock_bh(&sk->sk_callback_lock); } static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct accept_wait_data *ad) { int timeo, connect_int, err = 0; struct socket *s_estab = NULL; struct net_conf *nc; rcu_read_lock(); nc = rcu_dereference(tconn->net_conf); if (!nc) { rcu_read_unlock(); return NULL; } connect_int = nc->connect_int; rcu_read_unlock(); timeo = connect_int * HZ; /* 28.5% random jitter */ timeo += (prandom_u32() & 1) ? 
timeo / 7 : -timeo / 7; err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo); if (err <= 0) return NULL; err = kernel_accept(ad->s_listen, &s_estab, 0); if (err < 0) { if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) { conn_err(tconn, "accept failed, err = %d\n", err); conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD); } } if (s_estab) unregister_state_change(s_estab->sk, ad); return s_estab; } static int decode_header(struct drbd_tconn *, void *, struct packet_info *); static int send_first_packet(struct drbd_tconn *tconn, struct drbd_socket *sock, enum drbd_packet cmd) { if (!conn_prepare_command(tconn, sock)) return -EIO; return conn_send_command(tconn, sock, cmd, 0, NULL, 0); } static int receive_first_packet(struct drbd_tconn *tconn, struct socket *sock) { unsigned int header_size = drbd_header_size(tconn); struct packet_info pi; int err; err = drbd_recv_short(sock, tconn->data.rbuf, header_size, 0); if (err != header_size) { if (err >= 0) err = -EIO; return err; } err = decode_header(tconn, tconn->data.rbuf, &pi); if (err) return err; return pi.cmd; } /** * drbd_socket_okay() - Free the socket if its connection is not okay * @sock: pointer to the pointer to the socket. */ static int drbd_socket_okay(struct socket **sock) { int rr; char tb[4]; if (!*sock) return false; rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK); if (rr > 0 || rr == -EAGAIN) { return true; } else { sock_release(*sock); *sock = NULL; return false; } } /* Gets called if a connection is established, or if a new minor gets created in a connection */ int drbd_connected(struct drbd_conf *mdev) { int err; atomic_set(&mdev->packet_seq, 0); mdev->peer_seq = 0; mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ? &mdev->tconn->cstate_mutex : &mdev->own_state_mutex; err = drbd_send_sync_param(mdev); if (!err) err = drbd_send_sizes(mdev, 0, 0); if (!err) err = drbd_send_uuids(mdev); if (!err) err = drbd_send_current_state(mdev); clear_bit(USE_DEGR_WFC_T, &mdev->flags); clear_bit(RESIZE_PENDING, &mdev->flags); atomic_set(&mdev->ap_in_flight, 0); mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */ return err; } /* * return values: * 1 yes, we have a valid connection * 0 oops, did not work out, please try again * -1 peer talks different language, * no point in trying again, please go standalone. * -2 We do not have a network config... */ static int conn_connect(struct drbd_tconn *tconn) { struct drbd_socket sock, msock; struct drbd_conf *mdev; struct net_conf *nc; int vnr, timeout, h, ok; bool discard_my_data; enum drbd_state_rv rv; struct accept_wait_data ad = { .tconn = tconn, .door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell), }; clear_bit(DISCONNECT_SENT, &tconn->flags); if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS) return -2; mutex_init(&sock.mutex); sock.sbuf = tconn->data.sbuf; sock.rbuf = tconn->data.rbuf; sock.socket = NULL; mutex_init(&msock.mutex); msock.sbuf = tconn->meta.sbuf; msock.rbuf = tconn->meta.rbuf; msock.socket = NULL; /* Assume that the peer only understands protocol 80 until we know better. 
*/ tconn->agreed_pro_version = 80; if (prepare_listen_socket(tconn, &ad)) return 0; do { struct socket *s; s = drbd_try_connect(tconn); if (s) { if (!sock.socket) { sock.socket = s; send_first_packet(tconn, &sock, P_INITIAL_DATA); } else if (!msock.socket) { clear_bit(RESOLVE_CONFLICTS, &tconn->flags); msock.socket = s; send_first_packet(tconn, &msock, P_INITIAL_META); } else { conn_err(tconn, "Logic error in conn_connect()\n"); goto out_release_sockets; } } if (sock.socket && msock.socket) { rcu_read_lock(); nc = rcu_dereference(tconn->net_conf); timeout = nc->ping_timeo * HZ / 10; rcu_read_unlock(); schedule_timeout_interruptible(timeout); ok = drbd_socket_okay(&sock.socket); ok = drbd_socket_okay(&msock.socket) && ok; if (ok) break; } retry: s = drbd_wait_for_connect(tconn, &ad); if (s) { int fp = receive_first_packet(tconn, s); drbd_socket_okay(&sock.socket); drbd_socket_okay(&msock.socket); switch (fp) { case P_INITIAL_DATA: if (sock.socket) { conn_warn(tconn, "initial packet S crossed\n"); sock_release(sock.socket); sock.socket = s; goto randomize; } sock.socket = s; break; case P_INITIAL_META: set_bit(RESOLVE_CONFLICTS, &tconn->flags); if (msock.socket) { conn_warn(tconn, "initial packet M crossed\n"); sock_release(msock.socket); msock.socket = s; goto randomize; } msock.socket = s; break; default: conn_warn(tconn, "Error receiving initial packet\n"); sock_release(s); randomize: if (prandom_u32() & 1) goto retry; } } if (tconn->cstate <= C_DISCONNECTING) goto out_release_sockets; if (signal_pending(current)) { flush_signals(current); smp_rmb(); if (get_t_state(&tconn->receiver) == EXITING) goto out_release_sockets; } ok = drbd_socket_okay(&sock.socket); ok = drbd_socket_okay(&msock.socket) && ok; } while (!ok); if (ad.s_listen) sock_release(ad.s_listen); sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */ msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */ sock.socket->sk->sk_allocation = GFP_NOIO; msock.socket->sk->sk_allocation = GFP_NOIO; sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK; msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE; /* NOT YET ... * sock.socket->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10; * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; * first set it to the P_CONNECTION_FEATURES timeout, * which we set to 4x the configured ping_timeout. */ rcu_read_lock(); nc = rcu_dereference(tconn->net_conf); sock.socket->sk->sk_sndtimeo = sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10; msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ; timeout = nc->timeout * HZ / 10; discard_my_data = nc->discard_my_data; rcu_read_unlock(); msock.socket->sk->sk_sndtimeo = timeout; /* we don't want delays. 
* we use TCP_CORK where appropriate, though */ drbd_tcp_nodelay(sock.socket); drbd_tcp_nodelay(msock.socket); tconn->data.socket = sock.socket; tconn->meta.socket = msock.socket; tconn->last_received = jiffies; h = drbd_do_features(tconn); if (h <= 0) return h; if (tconn->cram_hmac_tfm) { /* drbd_request_state(mdev, NS(conn, WFAuth)); */ switch (drbd_do_auth(tconn)) { case -1: conn_err(tconn, "Authentication of peer failed\n"); return -1; case 0: conn_err(tconn, "Authentication of peer failed, trying again.\n"); return 0; } } tconn->data.socket->sk->sk_sndtimeo = timeout; tconn->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; if (drbd_send_protocol(tconn) == -EOPNOTSUPP) return -1; set_bit(STATE_SENT, &tconn->flags); rcu_read_lock(); idr_for_each_entry(&tconn->volumes, mdev, vnr) { kref_get(&mdev->kref); /* Prevent a race between resync-handshake and * being promoted to Primary. * * Grab and release the state mutex, so we know that any current * drbd_set_role() is finished, and any incoming drbd_set_role * will see the STATE_SENT flag, and wait for it to be cleared. */ mutex_lock(mdev->state_mutex); mutex_unlock(mdev->state_mutex); rcu_read_unlock(); if (discard_my_data) set_bit(DISCARD_MY_DATA, &mdev->flags); else clear_bit(DISCARD_MY_DATA, &mdev->flags); drbd_connected(mdev); kref_put(&mdev->kref, &drbd_minor_destroy); rcu_read_lock(); } rcu_read_unlock(); rv = conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE); if (rv < SS_SUCCESS || tconn->cstate != C_WF_REPORT_PARAMS) { clear_bit(STATE_SENT, &tconn->flags); return 0; } drbd_thread_start(&tconn->asender); mutex_lock(&tconn->conf_update); /* The discard_my_data flag is a single-shot modifier to the next * connection attempt, the handshake of which is now well underway. * No need for rcu style copying of the whole struct * just to clear a single value. 
*/ tconn->net_conf->discard_my_data = 0; mutex_unlock(&tconn->conf_update); return h; out_release_sockets: if (ad.s_listen) sock_release(ad.s_listen); if (sock.socket) sock_release(sock.socket); if (msock.socket) sock_release(msock.socket); return -1; } static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_info *pi) { unsigned int header_size = drbd_header_size(tconn); if (header_size == sizeof(struct p_header100) && *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) { struct p_header100 *h = header; if (h->pad != 0) { conn_err(tconn, "Header padding is not zero\n"); return -EINVAL; } pi->vnr = be16_to_cpu(h->volume); pi->cmd = be16_to_cpu(h->command); pi->size = be32_to_cpu(h->length); } else if (header_size == sizeof(struct p_header95) && *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) { struct p_header95 *h = header; pi->cmd = be16_to_cpu(h->command); pi->size = be32_to_cpu(h->length); pi->vnr = 0; } else if (header_size == sizeof(struct p_header80) && *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) { struct p_header80 *h = header; pi->cmd = be16_to_cpu(h->command); pi->size = be16_to_cpu(h->length); pi->vnr = 0; } else { conn_err(tconn, "Wrong magic value 0x%08x in protocol version %d\n", be32_to_cpu(*(__be32 *)header), tconn->agreed_pro_version); return -EINVAL; } pi->data = header + header_size; return 0; } static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi) { void *buffer = tconn->data.rbuf; int err; err = drbd_recv_all_warn(tconn, buffer, drbd_header_size(tconn)); if (err) return err; err = decode_header(tconn, buffer, pi); tconn->last_received = jiffies; return err; } static void drbd_flush(struct drbd_tconn *tconn) { int rv; struct drbd_conf *mdev; int vnr; if (tconn->write_ordering >= WO_bdev_flush) { rcu_read_lock(); idr_for_each_entry(&tconn->volumes, mdev, vnr) { if (!get_ldev(mdev)) continue; kref_get(&mdev->kref); rcu_read_unlock(); rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_NOIO, NULL); if (rv) { dev_info(DEV, "local disk flush failed with status %d\n", rv); /* would rather check on EOPNOTSUPP, but that is not reliable. * don't try again for ANY return value != 0 * if (rv == -EOPNOTSUPP) */ drbd_bump_write_ordering(tconn, WO_drain_io); } put_ldev(mdev); kref_put(&mdev->kref, &drbd_minor_destroy); rcu_read_lock(); if (rv) break; } rcu_read_unlock(); } } /** * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it. * @mdev: DRBD device. * @epoch: Epoch object. * @ev: Epoch event. */ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn, struct drbd_epoch *epoch, enum epoch_event ev) { int epoch_size; struct drbd_epoch *next_epoch; enum finish_epoch rv = FE_STILL_LIVE; spin_lock(&tconn->epoch_lock); do { next_epoch = NULL; epoch_size = atomic_read(&epoch->epoch_size); switch (ev & ~EV_CLEANUP) { case EV_PUT: atomic_dec(&epoch->active); break; case EV_GOT_BARRIER_NR: set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags); break; case EV_BECAME_LAST: /* nothing to do*/ break; } if (epoch_size != 0 && atomic_read(&epoch->active) == 0 && (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) { if (!(ev & EV_CLEANUP)) { spin_unlock(&tconn->epoch_lock); drbd_send_b_ack(epoch->tconn, epoch->barrier_nr, epoch_size); spin_lock(&tconn->epoch_lock); } #if 0 /* FIXME: dec unacked on connection, once we have * something to count pending connection packets in. 
*/ if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) dec_unacked(epoch->tconn); #endif if (tconn->current_epoch != epoch) { next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list); list_del(&epoch->list); ev = EV_BECAME_LAST | (ev & EV_CLEANUP); tconn->epochs--; kfree(epoch); if (rv == FE_STILL_LIVE) rv = FE_DESTROYED; } else { epoch->flags = 0; atomic_set(&epoch->epoch_size, 0); /* atomic_set(&epoch->active, 0); is already zero */ if (rv == FE_STILL_LIVE) rv = FE_RECYCLED; } } if (!next_epoch) break; epoch = next_epoch; } while (1); spin_unlock(&tconn->epoch_lock); return rv; } /** * drbd_bump_write_ordering() - Fall back to an other write ordering method * @tconn: DRBD connection. * @wo: Write ordering method to try. */ void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo) { struct disk_conf *dc; struct drbd_conf *mdev; enum write_ordering_e pwo; int vnr; static char *write_ordering_str[] = { [WO_none] = "none", [WO_drain_io] = "drain", [WO_bdev_flush] = "flush", }; pwo = tconn->write_ordering; wo = min(pwo, wo); rcu_read_lock(); idr_for_each_entry(&tconn->volumes, mdev, vnr) { if (!get_ldev_if_state(mdev, D_ATTACHING)) continue; dc = rcu_dereference(mdev->ldev->disk_conf); if (wo == WO_bdev_flush && !dc->disk_flushes) wo = WO_drain_io; if (wo == WO_drain_io && !dc->disk_drain) wo = WO_none; put_ldev(mdev); } rcu_read_unlock(); tconn->write_ordering = wo; if (pwo != tconn->write_ordering || wo == WO_bdev_flush) conn_info(tconn, "Method to ensure write ordering: %s\n", write_ordering_str[tconn->write_ordering]); } /** * drbd_submit_peer_request() * @mdev: DRBD device. * @peer_req: peer request * @rw: flag field, see bio->bi_rw * * May spread the pages to multiple bios, * depending on bio_add_page restrictions. * * Returns 0 if all bios have been submitted, * -ENOMEM if we could not allocate enough bios, * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a * single page to an empty bio (which should never happen and likely indicates * that the lower level IO stack is in some way broken). This has been observed * on certain Xen deployments. */ /* TODO allocate from our own bio_set. */ int drbd_submit_peer_request(struct drbd_conf *mdev, struct drbd_peer_request *peer_req, const unsigned rw, const int fault_type) { struct bio *bios = NULL; struct bio *bio; struct page *page = peer_req->pages; sector_t sector = peer_req->i.sector; unsigned ds = peer_req->i.size; unsigned n_bios = 0; unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT; int err = -ENOMEM; /* In most cases, we will only need one bio. But in case the lower * level restrictions happen to be different at this offset on this * side than those of the sending peer, we may need to submit the * request in more than one bio. * * Plain bio_alloc is good enough here, this is no DRBD internally * generated bio, but a bio allocated on behalf of the peer. */ next_bio: bio = bio_alloc(GFP_NOIO, nr_pages); if (!bio) { dev_err(DEV, "submit_ee: Allocation of a bio failed\n"); goto fail; } /* > peer_req->i.sector, unless this is the first bio */ bio->bi_sector = sector; bio->bi_bdev = mdev->ldev->backing_bdev; bio->bi_rw = rw; bio->bi_private = peer_req; bio->bi_end_io = drbd_peer_request_endio; bio->bi_next = bios; bios = bio; ++n_bios; page_chain_for_each(page) { unsigned len = min_t(unsigned, ds, PAGE_SIZE); if (!bio_add_page(bio, page, len, 0)) { /* A single page must always be possible! * But in case it fails anyways, * we deal with it, and complain (below). 
*/ if (bio->bi_vcnt == 0) { dev_err(DEV, "bio_add_page failed for len=%u, " "bi_vcnt=0 (bi_sector=%llu)\n", len, (unsigned long long)bio->bi_sector); err = -ENOSPC; goto fail; } goto next_bio; } ds -= len; sector += len >> 9; --nr_pages; } D_ASSERT(page == NULL); D_ASSERT(ds == 0); atomic_set(&peer_req->pending_bios, n_bios); do { bio = bios; bios = bios->bi_next; bio->bi_next = NULL; drbd_generic_make_request(mdev, fault_type, bio); } while (bios); return 0; fail: while (bios) { bio = bios; bios = bios->bi_next; bio_put(bio); } return err; } static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev, struct drbd_peer_request *peer_req) { struct drbd_interval *i = &peer_req->i; drbd_remove_interval(&mdev->write_requests, i); drbd_clear_interval(i); /* Wake up any processes waiting for this peer request to complete. */ if (i->waiting) wake_up(&mdev->misc_wait); } void conn_wait_active_ee_empty(struct drbd_tconn *tconn) { struct drbd_conf *mdev; int vnr; rcu_read_lock(); idr_for_each_entry(&tconn->volumes, mdev, vnr) { kref_get(&mdev->kref); rcu_read_unlock(); drbd_wait_ee_list_empty(mdev, &mdev->active_ee); kref_put(&mdev->kref, &drbd_minor_destroy); rcu_read_lock(); } rcu_read_unlock(); } static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi) { int rv; struct p_barrier *p = pi->data; struct drbd_epoch *epoch; /* FIXME these are unacked on connection, * not a specific (peer)device. */ tconn->current_epoch->barrier_nr = p->barrier; tconn->current_epoch->tconn = tconn; rv = drbd_may_finish_epoch(tconn, tconn->current_epoch, EV_GOT_BARRIER_NR); /* P_BARRIER_ACK may imply that the corresponding extent is dropped from * the activity log, which means it would not be resynced in case the * R_PRIMARY crashes now. * Therefore we must send the barrier_ack after the barrier request was * completed. */ switch (tconn->write_ordering) { case WO_none: if (rv == FE_RECYCLED) return 0; /* receiver context, in the writeout path of the other node. * avoid potential distributed deadlock */ epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); if (epoch) break; else conn_warn(tconn, "Allocation of an epoch failed, slowing down\n"); /* Fall through */ case WO_bdev_flush: case WO_drain_io: conn_wait_active_ee_empty(tconn); drbd_flush(tconn); if (atomic_read(&tconn->current_epoch->epoch_size)) { epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); if (epoch) break; } return 0; default: conn_err(tconn, "Strangeness in tconn->write_ordering %d\n", tconn->write_ordering); return -EIO; } epoch->flags = 0; atomic_set(&epoch->epoch_size, 0); atomic_set(&epoch->active, 0); spin_lock(&tconn->epoch_lock); if (atomic_read(&tconn->current_epoch->epoch_size)) { list_add(&epoch->list, &tconn->current_epoch->list); tconn->current_epoch = epoch; tconn->epochs++; } else { /* The current_epoch got recycled while we allocated this one... 
*/ kfree(epoch); } spin_unlock(&tconn->epoch_lock); return 0; } /* used from receive_RSDataReply (recv_resync_read) * and from receive_Data */ static struct drbd_peer_request * read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local) { const sector_t capacity = drbd_get_capacity(mdev->this_bdev); struct drbd_peer_request *peer_req; struct page *page; int dgs, ds, err; void *dig_in = mdev->tconn->int_dig_in; void *dig_vv = mdev->tconn->int_dig_vv; unsigned long *data; dgs = 0; if (mdev->tconn->peer_integrity_tfm) { dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm); /* * FIXME: Receive the incoming digest into the receive buffer * here, together with its struct p_data? */ err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs); if (err) return NULL; data_size -= dgs; } if (!expect(IS_ALIGNED(data_size, 512))) return NULL; if (!expect(data_size <= DRBD_MAX_BIO_SIZE)) return NULL; /* even though we trust out peer, * we sometimes have to double check. */ if (sector + (data_size>>9) > capacity) { dev_err(DEV, "request from peer beyond end of local disk: " "capacity: %llus < sector: %llus + size: %u\n", (unsigned long long)capacity, (unsigned long long)sector, data_size); return NULL; } /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD * "criss-cross" setup, that might cause write-out on some other DRBD, * which in turn might block on the other node at this very place. */ peer_req = drbd_alloc_peer_req(mdev, id, sector, data_size, GFP_NOIO); if (!peer_req) return NULL; if (!data_size) return peer_req; ds = data_size; page = peer_req->pages; page_chain_for_each(page) { unsigned len = min_t(int, ds, PAGE_SIZE); data = kmap(page); err = drbd_recv_all_warn(mdev->tconn, data, len); if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) { dev_err(DEV, "Fault injection: Corrupting data on receive\n"); data[0] = data[0] ^ (unsigned long)-1; } kunmap(page); if (err) { drbd_free_peer_req(mdev, peer_req); return NULL; } ds -= len; } if (dgs) { drbd_csum_ee(mdev, mdev->tconn->peer_integrity_tfm, peer_req, dig_vv); if (memcmp(dig_in, dig_vv, dgs)) { dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n", (unsigned long long)sector, data_size); drbd_free_peer_req(mdev, peer_req); return NULL; } } mdev->recv_cnt += data_size>>9; return peer_req; } /* drbd_drain_block() just takes a data block * out of the socket input buffer, and discards it. */ static int drbd_drain_block(struct drbd_conf *mdev, int data_size) { struct page *page; int err = 0; void *data; if (!data_size) return 0; page = drbd_alloc_pages(mdev, 1, 1); data = kmap(page); while (data_size) { unsigned int len = min_t(int, data_size, PAGE_SIZE); err = drbd_recv_all_warn(mdev->tconn, data, len); if (err) break; data_size -= len; } kunmap(page); drbd_free_pages(mdev, page, 0); return err; } static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req, sector_t sector, int data_size) { struct bio_vec *bvec; struct bio *bio; int dgs, err, i, expect; void *dig_in = mdev->tconn->int_dig_in; void *dig_vv = mdev->tconn->int_dig_vv; dgs = 0; if (mdev->tconn->peer_integrity_tfm) { dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm); err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs); if (err) return err; data_size -= dgs; } /* optimistically update recv_cnt. if receiving fails below, * we disconnect anyways, and counters will be reset. 
*/ mdev->recv_cnt += data_size>>9; bio = req->master_bio; D_ASSERT(sector == bio->bi_sector); bio_for_each_segment(bvec, bio, i) { void *mapped = kmap(bvec->bv_page) + bvec->bv_offset; expect = min_t(int, data_size, bvec->bv_len); err = drbd_recv_all_warn(mdev->tconn, mapped, expect); kunmap(bvec->bv_page); if (err) return err; data_size -= expect; } if (dgs) { drbd_csum_bio(mdev, mdev->tconn->peer_integrity_tfm, bio, dig_vv); if (memcmp(dig_in, dig_vv, dgs)) { dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n"); return -EINVAL; } } D_ASSERT(data_size == 0); return 0; } /* * e_end_resync_block() is called in asender context via * drbd_finish_peer_reqs(). */ static int e_end_resync_block(struct drbd_work *w, int unused) { struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); struct drbd_conf *mdev = w->mdev; sector_t sector = peer_req->i.sector; int err; D_ASSERT(drbd_interval_empty(&peer_req->i)); if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { drbd_set_in_sync(mdev, sector, peer_req->i.size); err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req); } else { /* Record failure to sync */ drbd_rs_failed_io(mdev, sector, peer_req->i.size); err = drbd_send_ack(mdev, P_NEG_ACK, peer_req); } dec_unacked(mdev); return err; } static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local) { struct drbd_peer_request *peer_req; peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size); if (!peer_req) goto fail; dec_rs_pending(mdev); inc_unacked(mdev); /* corresponding dec_unacked() in e_end_resync_block() * respective _drbd_clear_done_ee */ peer_req->w.cb = e_end_resync_block; spin_lock_irq(&mdev->tconn->req_lock); list_add(&peer_req->w.list, &mdev->sync_ee); spin_unlock_irq(&mdev->tconn->req_lock); atomic_add(data_size >> 9, &mdev->rs_sect_ev); if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0) return 0; /* don't care for the reason here */ dev_err(DEV, "submit failed, triggering re-connect\n"); spin_lock_irq(&mdev->tconn->req_lock); list_del(&peer_req->w.list); spin_unlock_irq(&mdev->tconn->req_lock); drbd_free_peer_req(mdev, peer_req); fail: put_ldev(mdev); return -EIO; } static struct drbd_request * find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id, sector_t sector, bool missing_ok, const char *func) { struct drbd_request *req; /* Request object according to our peer */ req = (struct drbd_request *)(unsigned long)id; if (drbd_contains_interval(root, sector, &req->i) && req->i.local) return req; if (!missing_ok) { dev_err(DEV, "%s: failed to find request 0x%lx, sector %llus\n", func, (unsigned long)id, (unsigned long long)sector); } return NULL; } static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi) { struct drbd_conf *mdev; struct drbd_request *req; sector_t sector; int err; struct p_data *p = pi->data; mdev = vnr_to_mdev(tconn, pi->vnr); if (!mdev) return -EIO; sector = be64_to_cpu(p->sector); spin_lock_irq(&mdev->tconn->req_lock); req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__); spin_unlock_irq(&mdev->tconn->req_lock); if (unlikely(!req)) return -EIO; /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid * special casing it there for the various failure cases. * still no race with drbd_fail_pending_reads */ err = recv_dless_read(mdev, req, sector, pi->size); if (!err) req_mod(req, DATA_RECEIVED); /* else: nothing. handled from drbd_disconnect... 
* I don't think we may complete this just yet * in case we are "on-disconnect: freeze" */ return err; } static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi) { struct drbd_conf *mdev; sector_t sector; int err; struct p_data *p = pi->data; mdev = vnr_to_mdev(tconn, pi->vnr); if (!mdev) return -EIO; sector = be64_to_cpu(p->sector); D_ASSERT(p->block_id == ID_SYNCER); if (get_ldev(mdev)) { /* data is submitted to disk within recv_resync_read. * corresponding put_ldev done below on error, * or in drbd_peer_request_endio. */ err = recv_resync_read(mdev, sector, pi->size); } else { if (__ratelimit(&drbd_ratelimit_state)) dev_err(DEV, "Can not write resync data to local disk.\n"); err = drbd_drain_block(mdev, pi->size); drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size); } atomic_add(pi->size >> 9, &mdev->rs_sect_in); return err; } static void restart_conflicting_writes(struct drbd_conf *mdev, sector_t sector, int size) { struct drbd_interval *i; struct drbd_request *req; drbd_for_each_overlap(i, &mdev->write_requests, sector, size) { if (!i->local) continue; req = container_of(i, struct drbd_request, i); if (req->rq_state & RQ_LOCAL_PENDING || !(req->rq_state & RQ_POSTPONED)) continue; /* as it is RQ_POSTPONED, this will cause it to * be queued on the retry workqueue. */ __req_mod(req, CONFLICT_RESOLVED, NULL); } } /* * e_end_block() is called in asender context via drbd_finish_peer_reqs(). */ static int e_end_block(struct drbd_work *w, int cancel) { struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); struct drbd_conf *mdev = w->mdev; sector_t sector = peer_req->i.sector; int err = 0, pcmd; if (peer_req->flags & EE_SEND_WRITE_ACK) { if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { pcmd = (mdev->state.conn >= C_SYNC_SOURCE && mdev->state.conn <= C_PAUSED_SYNC_T && peer_req->flags & EE_MAY_SET_IN_SYNC) ? P_RS_WRITE_ACK : P_WRITE_ACK; err = drbd_send_ack(mdev, pcmd, peer_req); if (pcmd == P_RS_WRITE_ACK) drbd_set_in_sync(mdev, sector, peer_req->i.size); } else { err = drbd_send_ack(mdev, P_NEG_ACK, peer_req); /* we expect it to be marked out of sync anyways... * maybe assert this? */ } dec_unacked(mdev); } /* we delete from the conflict detection hash _after_ we sent out the * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */ if (peer_req->flags & EE_IN_INTERVAL_TREE) { spin_lock_irq(&mdev->tconn->req_lock); D_ASSERT(!drbd_interval_empty(&peer_req->i)); drbd_remove_epoch_entry_interval(mdev, peer_req); if (peer_req->flags & EE_RESTART_REQUESTS) restart_conflicting_writes(mdev, sector, peer_req->i.size); spin_unlock_irq(&mdev->tconn->req_lock); } else D_ASSERT(drbd_interval_empty(&peer_req->i)); drbd_may_finish_epoch(mdev->tconn, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0)); return err; } static int e_send_ack(struct drbd_work *w, enum drbd_packet ack) { struct drbd_conf *mdev = w->mdev; struct drbd_peer_request *peer_req = container_of(w, struct drbd_peer_request, w); int err; err = drbd_send_ack(mdev, ack, peer_req); dec_unacked(mdev); return err; } static int e_send_superseded(struct drbd_work *w, int unused) { return e_send_ack(w, P_SUPERSEDED); } static int e_send_retry_write(struct drbd_work *w, int unused) { struct drbd_tconn *tconn = w->mdev->tconn; return e_send_ack(w, tconn->agreed_pro_version >= 100 ? P_RETRY_WRITE : P_SUPERSEDED); } static bool seq_greater(u32 a, u32 b) { /* * We assume 32-bit wrap-around here. 
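	 * Example: a == 2 and b == 0xfffffffe gives (s32)a - (s32)b == 2 - (-2) == 4 > 0,
	 * so a is treated as the more recent sequence number even though it is
	 * numerically smaller.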
* For 24-bit wrap-around, we would have to shift: * a <<= 8; b <<= 8; */ return (s32)a - (s32)b > 0; } static u32 seq_max(u32 a, u32 b) { return seq_greater(a, b) ? a : b; } static bool need_peer_seq(struct drbd_conf *mdev) { struct drbd_tconn *tconn = mdev->tconn; int tp; /* * We only need to keep track of the last packet_seq number of our peer * if we are in dual-primary mode and we have the resolve-conflicts flag set; see * handle_write_conflicts(). */ rcu_read_lock(); tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries; rcu_read_unlock(); return tp && test_bit(RESOLVE_CONFLICTS, &tconn->flags); } static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq) { unsigned int newest_peer_seq; if (need_peer_seq(mdev)) { spin_lock(&mdev->peer_seq_lock); newest_peer_seq = seq_max(mdev->peer_seq, peer_seq); mdev->peer_seq = newest_peer_seq; spin_unlock(&mdev->peer_seq_lock); /* wake up only if we actually changed mdev->peer_seq */ if (peer_seq == newest_peer_seq) wake_up(&mdev->seq_wait); } } static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2) { return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9))); } /* maybe change sync_ee into interval trees as well? */ static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_request *peer_req) { struct drbd_peer_request *rs_req; bool rv = 0; spin_lock_irq(&mdev->tconn->req_lock); list_for_each_entry(rs_req, &mdev->sync_ee, w.list) { if (overlaps(peer_req->i.sector, peer_req->i.size, rs_req->i.sector, rs_req->i.size)) { rv = 1; break; } } spin_unlock_irq(&mdev->tconn->req_lock); return rv; } /* Called from receive_Data. * Synchronize packets on sock with packets on msock. * * This is here so even when a P_DATA packet traveling via sock overtook an Ack * packet traveling on msock, they are still processed in the order they have * been sent. * * Note: we don't care for Ack packets overtaking P_DATA packets. * * In case packet_seq is larger than mdev->peer_seq number, there are * outstanding packets on the msock. We wait for them to arrive. * In case we are the logically next packet, we update mdev->peer_seq * ourselves. Correctly handles 32bit wrap around. * * Assume we have a 10 GBit connection, that is about 1<<30 byte per second, * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have * 1<<9 == 512 seconds aka ages for the 32bit wrap around... * * returns 0 if we may process the packet, * -ERESTARTSYS if we were interrupted (by disconnect signal). 
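 * -ETIMEDOUT if the peer's outstanding packets did not arrive within the
 * configured ping timeout (we give up and disconnect in that case).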
*/ static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq) { DEFINE_WAIT(wait); long timeout; int ret; if (!need_peer_seq(mdev)) return 0; spin_lock(&mdev->peer_seq_lock); for (;;) { if (!seq_greater(peer_seq - 1, mdev->peer_seq)) { mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq); ret = 0; break; } if (signal_pending(current)) { ret = -ERESTARTSYS; break; } prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE); spin_unlock(&mdev->peer_seq_lock); rcu_read_lock(); timeout = rcu_dereference(mdev->tconn->net_conf)->ping_timeo*HZ/10; rcu_read_unlock(); timeout = schedule_timeout(timeout); spin_lock(&mdev->peer_seq_lock); if (!timeout) { ret = -ETIMEDOUT; dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n"); break; } } spin_unlock(&mdev->peer_seq_lock); finish_wait(&mdev->seq_wait, &wait); return ret; } /* see also bio_flags_to_wire() * DRBD_REQ_*, because we need to semantically map the flags to data packet * flags and back. We may replicate to other kernel versions. */ static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf) { return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) | (dpf & DP_FUA ? REQ_FUA : 0) | (dpf & DP_FLUSH ? REQ_FLUSH : 0) | (dpf & DP_DISCARD ? REQ_DISCARD : 0); } static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector, unsigned int size) { struct drbd_interval *i; repeat: drbd_for_each_overlap(i, &mdev->write_requests, sector, size) { struct drbd_request *req; struct bio_and_error m; if (!i->local) continue; req = container_of(i, struct drbd_request, i); if (!(req->rq_state & RQ_POSTPONED)) continue; req->rq_state &= ~RQ_POSTPONED; __req_mod(req, NEG_ACKED, &m); spin_unlock_irq(&mdev->tconn->req_lock); if (m.bio) complete_master_bio(mdev, &m); spin_lock_irq(&mdev->tconn->req_lock); goto repeat; } } static int handle_write_conflicts(struct drbd_conf *mdev, struct drbd_peer_request *peer_req) { struct drbd_tconn *tconn = mdev->tconn; bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &tconn->flags); sector_t sector = peer_req->i.sector; const unsigned int size = peer_req->i.size; struct drbd_interval *i; bool equal; int err; /* * Inserting the peer request into the write_requests tree will prevent * new conflicting local requests from being added. */ drbd_insert_interval(&mdev->write_requests, &peer_req->i); repeat: drbd_for_each_overlap(i, &mdev->write_requests, sector, size) { if (i == &peer_req->i) continue; if (!i->local) { /* * Our peer has sent a conflicting remote request; this * should not happen in a two-node setup. Wait for the * earlier peer request to complete. */ err = drbd_wait_misc(mdev, i); if (err) goto out; goto repeat; } equal = i->sector == sector && i->size == size; if (resolve_conflicts) { /* * If the peer request is fully contained within the * overlapping request, it can be considered overwritten * and thus superseded; otherwise, it will be retried * once all overlapping requests have completed. */ bool superseded = i->sector <= sector && i->sector + (i->size >> 9) >= sector + (size >> 9); if (!equal) dev_alert(DEV, "Concurrent writes detected: " "local=%llus +%u, remote=%llus +%u, " "assuming %s came first\n", (unsigned long long)i->sector, i->size, (unsigned long long)sector, size, superseded ? "local" : "remote"); inc_unacked(mdev); peer_req->w.cb = superseded ? 
e_send_superseded : e_send_retry_write; list_add_tail(&peer_req->w.list, &mdev->done_ee); wake_asender(mdev->tconn); err = -ENOENT; goto out; } else { struct drbd_request *req = container_of(i, struct drbd_request, i); if (!equal) dev_alert(DEV, "Concurrent writes detected: " "local=%llus +%u, remote=%llus +%u\n", (unsigned long long)i->sector, i->size, (unsigned long long)sector, size); if (req->rq_state & RQ_LOCAL_PENDING || !(req->rq_state & RQ_POSTPONED)) { /* * Wait for the node with the discard flag to * decide if this request has been superseded * or needs to be retried. * Requests that have been superseded will * disappear from the write_requests tree. * * In addition, wait for the conflicting * request to finish locally before submitting * the conflicting peer request. */ err = drbd_wait_misc(mdev, &req->i); if (err) { _conn_request_state(mdev->tconn, NS(conn, C_TIMEOUT), CS_HARD); fail_postponed_requests(mdev, sector, size); goto out; } goto repeat; } /* * Remember to restart the conflicting requests after * the new peer request has completed. */ peer_req->flags |= EE_RESTART_REQUESTS; } } err = 0; out: if (err) drbd_remove_epoch_entry_interval(mdev, peer_req); return err; } /* mirrored write */ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi) { struct drbd_conf *mdev; sector_t sector; struct drbd_peer_request *peer_req; struct p_data *p = pi->data; u32 peer_seq = be32_to_cpu(p->seq_num); int rw = WRITE; u32 dp_flags; int err, tp; mdev = vnr_to_mdev(tconn, pi->vnr); if (!mdev) return -EIO; if (!get_ldev(mdev)) { int err2; err = wait_for_and_update_peer_seq(mdev, peer_seq); drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size); atomic_inc(&tconn->current_epoch->epoch_size); err2 = drbd_drain_block(mdev, pi->size); if (!err) err = err2; return err; } /* * Corresponding put_ldev done either below (on various errors), or in * drbd_peer_request_endio, if we successfully submit the data at the * end of this function. 
*/ sector = be64_to_cpu(p->sector); peer_req = read_in_block(mdev, p->block_id, sector, pi->size); if (!peer_req) { put_ldev(mdev); return -EIO; } peer_req->w.cb = e_end_block; dp_flags = be32_to_cpu(p->dp_flags); rw |= wire_flags_to_bio(mdev, dp_flags); if (peer_req->pages == NULL) { D_ASSERT(peer_req->i.size == 0); D_ASSERT(dp_flags & DP_FLUSH); } if (dp_flags & DP_MAY_SET_IN_SYNC) peer_req->flags |= EE_MAY_SET_IN_SYNC; spin_lock(&tconn->epoch_lock); peer_req->epoch = tconn->current_epoch; atomic_inc(&peer_req->epoch->epoch_size); atomic_inc(&peer_req->epoch->active); spin_unlock(&tconn->epoch_lock); rcu_read_lock(); tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries; rcu_read_unlock(); if (tp) { peer_req->flags |= EE_IN_INTERVAL_TREE; err = wait_for_and_update_peer_seq(mdev, peer_seq); if (err) goto out_interrupted; spin_lock_irq(&mdev->tconn->req_lock); err = handle_write_conflicts(mdev, peer_req); if (err) { spin_unlock_irq(&mdev->tconn->req_lock); if (err == -ENOENT) { put_ldev(mdev); return 0; } goto out_interrupted; } } else spin_lock_irq(&mdev->tconn->req_lock); list_add(&peer_req->w.list, &mdev->active_ee); spin_unlock_irq(&mdev->tconn->req_lock); if (mdev->state.conn == C_SYNC_TARGET) wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, peer_req)); if (mdev->tconn->agreed_pro_version < 100) { rcu_read_lock(); switch (rcu_dereference(mdev->tconn->net_conf)->wire_protocol) { case DRBD_PROT_C: dp_flags |= DP_SEND_WRITE_ACK; break; case DRBD_PROT_B: dp_flags |= DP_SEND_RECEIVE_ACK; break; } rcu_read_unlock(); } if (dp_flags & DP_SEND_WRITE_ACK) { peer_req->flags |= EE_SEND_WRITE_ACK; inc_unacked(mdev); /* corresponding dec_unacked() in e_end_block() * respective _drbd_clear_done_ee */ } if (dp_flags & DP_SEND_RECEIVE_ACK) { /* I really don't like it that the receiver thread * sends on the msock, but anyways */ drbd_send_ack(mdev, P_RECV_ACK, peer_req); } if (mdev->state.pdsk < D_INCONSISTENT) { /* In case we have the only disk of the cluster, */ drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size); peer_req->flags |= EE_CALL_AL_COMPLETE_IO; peer_req->flags &= ~EE_MAY_SET_IN_SYNC; drbd_al_begin_io(mdev, &peer_req->i, true); } err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR); if (!err) return 0; /* don't care for the reason here */ dev_err(DEV, "submit failed, triggering re-connect\n"); spin_lock_irq(&mdev->tconn->req_lock); list_del(&peer_req->w.list); drbd_remove_epoch_entry_interval(mdev, peer_req); spin_unlock_irq(&mdev->tconn->req_lock); if (peer_req->flags & EE_CALL_AL_COMPLETE_IO) drbd_al_complete_io(mdev, &peer_req->i); out_interrupted: drbd_may_finish_epoch(tconn, peer_req->epoch, EV_PUT + EV_CLEANUP); put_ldev(mdev); drbd_free_peer_req(mdev, peer_req); return err; } /* We may throttle resync, if the lower device seems to be busy, * and current sync rate is above c_min_rate. * * To decide whether or not the lower device is busy, we use a scheme similar * to MD RAID is_mddev_idle(): if the partition stats reveal "significant" * (more than 64 sectors) of activity we cannot account for with our own resync * activity, it obviously is "busy". * * The current sync rate used here uses only the most recent two step marks, * to have a short time average so we can react faster. 
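 * Returns non-zero (do throttle) when the backing device sees significant
 * I/O that we cannot attribute to our own resync and the resync rate
 * measured over the last mark interval already exceeds the configured
 * c-min-rate.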
*/ int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector) { struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk; unsigned long db, dt, dbdt; struct lc_element *tmp; int curr_events; int throttle = 0; unsigned int c_min_rate; rcu_read_lock(); c_min_rate = rcu_dereference(mdev->ldev->disk_conf)->c_min_rate; rcu_read_unlock(); /* feature disabled? */ if (c_min_rate == 0) return 0; spin_lock_irq(&mdev->al_lock); tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector)); if (tmp) { struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce); if (test_bit(BME_PRIORITY, &bm_ext->flags)) { spin_unlock_irq(&mdev->al_lock); return 0; } /* Do not slow down if app IO is already waiting for this extent */ } spin_unlock_irq(&mdev->al_lock); curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + (int)part_stat_read(&disk->part0, sectors[1]) - atomic_read(&mdev->rs_sect_ev); if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) { unsigned long rs_left; int i; mdev->rs_last_events = curr_events; /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP, * approx. */ i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS; if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) rs_left = mdev->ov_left; else rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed; dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ; if (!dt) dt++; db = mdev->rs_mark_left[i] - rs_left; dbdt = Bit2KB(db/dt); if (dbdt > c_min_rate) throttle = 1; } return throttle; } static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi) { struct drbd_conf *mdev; sector_t sector; sector_t capacity; struct drbd_peer_request *peer_req; struct digest_info *di = NULL; int size, verb; unsigned int fault_type; struct p_block_req *p = pi->data; mdev = vnr_to_mdev(tconn, pi->vnr); if (!mdev) return -EIO; capacity = drbd_get_capacity(mdev->this_bdev); sector = be64_to_cpu(p->sector); size = be32_to_cpu(p->blksize); if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) { dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, (unsigned long long)sector, size); return -EINVAL; } if (sector + (size>>9) > capacity) { dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, (unsigned long long)sector, size); return -EINVAL; } if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) { verb = 1; switch (pi->cmd) { case P_DATA_REQUEST: drbd_send_ack_rp(mdev, P_NEG_DREPLY, p); break; case P_RS_DATA_REQUEST: case P_CSUM_RS_REQUEST: case P_OV_REQUEST: drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p); break; case P_OV_REPLY: verb = 0; dec_rs_pending(mdev); drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC); break; default: BUG(); } if (verb && __ratelimit(&drbd_ratelimit_state)) dev_err(DEV, "Can not satisfy peer's read request, " "no local data.\n"); /* drain possibly payload */ return drbd_drain_block(mdev, pi->size); } /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD * "criss-cross" setup, that might cause write-out on some other DRBD, * which in turn might block on the other node at this very place. 
*/ peer_req = drbd_alloc_peer_req(mdev, p->block_id, sector, size, GFP_NOIO); if (!peer_req) { put_ldev(mdev); return -ENOMEM; } switch (pi->cmd) { case P_DATA_REQUEST: peer_req->w.cb = w_e_end_data_req; fault_type = DRBD_FAULT_DT_RD; /* application IO, don't drbd_rs_begin_io */ goto submit; case P_RS_DATA_REQUEST: peer_req->w.cb = w_e_end_rsdata_req; fault_type = DRBD_FAULT_RS_RD; /* used in the sector offset progress display */ mdev->bm_resync_fo = BM_SECT_TO_BIT(sector); break; case P_OV_REPLY: case P_CSUM_RS_REQUEST: fault_type = DRBD_FAULT_RS_RD; di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO); if (!di) goto out_free_e; di->digest_size = pi->size; di->digest = (((char *)di)+sizeof(struct digest_info)); peer_req->digest = di; peer_req->flags |= EE_HAS_DIGEST; if (drbd_recv_all(mdev->tconn, di->digest, pi->size)) goto out_free_e; if (pi->cmd == P_CSUM_RS_REQUEST) { D_ASSERT(mdev->tconn->agreed_pro_version >= 89); peer_req->w.cb = w_e_end_csum_rs_req; /* used in the sector offset progress display */ mdev->bm_resync_fo = BM_SECT_TO_BIT(sector); } else if (pi->cmd == P_OV_REPLY) { /* track progress, we may need to throttle */ atomic_add(size >> 9, &mdev->rs_sect_in); peer_req->w.cb = w_e_end_ov_reply; dec_rs_pending(mdev); /* drbd_rs_begin_io done when we sent this request, * but accounting still needs to be done. */ goto submit_for_resync; } break; case P_OV_REQUEST: if (mdev->ov_start_sector == ~(sector_t)0 && mdev->tconn->agreed_pro_version >= 90) { unsigned long now = jiffies; int i; mdev->ov_start_sector = sector; mdev->ov_position = sector; mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector); mdev->rs_total = mdev->ov_left; for (i = 0; i < DRBD_SYNC_MARKS; i++) { mdev->rs_mark_left[i] = mdev->ov_left; mdev->rs_mark_time[i] = now; } dev_info(DEV, "Online Verify start sector: %llu\n", (unsigned long long)sector); } peer_req->w.cb = w_e_end_ov_req; fault_type = DRBD_FAULT_RS_RD; break; default: BUG(); } /* Throttle, drbd_rs_begin_io and submit should become asynchronous * wrt the receiver, but it is not as straightforward as it may seem. * Various places in the resync start and stop logic assume resync * requests are processed in order, requeuing this on the worker thread * introduces a bunch of new code for synchronization between threads. * * Unlimited throttling before drbd_rs_begin_io may stall the resync * "forever", throttling after drbd_rs_begin_io will lock that extent * for application writes for the same time. For now, just throttle * here, where the rest of the code expects the receiver to sleep for * a while, anyways. */ /* Throttle before drbd_rs_begin_io, as that locks out application IO; * this defers syncer requests for some time, before letting at least * on request through. The resync controller on the receiving side * will adapt to the incoming rate accordingly. * * We cannot throttle here if remote is Primary/SyncTarget: * we would also throttle its application reads. * In that case, throttling is done on the SyncTarget only. 
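	 * The throttle itself is the schedule_timeout_uninterruptible(HZ/10)
	 * below: the receiver simply sleeps for about 100ms before trying to
	 * lock the resync extent via drbd_rs_begin_io().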
*/ if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector)) schedule_timeout_uninterruptible(HZ/10); if (drbd_rs_begin_io(mdev, sector)) goto out_free_e; submit_for_resync: atomic_add(size >> 9, &mdev->rs_sect_ev); submit: inc_unacked(mdev); spin_lock_irq(&mdev->tconn->req_lock); list_add_tail(&peer_req->w.list, &mdev->read_ee); spin_unlock_irq(&mdev->tconn->req_lock); if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0) return 0; /* don't care for the reason here */ dev_err(DEV, "submit failed, triggering re-connect\n"); spin_lock_irq(&mdev->tconn->req_lock); list_del(&peer_req->w.list); spin_unlock_irq(&mdev->tconn->req_lock); /* no drbd_rs_complete_io(), we are dropping the connection anyways */ out_free_e: put_ldev(mdev); drbd_free_peer_req(mdev, peer_req); return -EIO; } static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local) { int self, peer, rv = -100; unsigned long ch_self, ch_peer; enum drbd_after_sb_p after_sb_0p; self = mdev->ldev->md.uuid[UI_BITMAP] & 1; peer = mdev->p_uuid[UI_BITMAP] & 1; ch_peer = mdev->p_uuid[UI_SIZE]; ch_self = mdev->comm_bm_set; rcu_read_lock(); after_sb_0p = rcu_dereference(mdev->tconn->net_conf)->after_sb_0p; rcu_read_unlock(); switch (after_sb_0p) { case ASB_CONSENSUS: case ASB_DISCARD_SECONDARY: case ASB_CALL_HELPER: case ASB_VIOLENTLY: dev_err(DEV, "Configuration error.\n"); break; case ASB_DISCONNECT: break; case ASB_DISCARD_YOUNGER_PRI: if (self == 0 && peer == 1) { rv = -1; break; } if (self == 1 && peer == 0) { rv = 1; break; } /* Else fall through to one of the other strategies... */ case ASB_DISCARD_OLDER_PRI: if (self == 0 && peer == 1) { rv = 1; break; } if (self == 1 && peer == 0) { rv = -1; break; } /* Else fall through to one of the other strategies... */ dev_warn(DEV, "Discard younger/older primary did not find a decision\n" "Using discard-least-changes instead\n"); case ASB_DISCARD_ZERO_CHG: if (ch_peer == 0 && ch_self == 0) { rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags) ? -1 : 1; break; } else { if (ch_peer == 0) { rv = 1; break; } if (ch_self == 0) { rv = -1; break; } } if (after_sb_0p == ASB_DISCARD_ZERO_CHG) break; case ASB_DISCARD_LEAST_CHG: if (ch_self < ch_peer) rv = -1; else if (ch_self > ch_peer) rv = 1; else /* ( ch_self == ch_peer ) */ /* Well, then use something else. */ rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags) ? -1 : 1; break; case ASB_DISCARD_LOCAL: rv = -1; break; case ASB_DISCARD_REMOTE: rv = 1; } return rv; } static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local) { int hg, rv = -100; enum drbd_after_sb_p after_sb_1p; rcu_read_lock(); after_sb_1p = rcu_dereference(mdev->tconn->net_conf)->after_sb_1p; rcu_read_unlock(); switch (after_sb_1p) { case ASB_DISCARD_YOUNGER_PRI: case ASB_DISCARD_OLDER_PRI: case ASB_DISCARD_LEAST_CHG: case ASB_DISCARD_LOCAL: case ASB_DISCARD_REMOTE: case ASB_DISCARD_ZERO_CHG: dev_err(DEV, "Configuration error.\n"); break; case ASB_DISCONNECT: break; case ASB_CONSENSUS: hg = drbd_asb_recover_0p(mdev); if (hg == -1 && mdev->state.role == R_SECONDARY) rv = hg; if (hg == 1 && mdev->state.role == R_PRIMARY) rv = hg; break; case ASB_VIOLENTLY: rv = drbd_asb_recover_0p(mdev); break; case ASB_DISCARD_SECONDARY: return mdev->state.role == R_PRIMARY ? 1 : -1; case ASB_CALL_HELPER: hg = drbd_asb_recover_0p(mdev); if (hg == -1 && mdev->state.role == R_PRIMARY) { enum drbd_state_rv rv2; /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, * we might be here in C_WF_REPORT_PARAMS which is transient. 
* we do not need to wait for the after state change work either. */ rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); if (rv2 != SS_SUCCESS) { drbd_khelper(mdev, "pri-lost-after-sb"); } else { dev_warn(DEV, "Successfully gave up primary role.\n"); rv = hg; } } else rv = hg; } return rv; } static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local) { int hg, rv = -100; enum drbd_after_sb_p after_sb_2p; rcu_read_lock(); after_sb_2p = rcu_dereference(mdev->tconn->net_conf)->after_sb_2p; rcu_read_unlock(); switch (after_sb_2p) { case ASB_DISCARD_YOUNGER_PRI: case ASB_DISCARD_OLDER_PRI: case ASB_DISCARD_LEAST_CHG: case ASB_DISCARD_LOCAL: case ASB_DISCARD_REMOTE: case ASB_CONSENSUS: case ASB_DISCARD_SECONDARY: case ASB_DISCARD_ZERO_CHG: dev_err(DEV, "Configuration error.\n"); break; case ASB_VIOLENTLY: rv = drbd_asb_recover_0p(mdev); break; case ASB_DISCONNECT: break; case ASB_CALL_HELPER: hg = drbd_asb_recover_0p(mdev); if (hg == -1) { enum drbd_state_rv rv2; /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, * we might be here in C_WF_REPORT_PARAMS which is transient. * we do not need to wait for the after state change work either. */ rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); if (rv2 != SS_SUCCESS) { drbd_khelper(mdev, "pri-lost-after-sb"); } else { dev_warn(DEV, "Successfully gave up primary role.\n"); rv = hg; } } else rv = hg; } return rv; } static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid, u64 bits, u64 flags) { if (!uuid) { dev_info(DEV, "%s uuid info vanished while I was looking!\n", text); return; } dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n", text, (unsigned long long)uuid[UI_CURRENT], (unsigned long long)uuid[UI_BITMAP], (unsigned long long)uuid[UI_HISTORY_START], (unsigned long long)uuid[UI_HISTORY_END], (unsigned long long)bits, (unsigned long long)flags); } /* 100 after split brain try auto recover 2 C_SYNC_SOURCE set BitMap 1 C_SYNC_SOURCE use BitMap 0 no Sync -1 C_SYNC_TARGET use BitMap -2 C_SYNC_TARGET set BitMap -100 after split brain, disconnect -1000 unrelated data -1091 requires proto 91 -1096 requires proto 96 */ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local) { u64 self, peer; int i, j; self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1); peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1); *rule_nr = 10; if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED) return 0; *rule_nr = 20; if ((self == UUID_JUST_CREATED || self == (u64)0) && peer != UUID_JUST_CREATED) return -2; *rule_nr = 30; if (self != UUID_JUST_CREATED && (peer == UUID_JUST_CREATED || peer == (u64)0)) return 2; if (self == peer) { int rct, dc; /* roles at crash time */ if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) { if (mdev->tconn->agreed_pro_version < 91) return -1091; if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) && (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) { dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n"); drbd_uuid_move_history(mdev); mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP]; mdev->ldev->md.uuid[UI_BITMAP] = 0; drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->state.disk >= D_NEGOTIATING ? 
drbd_bm_total_weight(mdev) : 0, 0); *rule_nr = 34; } else { dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n"); *rule_nr = 36; } return 1; } if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) { if (mdev->tconn->agreed_pro_version < 91) return -1091; if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) && (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) { dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n"); mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START]; mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP]; mdev->p_uuid[UI_BITMAP] = 0UL; drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]); *rule_nr = 35; } else { dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n"); *rule_nr = 37; } return -1; } /* Common power [off|failure] */ rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) + (mdev->p_uuid[UI_FLAGS] & 2); /* lowest bit is set when we were primary, * next bit (weight 2) is set when peer was primary */ *rule_nr = 40; switch (rct) { case 0: /* !self_pri && !peer_pri */ return 0; case 1: /* self_pri && !peer_pri */ return 1; case 2: /* !self_pri && peer_pri */ return -1; case 3: /* self_pri && peer_pri */ dc = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags); return dc ? -1 : 1; } } *rule_nr = 50; peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1); if (self == peer) return -1; *rule_nr = 51; peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1); if (self == peer) { if (mdev->tconn->agreed_pro_version < 96 ? (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) : peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) { /* The last P_SYNC_UUID did not get though. Undo the last start of resync as sync source modifications of the peer's UUIDs. */ if (mdev->tconn->agreed_pro_version < 91) return -1091; mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START]; mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1]; dev_info(DEV, "Lost last syncUUID packet, corrected:\n"); drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]); return -1; } } *rule_nr = 60; self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1); for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) { peer = mdev->p_uuid[i] & ~((u64)1); if (self == peer) return -2; } *rule_nr = 70; self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1); peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1); if (self == peer) return 1; *rule_nr = 71; self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1); if (self == peer) { if (mdev->tconn->agreed_pro_version < 96 ? (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) : self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) { /* The last P_SYNC_UUID did not get though. Undo the last start of resync as sync source modifications of our UUIDs. */ if (mdev->tconn->agreed_pro_version < 91) return -1091; __drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]); __drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]); dev_info(DEV, "Last syncUUID did not get through, corrected:\n"); drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->state.disk >= D_NEGOTIATING ? 
drbd_bm_total_weight(mdev) : 0, 0); return 1; } } *rule_nr = 80; peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1); for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) { self = mdev->ldev->md.uuid[i] & ~((u64)1); if (self == peer) return 2; } *rule_nr = 90; self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1); peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1); if (self == peer && self != ((u64)0)) return 100; *rule_nr = 100; for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) { self = mdev->ldev->md.uuid[i] & ~((u64)1); for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) { peer = mdev->p_uuid[j] & ~((u64)1); if (self == peer) return -100; } } return -1000; } /* drbd_sync_handshake() returns the new conn state on success, or CONN_MASK (-1) on failure. */ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role, enum drbd_disk_state peer_disk) __must_hold(local) { enum drbd_conns rv = C_MASK; enum drbd_disk_state mydisk; struct net_conf *nc; int hg, rule_nr, rr_conflict, tentative; mydisk = mdev->state.disk; if (mydisk == D_NEGOTIATING) mydisk = mdev->new_state_tmp.disk; dev_info(DEV, "drbd_sync_handshake:\n"); spin_lock_irq(&mdev->ldev->md.uuid_lock); drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0); drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]); hg = drbd_uuid_compare(mdev, &rule_nr); spin_unlock_irq(&mdev->ldev->md.uuid_lock); dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr); if (hg == -1000) { dev_alert(DEV, "Unrelated data, aborting!\n"); return C_MASK; } if (hg < -1000) { dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000); return C_MASK; } if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) || (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) { int f = (hg == -100) || abs(hg) == 2; hg = mydisk > D_INCONSISTENT ? 1 : -1; if (f) hg = hg*2; dev_info(DEV, "Becoming sync %s due to disk states.\n", hg > 0 ? "source" : "target"); } if (abs(hg) == 100) drbd_khelper(mdev, "initial-split-brain"); rcu_read_lock(); nc = rcu_dereference(mdev->tconn->net_conf); if (hg == 100 || (hg == -100 && nc->always_asbp)) { int pcount = (mdev->state.role == R_PRIMARY) + (peer_role == R_PRIMARY); int forced = (hg == -100); switch (pcount) { case 0: hg = drbd_asb_recover_0p(mdev); break; case 1: hg = drbd_asb_recover_1p(mdev); break; case 2: hg = drbd_asb_recover_2p(mdev); break; } if (abs(hg) < 100) { dev_warn(DEV, "Split-Brain detected, %d primaries, " "automatically solved. Sync from %s node\n", pcount, (hg < 0) ? "peer" : "this"); if (forced) { dev_warn(DEV, "Doing a full sync, since" " UUIDs where ambiguous.\n"); hg = hg*2; } } } if (hg == -100) { if (test_bit(DISCARD_MY_DATA, &mdev->flags) && !(mdev->p_uuid[UI_FLAGS]&1)) hg = -1; if (!test_bit(DISCARD_MY_DATA, &mdev->flags) && (mdev->p_uuid[UI_FLAGS]&1)) hg = 1; if (abs(hg) < 100) dev_warn(DEV, "Split-Brain detected, manually solved. " "Sync from %s node\n", (hg < 0) ? "peer" : "this"); } rr_conflict = nc->rr_conflict; tentative = nc->tentative; rcu_read_unlock(); if (hg == -100) { /* FIXME this log message is not correct if we end up here * after an attempted attach on a diskless node. * We just refuse to attach -- well, we drop the "connection" * to that disk, in a way... 
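	 * Reaching this point means hg is still -100: a split brain was
	 * detected and neither the automatic after-sb policies nor the
	 * discard-my-data handling above could pick a side.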
		 */
		dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
		drbd_khelper(mdev, "split-brain");
		return C_MASK;
	}

	if (hg > 0 && mydisk <= D_INCONSISTENT) {
		dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
		return C_MASK;
	}

	if (hg < 0 && /* by intention we do not use mydisk here. */
	    mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
		switch (rr_conflict) {
		case ASB_CALL_HELPER:
			drbd_khelper(mdev, "pri-lost");
			/* fall through */
		case ASB_DISCONNECT:
			dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
			return C_MASK;
		case ASB_VIOLENTLY:
			dev_warn(DEV, "Becoming SyncTarget, violating the stable-data "
				      "assumption\n");
		}
	}

	if (tentative || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
		if (hg == 0)
			dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
		else
			dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
				 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
				 abs(hg) >= 2 ? "full" : "bit-map based");
		return C_MASK;
	}

	if (abs(hg) >= 2) {
		dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
					BM_LOCKED_SET_ALLOWED))
			return C_MASK;
	}

	if (hg > 0) { /* become sync source. */
		rv = C_WF_BITMAP_S;
	} else if (hg < 0) { /* become sync target */
		rv = C_WF_BITMAP_T;
	} else {
		rv = C_CONNECTED;
		if (drbd_bm_total_weight(mdev)) {
			dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
				 drbd_bm_total_weight(mdev));
		}
	}

	return rv;
}

static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
{
	/* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
	if (peer == ASB_DISCARD_REMOTE)
		return ASB_DISCARD_LOCAL;

	/* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
	if (peer == ASB_DISCARD_LOCAL)
		return ASB_DISCARD_REMOTE;

	/* everything else is valid if they are equal on both sides.
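	 * The LOCAL/REMOTE swap above is needed because each node expresses
	 * this setting from its own point of view: the peer's discard-remote
	 * corresponds to our discard-local, and vice versa.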
*/ return peer; } static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi) { struct p_protocol *p = pi->data; enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p; int p_proto, p_discard_my_data, p_two_primaries, cf; struct net_conf *nc, *old_net_conf, *new_net_conf = NULL; char integrity_alg[SHARED_SECRET_MAX] = ""; struct crypto_hash *peer_integrity_tfm = NULL; void *int_dig_in = NULL, *int_dig_vv = NULL; p_proto = be32_to_cpu(p->protocol); p_after_sb_0p = be32_to_cpu(p->after_sb_0p); p_after_sb_1p = be32_to_cpu(p->after_sb_1p); p_after_sb_2p = be32_to_cpu(p->after_sb_2p); p_two_primaries = be32_to_cpu(p->two_primaries); cf = be32_to_cpu(p->conn_flags); p_discard_my_data = cf & CF_DISCARD_MY_DATA; if (tconn->agreed_pro_version >= 87) { int err; if (pi->size > sizeof(integrity_alg)) return -EIO; err = drbd_recv_all(tconn, integrity_alg, pi->size); if (err) return err; integrity_alg[SHARED_SECRET_MAX - 1] = 0; } if (pi->cmd != P_PROTOCOL_UPDATE) { clear_bit(CONN_DRY_RUN, &tconn->flags); if (cf & CF_DRY_RUN) set_bit(CONN_DRY_RUN, &tconn->flags); rcu_read_lock(); nc = rcu_dereference(tconn->net_conf); if (p_proto != nc->wire_protocol) { conn_err(tconn, "incompatible %s settings\n", "protocol"); goto disconnect_rcu_unlock; } if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) { conn_err(tconn, "incompatible %s settings\n", "after-sb-0pri"); goto disconnect_rcu_unlock; } if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) { conn_err(tconn, "incompatible %s settings\n", "after-sb-1pri"); goto disconnect_rcu_unlock; } if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) { conn_err(tconn, "incompatible %s settings\n", "after-sb-2pri"); goto disconnect_rcu_unlock; } if (p_discard_my_data && nc->discard_my_data) { conn_err(tconn, "incompatible %s settings\n", "discard-my-data"); goto disconnect_rcu_unlock; } if (p_two_primaries != nc->two_primaries) { conn_err(tconn, "incompatible %s settings\n", "allow-two-primaries"); goto disconnect_rcu_unlock; } if (strcmp(integrity_alg, nc->integrity_alg)) { conn_err(tconn, "incompatible %s settings\n", "data-integrity-alg"); goto disconnect_rcu_unlock; } rcu_read_unlock(); } if (integrity_alg[0]) { int hash_size; /* * We can only change the peer data integrity algorithm * here. Changing our own data integrity algorithm * requires that we send a P_PROTOCOL_UPDATE packet at * the same time; otherwise, the peer has no way to * tell between which packets the algorithm should * change. 
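		 * That is also why the tfm allocated below only replaces
		 * tconn->peer_integrity_tfm (plus freshly sized int_dig_in and
		 * int_dig_vv buffers), never our own sending side.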
*/ peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC); if (!peer_integrity_tfm) { conn_err(tconn, "peer data-integrity-alg %s not supported\n", integrity_alg); goto disconnect; } hash_size = crypto_hash_digestsize(peer_integrity_tfm); int_dig_in = kmalloc(hash_size, GFP_KERNEL); int_dig_vv = kmalloc(hash_size, GFP_KERNEL); if (!(int_dig_in && int_dig_vv)) { conn_err(tconn, "Allocation of buffers for data integrity checking failed\n"); goto disconnect; } } new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL); if (!new_net_conf) { conn_err(tconn, "Allocation of new net_conf failed\n"); goto disconnect; } mutex_lock(&tconn->data.mutex); mutex_lock(&tconn->conf_update); old_net_conf = tconn->net_conf; *new_net_conf = *old_net_conf; new_net_conf->wire_protocol = p_proto; new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p); new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p); new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p); new_net_conf->two_primaries = p_two_primaries; rcu_assign_pointer(tconn->net_conf, new_net_conf); mutex_unlock(&tconn->conf_update); mutex_unlock(&tconn->data.mutex); crypto_free_hash(tconn->peer_integrity_tfm); kfree(tconn->int_dig_in); kfree(tconn->int_dig_vv); tconn->peer_integrity_tfm = peer_integrity_tfm; tconn->int_dig_in = int_dig_in; tconn->int_dig_vv = int_dig_vv; if (strcmp(old_net_conf->integrity_alg, integrity_alg)) conn_info(tconn, "peer data-integrity-alg: %s\n", integrity_alg[0] ? integrity_alg : "(none)"); synchronize_rcu(); kfree(old_net_conf); return 0; disconnect_rcu_unlock: rcu_read_unlock(); disconnect: crypto_free_hash(peer_integrity_tfm); kfree(int_dig_in); kfree(int_dig_vv); conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD); return -EIO; } /* helper function * input: alg name, feature name * return: NULL (alg name was "") * ERR_PTR(error) if something goes wrong * or the crypto hash ptr, if it worked out ok. */ struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev, const char *alg, const char *name) { struct crypto_hash *tfm; if (!alg[0]) return NULL; tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) { dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n", alg, name, PTR_ERR(tfm)); return tfm; } return tfm; } static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info *pi) { void *buffer = tconn->data.rbuf; int size = pi->size; while (size) { int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE); s = drbd_recv(tconn, buffer, s); if (s <= 0) { if (s < 0) return s; break; } size -= s; } if (size) return -EIO; return 0; } /* * config_unknown_volume - device configuration command for unknown volume * * When a device is added to an existing connection, the node on which the * device is added first will send configuration commands to its peer but the * peer will not know about the device yet. It will warn and ignore these * commands. Once the device is added on the second node, the second node will * send the same device configuration commands, but in the other direction. * * (We can also end up here if drbd is misconfigured.) 
*/ static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi) { conn_warn(tconn, "%s packet received for volume %u, which is not configured locally\n", cmdname(pi->cmd), pi->vnr); return ignore_remaining_packet(tconn, pi); } static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi) { struct drbd_conf *mdev; struct p_rs_param_95 *p; unsigned int header_size, data_size, exp_max_sz; struct crypto_hash *verify_tfm = NULL; struct crypto_hash *csums_tfm = NULL; struct net_conf *old_net_conf, *new_net_conf = NULL; struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL; const int apv = tconn->agreed_pro_version; struct fifo_buffer *old_plan = NULL, *new_plan = NULL; int fifo_size = 0; int err; mdev = vnr_to_mdev(tconn, pi->vnr); if (!mdev) return config_unknown_volume(tconn, pi); exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param) : apv == 88 ? sizeof(struct p_rs_param) + SHARED_SECRET_MAX : apv <= 94 ? sizeof(struct p_rs_param_89) : /* apv >= 95 */ sizeof(struct p_rs_param_95); if (pi->size > exp_max_sz) { dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n", pi->size, exp_max_sz); return -EIO; } if (apv <= 88) { header_size = sizeof(struct p_rs_param); data_size = pi->size - header_size; } else if (apv <= 94) { header_size = sizeof(struct p_rs_param_89); data_size = pi->size - header_size; D_ASSERT(data_size == 0); } else { header_size = sizeof(struct p_rs_param_95); data_size = pi->size - header_size; D_ASSERT(data_size == 0); } /* initialize verify_alg and csums_alg */ p = pi->data; memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX); err = drbd_recv_all(mdev->tconn, p, header_size); if (err) return err; mutex_lock(&mdev->tconn->conf_update); old_net_conf = mdev->tconn->net_conf; if (get_ldev(mdev)) { new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL); if (!new_disk_conf) { put_ldev(mdev); mutex_unlock(&mdev->tconn->conf_update); dev_err(DEV, "Allocation of new disk_conf failed\n"); return -ENOMEM; } old_disk_conf = mdev->ldev->disk_conf; *new_disk_conf = *old_disk_conf; new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate); } if (apv >= 88) { if (apv == 88) { if (data_size > SHARED_SECRET_MAX || data_size == 0) { dev_err(DEV, "verify-alg of wrong size, " "peer wants %u, accepting only up to %u byte\n", data_size, SHARED_SECRET_MAX); err = -EIO; goto reconnect; } err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size); if (err) goto reconnect; /* we expect NUL terminated string */ /* but just in case someone tries to be evil */ D_ASSERT(p->verify_alg[data_size-1] == 0); p->verify_alg[data_size-1] = 0; } else /* apv >= 89 */ { /* we still expect NUL terminated strings */ /* but just in case someone tries to be evil */ D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0); D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0); p->verify_alg[SHARED_SECRET_MAX-1] = 0; p->csums_alg[SHARED_SECRET_MAX-1] = 0; } if (strcmp(old_net_conf->verify_alg, p->verify_alg)) { if (mdev->state.conn == C_WF_REPORT_PARAMS) { dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n", old_net_conf->verify_alg, p->verify_alg); goto disconnect; } verify_tfm = drbd_crypto_alloc_digest_safe(mdev, p->verify_alg, "verify-alg"); if (IS_ERR(verify_tfm)) { verify_tfm = NULL; goto disconnect; } } if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) { if (mdev->state.conn == C_WF_REPORT_PARAMS) { dev_err(DEV, "Different csums-alg settings. 
me=\"%s\" peer=\"%s\"\n", old_net_conf->csums_alg, p->csums_alg); goto disconnect; } csums_tfm = drbd_crypto_alloc_digest_safe(mdev, p->csums_alg, "csums-alg"); if (IS_ERR(csums_tfm)) { csums_tfm = NULL; goto disconnect; } } if (apv > 94 && new_disk_conf) { new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead); new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target); new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target); new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate); fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ; if (fifo_size != mdev->rs_plan_s->size) { new_plan = fifo_alloc(fifo_size); if (!new_plan) { dev_err(DEV, "kmalloc of fifo_buffer failed"); put_ldev(mdev); goto disconnect; } } } if (verify_tfm || csums_tfm) { new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL); if (!new_net_conf) { dev_err(DEV, "Allocation of new net_conf failed\n"); goto disconnect; } *new_net_conf = *old_net_conf; if (verify_tfm) { strcpy(new_net_conf->verify_alg, p->verify_alg); new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1; crypto_free_hash(mdev->tconn->verify_tfm); mdev->tconn->verify_tfm = verify_tfm; dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg); } if (csums_tfm) { strcpy(new_net_conf->csums_alg, p->csums_alg); new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1; crypto_free_hash(mdev->tconn->csums_tfm); mdev->tconn->csums_tfm = csums_tfm; dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg); } rcu_assign_pointer(tconn->net_conf, new_net_conf); } } if (new_disk_conf) { rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf); put_ldev(mdev); } if (new_plan) { old_plan = mdev->rs_plan_s; rcu_assign_pointer(mdev->rs_plan_s, new_plan); } mutex_unlock(&mdev->tconn->conf_update); synchronize_rcu(); if (new_net_conf) kfree(old_net_conf); kfree(old_disk_conf); kfree(old_plan); return 0; reconnect: if (new_disk_conf) { put_ldev(mdev); kfree(new_disk_conf); } mutex_unlock(&mdev->tconn->conf_update); return -EIO; disconnect: kfree(new_plan); if (new_disk_conf) { put_ldev(mdev); kfree(new_disk_conf); } mutex_unlock(&mdev->tconn->conf_update); /* just for completeness: actually not needed, * as this is not reached if csums_tfm was ok. */ crypto_free_hash(csums_tfm); /* but free the verify_tfm again, if csums_tfm did not work out */ crypto_free_hash(verify_tfm); conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD); return -EIO; } /* warn if the arguments differ by more than 12.5% */ static void warn_if_differ_considerably(struct drbd_conf *mdev, const char *s, sector_t a, sector_t b) { sector_t d; if (a == 0 || b == 0) return; d = (a > b) ? (a - b) : (b - a); if (d > (a>>3) || d > (b>>3)) dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s, (unsigned long long)a, (unsigned long long)b); } static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi) { struct drbd_conf *mdev; struct p_sizes *p = pi->data; enum determine_dev_size dd = unchanged; sector_t p_size, p_usize, my_usize; int ldsc = 0; /* local disk size changed */ enum dds_flags ddsf; mdev = vnr_to_mdev(tconn, pi->vnr); if (!mdev) return config_unknown_volume(tconn, pi); p_size = be64_to_cpu(p->d_size); p_usize = be64_to_cpu(p->u_size); /* just store the peer's disk size for now. * we still need to figure out whether we accept that. 
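	 * During the initial parameter exchange (C_WF_REPORT_PARAMS) the
	 * effective requested size becomes min_not_zero() of both nodes'
	 * configured u_size, see below.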
*/ mdev->p_size = p_size; if (get_ldev(mdev)) { rcu_read_lock(); my_usize = rcu_dereference(mdev->ldev->disk_conf)->disk_size; rcu_read_unlock(); warn_if_differ_considerably(mdev, "lower level device sizes", p_size, drbd_get_max_capacity(mdev->ldev)); warn_if_differ_considerably(mdev, "user requested size", p_usize, my_usize); /* if this is the first connect, or an otherwise expected * param exchange, choose the minimum */ if (mdev->state.conn == C_WF_REPORT_PARAMS) p_usize = min_not_zero(my_usize, p_usize); /* Never shrink a device with usable data during connect. But allow online shrinking if we are connected. */ if (drbd_new_dev_size(mdev, mdev->ldev, p_usize, 0) < drbd_get_capacity(mdev->this_bdev) && mdev->state.disk >= D_OUTDATED && mdev->state.conn < C_CONNECTED) { dev_err(DEV, "The peer's disk size is too small!\n"); conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD); put_ldev(mdev); return -EIO; } if (my_usize != p_usize) { struct disk_conf *old_disk_conf, *new_disk_conf = NULL; new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL); if (!new_disk_conf) { dev_err(DEV, "Allocation of new disk_conf failed\n"); put_ldev(mdev); return -ENOMEM; } mutex_lock(&mdev->tconn->conf_update); old_disk_conf = mdev->ldev->disk_conf; *new_disk_conf = *old_disk_conf; new_disk_conf->disk_size = p_usize; rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf); mutex_unlock(&mdev->tconn->conf_update); synchronize_rcu(); kfree(old_disk_conf); dev_info(DEV, "Peer sets u_size to %lu sectors\n", (unsigned long)my_usize); } put_ldev(mdev); } ddsf = be16_to_cpu(p->dds_flags); if (get_ldev(mdev)) { dd = drbd_determine_dev_size(mdev, ddsf); put_ldev(mdev); if (dd == dev_size_error) return -EIO; drbd_md_sync(mdev); } else { /* I am diskless, need to accept the peer's size. */ drbd_set_my_capacity(mdev, p_size); } mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size); drbd_reconsider_max_bio_size(mdev); if (get_ldev(mdev)) { if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) { mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev); ldsc = 1; } put_ldev(mdev); } if (mdev->state.conn > C_WF_REPORT_PARAMS) { if (be64_to_cpu(p->c_size) != drbd_get_capacity(mdev->this_bdev) || ldsc) { /* we have different sizes, probably peer * needs to know my new size... 
*/ drbd_send_sizes(mdev, 0, ddsf); } if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) || (dd == grew && mdev->state.conn == C_CONNECTED)) { if (mdev->state.pdsk >= D_INCONSISTENT && mdev->state.disk >= D_INCONSISTENT) { if (ddsf & DDSF_NO_RESYNC) dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n"); else resync_after_online_grow(mdev); } else set_bit(RESYNC_AFTER_NEG, &mdev->flags); } } return 0; } static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi) { struct drbd_conf *mdev; struct p_uuids *p = pi->data; u64 *p_uuid; int i, updated_uuids = 0; mdev = vnr_to_mdev(tconn, pi->vnr); if (!mdev) return config_unknown_volume(tconn, pi); p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO); if (!p_uuid) { dev_err(DEV, "kmalloc of p_uuid failed\n"); return false; } for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++) p_uuid[i] = be64_to_cpu(p->uuid[i]); kfree(mdev->p_uuid); mdev->p_uuid = p_uuid; if (mdev->state.conn < C_CONNECTED && mdev->state.disk < D_INCONSISTENT && mdev->state.role == R_PRIMARY && (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) { dev_err(DEV, "Can only connect to data with current UUID=%016llX\n", (unsigned long long)mdev->ed_uuid); conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD); return -EIO; } if (get_ldev(mdev)) { int skip_initial_sync = mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 && mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && (p_uuid[UI_FLAGS] & 8); if (skip_initial_sync) { dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n"); drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write from receive_uuids", BM_LOCKED_TEST_ALLOWED); _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]); _drbd_uuid_set(mdev, UI_BITMAP, 0); _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), CS_VERBOSE, NULL); drbd_md_sync(mdev); updated_uuids = 1; } put_ldev(mdev); } else if (mdev->state.disk < D_INCONSISTENT && mdev->state.role == R_PRIMARY) { /* I am a diskless primary, the peer just created a new current UUID for me. */ updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); } /* Before we test for the disk state, we should wait until an eventually ongoing cluster wide state change is finished. That is important if we are primary and are detaching from our disk. We need to see the new disk state... */ mutex_lock(mdev->state_mutex); mutex_unlock(mdev->state_mutex); if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT) updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); if (updated_uuids) drbd_print_uuids(mdev, "receiver updated UUIDs to"); return 0; } /** * convert_state() - Converts the peer's view of the cluster state to our point of view * @ps: The state as seen by the peer. 
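 * Mirrored fields are swapped (the peer's "role" becomes our "peer", its
 * "disk" our "pdsk"), and asymmetric connection states are mapped to their
 * counterpart, e.g. C_STARTING_SYNC_S <-> C_STARTING_SYNC_T.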
*/ static union drbd_state convert_state(union drbd_state ps) { union drbd_state ms; static enum drbd_conns c_tab[] = { [C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS, [C_CONNECTED] = C_CONNECTED, [C_STARTING_SYNC_S] = C_STARTING_SYNC_T, [C_STARTING_SYNC_T] = C_STARTING_SYNC_S, [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */ [C_VERIFY_S] = C_VERIFY_T, [C_MASK] = C_MASK, }; ms.i = ps.i; ms.conn = c_tab[ps.conn]; ms.peer = ps.role; ms.role = ps.peer; ms.pdsk = ps.disk; ms.disk = ps.pdsk; ms.peer_isp = (ps.aftr_isp | ps.user_isp); return ms; } static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi) { struct drbd_conf *mdev; struct p_req_state *p = pi->data; union drbd_state mask, val; enum drbd_state_rv rv; mdev = vnr_to_mdev(tconn, pi->vnr); if (!mdev) return -EIO; mask.i = be32_to_cpu(p->mask); val.i = be32_to_cpu(p->val); if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags) && mutex_is_locked(mdev->state_mutex)) { drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG); return 0; } mask = convert_state(mask); val = convert_state(val); rv = drbd_change_state(mdev, CS_VERBOSE, mask, val); drbd_send_sr_reply(mdev, rv); drbd_md_sync(mdev); return 0; } static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *pi) { struct p_req_state *p = pi->data; union drbd_state mask, val; enum drbd_state_rv rv; mask.i = be32_to_cpu(p->mask); val.i = be32_to_cpu(p->val); if (test_bit(RESOLVE_CONFLICTS, &tconn->flags) && mutex_is_locked(&tconn->cstate_mutex)) { conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG); return 0; } mask = convert_state(mask); val = convert_state(val); rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL); conn_send_sr_reply(tconn, rv); return 0; } static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi) { struct drbd_conf *mdev; struct p_state *p = pi->data; union drbd_state os, ns, peer_state; enum drbd_disk_state real_peer_disk; enum chg_state_flags cs_flags; int rv; mdev = vnr_to_mdev(tconn, pi->vnr); if (!mdev) return config_unknown_volume(tconn, pi); peer_state.i = be32_to_cpu(p->state); real_peer_disk = peer_state.disk; if (peer_state.disk == D_NEGOTIATING) { real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT; dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk)); } spin_lock_irq(&mdev->tconn->req_lock); retry: os = ns = drbd_read_state(mdev); spin_unlock_irq(&mdev->tconn->req_lock); /* If some other part of the code (asender thread, timeout) * already decided to close the connection again, * we must not "re-establish" it here. */ if (os.conn <= C_TEAR_DOWN) return -ECONNRESET; /* If this is the "end of sync" confirmation, usually the peer disk * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits * set) resync started in PausedSyncT, or if the timing of pause-/ * unpause-sync events has been "just right", the peer disk may * transition from D_CONSISTENT to D_UP_TO_DATE as well. */ if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) && real_peer_disk == D_UP_TO_DATE && os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) { /* If we are (becoming) SyncSource, but peer is still in sync * preparation, ignore its uptodate-ness to avoid flapping, it * will change to inconsistent once the peer reaches active * syncing states. * It may have changed syncer-paused flags, however, so we * cannot ignore this completely. 
*/ if (peer_state.conn > C_CONNECTED && peer_state.conn < C_SYNC_SOURCE) real_peer_disk = D_INCONSISTENT; /* if peer_state changes to connected at the same time, * it explicitly notifies us that it finished resync. * Maybe we should finish it up, too? */ else if (os.conn >= C_SYNC_SOURCE && peer_state.conn == C_CONNECTED) { if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) drbd_resync_finished(mdev); return 0; } } /* explicit verify finished notification, stop sector reached. */ if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE && peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) { ov_out_of_sync_print(mdev); drbd_resync_finished(mdev); return 0; } /* peer says his disk is inconsistent, while we think it is uptodate, * and this happens while the peer still thinks we have a sync going on, * but we think we are already done with the sync. * We ignore this to avoid flapping pdsk. * This should not happen, if the peer is a recent version of drbd. */ if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT && os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE) real_peer_disk = D_UP_TO_DATE; if (ns.conn == C_WF_REPORT_PARAMS) ns.conn = C_CONNECTED; if (peer_state.conn == C_AHEAD) ns.conn = C_BEHIND; if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING && get_ldev_if_state(mdev, D_NEGOTIATING)) { int cr; /* consider resync */ /* if we established a new connection */ cr = (os.conn < C_CONNECTED); /* if we had an established connection * and one of the nodes newly attaches a disk */ cr |= (os.conn == C_CONNECTED && (peer_state.disk == D_NEGOTIATING || os.disk == D_NEGOTIATING)); /* if we have both been inconsistent, and the peer has been * forced to be UpToDate with --overwrite-data */ cr |= test_bit(CONSIDER_RESYNC, &mdev->flags); /* if we had been plain connected, and the admin requested to * start a sync by "invalidate" or "invalidate-remote" */ cr |= (os.conn == C_CONNECTED && (peer_state.conn >= C_STARTING_SYNC_S && peer_state.conn <= C_WF_BITMAP_T)); if (cr) ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk); put_ldev(mdev); if (ns.conn == C_MASK) { ns.conn = C_CONNECTED; if (mdev->state.disk == D_NEGOTIATING) { drbd_force_state(mdev, NS(disk, D_FAILED)); } else if (peer_state.disk == D_NEGOTIATING) { dev_err(DEV, "Disk attach process on the peer node was aborted.\n"); peer_state.disk = D_DISKLESS; real_peer_disk = D_DISKLESS; } else { if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags)) return -EIO; D_ASSERT(os.conn == C_WF_REPORT_PARAMS); conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD); return -EIO; } } } spin_lock_irq(&mdev->tconn->req_lock); if (os.i != drbd_read_state(mdev).i) goto retry; clear_bit(CONSIDER_RESYNC, &mdev->flags); ns.peer = peer_state.role; ns.pdsk = real_peer_disk; ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp); if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING) ns.disk = mdev->new_state_tmp.disk; cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD); if (ns.pdsk == D_CONSISTENT && drbd_suspended(mdev) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED && test_bit(NEW_CUR_UUID, &mdev->flags)) { /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this for temporal network outages! 
*/ spin_unlock_irq(&mdev->tconn->req_lock); dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n"); tl_clear(mdev->tconn); drbd_uuid_new_current(mdev); clear_bit(NEW_CUR_UUID, &mdev->flags); conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD); return -EIO; } rv = _drbd_set_state(mdev, ns, cs_flags, NULL); ns = drbd_read_state(mdev); spin_unlock_irq(&mdev->tconn->req_lock); if (rv < SS_SUCCESS) { conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD); return -EIO; } if (os.conn > C_WF_REPORT_PARAMS) { if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED && peer_state.disk != D_NEGOTIATING ) { /* we want resync, peer has not yet decided to sync... */ /* Nowadays only used when forcing a node into primary role and setting its disk to UpToDate with that */ drbd_send_uuids(mdev); drbd_send_current_state(mdev); } } clear_bit(DISCARD_MY_DATA, &mdev->flags); drbd_md_sync(mdev); /* update connected indicator, la_size_sect, ... */ return 0; } static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi) { struct drbd_conf *mdev; struct p_rs_uuid *p = pi->data; mdev = vnr_to_mdev(tconn, pi->vnr); if (!mdev) return -EIO; wait_event(mdev->misc_wait, mdev->state.conn == C_WF_SYNC_UUID || mdev->state.conn == C_BEHIND || mdev->state.conn < C_CONNECTED || mdev->state.disk < D_NEGOTIATING); /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */ /* Here the _drbd_uuid_ functions are right, current should _not_ be rotated into the history */ if (get_ldev_if_state(mdev, D_NEGOTIATING)) { _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid)); _drbd_uuid_set(mdev, UI_BITMAP, 0UL); drbd_print_uuids(mdev, "updated sync uuid"); drbd_start_resync(mdev, C_SYNC_TARGET); put_ldev(mdev); } else dev_err(DEV, "Ignoring SyncUUID packet!\n"); return 0; } /** * receive_bitmap_plain * * Return 0 when done, 1 when another iteration is needed, and a negative error * code upon failure. */ static int receive_bitmap_plain(struct drbd_conf *mdev, unsigned int size, unsigned long *p, struct bm_xfer_ctx *c) { unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(mdev->tconn); unsigned int num_words = min_t(size_t, data_size / sizeof(*p), c->bm_words - c->word_offset); unsigned int want = num_words * sizeof(*p); int err; if (want != size) { dev_err(DEV, "%s:want (%u) != size (%u)\n", __func__, want, size); return -EIO; } if (want == 0) return 0; err = drbd_recv_all(mdev->tconn, p, want); if (err) return err; drbd_bm_merge_lel(mdev, c->word_offset, num_words, p); c->word_offset += num_words; c->bit_offset = c->word_offset * BITS_PER_LONG; if (c->bit_offset > c->bm_bits) c->bit_offset = c->bm_bits; return 1; } static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p) { return (enum drbd_bitmap_code)(p->encoding & 0x0f); } static int dcbp_get_start(struct p_compressed_bm *p) { return (p->encoding & 0x80) != 0; } static int dcbp_get_pad_bits(struct p_compressed_bm *p) { return (p->encoding >> 4) & 0x7; } /** * recv_bm_rle_bits * * Return 0 when done, 1 when another iteration is needed, and a negative error * code upon failure. 
*/ static int recv_bm_rle_bits(struct drbd_conf *mdev, struct p_compressed_bm *p, struct bm_xfer_ctx *c, unsigned int len) { struct bitstream bs; u64 look_ahead; u64 rl; u64 tmp; unsigned long s = c->bit_offset; unsigned long e; int toggle = dcbp_get_start(p); int have; int bits; bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p)); bits = bitstream_get_bits(&bs, &look_ahead, 64); if (bits < 0) return -EIO; for (have = bits; have > 0; s += rl, toggle = !toggle) { bits = vli_decode_bits(&rl, look_ahead); if (bits <= 0) return -EIO; if (toggle) { e = s + rl -1; if (e >= c->bm_bits) { dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e); return -EIO; } _drbd_bm_set_bits(mdev, s, e); } if (have < bits) { dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n", have, bits, look_ahead, (unsigned int)(bs.cur.b - p->code), (unsigned int)bs.buf_len); return -EIO; } look_ahead >>= bits; have -= bits; bits = bitstream_get_bits(&bs, &tmp, 64 - have); if (bits < 0) return -EIO; look_ahead |= tmp << have; have += bits; } c->bit_offset = s; bm_xfer_ctx_bit_to_word_offset(c); return (s != c->bm_bits); } /** * decode_bitmap_c * * Return 0 when done, 1 when another iteration is needed, and a negative error * code upon failure. */ static int decode_bitmap_c(struct drbd_conf *mdev, struct p_compressed_bm *p, struct bm_xfer_ctx *c, unsigned int len) { if (dcbp_get_code(p) == RLE_VLI_Bits) return recv_bm_rle_bits(mdev, p, c, len - sizeof(*p)); /* other variants had been implemented for evaluation, * but have been dropped as this one turned out to be "best" * during all our tests. */ dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding); conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD); return -EIO; } void INFO_bm_xfer_stats(struct drbd_conf *mdev, const char *direction, struct bm_xfer_ctx *c) { /* what would it take to transfer it "plaintext" */ unsigned int header_size = drbd_header_size(mdev->tconn); unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size; unsigned int plain = header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) + c->bm_words * sizeof(unsigned long); unsigned int total = c->bytes[0] + c->bytes[1]; unsigned int r; /* total can not be zero. but just in case: */ if (total == 0) return; /* don't report if not compressed */ if (total >= plain) return; /* total < plain. check for overflow, still */ r = (total > UINT_MAX/1000) ? (total / (plain/1000)) : (1000 * total / plain); if (r > 1000) r = 1000; r = 1000 - r; dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), " "total %u; compression: %u.%u%%\n", direction, c->bytes[1], c->packets[1], c->bytes[0], c->packets[0], total, r/10, r % 10); } /* Since we are processing the bitfield from lower addresses to higher, it does not matter if the process it in 32 bit chunks or 64 bit chunks as long as it is little endian. (Understand it as byte stream, beginning with the lowest byte...) If we would use big endian we would need to process it from the highest address to the lowest, in order to be agnostic to the 32 vs 64 bits issue. returns 0 on failure, 1 if we successfully received it. 
*/ static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi) { struct drbd_conf *mdev; struct bm_xfer_ctx c; int err; mdev = vnr_to_mdev(tconn, pi->vnr); if (!mdev) return -EIO; drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED); /* you are supposed to send additional out-of-sync information * if you actually set bits during this phase */ c = (struct bm_xfer_ctx) { .bm_bits = drbd_bm_bits(mdev), .bm_words = drbd_bm_words(mdev), }; for(;;) { if (pi->cmd == P_BITMAP) err = receive_bitmap_plain(mdev, pi->size, pi->data, &c); else if (pi->cmd == P_COMPRESSED_BITMAP) { /* MAYBE: sanity check that we speak proto >= 90, * and the feature is enabled! */ struct p_compressed_bm *p = pi->data; if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(tconn)) { dev_err(DEV, "ReportCBitmap packet too large\n"); err = -EIO; goto out; } if (pi->size <= sizeof(*p)) { dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size); err = -EIO; goto out; } err = drbd_recv_all(mdev->tconn, p, pi->size); if (err) goto out; err = decode_bitmap_c(mdev, p, &c, pi->size); } else { dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd); err = -EIO; goto out; } c.packets[pi->cmd == P_BITMAP]++; c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(tconn) + pi->size; if (err <= 0) { if (err < 0) goto out; break; } err = drbd_recv_header(mdev->tconn, pi); if (err) goto out; } INFO_bm_xfer_stats(mdev, "receive", &c); if (mdev->state.conn == C_WF_BITMAP_T) { enum drbd_state_rv rv; err = drbd_send_bitmap(mdev); if (err) goto out; /* Omit CS_ORDERED with this state transition to avoid deadlocks. */ rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE); D_ASSERT(rv == SS_SUCCESS); } else if (mdev->state.conn != C_WF_BITMAP_S) { /* admin may have requested C_DISCONNECTING, * other threads may have noticed network errors */ dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n", drbd_conn_str(mdev->state.conn)); } err = 0; out: drbd_bm_unlock(mdev); if (!err && mdev->state.conn == C_WF_BITMAP_S) drbd_start_resync(mdev, C_SYNC_SOURCE); return err; } static int receive_skip(struct drbd_tconn *tconn, struct packet_info *pi) { conn_warn(tconn, "skipping unknown optional packet type %d, l: %d!\n", pi->cmd, pi->size); return ignore_remaining_packet(tconn, pi); } static int receive_UnplugRemote(struct drbd_tconn *tconn, struct packet_info *pi) { /* Make sure we've acked all the TCP data associated * with the data requests being unplugged */ drbd_tcp_quickack(tconn->data.socket); return 0; } static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi) { struct drbd_conf *mdev; struct p_block_desc *p = pi->data; mdev = vnr_to_mdev(tconn, pi->vnr); if (!mdev) return -EIO; switch (mdev->state.conn) { case C_WF_SYNC_UUID: case C_WF_BITMAP_T: case C_BEHIND: break; default: dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n", drbd_conn_str(mdev->state.conn)); } drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize)); return 0; } struct data_cmd { int expect_payload; size_t pkt_size; int (*fn)(struct drbd_tconn *, struct packet_info *); }; static struct data_cmd drbd_cmd_handler[] = { [P_DATA] = { 1, sizeof(struct p_data), receive_Data }, [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply }, [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } , [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } , [P_BITMAP] = { 1, 0, receive_bitmap } , 
[P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } , [P_UNPLUG_REMOTE] = { 0, 0, receive_UnplugRemote }, [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest }, [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest }, [P_SYNC_PARAM] = { 1, 0, receive_SyncParam }, [P_SYNC_PARAM89] = { 1, 0, receive_SyncParam }, [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol }, [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids }, [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes }, [P_STATE] = { 0, sizeof(struct p_state), receive_state }, [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state }, [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid }, [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest }, [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest }, [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest }, [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip }, [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync }, [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state }, [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol }, }; static void drbdd(struct drbd_tconn *tconn) { struct packet_info pi; size_t shs; /* sub header size */ int err; while (get_t_state(&tconn->receiver) == RUNNING) { struct data_cmd *cmd; drbd_thread_current_set_cpu(&tconn->receiver); if (drbd_recv_header(tconn, &pi)) goto err_out; cmd = &drbd_cmd_handler[pi.cmd]; if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) { conn_err(tconn, "Unexpected data packet %s (0x%04x)", cmdname(pi.cmd), pi.cmd); goto err_out; } shs = cmd->pkt_size; if (pi.size > shs && !cmd->expect_payload) { conn_err(tconn, "No payload expected %s l:%d\n", cmdname(pi.cmd), pi.size); goto err_out; } if (shs) { err = drbd_recv_all_warn(tconn, pi.data, shs); if (err) goto err_out; pi.size -= shs; } err = cmd->fn(tconn, &pi); if (err) { conn_err(tconn, "error receiving %s, e: %d l: %d!\n", cmdname(pi.cmd), err, pi.size); goto err_out; } } return; err_out: conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD); } void conn_flush_workqueue(struct drbd_tconn *tconn) { struct drbd_wq_barrier barr; barr.w.cb = w_prev_work_done; barr.w.tconn = tconn; init_completion(&barr.done); drbd_queue_work(&tconn->sender_work, &barr.w); wait_for_completion(&barr.done); } static void conn_disconnect(struct drbd_tconn *tconn) { struct drbd_conf *mdev; enum drbd_conns oc; int vnr; if (tconn->cstate == C_STANDALONE) return; /* We are about to start the cleanup after connection loss. * Make sure drbd_make_request knows about that. * Usually we should be in some network failure state already, * but just in case we are not, we fix it up here. */ conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD); /* asender does not clean up anything. 
it must not interfere, either */ drbd_thread_stop(&tconn->asender); drbd_free_sock(tconn); rcu_read_lock(); idr_for_each_entry(&tconn->volumes, mdev, vnr) { kref_get(&mdev->kref); rcu_read_unlock(); drbd_disconnected(mdev); kref_put(&mdev->kref, &drbd_minor_destroy); rcu_read_lock(); } rcu_read_unlock(); if (!list_empty(&tconn->current_epoch->list)) conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n"); /* ok, no more ee's on the fly, it is safe to reset the epoch_size */ atomic_set(&tconn->current_epoch->epoch_size, 0); tconn->send.seen_any_write_yet = false; conn_info(tconn, "Connection closed\n"); if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN) conn_try_outdate_peer_async(tconn); spin_lock_irq(&tconn->req_lock); oc = tconn->cstate; if (oc >= C_UNCONNECTED) _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE); spin_unlock_irq(&tconn->req_lock); if (oc == C_DISCONNECTING) conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD); } static int drbd_disconnected(struct drbd_conf *mdev) { unsigned int i; /* wait for current activity to cease. */ spin_lock_irq(&mdev->tconn->req_lock); _drbd_wait_ee_list_empty(mdev, &mdev->active_ee); _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee); _drbd_wait_ee_list_empty(mdev, &mdev->read_ee); spin_unlock_irq(&mdev->tconn->req_lock); /* We do not have data structures that would allow us to * get the rs_pending_cnt down to 0 again. * * On C_SYNC_TARGET we do not have any data structures describing * the pending RSDataRequest's we have sent. * * On C_SYNC_SOURCE there is no data structure that tracks * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget. * And no, it is not the sum of the reference counts in the * resync_LRU. The resync_LRU tracks the whole operation including * the disk-IO, while the rs_pending_cnt only tracks the blocks * on the fly. */ drbd_rs_cancel_all(mdev); mdev->rs_total = 0; mdev->rs_failed = 0; atomic_set(&mdev->rs_pending_cnt, 0); wake_up(&mdev->misc_wait); del_timer_sync(&mdev->resync_timer); resync_timer_fn((unsigned long)mdev); /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier, * w_make_resync_request etc. which may still be on the worker queue * to be "canceled" */ drbd_flush_workqueue(mdev); drbd_finish_peer_reqs(mdev); /* This second workqueue flush is necessary, since drbd_finish_peer_reqs() might have issued a work again. The one before drbd_finish_peer_reqs() is necessary to reclain net_ee in drbd_finish_peer_reqs(). */ drbd_flush_workqueue(mdev); /* need to do it again, drbd_finish_peer_reqs() may have populated it * again via drbd_try_clear_on_disk_bm(). */ drbd_rs_cancel_all(mdev); kfree(mdev->p_uuid); mdev->p_uuid = NULL; if (!drbd_suspended(mdev)) tl_clear(mdev->tconn); drbd_md_sync(mdev); /* serialize with bitmap writeout triggered by the state change, * if any. */ wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); /* tcp_close and release of sendpage pages can be deferred. I don't * want to use SO_LINGER, because apparently it can be deferred for * more than 20 seconds (longest time I checked). * * Actually we don't care for exactly when the network stack does its * put_page(), but release our reference on these pages right here. 
*/ i = drbd_free_peer_reqs(mdev, &mdev->net_ee); if (i) dev_info(DEV, "net_ee not empty, killed %u entries\n", i); i = atomic_read(&mdev->pp_in_use_by_net); if (i) dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i); i = atomic_read(&mdev->pp_in_use); if (i) dev_info(DEV, "pp_in_use = %d, expected 0\n", i); D_ASSERT(list_empty(&mdev->read_ee)); D_ASSERT(list_empty(&mdev->active_ee)); D_ASSERT(list_empty(&mdev->sync_ee)); D_ASSERT(list_empty(&mdev->done_ee)); return 0; } /* * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version * we can agree on is stored in agreed_pro_version. * * feature flags and the reserved array should be enough room for future * enhancements of the handshake protocol, and possible plugins... * * for now, they are expected to be zero, but ignored. */ static int drbd_send_features(struct drbd_tconn *tconn) { struct drbd_socket *sock; struct p_connection_features *p; sock = &tconn->data; p = conn_prepare_command(tconn, sock); if (!p) return -EIO; memset(p, 0, sizeof(*p)); p->protocol_min = cpu_to_be32(PRO_VERSION_MIN); p->protocol_max = cpu_to_be32(PRO_VERSION_MAX); return conn_send_command(tconn, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0); } /* * return values: * 1 yes, we have a valid connection * 0 oops, did not work out, please try again * -1 peer talks different language, * no point in trying again, please go standalone. */ static int drbd_do_features(struct drbd_tconn *tconn) { /* ASSERT current == tconn->receiver ... */ struct p_connection_features *p; const int expect = sizeof(struct p_connection_features); struct packet_info pi; int err; err = drbd_send_features(tconn); if (err) return 0; err = drbd_recv_header(tconn, &pi); if (err) return 0; if (pi.cmd != P_CONNECTION_FEATURES) { conn_err(tconn, "expected ConnectionFeatures packet, received: %s (0x%04x)\n", cmdname(pi.cmd), pi.cmd); return -1; } if (pi.size != expect) { conn_err(tconn, "expected ConnectionFeatures length: %u, received: %u\n", expect, pi.size); return -1; } p = pi.data; err = drbd_recv_all_warn(tconn, p, expect); if (err) return 0; p->protocol_min = be32_to_cpu(p->protocol_min); p->protocol_max = be32_to_cpu(p->protocol_max); if (p->protocol_max == 0) p->protocol_max = p->protocol_min; if (PRO_VERSION_MAX < p->protocol_min || PRO_VERSION_MIN > p->protocol_max) goto incompat; tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max); conn_info(tconn, "Handshake successful: " "Agreed network protocol version %d\n", tconn->agreed_pro_version); return 1; incompat: conn_err(tconn, "incompatible DRBD dialects: " "I support %d-%d, peer supports %d-%d\n", PRO_VERSION_MIN, PRO_VERSION_MAX, p->protocol_min, p->protocol_max); return -1; } #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE) static int drbd_do_auth(struct drbd_tconn *tconn) { conn_err(tconn, "This kernel was build without CONFIG_CRYPTO_HMAC.\n"); conn_err(tconn, "You need to disable 'cram-hmac-alg' in drbd.conf.\n"); return -1; } #else #define CHALLENGE_LEN 64 /* Return value: 1 - auth succeeded, 0 - failed, try again (network error), -1 - auth failed, don't try again. */ static int drbd_do_auth(struct drbd_tconn *tconn) { struct drbd_socket *sock; char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... 
*/ struct scatterlist sg; char *response = NULL; char *right_response = NULL; char *peers_ch = NULL; unsigned int key_len; char secret[SHARED_SECRET_MAX]; /* 64 byte */ unsigned int resp_size; struct hash_desc desc; struct packet_info pi; struct net_conf *nc; int err, rv; /* FIXME: Put the challenge/response into the preallocated socket buffer. */ rcu_read_lock(); nc = rcu_dereference(tconn->net_conf); key_len = strlen(nc->shared_secret); memcpy(secret, nc->shared_secret, key_len); rcu_read_unlock(); desc.tfm = tconn->cram_hmac_tfm; desc.flags = 0; rv = crypto_hash_setkey(tconn->cram_hmac_tfm, (u8 *)secret, key_len); if (rv) { conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv); rv = -1; goto fail; } get_random_bytes(my_challenge, CHALLENGE_LEN); sock = &tconn->data; if (!conn_prepare_command(tconn, sock)) { rv = 0; goto fail; } rv = !conn_send_command(tconn, sock, P_AUTH_CHALLENGE, 0, my_challenge, CHALLENGE_LEN); if (!rv) goto fail; err = drbd_recv_header(tconn, &pi); if (err) { rv = 0; goto fail; } if (pi.cmd != P_AUTH_CHALLENGE) { conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n", cmdname(pi.cmd), pi.cmd); rv = 0; goto fail; } if (pi.size > CHALLENGE_LEN * 2) { conn_err(tconn, "expected AuthChallenge payload too big.\n"); rv = -1; goto fail; } peers_ch = kmalloc(pi.size, GFP_NOIO); if (peers_ch == NULL) { conn_err(tconn, "kmalloc of peers_ch failed\n"); rv = -1; goto fail; } err = drbd_recv_all_warn(tconn, peers_ch, pi.size); if (err) { rv = 0; goto fail; } resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm); response = kmalloc(resp_size, GFP_NOIO); if (response == NULL) { conn_err(tconn, "kmalloc of response failed\n"); rv = -1; goto fail; } sg_init_table(&sg, 1); sg_set_buf(&sg, peers_ch, pi.size); rv = crypto_hash_digest(&desc, &sg, sg.length, response); if (rv) { conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv); rv = -1; goto fail; } if (!conn_prepare_command(tconn, sock)) { rv = 0; goto fail; } rv = !conn_send_command(tconn, sock, P_AUTH_RESPONSE, 0, response, resp_size); if (!rv) goto fail; err = drbd_recv_header(tconn, &pi); if (err) { rv = 0; goto fail; } if (pi.cmd != P_AUTH_RESPONSE) { conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n", cmdname(pi.cmd), pi.cmd); rv = 0; goto fail; } if (pi.size != resp_size) { conn_err(tconn, "expected AuthResponse payload of wrong size\n"); rv = 0; goto fail; } err = drbd_recv_all_warn(tconn, response , resp_size); if (err) { rv = 0; goto fail; } right_response = kmalloc(resp_size, GFP_NOIO); if (right_response == NULL) { conn_err(tconn, "kmalloc of right_response failed\n"); rv = -1; goto fail; } sg_set_buf(&sg, my_challenge, CHALLENGE_LEN); rv = crypto_hash_digest(&desc, &sg, sg.length, right_response); if (rv) { conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv); rv = -1; goto fail; } rv = !memcmp(response, right_response, resp_size); if (rv) conn_info(tconn, "Peer authenticated using %d bytes HMAC\n", resp_size); else rv = -1; fail: kfree(peers_ch); kfree(response); kfree(right_response); return rv; } #endif int drbdd_init(struct drbd_thread *thi) { struct drbd_tconn *tconn = thi->tconn; int h; conn_info(tconn, "receiver (re)started\n"); do { h = conn_connect(tconn); if (h == 0) { conn_disconnect(tconn); schedule_timeout_interruptible(HZ); } if (h == -1) { conn_warn(tconn, "Discarding network configuration.\n"); conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD); } } while (h == 0); if (h > 0) drbdd(tconn); conn_disconnect(tconn); conn_info(tconn, 
"receiver terminated\n"); return 0; } /* ********* acknowledge sender ******** */ static int got_conn_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi) { struct p_req_state_reply *p = pi->data; int retcode = be32_to_cpu(p->retcode); if (retcode >= SS_SUCCESS) { set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags); } else { set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags); conn_err(tconn, "Requested state change failed by peer: %s (%d)\n", drbd_set_st_err_str(retcode), retcode); } wake_up(&tconn->ping_wait); return 0; } static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi) { struct drbd_conf *mdev; struct p_req_state_reply *p = pi->data; int retcode = be32_to_cpu(p->retcode); mdev = vnr_to_mdev(tconn, pi->vnr); if (!mdev) return -EIO; if (test_bit(CONN_WD_ST_CHG_REQ, &tconn->flags)) { D_ASSERT(tconn->agreed_pro_version < 100); return got_conn_RqSReply(tconn, pi); } if (retcode >= SS_SUCCESS) { set_bit(CL_ST_CHG_SUCCESS, &mdev->flags); } else { set_bit(CL_ST_CHG_FAIL, &mdev->flags); dev_err(DEV, "Requested state change failed by peer: %s (%d)\n", drbd_set_st_err_str(retcode), retcode); } wake_up(&mdev->state_wait); return 0; } static int got_Ping(struct drbd_tconn *tconn, struct packet_info *pi) { return drbd_send_ping_ack(tconn); } static int got_PingAck(struct drbd_tconn *tconn, struct packet_info *pi) { /* restore idle timeout */ tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ; if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags)) wake_up(&tconn->ping_wait); return 0; } static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi) { struct drbd_conf *mdev; struct p_block_ack *p = pi->data; sector_t sector = be64_to_cpu(p->sector); int blksize = be32_to_cpu(p->blksize); mdev = vnr_to_mdev(tconn, pi->vnr); if (!mdev) return -EIO; D_ASSERT(mdev->tconn->agreed_pro_version >= 89); update_peer_seq(mdev, be32_to_cpu(p->seq_num)); if (get_ldev(mdev)) { drbd_rs_complete_io(mdev, sector); drbd_set_in_sync(mdev, sector, blksize); /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */ mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT); put_ldev(mdev); } dec_rs_pending(mdev); atomic_add(blksize >> 9, &mdev->rs_sect_in); return 0; } static int validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector, struct rb_root *root, const char *func, enum drbd_req_event what, bool missing_ok) { struct drbd_request *req; struct bio_and_error m; spin_lock_irq(&mdev->tconn->req_lock); req = find_request(mdev, root, id, sector, missing_ok, func); if (unlikely(!req)) { spin_unlock_irq(&mdev->tconn->req_lock); return -EIO; } __req_mod(req, what, &m); spin_unlock_irq(&mdev->tconn->req_lock); if (m.bio) complete_master_bio(mdev, &m); return 0; } static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi) { struct drbd_conf *mdev; struct p_block_ack *p = pi->data; sector_t sector = be64_to_cpu(p->sector); int blksize = be32_to_cpu(p->blksize); enum drbd_req_event what; mdev = vnr_to_mdev(tconn, pi->vnr); if (!mdev) return -EIO; update_peer_seq(mdev, be32_to_cpu(p->seq_num)); if (p->block_id == ID_SYNCER) { drbd_set_in_sync(mdev, sector, blksize); dec_rs_pending(mdev); return 0; } switch (pi->cmd) { case P_RS_WRITE_ACK: what = WRITE_ACKED_BY_PEER_AND_SIS; break; case P_WRITE_ACK: what = WRITE_ACKED_BY_PEER; break; case P_RECV_ACK: what = RECV_ACKED_BY_PEER; break; case P_SUPERSEDED: what = CONFLICT_RESOLVED; break; case P_RETRY_WRITE: what = POSTPONE_WRITE; break; default: BUG(); } return validate_req_change_req_state(mdev, 
p->block_id, sector, &mdev->write_requests, __func__, what, false); } static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi) { struct drbd_conf *mdev; struct p_block_ack *p = pi->data; sector_t sector = be64_to_cpu(p->sector); int size = be32_to_cpu(p->blksize); int err; mdev = vnr_to_mdev(tconn, pi->vnr); if (!mdev) return -EIO; update_peer_seq(mdev, be32_to_cpu(p->seq_num)); if (p->block_id == ID_SYNCER) { dec_rs_pending(mdev); drbd_rs_failed_io(mdev, sector, size); return 0; } err = validate_req_change_req_state(mdev, p->block_id, sector, &mdev->write_requests, __func__, NEG_ACKED, true); if (err) { /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs. The master bio might already be completed, therefore the request is no longer in the collision hash. */ /* In Protocol B we might already have got a P_RECV_ACK but then get a P_NEG_ACK afterwards. */ drbd_set_out_of_sync(mdev, sector, size); } return 0; } static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi) { struct drbd_conf *mdev; struct p_block_ack *p = pi->data; sector_t sector = be64_to_cpu(p->sector); mdev = vnr_to_mdev(tconn, pi->vnr); if (!mdev) return -EIO; update_peer_seq(mdev, be32_to_cpu(p->seq_num)); dev_err(DEV, "Got NegDReply; Sector %llus, len %u.\n", (unsigned long long)sector, be32_to_cpu(p->blksize)); return validate_req_change_req_state(mdev, p->block_id, sector, &mdev->read_requests, __func__, NEG_ACKED, false); } static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi) { struct drbd_conf *mdev; sector_t sector; int size; struct p_block_ack *p = pi->data; mdev = vnr_to_mdev(tconn, pi->vnr); if (!mdev) return -EIO; sector = be64_to_cpu(p->sector); size = be32_to_cpu(p->blksize); update_peer_seq(mdev, be32_to_cpu(p->seq_num)); dec_rs_pending(mdev); if (get_ldev_if_state(mdev, D_FAILED)) { drbd_rs_complete_io(mdev, sector); switch (pi->cmd) { case P_NEG_RS_DREPLY: drbd_rs_failed_io(mdev, sector, size); case P_RS_CANCEL: break; default: BUG(); } put_ldev(mdev); } return 0; } static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi) { struct p_barrier_ack *p = pi->data; struct drbd_conf *mdev; int vnr; tl_release(tconn, p->barrier, be32_to_cpu(p->set_size)); rcu_read_lock(); idr_for_each_entry(&tconn->volumes, mdev, vnr) { if (mdev->state.conn == C_AHEAD && atomic_read(&mdev->ap_in_flight) == 0 && !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) { mdev->start_resync_timer.expires = jiffies + HZ; add_timer(&mdev->start_resync_timer); } } rcu_read_unlock(); return 0; } static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi) { struct drbd_conf *mdev; struct p_block_ack *p = pi->data; struct drbd_work *w; sector_t sector; int size; mdev = vnr_to_mdev(tconn, pi->vnr); if (!mdev) return -EIO; sector = be64_to_cpu(p->sector); size = be32_to_cpu(p->blksize); update_peer_seq(mdev, be32_to_cpu(p->seq_num)); if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC) drbd_ov_out_of_sync_found(mdev, sector, size); else ov_out_of_sync_print(mdev); if (!get_ldev(mdev)) return 0; drbd_rs_complete_io(mdev, sector); dec_rs_pending(mdev); --mdev->ov_left; /* let's advance progress step marks only for every other megabyte */ if ((mdev->ov_left & 0x200) == 0x200) drbd_advance_rs_marks(mdev, mdev->ov_left); if (mdev->ov_left == 0) { w = kmalloc(sizeof(*w), GFP_NOIO); if (w) { w->cb = w_ov_finished; w->mdev = mdev; drbd_queue_work(&mdev->tconn->sender_work, w); } else { dev_err(DEV, "kmalloc(w) failed."); ov_out_of_sync_print(mdev); 
drbd_resync_finished(mdev); } } put_ldev(mdev); return 0; } static int got_skip(struct drbd_tconn *tconn, struct packet_info *pi) { return 0; } static int tconn_finish_peer_reqs(struct drbd_tconn *tconn) { struct drbd_conf *mdev; int vnr, not_empty = 0; do { clear_bit(SIGNAL_ASENDER, &tconn->flags); flush_signals(current); rcu_read_lock(); idr_for_each_entry(&tconn->volumes, mdev, vnr) { kref_get(&mdev->kref); rcu_read_unlock(); if (drbd_finish_peer_reqs(mdev)) { kref_put(&mdev->kref, &drbd_minor_destroy); return 1; } kref_put(&mdev->kref, &drbd_minor_destroy); rcu_read_lock(); } set_bit(SIGNAL_ASENDER, &tconn->flags); spin_lock_irq(&tconn->req_lock); idr_for_each_entry(&tconn->volumes, mdev, vnr) { not_empty = !list_empty(&mdev->done_ee); if (not_empty) break; } spin_unlock_irq(&tconn->req_lock); rcu_read_unlock(); } while (not_empty); return 0; } struct asender_cmd { size_t pkt_size; int (*fn)(struct drbd_tconn *tconn, struct packet_info *); }; static struct asender_cmd asender_tbl[] = { [P_PING] = { 0, got_Ping }, [P_PING_ACK] = { 0, got_PingAck }, [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck }, [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck }, [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck }, [P_SUPERSEDED] = { sizeof(struct p_block_ack), got_BlockAck }, [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck }, [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply }, [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply }, [P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult }, [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck }, [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply }, [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync }, [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip }, [P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply }, [P_CONN_ST_CHG_REPLY]={ sizeof(struct p_req_state_reply), got_conn_RqSReply }, [P_RETRY_WRITE] = { sizeof(struct p_block_ack), got_BlockAck }, }; int drbd_asender(struct drbd_thread *thi) { struct drbd_tconn *tconn = thi->tconn; struct asender_cmd *cmd = NULL; struct packet_info pi; int rv; void *buf = tconn->meta.rbuf; int received = 0; unsigned int header_size = drbd_header_size(tconn); int expect = header_size; bool ping_timeout_active = false; struct net_conf *nc; int ping_timeo, tcp_cork, ping_int; struct sched_param param = { .sched_priority = 2 }; rv = sched_setscheduler(current, SCHED_RR, &param); if (rv < 0) conn_err(tconn, "drbd_asender: ERROR set priority, ret=%d\n", rv); while (get_t_state(thi) == RUNNING) { drbd_thread_current_set_cpu(thi); rcu_read_lock(); nc = rcu_dereference(tconn->net_conf); ping_timeo = nc->ping_timeo; tcp_cork = nc->tcp_cork; ping_int = nc->ping_int; rcu_read_unlock(); if (test_and_clear_bit(SEND_PING, &tconn->flags)) { if (drbd_send_ping(tconn)) { conn_err(tconn, "drbd_send_ping has failed\n"); goto reconnect; } tconn->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10; ping_timeout_active = true; } /* TODO: conditionally cork; it may hurt latency if we cork without much to send */ if (tcp_cork) drbd_tcp_cork(tconn->meta.socket); if (tconn_finish_peer_reqs(tconn)) { conn_err(tconn, "tconn_finish_peer_reqs() failed\n"); goto reconnect; } /* but unconditionally uncork unless disabled */ if (tcp_cork) drbd_tcp_uncork(tconn->meta.socket); /* short circuit, recv_msg would return EINTR anyways. 
*/ if (signal_pending(current)) continue; rv = drbd_recv_short(tconn->meta.socket, buf, expect-received, 0); clear_bit(SIGNAL_ASENDER, &tconn->flags); flush_signals(current); /* Note: * -EINTR (on meta) we got a signal * -EAGAIN (on meta) rcvtimeo expired * -ECONNRESET other side closed the connection * -ERESTARTSYS (on data) we got a signal * rv < 0 other than above: unexpected error! * rv == expected: full header or command * rv < expected: "woken" by signal during receive * rv == 0 : "connection shut down by peer" */ if (likely(rv > 0)) { received += rv; buf += rv; } else if (rv == 0) { if (test_bit(DISCONNECT_SENT, &tconn->flags)) { long t; rcu_read_lock(); t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10; rcu_read_unlock(); t = wait_event_timeout(tconn->ping_wait, tconn->cstate < C_WF_REPORT_PARAMS, t); if (t) break; } conn_err(tconn, "meta connection shut down by peer.\n"); goto reconnect; } else if (rv == -EAGAIN) { /* If the data socket received something meanwhile, * that is good enough: peer is still alive. */ if (time_after(tconn->last_received, jiffies - tconn->meta.socket->sk->sk_rcvtimeo)) continue; if (ping_timeout_active) { conn_err(tconn, "PingAck did not arrive in time.\n"); goto reconnect; } set_bit(SEND_PING, &tconn->flags); continue; } else if (rv == -EINTR) { continue; } else { conn_err(tconn, "sock_recvmsg returned %d\n", rv); goto reconnect; } if (received == expect && cmd == NULL) { if (decode_header(tconn, tconn->meta.rbuf, &pi)) goto reconnect; cmd = &asender_tbl[pi.cmd]; if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd->fn) { conn_err(tconn, "Unexpected meta packet %s (0x%04x)\n", cmdname(pi.cmd), pi.cmd); goto disconnect; } expect = header_size + cmd->pkt_size; if (pi.size != expect - header_size) { conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n", pi.cmd, pi.size); goto reconnect; } } if (received == expect) { bool err; err = cmd->fn(tconn, &pi); if (err) { conn_err(tconn, "%pf failed\n", cmd->fn); goto reconnect; } tconn->last_received = jiffies; if (cmd == &asender_tbl[P_PING_ACK]) { /* restore idle timeout */ tconn->meta.socket->sk->sk_rcvtimeo = ping_int * HZ; ping_timeout_active = false; } buf = tconn->meta.rbuf; received = 0; expect = header_size; cmd = NULL; } } if (0) { reconnect: conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD); conn_md_sync(tconn); } if (0) { disconnect: conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD); } clear_bit(SIGNAL_ASENDER, &tconn->flags); conn_info(tconn, "asender terminated\n"); return 0; }
gpl-2.0
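A note on the bitmap-transfer statistics in the DRBD receiver code above: INFO_bm_xfer_stats() reports how well the run-length-encoded bitmap compressed by turning "plain-text bytes it would have taken" versus "bytes actually sent" into a per-mille figure, with a guard against 32-bit overflow when 1000 * total would wrap. The stand-alone user-space sketch below is illustrative only and not part of the driver; the 4 MiB / 37 KiB inputs are invented numbers chosen just to exercise the arithmetic.

#include <stdio.h>
#include <limits.h>

/*
 * Mirror of the per-mille computation in INFO_bm_xfer_stats().  Returns the
 * savings in tenths of a percent, or 0 when the transfer was not actually
 * compressed (the kernel simply skips the report in that case).
 */
static unsigned int compression_permille(unsigned int plain, unsigned int total)
{
	unsigned int r;

	if (total == 0 || total >= plain)
		return 0;

	/* total < plain; still guard against 1000 * total overflowing. */
	r = (total > UINT_MAX / 1000) ? (total / (plain / 1000))
				      : (1000 * total / plain);
	if (r > 1000)
		r = 1000;
	return 1000 - r;
}

int main(void)
{
	unsigned int plain = 4096 * 1024;	/* hypothetical plain-text size */
	unsigned int total = 37 * 1024;		/* hypothetical bytes on the wire */
	unsigned int r = compression_permille(plain, total);

	printf("compression: %u.%u%%\n", r / 10, r % 10);
	return 0;
}

With those made-up inputs the sketch prints "compression: 99.1%", the same figure the dev_info() line in the driver would log for that ratio.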
SM-G920P/G92XP-R4_COI9
fs/hfsplus/dir.c
2089
13310
/* * linux/fs/hfsplus/dir.c * * Copyright (C) 2001 * Brad Boyer (flar@allandria.com) * (C) 2003 Ardis Technologies <roman@ardistech.com> * * Handling of directories */ #include <linux/errno.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/random.h> #include "hfsplus_fs.h" #include "hfsplus_raw.h" #include "xattr.h" static inline void hfsplus_instantiate(struct dentry *dentry, struct inode *inode, u32 cnid) { dentry->d_fsdata = (void *)(unsigned long)cnid; d_instantiate(dentry, inode); } /* Find the entry inside dir named dentry->d_name */ static struct dentry *hfsplus_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct inode *inode = NULL; struct hfs_find_data fd; struct super_block *sb; hfsplus_cat_entry entry; int err; u32 cnid, linkid = 0; u16 type; sb = dir->i_sb; dentry->d_fsdata = NULL; err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); if (err) return ERR_PTR(err); hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, &dentry->d_name); again: err = hfs_brec_read(&fd, &entry, sizeof(entry)); if (err) { if (err == -ENOENT) { hfs_find_exit(&fd); /* No such entry */ inode = NULL; goto out; } goto fail; } type = be16_to_cpu(entry.type); if (type == HFSPLUS_FOLDER) { if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) { err = -EIO; goto fail; } cnid = be32_to_cpu(entry.folder.id); dentry->d_fsdata = (void *)(unsigned long)cnid; } else if (type == HFSPLUS_FILE) { if (fd.entrylength < sizeof(struct hfsplus_cat_file)) { err = -EIO; goto fail; } cnid = be32_to_cpu(entry.file.id); if (entry.file.user_info.fdType == cpu_to_be32(HFSP_HARDLINK_TYPE) && entry.file.user_info.fdCreator == cpu_to_be32(HFSP_HFSPLUS_CREATOR) && (entry.file.create_date == HFSPLUS_I(HFSPLUS_SB(sb)->hidden_dir)-> create_date || entry.file.create_date == HFSPLUS_I(sb->s_root->d_inode)-> create_date) && HFSPLUS_SB(sb)->hidden_dir) { struct qstr str; char name[32]; if (dentry->d_fsdata) { /* * We found a link pointing to another link, * so ignore it and treat it as regular file. */ cnid = (unsigned long)dentry->d_fsdata; linkid = 0; } else { dentry->d_fsdata = (void *)(unsigned long)cnid; linkid = be32_to_cpu(entry.file.permissions.dev); str.len = sprintf(name, "iNode%d", linkid); str.name = name; hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_SB(sb)->hidden_dir->i_ino, &str); goto again; } } else if (!dentry->d_fsdata) dentry->d_fsdata = (void *)(unsigned long)cnid; } else { pr_err("invalid catalog entry type in lookup\n"); err = -EIO; goto fail; } hfs_find_exit(&fd); inode = hfsplus_iget(dir->i_sb, cnid); if (IS_ERR(inode)) return ERR_CAST(inode); if (S_ISREG(inode->i_mode)) HFSPLUS_I(inode)->linkid = linkid; out: d_add(dentry, inode); return NULL; fail: hfs_find_exit(&fd); return ERR_PTR(err); } static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir) { struct inode *inode = file_inode(filp); struct super_block *sb = inode->i_sb; int len, err; char strbuf[HFSPLUS_MAX_STRLEN + 1]; hfsplus_cat_entry entry; struct hfs_find_data fd; struct hfsplus_readdir_data *rd; u16 type; if (filp->f_pos >= inode->i_size) return 0; err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); if (err) return err; hfsplus_cat_build_key(sb, fd.search_key, inode->i_ino, NULL); err = hfs_brec_find(&fd, hfs_find_rec_by_key); if (err) goto out; switch ((u32)filp->f_pos) { case 0: /* This is completely artificial... 
*/ if (filldir(dirent, ".", 1, 0, inode->i_ino, DT_DIR)) goto out; filp->f_pos++; /* fall through */ case 1: if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) { err = -EIO; goto out; } hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) { pr_err("bad catalog folder thread\n"); err = -EIO; goto out; } if (fd.entrylength < HFSPLUS_MIN_THREAD_SZ) { pr_err("truncated catalog thread\n"); err = -EIO; goto out; } if (filldir(dirent, "..", 2, 1, be32_to_cpu(entry.thread.parentID), DT_DIR)) goto out; filp->f_pos++; /* fall through */ default: if (filp->f_pos >= inode->i_size) goto out; err = hfs_brec_goto(&fd, filp->f_pos - 1); if (err) goto out; } for (;;) { if (be32_to_cpu(fd.key->cat.parent) != inode->i_ino) { pr_err("walked past end of dir\n"); err = -EIO; goto out; } if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) { err = -EIO; goto out; } hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); type = be16_to_cpu(entry.type); len = HFSPLUS_MAX_STRLEN; err = hfsplus_uni2asc(sb, &fd.key->cat.name, strbuf, &len); if (err) goto out; if (type == HFSPLUS_FOLDER) { if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) { pr_err("small dir entry\n"); err = -EIO; goto out; } if (HFSPLUS_SB(sb)->hidden_dir && HFSPLUS_SB(sb)->hidden_dir->i_ino == be32_to_cpu(entry.folder.id)) goto next; if (filldir(dirent, strbuf, len, filp->f_pos, be32_to_cpu(entry.folder.id), DT_DIR)) break; } else if (type == HFSPLUS_FILE) { if (fd.entrylength < sizeof(struct hfsplus_cat_file)) { pr_err("small file entry\n"); err = -EIO; goto out; } if (filldir(dirent, strbuf, len, filp->f_pos, be32_to_cpu(entry.file.id), DT_REG)) break; } else { pr_err("bad catalog entry type\n"); err = -EIO; goto out; } next: filp->f_pos++; if (filp->f_pos >= inode->i_size) goto out; err = hfs_brec_goto(&fd, 1); if (err) goto out; } rd = filp->private_data; if (!rd) { rd = kmalloc(sizeof(struct hfsplus_readdir_data), GFP_KERNEL); if (!rd) { err = -ENOMEM; goto out; } filp->private_data = rd; rd->file = filp; list_add(&rd->list, &HFSPLUS_I(inode)->open_dir_list); } memcpy(&rd->key, fd.key, sizeof(struct hfsplus_cat_key)); out: hfs_find_exit(&fd); return err; } static int hfsplus_dir_release(struct inode *inode, struct file *file) { struct hfsplus_readdir_data *rd = file->private_data; if (rd) { mutex_lock(&inode->i_mutex); list_del(&rd->list); mutex_unlock(&inode->i_mutex); kfree(rd); } return 0; } static int hfsplus_link(struct dentry *src_dentry, struct inode *dst_dir, struct dentry *dst_dentry) { struct hfsplus_sb_info *sbi = HFSPLUS_SB(dst_dir->i_sb); struct inode *inode = src_dentry->d_inode; struct inode *src_dir = src_dentry->d_parent->d_inode; struct qstr str; char name[32]; u32 cnid, id; int res; if (HFSPLUS_IS_RSRC(inode)) return -EPERM; if (!S_ISREG(inode->i_mode)) return -EPERM; mutex_lock(&sbi->vh_mutex); if (inode->i_ino == (u32)(unsigned long)src_dentry->d_fsdata) { for (;;) { get_random_bytes(&id, sizeof(cnid)); id &= 0x3fffffff; str.name = name; str.len = sprintf(name, "iNode%d", id); res = hfsplus_rename_cat(inode->i_ino, src_dir, &src_dentry->d_name, sbi->hidden_dir, &str); if (!res) break; if (res != -EEXIST) goto out; } HFSPLUS_I(inode)->linkid = id; cnid = sbi->next_cnid++; src_dentry->d_fsdata = (void *)(unsigned long)cnid; res = hfsplus_create_cat(cnid, src_dir, &src_dentry->d_name, inode); if (res) /* panic? 
*/ goto out; sbi->file_count++; } cnid = sbi->next_cnid++; res = hfsplus_create_cat(cnid, dst_dir, &dst_dentry->d_name, inode); if (res) goto out; inc_nlink(inode); hfsplus_instantiate(dst_dentry, inode, cnid); ihold(inode); inode->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(inode); sbi->file_count++; hfsplus_mark_mdb_dirty(dst_dir->i_sb); out: mutex_unlock(&sbi->vh_mutex); return res; } static int hfsplus_unlink(struct inode *dir, struct dentry *dentry) { struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); struct inode *inode = dentry->d_inode; struct qstr str; char name[32]; u32 cnid; int res; if (HFSPLUS_IS_RSRC(inode)) return -EPERM; mutex_lock(&sbi->vh_mutex); cnid = (u32)(unsigned long)dentry->d_fsdata; if (inode->i_ino == cnid && atomic_read(&HFSPLUS_I(inode)->opencnt)) { str.name = name; str.len = sprintf(name, "temp%lu", inode->i_ino); res = hfsplus_rename_cat(inode->i_ino, dir, &dentry->d_name, sbi->hidden_dir, &str); if (!res) { inode->i_flags |= S_DEAD; drop_nlink(inode); } goto out; } res = hfsplus_delete_cat(cnid, dir, &dentry->d_name); if (res) goto out; if (inode->i_nlink > 0) drop_nlink(inode); if (inode->i_ino == cnid) clear_nlink(inode); if (!inode->i_nlink) { if (inode->i_ino != cnid) { sbi->file_count--; if (!atomic_read(&HFSPLUS_I(inode)->opencnt)) { res = hfsplus_delete_cat(inode->i_ino, sbi->hidden_dir, NULL); if (!res) hfsplus_delete_inode(inode); } else inode->i_flags |= S_DEAD; } else hfsplus_delete_inode(inode); } else sbi->file_count--; inode->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(inode); out: mutex_unlock(&sbi->vh_mutex); return res; } static int hfsplus_rmdir(struct inode *dir, struct dentry *dentry) { struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); struct inode *inode = dentry->d_inode; int res; if (inode->i_size != 2) return -ENOTEMPTY; mutex_lock(&sbi->vh_mutex); res = hfsplus_delete_cat(inode->i_ino, dir, &dentry->d_name); if (res) goto out; clear_nlink(inode); inode->i_ctime = CURRENT_TIME_SEC; hfsplus_delete_inode(inode); mark_inode_dirty(inode); out: mutex_unlock(&sbi->vh_mutex); return res; } static int hfsplus_symlink(struct inode *dir, struct dentry *dentry, const char *symname) { struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); struct inode *inode; int res = -ENOSPC; mutex_lock(&sbi->vh_mutex); inode = hfsplus_new_inode(dir->i_sb, S_IFLNK | S_IRWXUGO); if (!inode) goto out; res = page_symlink(inode, symname, strlen(symname) + 1); if (res) goto out_err; res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode); if (res) goto out_err; res = hfsplus_init_inode_security(inode, dir, &dentry->d_name); if (res == -EOPNOTSUPP) res = 0; /* Operation is not supported. */ else if (res) { /* Try to delete anyway without error analysis. 
*/ hfsplus_delete_cat(inode->i_ino, dir, &dentry->d_name); goto out_err; } hfsplus_instantiate(dentry, inode, inode->i_ino); mark_inode_dirty(inode); goto out; out_err: clear_nlink(inode); hfsplus_delete_inode(inode); iput(inode); out: mutex_unlock(&sbi->vh_mutex); return res; } static int hfsplus_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) { struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); struct inode *inode; int res = -ENOSPC; mutex_lock(&sbi->vh_mutex); inode = hfsplus_new_inode(dir->i_sb, mode); if (!inode) goto out; if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) init_special_inode(inode, mode, rdev); res = hfsplus_create_cat(inode->i_ino, dir, &dentry->d_name, inode); if (res) goto failed_mknod; res = hfsplus_init_inode_security(inode, dir, &dentry->d_name); if (res == -EOPNOTSUPP) res = 0; /* Operation is not supported. */ else if (res) { /* Try to delete anyway without error analysis. */ hfsplus_delete_cat(inode->i_ino, dir, &dentry->d_name); goto failed_mknod; } hfsplus_instantiate(dentry, inode, inode->i_ino); mark_inode_dirty(inode); goto out; failed_mknod: clear_nlink(inode); hfsplus_delete_inode(inode); iput(inode); out: mutex_unlock(&sbi->vh_mutex); return res; } static int hfsplus_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl) { return hfsplus_mknod(dir, dentry, mode, 0); } static int hfsplus_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { return hfsplus_mknod(dir, dentry, mode | S_IFDIR, 0); } static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { int res; /* Unlink destination if it already exists */ if (new_dentry->d_inode) { if (S_ISDIR(new_dentry->d_inode->i_mode)) res = hfsplus_rmdir(new_dir, new_dentry); else res = hfsplus_unlink(new_dir, new_dentry); if (res) return res; } res = hfsplus_rename_cat((u32)(unsigned long)old_dentry->d_fsdata, old_dir, &old_dentry->d_name, new_dir, &new_dentry->d_name); if (!res) new_dentry->d_fsdata = old_dentry->d_fsdata; return res; } const struct inode_operations hfsplus_dir_inode_operations = { .lookup = hfsplus_lookup, .create = hfsplus_create, .link = hfsplus_link, .unlink = hfsplus_unlink, .mkdir = hfsplus_mkdir, .rmdir = hfsplus_rmdir, .symlink = hfsplus_symlink, .mknod = hfsplus_mknod, .rename = hfsplus_rename, .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = hfsplus_listxattr, .removexattr = hfsplus_removexattr, }; const struct file_operations hfsplus_dir_operations = { .fsync = hfsplus_file_fsync, .read = generic_read_dir, .readdir = hfsplus_readdir, .unlocked_ioctl = hfsplus_ioctl, .llseek = generic_file_llseek, .release = hfsplus_dir_release, };
gpl-2.0
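On the hfsplus directory code above: hard-link targets and still-open unlinked files are both parked under the volume's hidden metadata directory, and the only contract between the helpers is the catalog name each one builds ("iNode<id>" with a random 30-bit id in hfsplus_link(), "temp<inode number>" in hfsplus_unlink()). The short user-space sketch below is illustrative only; rand() stands in for get_random_bytes(), the inode number 12345 is invented, and the name[32] buffers simply match the ones used in fs/hfsplus/dir.c.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
	char link_name[32], temp_name[32];
	unsigned int id;
	unsigned long ino = 12345;	/* hypothetical inode number */

	srand((unsigned int)time(NULL));

	/* hfsplus_link(): pick a random 30-bit id (retried on -EEXIST). */
	id = (unsigned int)rand() & 0x3fffffff;
	snprintf(link_name, sizeof(link_name), "iNode%u", id);

	/* hfsplus_unlink() of a still-open file: park it under a temp name. */
	snprintf(temp_name, sizeof(temp_name), "temp%lu", ino);

	printf("hard-link target stored as \"%s\"\n", link_name);
	printf("open-but-unlinked file stored as \"%s\"\n", temp_name);
	return 0;
}

hfsplus_lookup() later rebuilds the same "iNode<id>" string from the link id stored in the file's permissions.dev field, which is why the format used when creating the hidden entry has to match the one used on lookup.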
Fusion-Devices/android_kernel_lge_msm8994
drivers/staging/comedi/drivers/amplc_pci224.c
2089
44199
/* comedi/drivers/amplc_pci224.c Driver for Amplicon PCI224 and PCI234 AO boards. Copyright (C) 2005 MEV Ltd. <http://www.mev.co.uk/> COMEDI - Linux Control and Measurement Device Interface Copyright (C) 1998,2000 David A. Schleef <ds@schleef.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: amplc_pci224 Description: Amplicon PCI224, PCI234 Author: Ian Abbott <abbotti@mev.co.uk> Devices: [Amplicon] PCI224 (amplc_pci224 or pci224), PCI234 (amplc_pci224 or pci234) Updated: Wed, 22 Oct 2008 12:25:08 +0100 Status: works, but see caveats Supports: - ao_insn read/write - ao_do_cmd mode with the following sources: - start_src TRIG_INT TRIG_EXT - scan_begin_src TRIG_TIMER TRIG_EXT - convert_src TRIG_NOW - scan_end_src TRIG_COUNT - stop_src TRIG_COUNT TRIG_EXT TRIG_NONE The channel list must contain at least one channel with no repeated channels. The scan end count must equal the number of channels in the channel list. There is only one external trigger source so only one of start_src, scan_begin_src or stop_src may use TRIG_EXT. Configuration options - PCI224: [0] - PCI bus of device (optional). [1] - PCI slot of device (optional). If bus/slot is not specified, the first available PCI device will be used. [2] - Select available ranges according to jumper LK1. All channels are set to the same range: 0=Jumper position 1-2 (factory default), 4 software-selectable internal voltage references, giving 4 bipolar and 4 unipolar ranges: [-10V,+10V], [-5V,+5V], [-2.5V,+2.5V], [-1.25V,+1.25V], [0,+10V], [0,+5V], [0,+2.5V], [0,1.25V]. 1=Jumper position 2-3, 1 external voltage reference, giving 1 bipolar and 1 unipolar range: [-Vext,+Vext], [0,+Vext]. Configuration options - PCI234: [0] - PCI bus of device (optional). [1] - PCI slot of device (optional). If bus/slot is not specified, the first available PCI device will be used. [2] - Select internal or external voltage reference according to jumper LK1. This affects all channels: 0=Jumper position 1-2 (factory default), Vref=5V internal. 1=Jumper position 2-3, Vref=Vext external. [3] - Select channel 0 range according to jumper LK2: 0=Jumper position 2-3 (factory default), range [-2*Vref,+2*Vref] (10V bipolar when options[2]=0). 1=Jumper position 1-2, range [-Vref,+Vref] (5V bipolar when options[2]=0). [4] - Select channel 1 range according to jumper LK3: cf. options[3]. [5] - Select channel 2 range according to jumper LK4: cf. options[3]. [6] - Select channel 3 range according to jumper LK5: cf. options[3]. Passing a zero for an option is the same as leaving it unspecified. Caveats: 1) All channels on the PCI224 share the same range. Any change to the range as a result of insn_write or a streaming command will affect the output voltages of all channels, including those not specified by the instruction or command. 2) For the analog output command, the first scan may be triggered falsely at the start of acquisition. 
     This occurs when the DAC scan trigger source is switched from 'none'
     to 'timer' (scan_begin_src = TRIG_TIMER) or 'external'
     (scan_begin_src == TRIG_EXT) at the start of acquisition and the
     trigger source is at logic level 1 at the time of the switch. This is
     very likely for TRIG_TIMER. For TRIG_EXT, it depends on the state of
     the external line and whether the CR_INVERT flag has been set. The
     remaining scans are triggered correctly.
*/

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

#include "../comedidev.h"

#include "comedi_fc.h"
#include "8253.h"

#define DRIVER_NAME "amplc_pci224"

/*
 * PCI IDs.
 */
#define PCI_DEVICE_ID_AMPLICON_PCI224 0x0007
#define PCI_DEVICE_ID_AMPLICON_PCI234 0x0008
#define PCI_DEVICE_ID_INVALID 0xffff

/*
 * PCI224/234 i/o space 1 (PCIBAR2) registers.
 */
#define PCI224_IO1_SIZE	0x20	/* Size of i/o space 1 (8-bit registers) */
#define PCI224_Z2_CT0	0x14	/* 82C54 counter/timer 0 */
#define PCI224_Z2_CT1	0x15	/* 82C54 counter/timer 1 */
#define PCI224_Z2_CT2	0x16	/* 82C54 counter/timer 2 */
#define PCI224_Z2_CTC	0x17	/* 82C54 counter/timer control word */
#define PCI224_ZCLK_SCE	0x1A	/* Group Z Clock Configuration Register */
#define PCI224_ZGAT_SCE	0x1D	/* Group Z Gate Configuration Register */
#define PCI224_INT_SCE	0x1E	/* ISR Interrupt source mask register */
				/* /Interrupt status */

/*
 * PCI224/234 i/o space 2 (PCIBAR3) 16-bit registers.
 */
#define PCI224_IO2_SIZE	0x10	/* Size of i/o space 2 (16-bit registers). */
#define PCI224_DACDATA	0x00	/* (w-o) DAC FIFO data. */
#define PCI224_SOFTTRIG	0x00	/* (r-o) DAC software scan trigger. */
#define PCI224_DACCON	0x02	/* (r/w) DAC status/configuration. */
#define PCI224_FIFOSIZ	0x04	/* (w-o) FIFO size for wraparound mode. */
#define PCI224_DACCEN	0x06	/* (w-o) DAC channel enable register. */

/*
 * DACCON values.
 */
/* (r/w) Scan trigger. */
#define PCI224_DACCON_TRIG_MASK		(7 << 0)
#define PCI224_DACCON_TRIG_NONE		(0 << 0)	/* none */
#define PCI224_DACCON_TRIG_SW		(1 << 0)	/* software trig */
#define PCI224_DACCON_TRIG_EXTP		(2 << 0)	/* ext +ve edge */
#define PCI224_DACCON_TRIG_EXTN		(3 << 0)	/* ext -ve edge */
#define PCI224_DACCON_TRIG_Z2CT0	(4 << 0)	/* Z2 CT0 out */
#define PCI224_DACCON_TRIG_Z2CT1	(5 << 0)	/* Z2 CT1 out */
#define PCI224_DACCON_TRIG_Z2CT2	(6 << 0)	/* Z2 CT2 out */
/* (r/w) Polarity (PCI224 only, PCI234 always bipolar!). */
#define PCI224_DACCON_POLAR_MASK	(1 << 3)
#define PCI224_DACCON_POLAR_UNI		(0 << 3)	/* range [0,Vref] */
#define PCI224_DACCON_POLAR_BI		(1 << 3)	/* range [-Vref,Vref] */
/* (r/w) Internal Vref (PCI224 only, when LK1 in position 1-2). */
#define PCI224_DACCON_VREF_MASK		(3 << 4)
#define PCI224_DACCON_VREF_1_25		(0 << 4)	/* Vref = 1.25V */
#define PCI224_DACCON_VREF_2_5		(1 << 4)	/* Vref = 2.5V */
#define PCI224_DACCON_VREF_5		(2 << 4)	/* Vref = 5V */
#define PCI224_DACCON_VREF_10		(3 << 4)	/* Vref = 10V */
/* (r/w) Wraparound mode enable (to play back stored waveform). */
#define PCI224_DACCON_FIFOWRAP		(1 << 7)
/* (r/w) FIFO enable. It MUST be set! */
#define PCI224_DACCON_FIFOENAB		(1 << 8)
/* (r/w) FIFO interrupt trigger level (most values are not very useful).
*/ #define PCI224_DACCON_FIFOINTR_MASK (7 << 9) #define PCI224_DACCON_FIFOINTR_EMPTY (0 << 9) /* when empty */ #define PCI224_DACCON_FIFOINTR_NEMPTY (1 << 9) /* when not empty */ #define PCI224_DACCON_FIFOINTR_NHALF (2 << 9) /* when not half full */ #define PCI224_DACCON_FIFOINTR_HALF (3 << 9) /* when half full */ #define PCI224_DACCON_FIFOINTR_NFULL (4 << 9) /* when not full */ #define PCI224_DACCON_FIFOINTR_FULL (5 << 9) /* when full */ /* (r-o) FIFO fill level. */ #define PCI224_DACCON_FIFOFL_MASK (7 << 12) #define PCI224_DACCON_FIFOFL_EMPTY (1 << 12) /* 0 */ #define PCI224_DACCON_FIFOFL_ONETOHALF (0 << 12) /* [1,2048] */ #define PCI224_DACCON_FIFOFL_HALFTOFULL (4 << 12) /* [2049,4095] */ #define PCI224_DACCON_FIFOFL_FULL (6 << 12) /* 4096 */ /* (r-o) DAC busy flag. */ #define PCI224_DACCON_BUSY (1 << 15) /* (w-o) FIFO reset. */ #define PCI224_DACCON_FIFORESET (1 << 12) /* (w-o) Global reset (not sure what it does). */ #define PCI224_DACCON_GLOBALRESET (1 << 13) /* * DAC FIFO size. */ #define PCI224_FIFO_SIZE 4096 /* * DAC FIFO guaranteed minimum room available, depending on reported fill level. * The maximum room available depends on the reported fill level and how much * has been written! */ #define PCI224_FIFO_ROOM_EMPTY PCI224_FIFO_SIZE #define PCI224_FIFO_ROOM_ONETOHALF (PCI224_FIFO_SIZE / 2) #define PCI224_FIFO_ROOM_HALFTOFULL 1 #define PCI224_FIFO_ROOM_FULL 0 /* * Counter/timer clock input configuration sources. */ #define CLK_CLK 0 /* reserved (channel-specific clock) */ #define CLK_10MHZ 1 /* internal 10 MHz clock */ #define CLK_1MHZ 2 /* internal 1 MHz clock */ #define CLK_100KHZ 3 /* internal 100 kHz clock */ #define CLK_10KHZ 4 /* internal 10 kHz clock */ #define CLK_1KHZ 5 /* internal 1 kHz clock */ #define CLK_OUTNM1 6 /* output of channel-1 modulo total */ #define CLK_EXT 7 /* external clock */ /* Macro to construct clock input configuration register value. */ #define CLK_CONFIG(chan, src) ((((chan) & 3) << 3) | ((src) & 7)) /* Timebases in ns. */ #define TIMEBASE_10MHZ 100 #define TIMEBASE_1MHZ 1000 #define TIMEBASE_100KHZ 10000 #define TIMEBASE_10KHZ 100000 #define TIMEBASE_1KHZ 1000000 /* * Counter/timer gate input configuration sources. */ #define GAT_VCC 0 /* VCC (i.e. enabled) */ #define GAT_GND 1 /* GND (i.e. disabled) */ #define GAT_EXT 2 /* reserved (external gate input) */ #define GAT_NOUTNM2 3 /* inverted output of channel-2 modulo total */ /* Macro to construct gate input configuration register value. */ #define GAT_CONFIG(chan, src) ((((chan) & 3) << 3) | ((src) & 7)) /* * Summary of CLK_OUTNM1 and GAT_NOUTNM2 connections for PCI224 and PCI234: * * Channel's Channel's * clock input gate input * Channel CLK_OUTNM1 GAT_NOUTNM2 * ------- ---------- ----------- * Z2-CT0 Z2-CT2-OUT /Z2-CT1-OUT * Z2-CT1 Z2-CT0-OUT /Z2-CT2-OUT * Z2-CT2 Z2-CT1-OUT /Z2-CT0-OUT */ /* * Interrupt enable/status bits */ #define PCI224_INTR_EXT 0x01 /* rising edge on external input */ #define PCI224_INTR_DAC 0x04 /* DAC (FIFO) interrupt */ #define PCI224_INTR_Z2CT1 0x20 /* rising edge on Z2-CT1 output */ #define PCI224_INTR_EDGE_BITS (PCI224_INTR_EXT | PCI224_INTR_Z2CT1) #define PCI224_INTR_LEVEL_BITS PCI224_INTR_DACFIFO /* * Handy macros. */ /* Combine old and new bits. */ #define COMBINE(old, new, mask) (((old) & ~(mask)) | ((new) & (mask))) /* Current CPU. XXX should this be hard_smp_processor_id()? */ #define THISCPU smp_processor_id() /* State bits for use with atomic bit operations. */ #define AO_CMD_STARTED 0 /* * Range tables. 
*/ /* The software selectable internal ranges for PCI224 (option[2] == 0). */ static const struct comedi_lrange range_pci224_internal = { 8, { BIP_RANGE(10), BIP_RANGE(5), BIP_RANGE(2.5), BIP_RANGE(1.25), UNI_RANGE(10), UNI_RANGE(5), UNI_RANGE(2.5), UNI_RANGE(1.25), } }; static const unsigned short hwrange_pci224_internal[8] = { PCI224_DACCON_POLAR_BI | PCI224_DACCON_VREF_10, PCI224_DACCON_POLAR_BI | PCI224_DACCON_VREF_5, PCI224_DACCON_POLAR_BI | PCI224_DACCON_VREF_2_5, PCI224_DACCON_POLAR_BI | PCI224_DACCON_VREF_1_25, PCI224_DACCON_POLAR_UNI | PCI224_DACCON_VREF_10, PCI224_DACCON_POLAR_UNI | PCI224_DACCON_VREF_5, PCI224_DACCON_POLAR_UNI | PCI224_DACCON_VREF_2_5, PCI224_DACCON_POLAR_UNI | PCI224_DACCON_VREF_1_25, }; /* The software selectable external ranges for PCI224 (option[2] == 1). */ static const struct comedi_lrange range_pci224_external = { 2, { RANGE_ext(-1, 1), /* bipolar [-Vref,+Vref] */ RANGE_ext(0, 1), /* unipolar [0,+Vref] */ } }; static const unsigned short hwrange_pci224_external[2] = { PCI224_DACCON_POLAR_BI, PCI224_DACCON_POLAR_UNI, }; /* The hardware selectable Vref*2 external range for PCI234 * (option[2] == 1, option[3+n] == 0). */ static const struct comedi_lrange range_pci234_ext2 = { 1, { RANGE_ext(-2, 2), } }; /* The hardware selectable Vref external range for PCI234 * (option[2] == 1, option[3+n] == 1). */ static const struct comedi_lrange range_pci234_ext = { 1, { RANGE_ext(-1, 1), } }; /* This serves for all the PCI234 ranges. */ static const unsigned short hwrange_pci234[1] = { PCI224_DACCON_POLAR_BI, /* bipolar - hardware ignores it! */ }; /* * Board descriptions. */ enum pci224_model { any_model, pci224_model, pci234_model }; struct pci224_board { const char *name; unsigned short devid; enum pci224_model model; unsigned int ao_chans; unsigned int ao_bits; }; static const struct pci224_board pci224_boards[] = { { .name = "pci224", .devid = PCI_DEVICE_ID_AMPLICON_PCI224, .model = pci224_model, .ao_chans = 16, .ao_bits = 12, }, { .name = "pci234", .devid = PCI_DEVICE_ID_AMPLICON_PCI234, .model = pci234_model, .ao_chans = 4, .ao_bits = 16, }, { .name = DRIVER_NAME, .devid = PCI_DEVICE_ID_INVALID, .model = any_model, /* wildcard */ }, }; /* this structure is for data unique to this hardware driver. If several hardware drivers keep similar information in this structure, feel free to suggest moving the variable to the struct comedi_device struct. */ struct pci224_private { const unsigned short *hwrange; unsigned long iobase1; unsigned long state; spinlock_t ao_spinlock; unsigned int *ao_readback; short *ao_scan_vals; unsigned char *ao_scan_order; int intr_cpuid; short intr_running; unsigned short daccon; unsigned int cached_div1; unsigned int cached_div2; unsigned int ao_stop_count; short ao_stop_continuous; unsigned short ao_enab; /* max 16 channels so 'short' will do */ unsigned char intsce; }; /* * Called from the 'insn_write' function to perform a single write. */ static void pci224_ao_set_data(struct comedi_device *dev, int chan, int range, unsigned int data) { const struct pci224_board *thisboard = comedi_board(dev); struct pci224_private *devpriv = dev->private; unsigned short mangled; /* Store unmangled data for readback. */ devpriv->ao_readback[chan] = data; /* Enable the channel. */ outw(1 << chan, dev->iobase + PCI224_DACCEN); /* Set range and reset FIFO. 
*/ devpriv->daccon = COMBINE(devpriv->daccon, devpriv->hwrange[range], (PCI224_DACCON_POLAR_MASK | PCI224_DACCON_VREF_MASK)); outw(devpriv->daccon | PCI224_DACCON_FIFORESET, dev->iobase + PCI224_DACCON); /* * Mangle the data. The hardware expects: * - bipolar: 16-bit 2's complement * - unipolar: 16-bit unsigned */ mangled = (unsigned short)data << (16 - thisboard->ao_bits); if ((devpriv->daccon & PCI224_DACCON_POLAR_MASK) == PCI224_DACCON_POLAR_BI) { mangled ^= 0x8000; } /* Write mangled data to the FIFO. */ outw(mangled, dev->iobase + PCI224_DACDATA); /* Trigger the conversion. */ inw(dev->iobase + PCI224_SOFTTRIG); } /* * 'insn_write' function for AO subdevice. */ static int pci224_ao_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i; int chan, range; /* Unpack channel and range. */ chan = CR_CHAN(insn->chanspec); range = CR_RANGE(insn->chanspec); /* Writing a list of values to an AO channel is probably not * very useful, but that's how the interface is defined. */ for (i = 0; i < insn->n; i++) pci224_ao_set_data(dev, chan, range, data[i]); return i; } /* * 'insn_read' function for AO subdevice. * * N.B. The value read will not be valid if the DAC channel has * never been written successfully since the device was attached * or since the channel has been used by an AO streaming write * command. */ static int pci224_ao_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct pci224_private *devpriv = dev->private; int i; int chan; chan = CR_CHAN(insn->chanspec); for (i = 0; i < insn->n; i++) data[i] = devpriv->ao_readback[chan]; return i; } /* * Just a wrapper for the inline function 'i8253_cascade_ns_to_timer'. */ static void pci224_cascade_ns_to_timer(int osc_base, unsigned int *d1, unsigned int *d2, unsigned int *nanosec, int round_mode) { i8253_cascade_ns_to_timer(osc_base, d1, d2, nanosec, round_mode); } /* * Kills a command running on the AO subdevice. */ static void pci224_ao_stop(struct comedi_device *dev, struct comedi_subdevice *s) { struct pci224_private *devpriv = dev->private; unsigned long flags; if (!test_and_clear_bit(AO_CMD_STARTED, &devpriv->state)) return; spin_lock_irqsave(&devpriv->ao_spinlock, flags); /* Kill the interrupts. */ devpriv->intsce = 0; outb(0, devpriv->iobase1 + PCI224_INT_SCE); /* * Interrupt routine may or may not be running. We may or may not * have been called from the interrupt routine (directly or * indirectly via a comedi_events() callback routine). It's highly * unlikely that we've been called from some other interrupt routine * but who knows what strange things coders get up to! * * If the interrupt routine is currently running, wait for it to * finish, unless we appear to have been called via the interrupt * routine. */ while (devpriv->intr_running && devpriv->intr_cpuid != THISCPU) { spin_unlock_irqrestore(&devpriv->ao_spinlock, flags); spin_lock_irqsave(&devpriv->ao_spinlock, flags); } spin_unlock_irqrestore(&devpriv->ao_spinlock, flags); /* Reconfigure DAC for insn_write usage. */ outw(0, dev->iobase + PCI224_DACCEN); /* Disable channels. */ devpriv->daccon = COMBINE(devpriv->daccon, PCI224_DACCON_TRIG_SW | PCI224_DACCON_FIFOINTR_EMPTY, PCI224_DACCON_TRIG_MASK | PCI224_DACCON_FIFOINTR_MASK); outw(devpriv->daccon | PCI224_DACCON_FIFORESET, dev->iobase + PCI224_DACCON); } /* * Handles start of acquisition for the AO subdevice. 
*/ static void pci224_ao_start(struct comedi_device *dev, struct comedi_subdevice *s) { struct pci224_private *devpriv = dev->private; struct comedi_cmd *cmd = &s->async->cmd; unsigned long flags; set_bit(AO_CMD_STARTED, &devpriv->state); if (!devpriv->ao_stop_continuous && devpriv->ao_stop_count == 0) { /* An empty acquisition! */ pci224_ao_stop(dev, s); s->async->events |= COMEDI_CB_EOA; comedi_event(dev, s); } else { /* Enable interrupts. */ spin_lock_irqsave(&devpriv->ao_spinlock, flags); if (cmd->stop_src == TRIG_EXT) devpriv->intsce = PCI224_INTR_EXT | PCI224_INTR_DAC; else devpriv->intsce = PCI224_INTR_DAC; outb(devpriv->intsce, devpriv->iobase1 + PCI224_INT_SCE); spin_unlock_irqrestore(&devpriv->ao_spinlock, flags); } } /* * Handles interrupts from the DAC FIFO. */ static void pci224_ao_handle_fifo(struct comedi_device *dev, struct comedi_subdevice *s) { struct pci224_private *devpriv = dev->private; struct comedi_cmd *cmd = &s->async->cmd; unsigned int num_scans; unsigned int room; unsigned short dacstat; unsigned int i, n; unsigned int bytes_per_scan; if (cmd->chanlist_len) { bytes_per_scan = cmd->chanlist_len * sizeof(short); } else { /* Shouldn't get here! */ bytes_per_scan = sizeof(short); } /* Determine number of scans available in buffer. */ num_scans = comedi_buf_read_n_available(s->async) / bytes_per_scan; if (!devpriv->ao_stop_continuous) { /* Fixed number of scans. */ if (num_scans > devpriv->ao_stop_count) num_scans = devpriv->ao_stop_count; } /* Determine how much room is in the FIFO (in samples). */ dacstat = inw(dev->iobase + PCI224_DACCON); switch (dacstat & PCI224_DACCON_FIFOFL_MASK) { case PCI224_DACCON_FIFOFL_EMPTY: room = PCI224_FIFO_ROOM_EMPTY; if (!devpriv->ao_stop_continuous && devpriv->ao_stop_count == 0) { /* FIFO empty at end of counted acquisition. */ pci224_ao_stop(dev, s); s->async->events |= COMEDI_CB_EOA; comedi_event(dev, s); return; } break; case PCI224_DACCON_FIFOFL_ONETOHALF: room = PCI224_FIFO_ROOM_ONETOHALF; break; case PCI224_DACCON_FIFOFL_HALFTOFULL: room = PCI224_FIFO_ROOM_HALFTOFULL; break; default: room = PCI224_FIFO_ROOM_FULL; break; } if (room >= PCI224_FIFO_ROOM_ONETOHALF) { /* FIFO is less than half-full. */ if (num_scans == 0) { /* Nothing left to put in the FIFO. */ pci224_ao_stop(dev, s); s->async->events |= COMEDI_CB_OVERFLOW; dev_err(dev->class_dev, "AO buffer underrun\n"); } } /* Determine how many new scans can be put in the FIFO. */ if (cmd->chanlist_len) room /= cmd->chanlist_len; /* Determine how many scans to process. */ if (num_scans > room) num_scans = room; /* Process scans. */ for (n = 0; n < num_scans; n++) { cfc_read_array_from_buffer(s, &devpriv->ao_scan_vals[0], bytes_per_scan); for (i = 0; i < cmd->chanlist_len; i++) { outw(devpriv->ao_scan_vals[devpriv->ao_scan_order[i]], dev->iobase + PCI224_DACDATA); } } if (!devpriv->ao_stop_continuous) { devpriv->ao_stop_count -= num_scans; if (devpriv->ao_stop_count == 0) { /* * Change FIFO interrupt trigger level to wait * until FIFO is empty. */ devpriv->daccon = COMBINE(devpriv->daccon, PCI224_DACCON_FIFOINTR_EMPTY, PCI224_DACCON_FIFOINTR_MASK); outw(devpriv->daccon, dev->iobase + PCI224_DACCON); } } if ((devpriv->daccon & PCI224_DACCON_TRIG_MASK) == PCI224_DACCON_TRIG_NONE) { unsigned short trig; /* * This is the initial DAC FIFO interrupt at the * start of the acquisition. The DAC's scan trigger * has been set to 'none' up until now. * * Now that data has been written to the FIFO, the * DAC's scan trigger source can be set to the * correct value. 
* * BUG: The first scan will be triggered immediately * if the scan trigger source is at logic level 1. */ if (cmd->scan_begin_src == TRIG_TIMER) { trig = PCI224_DACCON_TRIG_Z2CT0; } else { /* cmd->scan_begin_src == TRIG_EXT */ if (cmd->scan_begin_arg & CR_INVERT) trig = PCI224_DACCON_TRIG_EXTN; else trig = PCI224_DACCON_TRIG_EXTP; } devpriv->daccon = COMBINE(devpriv->daccon, trig, PCI224_DACCON_TRIG_MASK); outw(devpriv->daccon, dev->iobase + PCI224_DACCON); } if (s->async->events) comedi_event(dev, s); } /* * Internal trigger function to start acquisition on AO subdevice. */ static int pci224_ao_inttrig_start(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int trignum) { if (trignum != 0) return -EINVAL; s->async->inttrig = NULL; pci224_ao_start(dev, s); return 1; } #define MAX_SCAN_PERIOD 0xFFFFFFFFU #define MIN_SCAN_PERIOD 2500 #define CONVERT_PERIOD 625 /* * 'do_cmdtest' function for AO subdevice. */ static int pci224_ao_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { struct pci224_private *devpriv = dev->private; int err = 0; unsigned int tmp; /* Step 1 : check if triggers are trivially valid */ err |= cfc_check_trigger_src(&cmd->start_src, TRIG_INT | TRIG_EXT); err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_EXT | TRIG_TIMER); err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_NOW); err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT); err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_EXT | TRIG_NONE); if (err) return 1; /* Step 2a : make sure trigger sources are unique */ err |= cfc_check_trigger_is_unique(cmd->start_src); err |= cfc_check_trigger_is_unique(cmd->scan_begin_src); err |= cfc_check_trigger_is_unique(cmd->stop_src); /* Step 2b : and mutually compatible */ /* * There's only one external trigger signal (which makes these * tests easier). Only one thing can use it. */ tmp = 0; if (cmd->start_src & TRIG_EXT) tmp++; if (cmd->scan_begin_src & TRIG_EXT) tmp++; if (cmd->stop_src & TRIG_EXT) tmp++; if (tmp > 1) err |= -EINVAL; if (err) return 2; /* Step 3: check if arguments are trivially valid */ switch (cmd->start_src) { case TRIG_INT: err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0); break; case TRIG_EXT: /* Force to external trigger 0. */ if ((cmd->start_arg & ~CR_FLAGS_MASK) != 0) { cmd->start_arg = COMBINE(cmd->start_arg, 0, ~CR_FLAGS_MASK); err |= -EINVAL; } /* The only flag allowed is CR_EDGE, which is ignored. */ if ((cmd->start_arg & CR_FLAGS_MASK & ~CR_EDGE) != 0) { cmd->start_arg = COMBINE(cmd->start_arg, 0, CR_FLAGS_MASK & ~CR_EDGE); err |= -EINVAL; } break; } switch (cmd->scan_begin_src) { case TRIG_TIMER: err |= cfc_check_trigger_arg_max(&cmd->scan_begin_arg, MAX_SCAN_PERIOD); tmp = cmd->chanlist_len * CONVERT_PERIOD; if (tmp < MIN_SCAN_PERIOD) tmp = MIN_SCAN_PERIOD; err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg, tmp); break; case TRIG_EXT: /* Force to external trigger 0. */ if ((cmd->scan_begin_arg & ~CR_FLAGS_MASK) != 0) { cmd->scan_begin_arg = COMBINE(cmd->scan_begin_arg, 0, ~CR_FLAGS_MASK); err |= -EINVAL; } /* Only allow flags CR_EDGE and CR_INVERT. Ignore CR_EDGE. */ if ((cmd->scan_begin_arg & CR_FLAGS_MASK & ~(CR_EDGE | CR_INVERT)) != 0) { cmd->scan_begin_arg = COMBINE(cmd->scan_begin_arg, 0, CR_FLAGS_MASK & ~(CR_EDGE | CR_INVERT)); err |= -EINVAL; } break; } err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0); err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len); switch (cmd->stop_src) { case TRIG_COUNT: /* Any count allowed. 
*/ break; case TRIG_EXT: /* Force to external trigger 0. */ if ((cmd->stop_arg & ~CR_FLAGS_MASK) != 0) { cmd->stop_arg = COMBINE(cmd->stop_arg, 0, ~CR_FLAGS_MASK); err |= -EINVAL; } /* The only flag allowed is CR_EDGE, which is ignored. */ if ((cmd->stop_arg & CR_FLAGS_MASK & ~CR_EDGE) != 0) { cmd->stop_arg = COMBINE(cmd->stop_arg, 0, CR_FLAGS_MASK & ~CR_EDGE); } break; case TRIG_NONE: err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0); break; } if (err) return 3; /* Step 4: fix up any arguments. */ if (cmd->scan_begin_src == TRIG_TIMER) { unsigned int div1, div2, round; int round_mode = cmd->flags & TRIG_ROUND_MASK; tmp = cmd->scan_begin_arg; /* Check whether to use a single timer. */ switch (round_mode) { case TRIG_ROUND_NEAREST: default: round = TIMEBASE_10MHZ / 2; break; case TRIG_ROUND_DOWN: round = 0; break; case TRIG_ROUND_UP: round = TIMEBASE_10MHZ - 1; break; } /* Be careful to avoid overflow! */ div2 = cmd->scan_begin_arg / TIMEBASE_10MHZ; div2 += (round + cmd->scan_begin_arg % TIMEBASE_10MHZ) / TIMEBASE_10MHZ; if (div2 <= 0x10000) { /* A single timer will suffice. */ if (div2 < 2) div2 = 2; cmd->scan_begin_arg = div2 * TIMEBASE_10MHZ; if (cmd->scan_begin_arg < div2 || cmd->scan_begin_arg < TIMEBASE_10MHZ) { /* Overflow! */ cmd->scan_begin_arg = MAX_SCAN_PERIOD; } } else { /* Use two timers. */ div1 = devpriv->cached_div1; div2 = devpriv->cached_div2; pci224_cascade_ns_to_timer(TIMEBASE_10MHZ, &div1, &div2, &cmd->scan_begin_arg, round_mode); devpriv->cached_div1 = div1; devpriv->cached_div2 = div2; } if (tmp != cmd->scan_begin_arg) err++; } if (err) return 4; /* Step 5: check channel list. */ if (cmd->chanlist && (cmd->chanlist_len > 0)) { unsigned int range; enum { range_err = 1, dupchan_err = 2, }; unsigned errors; unsigned int n; unsigned int ch; /* * Check all channels have the same range index. Don't care * about analogue reference, as we can't configure it. * * Check the list has no duplicate channels. */ range = CR_RANGE(cmd->chanlist[0]); errors = 0; tmp = 0; for (n = 0; n < cmd->chanlist_len; n++) { ch = CR_CHAN(cmd->chanlist[n]); if (tmp & (1U << ch)) errors |= dupchan_err; tmp |= (1U << ch); if (CR_RANGE(cmd->chanlist[n]) != range) errors |= range_err; } if (errors) { if (errors & dupchan_err) { DPRINTK("comedi%d: " DRIVER_NAME ": ao_cmdtest: " "entries in chanlist must contain no " "duplicate channels\n", dev->minor); } if (errors & range_err) { DPRINTK("comedi%d: " DRIVER_NAME ": ao_cmdtest: " "entries in chanlist must all have " "the same range index\n", dev->minor); } err++; } } if (err) return 5; return 0; } /* * 'do_cmd' function for AO subdevice. */ static int pci224_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { struct pci224_private *devpriv = dev->private; struct comedi_cmd *cmd = &s->async->cmd; int range; unsigned int i, j; unsigned int ch; unsigned int rank; unsigned long flags; /* Cannot handle null/empty chanlist. */ if (cmd->chanlist == NULL || cmd->chanlist_len == 0) return -EINVAL; /* Determine which channels are enabled and their load order. */ devpriv->ao_enab = 0; for (i = 0; i < cmd->chanlist_len; i++) { ch = CR_CHAN(cmd->chanlist[i]); devpriv->ao_enab |= 1U << ch; rank = 0; for (j = 0; j < cmd->chanlist_len; j++) { if (CR_CHAN(cmd->chanlist[j]) < ch) rank++; } devpriv->ao_scan_order[rank] = i; } /* Set enabled channels. */ outw(devpriv->ao_enab, dev->iobase + PCI224_DACCEN); /* Determine range and polarity. All channels the same. */ range = CR_RANGE(cmd->chanlist[0]); /* * Set DAC range and polarity. 
* Set DAC scan trigger source to 'none'. * Set DAC FIFO interrupt trigger level to 'not half full'. * Reset DAC FIFO. * * N.B. DAC FIFO interrupts are currently disabled. */ devpriv->daccon = COMBINE(devpriv->daccon, (devpriv-> hwrange[range] | PCI224_DACCON_TRIG_NONE | PCI224_DACCON_FIFOINTR_NHALF), (PCI224_DACCON_POLAR_MASK | PCI224_DACCON_VREF_MASK | PCI224_DACCON_TRIG_MASK | PCI224_DACCON_FIFOINTR_MASK)); outw(devpriv->daccon | PCI224_DACCON_FIFORESET, dev->iobase + PCI224_DACCON); if (cmd->scan_begin_src == TRIG_TIMER) { unsigned int div1, div2, round; unsigned int ns = cmd->scan_begin_arg; int round_mode = cmd->flags & TRIG_ROUND_MASK; /* Check whether to use a single timer. */ switch (round_mode) { case TRIG_ROUND_NEAREST: default: round = TIMEBASE_10MHZ / 2; break; case TRIG_ROUND_DOWN: round = 0; break; case TRIG_ROUND_UP: round = TIMEBASE_10MHZ - 1; break; } /* Be careful to avoid overflow! */ div2 = cmd->scan_begin_arg / TIMEBASE_10MHZ; div2 += (round + cmd->scan_begin_arg % TIMEBASE_10MHZ) / TIMEBASE_10MHZ; if (div2 <= 0x10000) { /* A single timer will suffice. */ if (div2 < 2) div2 = 2; div2 &= 0xffff; div1 = 1; /* Flag that single timer to be used. */ } else { /* Use two timers. */ div1 = devpriv->cached_div1; div2 = devpriv->cached_div2; pci224_cascade_ns_to_timer(TIMEBASE_10MHZ, &div1, &div2, &ns, round_mode); } /* * The output of timer Z2-0 will be used as the scan trigger * source. */ /* Make sure Z2-0 is gated on. */ outb(GAT_CONFIG(0, GAT_VCC), devpriv->iobase1 + PCI224_ZGAT_SCE); if (div1 == 1) { /* Not cascading. Z2-0 needs 10 MHz clock. */ outb(CLK_CONFIG(0, CLK_10MHZ), devpriv->iobase1 + PCI224_ZCLK_SCE); } else { /* Cascading with Z2-2. */ /* Make sure Z2-2 is gated on. */ outb(GAT_CONFIG(2, GAT_VCC), devpriv->iobase1 + PCI224_ZGAT_SCE); /* Z2-2 needs 10 MHz clock. */ outb(CLK_CONFIG(2, CLK_10MHZ), devpriv->iobase1 + PCI224_ZCLK_SCE); /* Load Z2-2 mode (2) and counter (div1). */ i8254_load(devpriv->iobase1 + PCI224_Z2_CT0, 0, 2, div1, 2); /* Z2-0 is clocked from Z2-2's output. */ outb(CLK_CONFIG(0, CLK_OUTNM1), devpriv->iobase1 + PCI224_ZCLK_SCE); } /* Load Z2-0 mode (2) and counter (div2). */ i8254_load(devpriv->iobase1 + PCI224_Z2_CT0, 0, 0, div2, 2); } /* * Sort out end of acquisition. */ switch (cmd->stop_src) { case TRIG_COUNT: /* Fixed number of scans. */ devpriv->ao_stop_continuous = 0; devpriv->ao_stop_count = cmd->stop_arg; break; default: /* Continuous scans. */ devpriv->ao_stop_continuous = 1; devpriv->ao_stop_count = 0; break; } /* * Sort out start of acquisition. */ switch (cmd->start_src) { case TRIG_INT: spin_lock_irqsave(&devpriv->ao_spinlock, flags); s->async->inttrig = &pci224_ao_inttrig_start; spin_unlock_irqrestore(&devpriv->ao_spinlock, flags); break; case TRIG_EXT: /* Enable external interrupt trigger to start acquisition. */ spin_lock_irqsave(&devpriv->ao_spinlock, flags); devpriv->intsce |= PCI224_INTR_EXT; outb(devpriv->intsce, devpriv->iobase1 + PCI224_INT_SCE); spin_unlock_irqrestore(&devpriv->ao_spinlock, flags); break; } return 0; } /* * 'cancel' function for AO subdevice. */ static int pci224_ao_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { pci224_ao_stop(dev, s); return 0; } /* * 'munge' data for AO command. 
*/ static void pci224_ao_munge(struct comedi_device *dev, struct comedi_subdevice *s, void *data, unsigned int num_bytes, unsigned int chan_index) { const struct pci224_board *thisboard = comedi_board(dev); struct pci224_private *devpriv = dev->private; struct comedi_async *async = s->async; short *array = data; unsigned int length = num_bytes / sizeof(*array); unsigned int offset; unsigned int shift; unsigned int i; /* The hardware expects 16-bit numbers. */ shift = 16 - thisboard->ao_bits; /* Channels will be all bipolar or all unipolar. */ if ((devpriv->hwrange[CR_RANGE(async->cmd.chanlist[0])] & PCI224_DACCON_POLAR_MASK) == PCI224_DACCON_POLAR_UNI) { /* Unipolar */ offset = 0; } else { /* Bipolar */ offset = 32768; } /* Munge the data. */ for (i = 0; i < length; i++) array[i] = (array[i] << shift) - offset; } /* * Interrupt handler. */ static irqreturn_t pci224_interrupt(int irq, void *d) { struct comedi_device *dev = d; struct pci224_private *devpriv = dev->private; struct comedi_subdevice *s = &dev->subdevices[0]; struct comedi_cmd *cmd; unsigned char intstat, valid_intstat; unsigned char curenab; int retval = 0; unsigned long flags; intstat = inb(devpriv->iobase1 + PCI224_INT_SCE) & 0x3F; if (intstat) { retval = 1; spin_lock_irqsave(&devpriv->ao_spinlock, flags); valid_intstat = devpriv->intsce & intstat; /* Temporarily disable interrupt sources. */ curenab = devpriv->intsce & ~intstat; outb(curenab, devpriv->iobase1 + PCI224_INT_SCE); devpriv->intr_running = 1; devpriv->intr_cpuid = THISCPU; spin_unlock_irqrestore(&devpriv->ao_spinlock, flags); if (valid_intstat != 0) { cmd = &s->async->cmd; if (valid_intstat & PCI224_INTR_EXT) { devpriv->intsce &= ~PCI224_INTR_EXT; if (cmd->start_src == TRIG_EXT) pci224_ao_start(dev, s); else if (cmd->stop_src == TRIG_EXT) pci224_ao_stop(dev, s); } if (valid_intstat & PCI224_INTR_DAC) pci224_ao_handle_fifo(dev, s); } /* Reenable interrupt sources. */ spin_lock_irqsave(&devpriv->ao_spinlock, flags); if (curenab != devpriv->intsce) { outb(devpriv->intsce, devpriv->iobase1 + PCI224_INT_SCE); } devpriv->intr_running = 0; spin_unlock_irqrestore(&devpriv->ao_spinlock, flags); } return IRQ_RETVAL(retval); } /* * This function looks for a board matching the supplied PCI device. */ static const struct pci224_board *pci224_find_pci_board(struct pci_dev *pci_dev) { int i; for (i = 0; i < ARRAY_SIZE(pci224_boards); i++) if (pci_dev->device == pci224_boards[i].devid) return &pci224_boards[i]; return NULL; } /* * This function looks for a PCI device matching the requested board name, * bus and slot. */ static struct pci_dev *pci224_find_pci_dev(struct comedi_device *dev, struct comedi_devconfig *it) { const struct pci224_board *thisboard = comedi_board(dev); struct pci_dev *pci_dev = NULL; int bus = it->options[0]; int slot = it->options[1]; for_each_pci_dev(pci_dev) { if (bus || slot) { if (bus != pci_dev->bus->number || slot != PCI_SLOT(pci_dev->devfn)) continue; } if (pci_dev->vendor != PCI_VENDOR_ID_AMPLICON) continue; if (thisboard->model == any_model) { /* Match any supported model. */ const struct pci224_board *board_ptr; board_ptr = pci224_find_pci_board(pci_dev); if (board_ptr == NULL) continue; /* Change board_ptr to matched board. */ dev->board_ptr = board_ptr; } else { /* Match specific model name. */ if (thisboard->devid != pci_dev->device) continue; } return pci_dev; } dev_err(dev->class_dev, "No supported board found! (req. 
bus %d, slot %d)\n", bus, slot); return NULL; } static void pci224_report_attach(struct comedi_device *dev, unsigned int irq) { struct pci_dev *pcidev = comedi_to_pci_dev(dev); char tmpbuf[30]; if (irq) snprintf(tmpbuf, sizeof(tmpbuf), "irq %u%s", irq, (dev->irq ? "" : " UNAVAILABLE")); else snprintf(tmpbuf, sizeof(tmpbuf), "no irq"); dev_info(dev->class_dev, "%s (pci %s) (%s) attached\n", dev->board_name, pci_name(pcidev), tmpbuf); } /* * Common part of attach and auto_attach. */ static int pci224_attach_common(struct comedi_device *dev, struct pci_dev *pci_dev, int *options) { const struct pci224_board *thisboard = comedi_board(dev); struct pci224_private *devpriv = dev->private; struct comedi_subdevice *s; unsigned int irq; unsigned n; int ret; comedi_set_hw_dev(dev, &pci_dev->dev); ret = comedi_pci_enable(dev); if (ret) return ret; spin_lock_init(&devpriv->ao_spinlock); devpriv->iobase1 = pci_resource_start(pci_dev, 2); dev->iobase = pci_resource_start(pci_dev, 3); irq = pci_dev->irq; /* Allocate readback buffer for AO channels. */ devpriv->ao_readback = kmalloc(sizeof(devpriv->ao_readback[0]) * thisboard->ao_chans, GFP_KERNEL); if (!devpriv->ao_readback) return -ENOMEM; /* Allocate buffer to hold values for AO channel scan. */ devpriv->ao_scan_vals = kmalloc(sizeof(devpriv->ao_scan_vals[0]) * thisboard->ao_chans, GFP_KERNEL); if (!devpriv->ao_scan_vals) return -ENOMEM; /* Allocate buffer to hold AO channel scan order. */ devpriv->ao_scan_order = kmalloc(sizeof(devpriv->ao_scan_order[0]) * thisboard->ao_chans, GFP_KERNEL); if (!devpriv->ao_scan_order) return -ENOMEM; /* Disable interrupt sources. */ devpriv->intsce = 0; outb(0, devpriv->iobase1 + PCI224_INT_SCE); /* Initialize the DAC hardware. */ outw(PCI224_DACCON_GLOBALRESET, dev->iobase + PCI224_DACCON); outw(0, dev->iobase + PCI224_DACCEN); outw(0, dev->iobase + PCI224_FIFOSIZ); devpriv->daccon = (PCI224_DACCON_TRIG_SW | PCI224_DACCON_POLAR_BI | PCI224_DACCON_FIFOENAB | PCI224_DACCON_FIFOINTR_EMPTY); outw(devpriv->daccon | PCI224_DACCON_FIFORESET, dev->iobase + PCI224_DACCON); ret = comedi_alloc_subdevices(dev, 1); if (ret) return ret; s = &dev->subdevices[0]; /* Analog output subdevice. */ s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_CMD_WRITE; s->n_chan = thisboard->ao_chans; s->maxdata = (1 << thisboard->ao_bits) - 1; s->insn_write = &pci224_ao_insn_write; s->insn_read = &pci224_ao_insn_read; s->len_chanlist = s->n_chan; dev->write_subdev = s; s->do_cmd = &pci224_ao_cmd; s->do_cmdtest = &pci224_ao_cmdtest; s->cancel = &pci224_ao_cancel; s->munge = &pci224_ao_munge; /* Sort out channel range options. */ if (thisboard->model == pci234_model) { /* PCI234 range options. */ const struct comedi_lrange **range_table_list; s->range_table_list = range_table_list = kmalloc(sizeof(struct comedi_lrange *) * s->n_chan, GFP_KERNEL); if (!s->range_table_list) return -ENOMEM; if (options) { for (n = 2; n < 3 + s->n_chan; n++) { if (options[n] < 0 || options[n] > 1) { dev_warn(dev->class_dev, DRIVER_NAME ": warning! bad options[%u]=%d\n", n, options[n]); } } } for (n = 0; n < s->n_chan; n++) { if (n < COMEDI_NDEVCONFOPTS - 3 && options && options[3 + n] == 1) { if (options[2] == 1) range_table_list[n] = &range_pci234_ext; else range_table_list[n] = &range_bipolar5; } else { if (options && options[2] == 1) { range_table_list[n] = &range_pci234_ext2; } else { range_table_list[n] = &range_bipolar10; } } } devpriv->hwrange = hwrange_pci234; } else { /* PCI224 range options. 
*/ if (options && options[2] == 1) { s->range_table = &range_pci224_external; devpriv->hwrange = hwrange_pci224_external; } else { if (options && options[2] != 0) { dev_warn(dev->class_dev, DRIVER_NAME ": warning! bad options[2]=%d\n", options[2]); } s->range_table = &range_pci224_internal; devpriv->hwrange = hwrange_pci224_internal; } } dev->board_name = thisboard->name; if (irq) { ret = request_irq(irq, pci224_interrupt, IRQF_SHARED, DRIVER_NAME, dev); if (ret < 0) { dev_err(dev->class_dev, "error! unable to allocate irq %u\n", irq); return ret; } else { dev->irq = irq; } } pci224_report_attach(dev, irq); return 1; } static int pci224_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct pci224_private *devpriv; struct pci_dev *pci_dev; dev_info(dev->class_dev, DRIVER_NAME ": attach\n"); devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL); if (!devpriv) return -ENOMEM; dev->private = devpriv; pci_dev = pci224_find_pci_dev(dev, it); if (!pci_dev) return -EIO; return pci224_attach_common(dev, pci_dev, it->options); } static int pci224_auto_attach(struct comedi_device *dev, unsigned long context_unused) { struct pci_dev *pci_dev = comedi_to_pci_dev(dev); struct pci224_private *devpriv; dev_info(dev->class_dev, DRIVER_NAME ": attach pci %s\n", pci_name(pci_dev)); devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL); if (!devpriv) return -ENOMEM; dev->private = devpriv; dev->board_ptr = pci224_find_pci_board(pci_dev); if (dev->board_ptr == NULL) { dev_err(dev->class_dev, DRIVER_NAME ": BUG! cannot determine board type!\n"); return -EINVAL; } /* * Need to 'get' the PCI device to match the 'put' in pci224_detach(). * TODO: Remove the pci_dev_get() and matching pci_dev_put() once * support for manual attachment of PCI devices via pci224_attach() * has been removed. */ pci_dev_get(pci_dev); return pci224_attach_common(dev, pci_dev, NULL); } static void pci224_detach(struct comedi_device *dev) { struct pci224_private *devpriv = dev->private; struct pci_dev *pcidev = comedi_to_pci_dev(dev); if (dev->irq) free_irq(dev->irq, dev); if (dev->subdevices) { struct comedi_subdevice *s; s = &dev->subdevices[0]; /* AO subdevice */ kfree(s->range_table_list); } if (devpriv) { kfree(devpriv->ao_readback); kfree(devpriv->ao_scan_vals); kfree(devpriv->ao_scan_order); } comedi_pci_disable(dev); if (pcidev) pci_dev_put(pcidev); } static struct comedi_driver amplc_pci224_driver = { .driver_name = "amplc_pci224", .module = THIS_MODULE, .attach = pci224_attach, .detach = pci224_detach, .auto_attach = pci224_auto_attach, .board_name = &pci224_boards[0].name, .offset = sizeof(struct pci224_board), .num_names = ARRAY_SIZE(pci224_boards), }; static int amplc_pci224_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { return comedi_pci_auto_config(dev, &amplc_pci224_driver, id->driver_data); } static DEFINE_PCI_DEVICE_TABLE(amplc_pci224_pci_table) = { { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI224) }, { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI234) }, { 0 } }; MODULE_DEVICE_TABLE(pci, amplc_pci224_pci_table); static struct pci_driver amplc_pci224_pci_driver = { .name = "amplc_pci224", .id_table = amplc_pci224_pci_table, .probe = amplc_pci224_pci_probe, .remove = comedi_pci_auto_unconfig, }; module_comedi_pci_driver(amplc_pci224_driver, amplc_pci224_pci_driver); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
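/*
 * A minimal user-space sketch (assuming comedilib is available) of the
 * timed AO command interface documented at the top of this file. The
 * device node, subdevice index, channel list, range indices, timing and
 * sample values below are illustrative assumptions, not values taken from
 * this driver.
 */
#if 0	/* illustrative only; needs user-space comedilib, not kernel code */
#include <comedilib.h>
#include <string.h>
#include <unistd.h>

static int pci224_ao_timed_example(void)
{
	comedi_t *dev;
	comedi_cmd cmd;
	unsigned int chanlist[2];
	sampl_t first_scan[2] = { 0x000, 0xfff };	/* illustrative 12-bit codes */

	dev = comedi_open("/dev/comedi0");		/* assumed device node */
	if (!dev)
		return -1;

	chanlist[0] = CR_PACK(0, 0, AREF_GROUND);	/* channel 0, range index 0 */
	chanlist[1] = CR_PACK(1, 0, AREF_GROUND);	/* channel 1, same range */

	memset(&cmd, 0, sizeof(cmd));
	cmd.subdev = 0;			/* assumed AO subdevice index */
	cmd.start_src = TRIG_INT;	/* start on internal trigger */
	cmd.scan_begin_src = TRIG_TIMER;
	cmd.scan_begin_arg = 1000000;	/* one scan every 1 ms (in ns) */
	cmd.convert_src = TRIG_NOW;
	cmd.scan_end_src = TRIG_COUNT;
	cmd.scan_end_arg = 2;		/* must equal chanlist_len */
	cmd.stop_src = TRIG_COUNT;
	cmd.stop_arg = 1000;		/* output 1000 scans, then stop */
	cmd.chanlist = chanlist;
	cmd.chanlist_len = 2;

	if (comedi_command(dev, &cmd) < 0) {
		comedi_perror("comedi_command");
		comedi_close(dev);
		return -1;
	}

	/*
	 * Preload the first scan of data, then fire the internal start
	 * trigger; a real application would keep the buffer topped up.
	 */
	if (write(comedi_fileno(dev), first_scan, sizeof(first_scan)) < 0) {
		comedi_close(dev);
		return -1;
	}
	comedi_internal_trigger(dev, 0, 0);

	comedi_close(dev);
	return 0;
}
#endif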
gpl-2.0