repo_name | path | copies | size | content | license
---|---|---|---|---|---
netarchy/nexus-s | drivers/staging/ath6kl/htc2/AR6000/ar6k_events.c | 2808 | 29998 |
//------------------------------------------------------------------------------
// <copyright file="ar6k_events.c" company="Atheros">
// Copyright (c) 2007-2010 Atheros Corporation. All rights reserved.
//
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
//
//
//------------------------------------------------------------------------------
//==============================================================================
// AR6K Driver layer event handling (i.e. interrupts, message polling)
//
// Author(s): Atheros
//==============================================================================
#include "a_config.h"
#include "athdefs.h"
#include "hw/mbox_host_reg.h"
#include "a_osapi.h"
#include "../htc_debug.h"
#include "hif.h"
#include "htc_packet.h"
#include "ar6k.h"
extern void AR6KFreeIOPacket(struct ar6k_device *pDev, struct htc_packet *pPacket);
extern struct htc_packet *AR6KAllocIOPacket(struct ar6k_device *pDev);
static int DevServiceDebugInterrupt(struct ar6k_device *pDev);
#define DELAY_PER_INTERVAL_MS 10 /* 10 MS delay per polling interval */
/* completion routine for ALL HIF layer async I/O */
int DevRWCompletionHandler(void *context, int status)
{
struct htc_packet *pPacket = (struct htc_packet *)context;
AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
("+DevRWCompletionHandler (Pkt:0x%lX) , Status: %d \n",
(unsigned long)pPacket,
status));
COMPLETE_HTC_PACKET(pPacket,status);
AR_DEBUG_PRINTF(ATH_DEBUG_RECV,
("-DevRWCompletionHandler\n"));
return 0;
}
/* mailbox recv message polling */
int DevPollMboxMsgRecv(struct ar6k_device *pDev,
u32 *pLookAhead,
int TimeoutMS)
{
int status = 0;
int timeout = TimeoutMS/DELAY_PER_INTERVAL_MS;
A_ASSERT(timeout > 0);
AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("+DevPollMboxMsgRecv \n"));
while (true) {
if (pDev->GetPendingEventsFunc != NULL) {
struct hif_pending_events_info events;
#ifdef THREAD_X
events.Polling = 1;
#endif
/* the HIF layer uses a special mechanism to get events, do this
* synchronously */
status = pDev->GetPendingEventsFunc(pDev->HIFDevice,
&events,
NULL);
if (status)
{
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Failed to get pending events \n"));
break;
}
if (events.Events & HIF_RECV_MSG_AVAIL)
{
/* there is a message available, the lookahead should be valid now */
*pLookAhead = events.LookAhead;
break;
}
} else {
/* this is the standard HIF way.... */
/* load the register table */
status = HIFReadWrite(pDev->HIFDevice,
HOST_INT_STATUS_ADDRESS,
(u8 *)&pDev->IrqProcRegisters,
AR6K_IRQ_PROC_REGS_SIZE,
HIF_RD_SYNC_BYTE_INC,
NULL);
if (status){
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("Failed to read register table \n"));
break;
}
/* check for MBOX data and valid lookahead */
if (pDev->IrqProcRegisters.host_int_status & (1 << HTC_MAILBOX)) {
if (pDev->IrqProcRegisters.rx_lookahead_valid & (1 << HTC_MAILBOX))
{
/* mailbox has a message and the look ahead is valid */
*pLookAhead = pDev->IrqProcRegisters.rx_lookahead[HTC_MAILBOX];
break;
}
}
}
timeout--;
if (timeout <= 0) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR, (" Timeout waiting for recv message \n"));
status = A_ERROR;
/* check if the target asserted */
if ( pDev->IrqProcRegisters.counter_int_status & AR6K_TARGET_DEBUG_INTR_MASK) {
/* target signaled an assert; process this pending interrupt,
* which will call the target failure handler */
DevServiceDebugInterrupt(pDev);
}
break;
}
/* delay a little */
A_MDELAY(DELAY_PER_INTERVAL_MS);
AR_DEBUG_PRINTF(ATH_DEBUG_RECV,(" Retry Mbox Poll : %d \n",timeout));
}
AR_DEBUG_PRINTF(ATH_DEBUG_RECV,("-DevPollMboxMsgRecv \n"));
return status;
}
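/*
 * Illustrative usage (editorial sketch; the caller below is hypothetical):
 * DevPollMboxMsgRecv() busy-polls in DELAY_PER_INTERVAL_MS (10 ms) steps,
 * so TimeoutMS should be a multiple of 10 ms.
 */
#if 0
u32 lookAhead = 0;
/* poll up to 100 ms (10 intervals) for a pending mailbox message */
if (DevPollMboxMsgRecv(pDev, &lookAhead, 100) == 0) {
    /* lookAhead now holds the first word of the pending message */
}
#endif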
static int DevServiceCPUInterrupt(struct ar6k_device *pDev)
{
int status;
u8 cpu_int_status;
u8 regBuffer[4];
AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("CPU Interrupt\n"));
cpu_int_status = pDev->IrqProcRegisters.cpu_int_status &
pDev->IrqEnableRegisters.cpu_int_status_enable;
A_ASSERT(cpu_int_status);
AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
("Valid interrupt source(s) in CPU_INT_STATUS: 0x%x\n",
cpu_int_status));
/* Clear the interrupt */
pDev->IrqProcRegisters.cpu_int_status &= ~cpu_int_status; /* W1C */
/* set up the register transfer buffer to hit the register 4 times; this is done
* to make the access 4-byte aligned, to mitigate issues with host bus interconnects that
* restrict bus transfer lengths to be a multiple of 4 bytes */
/* set W1C value to clear the interrupt, this hits the register first */
regBuffer[0] = cpu_int_status;
/* the remaining 3 values are set to zero, which has no effect */
regBuffer[1] = 0;
regBuffer[2] = 0;
regBuffer[3] = 0;
status = HIFReadWrite(pDev->HIFDevice,
CPU_INT_STATUS_ADDRESS,
regBuffer,
4,
HIF_WR_SYNC_BYTE_FIX,
NULL);
A_ASSERT(status == 0);
return status;
}
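/*
 * Editorial sketch of the padded write-1-to-clear (W1C) pattern used above
 * and in DevServiceErrorInterrupt() below: HIF_WR_SYNC_BYTE_FIX writes all
 * four buffer bytes to the *same* register address, so only byte 0 (the W1C
 * value) takes effect while the bus transfer stays 4 bytes long. The helper
 * below is hypothetical.
 */
#if 0
static int DevClearW1CRegister(struct ar6k_device *pDev, u32 address, u8 w1c)
{
    /* byte 0 carries the W1C value; bytes 1-3 are harmless zero writes */
    u8 regBuffer[4] = { w1c, 0, 0, 0 };
    return HIFReadWrite(pDev->HIFDevice, address, regBuffer, 4,
                        HIF_WR_SYNC_BYTE_FIX, NULL);
}
#endif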
static int DevServiceErrorInterrupt(struct ar6k_device *pDev)
{
int status;
u8 error_int_status;
u8 regBuffer[4];
AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("Error Interrupt\n"));
error_int_status = pDev->IrqProcRegisters.error_int_status & 0x0F;
A_ASSERT(error_int_status);
AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
("Valid interrupt source(s) in ERROR_INT_STATUS: 0x%x\n",
error_int_status));
if (ERROR_INT_STATUS_WAKEUP_GET(error_int_status)) {
/* Wakeup */
AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("Error : Wakeup\n"));
}
if (ERROR_INT_STATUS_RX_UNDERFLOW_GET(error_int_status)) {
/* Rx Underflow */
AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Error : Rx Underflow\n"));
}
if (ERROR_INT_STATUS_TX_OVERFLOW_GET(error_int_status)) {
/* Tx Overflow */
AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Error : Tx Overflow\n"));
}
/* Clear the interrupt */
pDev->IrqProcRegisters.error_int_status &= ~error_int_status; /* W1C */
/* set up the register transfer buffer to hit the register 4 times; this is done
* to make the access 4-byte aligned, to mitigate issues with host bus interconnects that
* restrict bus transfer lengths to be a multiple of 4 bytes */
/* set W1C value to clear the interrupt, this hits the register first */
regBuffer[0] = error_int_status;
/* the remaining 3 values are set to zero, which has no effect */
regBuffer[1] = 0;
regBuffer[2] = 0;
regBuffer[3] = 0;
status = HIFReadWrite(pDev->HIFDevice,
ERROR_INT_STATUS_ADDRESS,
regBuffer,
4,
HIF_WR_SYNC_BYTE_FIX,
NULL);
A_ASSERT(status == 0);
return status;
}
static int DevServiceDebugInterrupt(struct ar6k_device *pDev)
{
u32 dummy;
int status;
/* Send a target failure event to the application */
AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Target debug interrupt\n"));
if (pDev->TargetFailureCallback != NULL) {
pDev->TargetFailureCallback(pDev->HTCContext);
}
if (pDev->GMboxEnabled) {
DevNotifyGMboxTargetFailure(pDev);
}
/* clear the interrupt; the debug error interrupt is
* counter 0 */
/* read counter to clear interrupt */
status = HIFReadWrite(pDev->HIFDevice,
COUNT_DEC_ADDRESS,
(u8 *)&dummy,
4,
HIF_RD_SYNC_BYTE_INC,
NULL);
A_ASSERT(status == 0);
return status;
}
static int DevServiceCounterInterrupt(struct ar6k_device *pDev)
{
u8 counter_int_status;
AR_DEBUG_PRINTF(ATH_DEBUG_IRQ, ("Counter Interrupt\n"));
counter_int_status = pDev->IrqProcRegisters.counter_int_status &
pDev->IrqEnableRegisters.counter_int_status_enable;
AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
("Valid interrupt source(s) in COUNTER_INT_STATUS: 0x%x\n",
counter_int_status));
/* Check if the debug interrupt is pending
* NOTE: other modules like GMBOX may use the counter interrupt for
* credit flow control on other counters, we only need to check for the debug assertion
* counter interrupt */
if (counter_int_status & AR6K_TARGET_DEBUG_INTR_MASK) {
return DevServiceDebugInterrupt(pDev);
}
return 0;
}
/* callback when our fetch to get interrupt status registers completes */
static void DevGetEventAsyncHandler(void *Context, struct htc_packet *pPacket)
{
struct ar6k_device *pDev = (struct ar6k_device *)Context;
u32 lookAhead = 0;
bool otherInts = false;
AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("+DevGetEventAsyncHandler: (dev: 0x%lX)\n", (unsigned long)pDev));
do {
if (pPacket->Status) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
(" GetEvents I/O request failed, status:%d \n", pPacket->Status));
/* bail out, don't unmask HIF interrupt */
break;
}
if (pDev->GetPendingEventsFunc != NULL) {
/* the HIF layer collected the information for us */
struct hif_pending_events_info *pEvents = (struct hif_pending_events_info *)pPacket->pBuffer;
if (pEvents->Events & HIF_RECV_MSG_AVAIL) {
lookAhead = pEvents->LookAhead;
if (0 == lookAhead) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" DevGetEventAsyncHandler1, lookAhead is zero! \n"));
}
}
if (pEvents->Events & HIF_OTHER_EVENTS) {
otherInts = true;
}
} else {
/* standard interrupt table handling.... */
struct ar6k_irq_proc_registers *pReg = (struct ar6k_irq_proc_registers *)pPacket->pBuffer;
u8 host_int_status;
host_int_status = pReg->host_int_status & pDev->IrqEnableRegisters.int_status_enable;
if (host_int_status & (1 << HTC_MAILBOX)) {
host_int_status &= ~(1 << HTC_MAILBOX);
if (pReg->rx_lookahead_valid & (1 << HTC_MAILBOX)) {
/* mailbox has a message and the look ahead is valid */
lookAhead = pReg->rx_lookahead[HTC_MAILBOX];
if (0 == lookAhead) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" DevGetEventAsyncHandler2, lookAhead is zero! \n"));
}
}
}
if (host_int_status) {
/* there are other interrupts to handle */
otherInts = true;
}
}
if (otherInts || (lookAhead == 0)) {
/* if there are other interrupts to process, we cannot handle them in this
* async handler, so ack the interrupt, which will cause our sync handler to
* run again; if, however, there are no more messages, we can simply ack the
* interrupt now */
AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
(" Acking interrupt from DevGetEventAsyncHandler (otherints:%d, lookahead:0x%X)\n",
otherInts, lookAhead));
HIFAckInterrupt(pDev->HIFDevice);
} else {
int fetched = 0;
int status;
AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
(" DevGetEventAsyncHandler : detected another message, lookahead :0x%X \n",
lookAhead));
/* lookahead is non-zero and there are no other interrupts to service,
* go get the next message */
status = pDev->MessagePendingCallback(pDev->HTCContext, &lookAhead, 1, NULL, &fetched);
if (!status && !fetched) {
/* HTC layer could not pull out messages due to lack of resources, stop IRQ processing */
AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("MessagePendingCallback did not pull any messages, force-ack \n"));
DevAsyncIrqProcessComplete(pDev);
}
}
} while (false);
/* free this IO packet */
AR6KFreeIOPacket(pDev,pPacket);
AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("-DevGetEventAsyncHandler \n"));
}
/* called by the HTC layer when it wants us to check if the device has any more pending
* recv messages, this starts off a series of async requests to read interrupt registers */
int DevCheckPendingRecvMsgsAsync(void *context)
{
struct ar6k_device *pDev = (struct ar6k_device *)context;
int status = 0;
struct htc_packet *pIOPacket;
/* this is called in an ASYNC-only context; we may NOT block, sleep or call any APIs that can
* cause us to switch contexts */
AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("+DevCheckPendingRecvMsgsAsync: (dev: 0x%lX)\n", (unsigned long)pDev));
do {
if (HIF_DEVICE_IRQ_SYNC_ONLY == pDev->HifIRQProcessingMode) {
/* break the async processing chain right here, no need to continue.
* The DevDsrHandler() will handle things in a loop when things are driven
* synchronously */
break;
}
/* an optimization to bypass unnecessarily reading the IRQ status registers, which can re-wake
* the target: if upper layers determine that we are in a low-throughput mode, we can
* rely on taking another interrupt rather than re-checking the status registers, which can
* re-wake the target */
if (pDev->RecheckIRQStatusCnt == 0) {
AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("Bypassing IRQ Status re-check, re-acking HIF interrupts\n"));
/* ack interrupt */
HIFAckInterrupt(pDev->HIFDevice);
break;
}
/* first allocate one of our HTC packets we created for async I/O
* we reuse HTC packet definitions so that we can use the completion mechanism
* in DevRWCompletionHandler() */
pIOPacket = AR6KAllocIOPacket(pDev);
if (NULL == pIOPacket) {
/* there should be only 1 asynchronous request out at a time to read these registers
* so this should actually never happen */
status = A_NO_MEMORY;
A_ASSERT(false);
break;
}
/* stick in our completion routine when the I/O operation completes */
pIOPacket->Completion = DevGetEventAsyncHandler;
pIOPacket->pContext = pDev;
if (pDev->GetPendingEventsFunc) {
/* the HIF layer has its own mechanism, pass the I/O to it... */
status = pDev->GetPendingEventsFunc(pDev->HIFDevice,
(struct hif_pending_events_info *)pIOPacket->pBuffer,
pIOPacket);
} else {
/* standard way, read the interrupt register table asynchronously again */
status = HIFReadWrite(pDev->HIFDevice,
HOST_INT_STATUS_ADDRESS,
pIOPacket->pBuffer,
AR6K_IRQ_PROC_REGS_SIZE,
HIF_RD_ASYNC_BYTE_INC,
pIOPacket);
}
AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,(" Async IO issued to get interrupt status...\n"));
} while (false);
AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("-DevCheckPendingRecvMsgsAsync \n"));
return status;
}
void DevAsyncIrqProcessComplete(struct ar6k_device *pDev)
{
AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("DevAsyncIrqProcessComplete - forcing HIF IRQ ACK \n"));
HIFAckInterrupt(pDev->HIFDevice);
}
/* process pending interrupts synchronously */
static int ProcessPendingIRQs(struct ar6k_device *pDev, bool *pDone, bool *pASyncProcessing)
{
int status = 0;
u8 host_int_status = 0;
u32 lookAhead = 0;
AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("+ProcessPendingIRQs: (dev: 0x%lX)\n", (unsigned long)pDev));
/*** NOTE: the HIF implementation guarantees that the context of this call allows
* us to perform SYNCHRONOUS I/O, that is, we can block, sleep or call any API that
* can block or switch thread/task contexts.
* This is a fully schedulable context.
* */
do {
if (pDev->IrqEnableRegisters.int_status_enable == 0) {
/* interrupt enables have been cleared, do not try to process any pending interrupts that
* may result in more bus transactions. The target may be unresponsive at this
* point. */
break;
}
if (pDev->GetPendingEventsFunc != NULL) {
struct hif_pending_events_info events;
#ifdef THREAD_X
events.Polling = 0;
#endif
/* the HIF layer uses a special mechanism to get events
* get this synchronously */
status = pDev->GetPendingEventsFunc(pDev->HIFDevice,
&events,
NULL);
if (status) {
break;
}
if (events.Events & HIF_RECV_MSG_AVAIL) {
lookAhead = events.LookAhead;
if (0 == lookAhead) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" ProcessPendingIRQs1 lookAhead is zero! \n"));
}
}
if (!(events.Events & HIF_OTHER_EVENTS) ||
!(pDev->IrqEnableRegisters.int_status_enable & OTHER_INTS_ENABLED)) {
/* no need to read the register table, no other interesting interrupts.
* Some interfaces (like SPI) can shadow interrupt sources without
* requiring the host to do a full table read */
break;
}
/* otherwise fall through and read the register table */
}
/*
* Read the first 28 bytes of the HTC register table. This will yield us
* the value of different int status registers and the lookahead
* registers.
* length = sizeof(int_status) + sizeof(cpu_int_status) +
* sizeof(error_int_status) + sizeof(counter_int_status) +
* sizeof(mbox_frame) + sizeof(rx_lookahead_valid) +
* sizeof(hole) + sizeof(rx_lookahead) +
* sizeof(int_status_enable) + sizeof(cpu_int_status_enable) +
* sizeof(error_status_enable) +
* sizeof(counter_int_status_enable);
*
*/
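/*
 * Editorial illustration (hypothetical; exact field names and packing are
 * assumptions): the 28-byte read below is expected to land in a packed
 * struct shaped roughly like this, matching the fields referenced through
 * pDev->IrqProcRegisters elsewhere in this file.
 */
#if 0
struct ar6k_irq_proc_registers_sketch {
    u8  host_int_status;
    u8  cpu_int_status;
    u8  error_int_status;
    u8  counter_int_status;
    u8  mbox_frame;
    u8  rx_lookahead_valid;
    u8  host_int_status2;
    u8  reserved;            /* the "hole" mentioned in the comment above */
    u32 rx_lookahead[2];
    /* ...the int_status_enable shadow registers follow... */
};
#endif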
#ifdef CONFIG_MMC_SDHCI_S3C
pDev->IrqProcRegisters.host_int_status = 0;
pDev->IrqProcRegisters.rx_lookahead_valid = 0;
pDev->IrqProcRegisters.host_int_status2 = 0;
pDev->IrqProcRegisters.rx_lookahead[0] = 0;
pDev->IrqProcRegisters.rx_lookahead[1] = 0xaaa5555;
#endif /* CONFIG_MMC_SDHCI_S3C */
status = HIFReadWrite(pDev->HIFDevice,
HOST_INT_STATUS_ADDRESS,
(u8 *)&pDev->IrqProcRegisters,
AR6K_IRQ_PROC_REGS_SIZE,
HIF_RD_SYNC_BYTE_INC,
NULL);
if (status) {
break;
}
#ifdef ATH_DEBUG_MODULE
if (AR_DEBUG_LVL_CHECK(ATH_DEBUG_IRQ)) {
DevDumpRegisters(pDev,
&pDev->IrqProcRegisters,
&pDev->IrqEnableRegisters);
}
#endif
/* Update only those registers that are enabled */
host_int_status = pDev->IrqProcRegisters.host_int_status &
pDev->IrqEnableRegisters.int_status_enable;
if (NULL == pDev->GetPendingEventsFunc) {
/* only look at mailbox status if the HIF layer did not provide this function,
* on some HIF interfaces reading the RX lookahead is not valid to do */
if (host_int_status & (1 << HTC_MAILBOX)) {
/* mask out pending mailbox value, we use "lookAhead" as the real flag for
* mailbox processing below */
host_int_status &= ~(1 << HTC_MAILBOX);
if (pDev->IrqProcRegisters.rx_lookahead_valid & (1 << HTC_MAILBOX)) {
/* mailbox has a message and the look ahead is valid */
lookAhead = pDev->IrqProcRegisters.rx_lookahead[HTC_MAILBOX];
if (0 == lookAhead) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,(" ProcessPendingIRQs2, lookAhead is zero! \n"));
}
}
}
} else {
/* mailbox status is not valid to check here, since the HIF has another mechanism for reading mailbox pending status */
host_int_status &= ~(1 << HTC_MAILBOX);
}
if (pDev->GMboxEnabled) {
/*call GMBOX layer to process any interrupts of interest */
status = DevCheckGMboxInterrupts(pDev);
}
} while (false);
do {
/* did the interrupt status fetches succeed? */
if (status) {
break;
}
if ((0 == host_int_status) && (0 == lookAhead)) {
/* nothing to process, the caller can use this to break out of a loop */
*pDone = true;
break;
}
if (lookAhead != 0) {
int fetched = 0;
AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("Pending mailbox message, LookAhead: 0x%X\n",lookAhead));
/* Mailbox Interrupt; the HTC layer may issue async requests to empty the
* mailbox...
* When emptying the recv mailbox we use the async handler above, called from the
* completion routine of the caller's read request. This can improve performance
* by reducing context switching when we rapidly pull packets */
status = pDev->MessagePendingCallback(pDev->HTCContext, &lookAhead, 1, pASyncProcessing, &fetched);
if (status) {
break;
}
if (!fetched) {
/* HTC could not pull any messages out due to lack of resources */
/* force DSR handler to ack the interrupt */
*pASyncProcessing = false;
pDev->RecheckIRQStatusCnt = 0;
}
}
/* now handle the rest of them */
AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,
(" Valid interrupt source(s) for OTHER interrupts: 0x%x\n",
host_int_status));
if (HOST_INT_STATUS_CPU_GET(host_int_status)) {
/* CPU Interrupt */
status = DevServiceCPUInterrupt(pDev);
if (status){
break;
}
}
if (HOST_INT_STATUS_ERROR_GET(host_int_status)) {
/* Error Interrupt */
status = DevServiceErrorInterrupt(pDev);
if (status){
break;
}
}
if (HOST_INT_STATUS_COUNTER_GET(host_int_status)) {
/* Counter Interrupt */
status = DevServiceCounterInterrupt(pDev);
if (status){
break;
}
}
} while (false);
/* an optimization to bypass unnecessarily reading the IRQ status registers, which can re-wake
* the target: if upper layers determine that we are in a low-throughput mode, we can
* rely on taking another interrupt rather than re-checking the status registers, which can
* re-wake the target.
*
* NOTE : for host interfaces that use the special GetPendingEventsFunc, this optimization cannot
* be used due to possible side-effects. For example, SPI requires the host to drain all
* messages from the mailbox before exiting the ISR routine. */
if (!(*pASyncProcessing) && (pDev->RecheckIRQStatusCnt == 0) && (pDev->GetPendingEventsFunc == NULL)) {
AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("Bypassing IRQ Status re-check, forcing done \n"));
*pDone = true;
}
AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("-ProcessPendingIRQs: (done:%d, async:%d) status=%d \n",
*pDone, *pASyncProcessing, status));
return status;
}
/* Synchronous interrupt handler; this handler kicks off all interrupt processing. */
int DevDsrHandler(void *context)
{
struct ar6k_device *pDev = (struct ar6k_device *)context;
int status = 0;
bool done = false;
bool asyncProc = false;
AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("+DevDsrHandler: (dev: 0x%lX)\n", (unsigned long)pDev));
/* reset the recv counter that tracks when we need to yield from the DSR */
pDev->CurrentDSRRecvCount = 0;
/* reset counter used to flag a re-scan of IRQ status registers on the target */
pDev->RecheckIRQStatusCnt = 0;
while (!done) {
status = ProcessPendingIRQs(pDev, &done, &asyncProc);
if (status) {
break;
}
if (HIF_DEVICE_IRQ_SYNC_ONLY == pDev->HifIRQProcessingMode) {
/* the HIF layer does not allow async IRQ processing, override the asyncProc flag */
asyncProc = false;
/* this will cause us to re-enter ProcessPendingIRQs() and re-read interrupt status registers.
* This has the nice side effect of blocking us until all async read requests are completed.
* This behavior is required on some HIF implementations that do not allow ASYNC
* processing in interrupt handlers (like Windows CE) */
if (pDev->DSRCanYield && DEV_CHECK_RECV_YIELD(pDev)) {
/* ProcessPendingIRQs() pulled enough recv messages to satisfy the yield count, stop
* checking for more messages and return */
break;
}
}
if (asyncProc) {
/* the function performed some async I/O for performance; we
need to exit the ISR immediately. The check below will prevent the interrupt from being
ack'd while we handle it asynchronously */
break;
}
}
if (!status && !asyncProc) {
/* Ack the interrupt only if :
* 1. we did not get any errors in processing interrupts
* 2. there are no outstanding async processing requests */
if (pDev->DSRCanYield) {
/* if the DSR can yield do not ACK the interrupt, there could be more pending messages.
* The HIF layer must ACK the interrupt on behalf of HTC */
AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,(" Yield in effect (cur RX count: %d) \n", pDev->CurrentDSRRecvCount));
} else {
AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,(" Acking interrupt from DevDsrHandler \n"));
HIFAckInterrupt(pDev->HIFDevice);
}
}
AR_DEBUG_PRINTF(ATH_DEBUG_IRQ,("-DevDsrHandler \n"));
return status;
}
#ifdef ATH_DEBUG_MODULE
void DumpAR6KDevState(struct ar6k_device *pDev)
{
int status;
struct ar6k_irq_enable_registers regs;
struct ar6k_irq_proc_registers procRegs;
LOCK_AR6K(pDev);
/* copy into our temp area */
memcpy(&regs, &pDev->IrqEnableRegisters, AR6K_IRQ_ENABLE_REGS_SIZE);
UNLOCK_AR6K(pDev);
/* load the register table from the device */
status = HIFReadWrite(pDev->HIFDevice,
HOST_INT_STATUS_ADDRESS,
(u8 *)&procRegs,
AR6K_IRQ_PROC_REGS_SIZE,
HIF_RD_SYNC_BYTE_INC,
NULL);
if (status) {
AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
("DumpAR6KDevState : Failed to read register table (%d) \n",status));
return;
}
DevDumpRegisters(pDev, &procRegs, &regs);
if (pDev->GMboxInfo.pStateDumpCallback != NULL) {
pDev->GMboxInfo.pStateDumpCallback(pDev->GMboxInfo.pProtocolContext);
}
/* dump any bus state at the HIF layer */
HIFConfigureDevice(pDev->HIFDevice,HIF_DEVICE_DEBUG_BUS_STATE,NULL,0);
}
#endif
| gpl-2.0
futranbg/ef65l-kernel-2.0 | drivers/s390/cio/device_fsm.c | 2808 | 30939 |
/*
* drivers/s390/cio/device_fsm.c
* finite state machine for device handling
*
* Copyright IBM Corp. 2002,2008
* Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/chpid.h>
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "chsc.h"
#include "ioasm.h"
#include "chp.h"
static int timeout_log_enabled;
static int __init ccw_timeout_log_setup(char *unused)
{
timeout_log_enabled = 1;
return 1;
}
__setup("ccw_timeout_log", ccw_timeout_log_setup);
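/*
 * Usage note (editorial): booting with "ccw_timeout_log" on the kernel
 * command line sets timeout_log_enabled, so ccw_timeout_log() below dumps
 * the orb, schib and device state whenever a ccw device timeout fires.
 */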
static void ccw_timeout_log(struct ccw_device *cdev)
{
struct schib schib;
struct subchannel *sch;
struct io_subchannel_private *private;
union orb *orb;
int cc;
sch = to_subchannel(cdev->dev.parent);
private = to_io_private(sch);
orb = &private->orb;
cc = stsch_err(sch->schid, &schib);
printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
"device information:\n", get_clock());
printk(KERN_WARNING "cio: orb:\n");
print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
orb, sizeof(*orb), 0);
printk(KERN_WARNING "cio: ccw device bus id: %s\n",
dev_name(&cdev->dev));
printk(KERN_WARNING "cio: subchannel bus id: %s\n",
dev_name(&sch->dev));
printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
"vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);
if (orb->tm.b) {
printk(KERN_WARNING "cio: orb indicates transport mode\n");
printk(KERN_WARNING "cio: last tcw:\n");
print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
(void *)(addr_t)orb->tm.tcw,
sizeof(struct tcw), 0);
} else {
printk(KERN_WARNING "cio: orb indicates command mode\n");
if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw ||
(void *)(addr_t)orb->cmd.cpa == cdev->private->iccws)
printk(KERN_WARNING "cio: last channel program "
"(intern):\n");
else
printk(KERN_WARNING "cio: last channel program:\n");
print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
(void *)(addr_t)orb->cmd.cpa,
sizeof(struct ccw1), 0);
}
printk(KERN_WARNING "cio: ccw device state: %d\n",
cdev->private->state);
printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
printk(KERN_WARNING "cio: schib:\n");
print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
&schib, sizeof(schib), 0);
printk(KERN_WARNING "cio: ccw device flags:\n");
print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
&cdev->private->flags, sizeof(cdev->private->flags), 0);
}
/*
* Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
*/
static void
ccw_device_timeout(unsigned long data)
{
struct ccw_device *cdev;
cdev = (struct ccw_device *) data;
spin_lock_irq(cdev->ccwlock);
if (timeout_log_enabled)
ccw_timeout_log(cdev);
dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
spin_unlock_irq(cdev->ccwlock);
}
/*
* Set timeout
*/
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
if (expires == 0) {
del_timer(&cdev->private->timer);
return;
}
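/*
 * Editorial note: mod_timer() returns true only if it modified a timer
 * that was still pending; in that case the expiry has been pushed out and
 * we are done. Otherwise fall through and (re)arm the timer from scratch.
 */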
if (timer_pending(&cdev->private->timer)) {
if (mod_timer(&cdev->private->timer, jiffies + expires))
return;
}
cdev->private->timer.function = ccw_device_timeout;
cdev->private->timer.data = (unsigned long) cdev;
cdev->private->timer.expires = jiffies + expires;
add_timer(&cdev->private->timer);
}
/*
* Cancel running i/o. This is called repeatedly since halt/clear are
* asynchronous operations. We do one try with cio_cancel, three tries
* with cio_halt, 255 tries with cio_clear. If everything fails, give up
* and report -EIO.
* Returns 0 if device now idle, -ENODEV for device not operational and
* -EBUSY if an interrupt is expected (either from halt/clear or from a
* status pending).
*/
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
struct subchannel *sch;
int ret;
sch = to_subchannel(cdev->dev.parent);
if (cio_update_schib(sch))
return -ENODEV;
if (!sch->schib.pmcw.ena)
/* Not operational -> done. */
return 0;
/* Stage 1: cancel io. */
if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) &&
!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
if (!scsw_is_tm(&sch->schib.scsw)) {
ret = cio_cancel(sch);
if (ret != -EINVAL)
return ret;
}
/* cancel io unsuccessful or not applicable (transport mode).
* Continue with asynchronous instructions. */
cdev->private->iretry = 3; /* 3 halt retries. */
}
if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
/* Stage 2: halt io. */
if (cdev->private->iretry) {
cdev->private->iretry--;
ret = cio_halt(sch);
if (ret != -EBUSY)
return (ret == 0) ? -EBUSY : ret;
}
/* halt io unsuccessful. */
cdev->private->iretry = 255; /* 255 clear retries. */
}
/* Stage 3: clear io. */
if (cdev->private->iretry) {
cdev->private->iretry--;
ret = cio_clear (sch);
return (ret == 0) ? -EBUSY : ret;
}
/* Function was unsuccessful */
CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n",
cdev->private->dev_id.ssid, cdev->private->dev_id.devno);
return -EIO;
}
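/*
 * Editorial note: callers drive the escalation above from timer context;
 * e.g. ccw_device_online_timeout() and ccw_device_kill_io() below prime
 * cdev->private->iretry = 255 and re-invoke this function every 3*HZ for
 * as long as it keeps returning -EBUSY.
 */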
void ccw_device_update_sense_data(struct ccw_device *cdev)
{
memset(&cdev->id, 0, sizeof(cdev->id));
cdev->id.cu_type = cdev->private->senseid.cu_type;
cdev->id.cu_model = cdev->private->senseid.cu_model;
cdev->id.dev_type = cdev->private->senseid.dev_type;
cdev->id.dev_model = cdev->private->senseid.dev_model;
}
int ccw_device_test_sense_data(struct ccw_device *cdev)
{
return cdev->id.cu_type == cdev->private->senseid.cu_type &&
cdev->id.cu_model == cdev->private->senseid.cu_model &&
cdev->id.dev_type == cdev->private->senseid.dev_type &&
cdev->id.dev_model == cdev->private->senseid.dev_model;
}
/*
* The machine won't give us any notification by machine check if a chpid has
* been varied online on the SE, so we have to find out by magic (i.e. driving
* the channel subsystem to device selection and updating our path masks).
*/
static void
__recover_lost_chpids(struct subchannel *sch, int old_lpm)
{
int mask, i;
struct chp_id chpid;
chp_id_init(&chpid);
for (i = 0; i<8; i++) {
mask = 0x80 >> i;
if (!(sch->lpm & mask))
continue;
if (old_lpm & mask)
continue;
chpid.id = sch->schib.pmcw.chpid[i];
if (!chp_is_registered(chpid))
css_schedule_eval_all();
}
}
/*
* Stop device recognition.
*/
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
struct subchannel *sch;
int old_lpm;
sch = to_subchannel(cdev->dev.parent);
if (cio_disable_subchannel(sch))
state = DEV_STATE_NOT_OPER;
/*
* Now that we tried recognition, we have performed device selection
* through ssch() and the path information is up to date.
*/
old_lpm = sch->lpm;
/* Check since device may again have become not operational. */
if (cio_update_schib(sch))
state = DEV_STATE_NOT_OPER;
else
sch->lpm = sch->schib.pmcw.pam & sch->opm;
if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
/* Force reprobe on all chpids. */
old_lpm = 0;
if (sch->lpm != old_lpm)
__recover_lost_chpids(sch, old_lpm);
if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID &&
(state == DEV_STATE_NOT_OPER || state == DEV_STATE_BOXED)) {
cdev->private->flags.recog_done = 1;
cdev->private->state = DEV_STATE_DISCONNECTED;
wake_up(&cdev->private->wait_q);
return;
}
if (cdev->private->flags.resuming) {
cdev->private->state = state;
cdev->private->flags.recog_done = 1;
wake_up(&cdev->private->wait_q);
return;
}
switch (state) {
case DEV_STATE_NOT_OPER:
break;
case DEV_STATE_OFFLINE:
if (!cdev->online) {
ccw_device_update_sense_data(cdev);
break;
}
cdev->private->state = DEV_STATE_OFFLINE;
cdev->private->flags.recog_done = 1;
if (ccw_device_test_sense_data(cdev)) {
cdev->private->flags.donotify = 1;
ccw_device_online(cdev);
wake_up(&cdev->private->wait_q);
} else {
ccw_device_update_sense_data(cdev);
ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
}
return;
case DEV_STATE_BOXED:
if (cdev->id.cu_type != 0) { /* device was recognized before */
cdev->private->flags.recog_done = 1;
cdev->private->state = DEV_STATE_BOXED;
wake_up(&cdev->private->wait_q);
return;
}
break;
}
cdev->private->state = state;
io_subchannel_recog_done(cdev);
wake_up(&cdev->private->wait_q);
}
/*
* Function called from device_id.c after sense id has completed.
*/
void
ccw_device_sense_id_done(struct ccw_device *cdev, int err)
{
switch (err) {
case 0:
ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
break;
case -ETIME: /* Sense id stopped by timeout. */
ccw_device_recog_done(cdev, DEV_STATE_BOXED);
break;
default:
ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
break;
}
}
/**
* ccw_device_notify() - inform the device's driver about an event
* @cdev: device for which an event occurred
* @event: event that occurred
*
* Returns:
* -%EINVAL if the device is offline or has no driver.
* -%EOPNOTSUPP if the device's driver has no notifier registered.
* %NOTIFY_OK if the driver wants to keep the device.
* %NOTIFY_BAD if the driver doesn't want to keep the device.
*/
int ccw_device_notify(struct ccw_device *cdev, int event)
{
int ret = -EINVAL;
if (!cdev->drv)
goto out;
if (!cdev->online)
goto out;
CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
event);
if (!cdev->drv->notify) {
ret = -EOPNOTSUPP;
goto out;
}
if (cdev->drv->notify(cdev, event))
ret = NOTIFY_OK;
else
ret = NOTIFY_BAD;
out:
return ret;
}
static void ccw_device_oper_notify(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
/* Reenable channel measurements, if needed. */
ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
/* Save indication for new paths. */
cdev->private->path_new_mask = sch->vpm;
return;
}
/* Driver doesn't want device back. */
ccw_device_set_notoper(cdev);
ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
}
/*
* Finished with online/offline processing.
*/
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
ccw_device_set_timeout(cdev, 0);
if (state != DEV_STATE_ONLINE)
cio_disable_subchannel(sch);
/* Reset device status. */
memset(&cdev->private->irb, 0, sizeof(struct irb));
cdev->private->state = state;
switch (state) {
case DEV_STATE_BOXED:
CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
cdev->private->dev_id.devno, sch->schid.sch_no);
if (cdev->online &&
ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK)
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
cdev->private->flags.donotify = 0;
break;
case DEV_STATE_NOT_OPER:
CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
cdev->private->dev_id.devno, sch->schid.sch_no);
if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
else
ccw_device_set_disconnected(cdev);
cdev->private->flags.donotify = 0;
break;
case DEV_STATE_DISCONNECTED:
CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
"%04x\n", cdev->private->dev_id.devno,
sch->schid.sch_no);
if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) {
cdev->private->state = DEV_STATE_NOT_OPER;
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
} else
ccw_device_set_disconnected(cdev);
cdev->private->flags.donotify = 0;
break;
default:
break;
}
if (cdev->private->flags.donotify) {
cdev->private->flags.donotify = 0;
ccw_device_oper_notify(cdev);
}
wake_up(&cdev->private->wait_q);
}
/*
* Start device recognition.
*/
void ccw_device_recognition(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
/*
* We used to start here with a sense pgid to find out whether a device
* is locked by someone else. Unfortunately, the sense pgid command
* code has other meanings on devices predating the path grouping
* algorithm, so we start with sense id and box the device after a
* timeout (or if sense pgid during path verification detects the device
* is locked, as may happen on newer devices).
*/
cdev->private->flags.recog_done = 0;
cdev->private->state = DEV_STATE_SENSE_ID;
if (cio_enable_subchannel(sch, (u32) (addr_t) sch)) {
ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
return;
}
ccw_device_sense_id_start(cdev);
}
/*
* Handle events for states that use the ccw request infrastructure.
*/
static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
{
switch (e) {
case DEV_EVENT_NOTOPER:
ccw_request_notoper(cdev);
break;
case DEV_EVENT_INTERRUPT:
ccw_request_handler(cdev);
break;
case DEV_EVENT_TIMEOUT:
ccw_request_timeout(cdev);
break;
default:
break;
}
}
static void ccw_device_report_path_events(struct ccw_device *cdev)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
int path_event[8];
int chp, mask;
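/* Editorial note: path masks here are MSB-first, i.e. bit 0x80 corresponds
 * to channel path 0 (the same convention as __recover_lost_chpids() above),
 * so the loop below walks all eight chp slots from 0 to 7. */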
for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) {
path_event[chp] = PE_NONE;
if (mask & cdev->private->path_gone_mask & ~(sch->vpm))
path_event[chp] |= PE_PATH_GONE;
if (mask & cdev->private->path_new_mask & sch->vpm)
path_event[chp] |= PE_PATH_AVAILABLE;
if (mask & cdev->private->pgid_reset_mask & sch->vpm)
path_event[chp] |= PE_PATHGROUP_ESTABLISHED;
}
if (cdev->online && cdev->drv->path_event)
cdev->drv->path_event(cdev, path_event);
}
static void ccw_device_reset_path_events(struct ccw_device *cdev)
{
cdev->private->path_gone_mask = 0;
cdev->private->path_new_mask = 0;
cdev->private->pgid_reset_mask = 0;
}
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
/* Update schib - pom may have changed. */
if (cio_update_schib(sch)) {
err = -ENODEV;
goto callback;
}
/* Update lpm with verified path mask. */
sch->lpm = sch->vpm;
/* Repeat path verification? */
if (cdev->private->flags.doverify) {
ccw_device_verify_start(cdev);
return;
}
callback:
switch (err) {
case 0:
ccw_device_done(cdev, DEV_STATE_ONLINE);
/* Deliver fake irb to device driver, if needed. */
if (cdev->private->flags.fake_irb) {
memset(&cdev->private->irb, 0, sizeof(struct irb));
cdev->private->irb.scsw.cmd.cc = 1;
cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND;
cdev->private->irb.scsw.cmd.stctl =
SCSW_STCTL_STATUS_PEND;
cdev->private->flags.fake_irb = 0;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
&cdev->private->irb);
memset(&cdev->private->irb, 0, sizeof(struct irb));
}
ccw_device_report_path_events(cdev);
break;
case -ETIME:
case -EUSERS:
/* Reset oper notify indication after verify error. */
cdev->private->flags.donotify = 0;
ccw_device_done(cdev, DEV_STATE_BOXED);
break;
case -EACCES:
/* Reset oper notify indication after verify error. */
cdev->private->flags.donotify = 0;
ccw_device_done(cdev, DEV_STATE_DISCONNECTED);
break;
default:
/* Reset oper notify indication after verify error. */
cdev->private->flags.donotify = 0;
ccw_device_done(cdev, DEV_STATE_NOT_OPER);
break;
}
ccw_device_reset_path_events(cdev);
}
/*
* Get device online.
*/
int
ccw_device_online(struct ccw_device *cdev)
{
struct subchannel *sch;
int ret;
if ((cdev->private->state != DEV_STATE_OFFLINE) &&
(cdev->private->state != DEV_STATE_BOXED))
return -EINVAL;
sch = to_subchannel(cdev->dev.parent);
ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
if (ret != 0) {
/* Couldn't enable the subchannel for i/o. Sick device. */
if (ret == -ENODEV)
dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
return ret;
}
/* Start initial path verification. */
cdev->private->state = DEV_STATE_VERIFY;
ccw_device_verify_start(cdev);
return 0;
}
void
ccw_device_disband_done(struct ccw_device *cdev, int err)
{
switch (err) {
case 0:
ccw_device_done(cdev, DEV_STATE_OFFLINE);
break;
case -ETIME:
ccw_device_done(cdev, DEV_STATE_BOXED);
break;
default:
cdev->private->flags.donotify = 0;
ccw_device_done(cdev, DEV_STATE_NOT_OPER);
break;
}
}
/*
* Shutdown device.
*/
int
ccw_device_offline(struct ccw_device *cdev)
{
struct subchannel *sch;
/* Allow ccw_device_offline while disconnected. */
if (cdev->private->state == DEV_STATE_DISCONNECTED ||
cdev->private->state == DEV_STATE_NOT_OPER) {
cdev->private->flags.donotify = 0;
ccw_device_done(cdev, DEV_STATE_NOT_OPER);
return 0;
}
if (cdev->private->state == DEV_STATE_BOXED) {
ccw_device_done(cdev, DEV_STATE_BOXED);
return 0;
}
if (ccw_device_is_orphan(cdev)) {
ccw_device_done(cdev, DEV_STATE_OFFLINE);
return 0;
}
sch = to_subchannel(cdev->dev.parent);
if (cio_update_schib(sch))
return -ENODEV;
if (scsw_actl(&sch->schib.scsw) != 0)
return -EBUSY;
if (cdev->private->state != DEV_STATE_ONLINE)
return -EINVAL;
/* Are we doing path grouping? */
if (!cdev->private->flags.pgroup) {
/* No, set state offline immediately. */
ccw_device_done(cdev, DEV_STATE_OFFLINE);
return 0;
}
/* Start Set Path Group commands. */
cdev->private->state = DEV_STATE_DISBAND_PGID;
ccw_device_disband_start(cdev);
return 0;
}
/*
* Handle not operational event in non-special state.
*/
static void ccw_device_generic_notoper(struct ccw_device *cdev,
enum dev_event dev_event)
{
if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
else
ccw_device_set_disconnected(cdev);
}
/*
* Handle path verification event in offline state.
*/
static void ccw_device_offline_verify(struct ccw_device *cdev,
enum dev_event dev_event)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
css_schedule_eval(sch->schid);
}
/*
* Handle path verification event.
*/
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
struct subchannel *sch;
if (cdev->private->state == DEV_STATE_W4SENSE) {
cdev->private->flags.doverify = 1;
return;
}
sch = to_subchannel(cdev->dev.parent);
/*
* Since we might not just be coming from an interrupt from the
* subchannel we have to update the schib.
*/
if (cio_update_schib(sch)) {
ccw_device_verify_done(cdev, -ENODEV);
return;
}
if (scsw_actl(&sch->schib.scsw) != 0 ||
(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
(scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
/*
* No final status yet or final status not yet delivered
* to the device driver. Can't do path verification now,
* delay until final status was delivered.
*/
cdev->private->flags.doverify = 1;
return;
}
/* Device is idle, we can do the path verification. */
cdev->private->state = DEV_STATE_VERIFY;
ccw_device_verify_start(cdev);
}
/*
* Handle path verification event in boxed state.
*/
static void ccw_device_boxed_verify(struct ccw_device *cdev,
enum dev_event dev_event)
{
struct subchannel *sch = to_subchannel(cdev->dev.parent);
if (cdev->online) {
if (cio_enable_subchannel(sch, (u32) (addr_t) sch))
ccw_device_done(cdev, DEV_STATE_NOT_OPER);
else
ccw_device_online_verify(cdev, dev_event);
} else
css_schedule_eval(sch->schid);
}
/*
* Got an interrupt for a normal io (state online).
*/
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
struct irb *irb;
int is_cmd;
irb = (struct irb *)&S390_lowcore.irb;
is_cmd = !scsw_is_tm(&irb->scsw);
/* Check for unsolicited interrupt. */
if (!scsw_is_solicited(&irb->scsw)) {
if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
!irb->esw.esw0.erw.cons) {
/* Unit check but no sense data. Need basic sense. */
if (ccw_device_do_sense(cdev, irb) != 0)
goto call_handler_unsol;
memcpy(&cdev->private->irb, irb, sizeof(struct irb));
cdev->private->state = DEV_STATE_W4SENSE;
cdev->private->intparm = 0;
return;
}
call_handler_unsol:
if (cdev->handler)
cdev->handler (cdev, 0, irb);
if (cdev->private->flags.doverify)
ccw_device_online_verify(cdev, 0);
return;
}
/* Accumulate status and find out if a basic sense is needed. */
ccw_device_accumulate_irb(cdev, irb);
if (is_cmd && cdev->private->flags.dosense) {
if (ccw_device_do_sense(cdev, irb) == 0) {
cdev->private->state = DEV_STATE_W4SENSE;
}
return;
}
/* Call the handler. */
if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
/* Start delayed path verification. */
ccw_device_online_verify(cdev, 0);
}
/*
* Got a timeout in online state.
*/
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
int ret;
ccw_device_set_timeout(cdev, 0);
cdev->private->iretry = 255;
ret = ccw_device_cancel_halt_clear(cdev);
if (ret == -EBUSY) {
ccw_device_set_timeout(cdev, 3*HZ);
cdev->private->state = DEV_STATE_TIMEOUT_KILL;
return;
}
if (ret)
dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
else if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
ERR_PTR(-ETIMEDOUT));
}
/*
* Got an interrupt for a basic sense.
*/
static void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
struct irb *irb;
irb = (struct irb *)&S390_lowcore.irb;
/* Check for unsolicited interrupt. */
if (scsw_stctl(&irb->scsw) ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
if (scsw_cc(&irb->scsw) == 1)
/* Basic sense hasn't started. Try again. */
ccw_device_do_sense(cdev, irb);
else {
CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited "
"interrupt during w4sense...\n",
cdev->private->dev_id.ssid,
cdev->private->dev_id.devno);
if (cdev->handler)
cdev->handler (cdev, 0, irb);
}
return;
}
/*
* Check if a halt or clear has been issued in the meanwhile. If yes,
* only deliver the halt/clear interrupt to the device driver as if it
* had killed the original request.
*/
if (scsw_fctl(&irb->scsw) &
(SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
cdev->private->flags.dosense = 0;
memset(&cdev->private->irb, 0, sizeof(struct irb));
ccw_device_accumulate_irb(cdev, irb);
goto call_handler;
}
/* Add basic sense info to irb. */
ccw_device_accumulate_basic_sense(cdev, irb);
if (cdev->private->flags.dosense) {
/* Another basic sense is needed. */
ccw_device_do_sense(cdev, irb);
return;
}
call_handler:
cdev->private->state = DEV_STATE_ONLINE;
/* In case sensing interfered with setting the device online */
wake_up(&cdev->private->wait_q);
/* Call the handler. */
if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
/* Start delayed path verification. */
ccw_device_online_verify(cdev, 0);
}
static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
ccw_device_set_timeout(cdev, 0);
/* Start delayed path verification. */
ccw_device_online_verify(cdev, 0);
/* OK, i/o is dead now. Call interrupt handler. */
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
ERR_PTR(-EIO));
}
static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
int ret;
ret = ccw_device_cancel_halt_clear(cdev);
if (ret == -EBUSY) {
ccw_device_set_timeout(cdev, 3*HZ);
return;
}
/* Start delayed path verification. */
ccw_device_online_verify(cdev, 0);
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
ERR_PTR(-EIO));
}
void ccw_device_kill_io(struct ccw_device *cdev)
{
int ret;
cdev->private->iretry = 255;
ret = ccw_device_cancel_halt_clear(cdev);
if (ret == -EBUSY) {
ccw_device_set_timeout(cdev, 3*HZ);
cdev->private->state = DEV_STATE_TIMEOUT_KILL;
return;
}
/* Start delayed path verification. */
ccw_device_online_verify(cdev, 0);
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
ERR_PTR(-EIO));
}
static void
ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
/* Start verification after current task finished. */
cdev->private->flags.doverify = 1;
}
static void
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
{
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
/* Couldn't enable the subchannel for i/o. Sick device. */
return;
cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
ccw_device_sense_id_start(cdev);
}
void ccw_device_trigger_reprobe(struct ccw_device *cdev)
{
struct subchannel *sch;
if (cdev->private->state != DEV_STATE_DISCONNECTED)
return;
sch = to_subchannel(cdev->dev.parent);
/* Update some values. */
if (cio_update_schib(sch))
return;
/*
* The pim, pam, pom values may not be accurate, but they are the best
* we have before performing device selection :/
*/
sch->lpm = sch->schib.pmcw.pam & sch->opm;
/*
* Use the initial configuration since we can't be sure that the old
* paths are valid.
*/
io_subchannel_init_config(sch);
if (cio_commit_config(sch))
return;
/* We should also update ssd info, but this has to wait. */
/* Check if this is another device which appeared on the same sch. */
if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
css_schedule_eval(sch->schid);
else
ccw_device_start_id(cdev, 0);
}
static void ccw_device_disabled_irq(struct ccw_device *cdev,
enum dev_event dev_event)
{
struct subchannel *sch;
sch = to_subchannel(cdev->dev.parent);
/*
* An interrupt in a disabled state means a previous disable was not
* successful - should not happen, but we try to disable again.
*/
cio_disable_subchannel(sch);
}
static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
retry_set_schib(cdev);
cdev->private->state = DEV_STATE_ONLINE;
dev_fsm_event(cdev, dev_event);
}
static void ccw_device_update_cmfblock(struct ccw_device *cdev,
enum dev_event dev_event)
{
cmf_retry_copy_block(cdev);
cdev->private->state = DEV_STATE_ONLINE;
dev_fsm_event(cdev, dev_event);
}
static void
ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
{
ccw_device_set_timeout(cdev, 0);
cdev->private->state = DEV_STATE_NOT_OPER;
wake_up(&cdev->private->wait_q);
}
static void
ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
int ret;
ret = ccw_device_cancel_halt_clear(cdev);
if (ret == -EBUSY) {
ccw_device_set_timeout(cdev, HZ/10);
} else {
cdev->private->state = DEV_STATE_NOT_OPER;
wake_up(&cdev->private->wait_q);
}
}
/*
* No operation action. This is used e.g. to ignore a timeout event in
* state offline.
*/
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
}
/*
* device statemachine
*/
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
[DEV_STATE_NOT_OPER] = {
[DEV_EVENT_NOTOPER] = ccw_device_nop,
[DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_SENSE_PGID] = {
[DEV_EVENT_NOTOPER] = ccw_device_request_event,
[DEV_EVENT_INTERRUPT] = ccw_device_request_event,
[DEV_EVENT_TIMEOUT] = ccw_device_request_event,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_SENSE_ID] = {
[DEV_EVENT_NOTOPER] = ccw_device_request_event,
[DEV_EVENT_INTERRUPT] = ccw_device_request_event,
[DEV_EVENT_TIMEOUT] = ccw_device_request_event,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_OFFLINE] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_disabled_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_offline_verify,
},
[DEV_STATE_VERIFY] = {
[DEV_EVENT_NOTOPER] = ccw_device_request_event,
[DEV_EVENT_INTERRUPT] = ccw_device_request_event,
[DEV_EVENT_TIMEOUT] = ccw_device_request_event,
[DEV_EVENT_VERIFY] = ccw_device_delay_verify,
},
[DEV_STATE_ONLINE] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_online_timeout,
[DEV_EVENT_VERIFY] = ccw_device_online_verify,
},
[DEV_STATE_W4SENSE] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_w4sense,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_online_verify,
},
[DEV_STATE_DISBAND_PGID] = {
[DEV_EVENT_NOTOPER] = ccw_device_request_event,
[DEV_EVENT_INTERRUPT] = ccw_device_request_event,
[DEV_EVENT_TIMEOUT] = ccw_device_request_event,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_BOXED] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_nop,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_boxed_verify,
},
/* states to wait for i/o completion before doing something */
[DEV_STATE_TIMEOUT_KILL] = {
[DEV_EVENT_NOTOPER] = ccw_device_generic_notoper,
[DEV_EVENT_INTERRUPT] = ccw_device_killing_irq,
[DEV_EVENT_TIMEOUT] = ccw_device_killing_timeout,
[DEV_EVENT_VERIFY] = ccw_device_nop, //FIXME
},
[DEV_STATE_QUIESCE] = {
[DEV_EVENT_NOTOPER] = ccw_device_quiesce_done,
[DEV_EVENT_INTERRUPT] = ccw_device_quiesce_done,
[DEV_EVENT_TIMEOUT] = ccw_device_quiesce_timeout,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
/* special states for devices gone not operational */
[DEV_STATE_DISCONNECTED] = {
[DEV_EVENT_NOTOPER] = ccw_device_nop,
[DEV_EVENT_INTERRUPT] = ccw_device_start_id,
[DEV_EVENT_TIMEOUT] = ccw_device_nop,
[DEV_EVENT_VERIFY] = ccw_device_start_id,
},
[DEV_STATE_DISCONNECTED_SENSE_ID] = {
[DEV_EVENT_NOTOPER] = ccw_device_request_event,
[DEV_EVENT_INTERRUPT] = ccw_device_request_event,
[DEV_EVENT_TIMEOUT] = ccw_device_request_event,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
[DEV_STATE_CMFCHANGE] = {
[DEV_EVENT_NOTOPER] = ccw_device_change_cmfstate,
[DEV_EVENT_INTERRUPT] = ccw_device_change_cmfstate,
[DEV_EVENT_TIMEOUT] = ccw_device_change_cmfstate,
[DEV_EVENT_VERIFY] = ccw_device_change_cmfstate,
},
[DEV_STATE_CMFUPDATE] = {
[DEV_EVENT_NOTOPER] = ccw_device_update_cmfblock,
[DEV_EVENT_INTERRUPT] = ccw_device_update_cmfblock,
[DEV_EVENT_TIMEOUT] = ccw_device_update_cmfblock,
[DEV_EVENT_VERIFY] = ccw_device_update_cmfblock,
},
[DEV_STATE_STEAL_LOCK] = {
[DEV_EVENT_NOTOPER] = ccw_device_request_event,
[DEV_EVENT_INTERRUPT] = ccw_device_request_event,
[DEV_EVENT_TIMEOUT] = ccw_device_request_event,
[DEV_EVENT_VERIFY] = ccw_device_nop,
},
};
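/*
 * Editorial sketch (assumption, not taken from this file): dev_fsm_event()
 * is declared elsewhere in the driver and presumably dispatches through the
 * table above, roughly:
 *
 *	dev_jumptable[cdev->private->state][dev_event](cdev, dev_event);
 */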
EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
| gpl-2.0
onexmaster/android_kernel_htc_endeavoru | fs/ocfs2/dlm/dlmthread.c | 3064 | 21185 |
/* -*- mode: c; c-basic-offset: 8; -*-
* vim: noexpandtab sw=8 ts=8 sts=0:
*
* dlmthread.c
*
* standalone DLM module
*
* Copyright (C) 2004 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"
#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"
#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_THREAD)
#include "cluster/masklog.h"
static int dlm_thread(void *data);
static void dlm_flush_asts(struct dlm_ctxt *dlm);
#define dlm_lock_is_remote(dlm, lock) ((lock)->ml.node != (dlm)->node_num)
/* will exit holding res->spinlock, but may drop in function */
/* waits until flags are cleared on res->state */
void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags)
{
DECLARE_WAITQUEUE(wait, current);
assert_spin_locked(&res->spinlock);
add_wait_queue(&res->wq, &wait);
repeat:
set_current_state(TASK_UNINTERRUPTIBLE);
if (res->state & flags) {
spin_unlock(&res->spinlock);
schedule();
spin_lock(&res->spinlock);
goto repeat;
}
remove_wait_queue(&res->wq, &wait);
__set_current_state(TASK_RUNNING);
}
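/*
 * Illustrative caller pattern (editorial sketch): the function must be
 * entered with res->spinlock held and returns with it held, dropping it
 * only while sleeping.
 */
#if 0
spin_lock(&res->spinlock);
__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
/* the flag is clear here and the spinlock is held again */
spin_unlock(&res->spinlock);
#endif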
int __dlm_lockres_has_locks(struct dlm_lock_resource *res)
{
if (list_empty(&res->granted) &&
list_empty(&res->converting) &&
list_empty(&res->blocked))
return 0;
return 1;
}
/* "unused": the lockres has no locks, is not on the dirty list,
* has no inflight locks (in the gap between mastery and acquiring
* the first lock), and has no bits in its refmap.
* truly ready to be freed. */
int __dlm_lockres_unused(struct dlm_lock_resource *res)
{
int bit;
if (__dlm_lockres_has_locks(res))
return 0;
if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY)
return 0;
if (res->state & DLM_LOCK_RES_RECOVERING)
return 0;
bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
if (bit < O2NM_MAX_NODES)
return 0;
/*
* since the bit for dlm->node_num is not set, inflight_locks better
* be zero
*/
BUG_ON(res->inflight_locks != 0);
return 1;
}
/* Call whenever you may have added or deleted something from one of
* the lockres queues. This will figure out whether it belongs on the
* unused list or not and does the appropriate thing. */
void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
assert_spin_locked(&dlm->spinlock);
assert_spin_locked(&res->spinlock);
if (__dlm_lockres_unused(res)){
if (list_empty(&res->purge)) {
mlog(0, "%s: Adding res %.*s to purge list\n",
dlm->name, res->lockname.len, res->lockname.name);
res->last_used = jiffies;
dlm_lockres_get(res);
list_add_tail(&res->purge, &dlm->purge_list);
dlm->purge_count++;
}
} else if (!list_empty(&res->purge)) {
mlog(0, "%s: Removing res %.*s from purge list\n",
dlm->name, res->lockname.len, res->lockname.name);
list_del_init(&res->purge);
dlm_lockres_put(res);
dlm->purge_count--;
}
}
void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
spin_lock(&dlm->spinlock);
spin_lock(&res->spinlock);
__dlm_lockres_calc_usage(dlm, res);
spin_unlock(&res->spinlock);
spin_unlock(&dlm->spinlock);
}
static void dlm_purge_lockres(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
int master;
int ret = 0;
assert_spin_locked(&dlm->spinlock);
assert_spin_locked(&res->spinlock);
master = (res->owner == dlm->node_num);
mlog(0, "%s: Purging res %.*s, master %d\n", dlm->name,
res->lockname.len, res->lockname.name, master);
if (!master) {
res->state |= DLM_LOCK_RES_DROPPING_REF;
/* drop spinlock... retake below */
spin_unlock(&res->spinlock);
spin_unlock(&dlm->spinlock);
spin_lock(&res->spinlock);
/* This ensures that clear refmap is sent after the set */
__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
spin_unlock(&res->spinlock);
/* clear our bit from the master's refmap, ignore errors */
ret = dlm_drop_lockres_ref(dlm, res);
if (ret < 0) {
mlog(ML_ERROR, "%s: deref %.*s failed %d\n", dlm->name,
res->lockname.len, res->lockname.name, ret);
if (!dlm_is_host_down(ret))
BUG();
}
spin_lock(&dlm->spinlock);
spin_lock(&res->spinlock);
}
if (!list_empty(&res->purge)) {
mlog(0, "%s: Removing res %.*s from purgelist, master %d\n",
dlm->name, res->lockname.len, res->lockname.name, master);
list_del_init(&res->purge);
dlm_lockres_put(res);
dlm->purge_count--;
}
if (!__dlm_lockres_unused(res)) {
mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
dlm->name, res->lockname.len, res->lockname.name);
__dlm_print_one_lock_resource(res);
BUG();
}
__dlm_unhash_lockres(res);
/* lockres is not in the hash now. drop the flag and wake up
* any processes waiting in dlm_get_lock_resource. */
if (!master) {
res->state &= ~DLM_LOCK_RES_DROPPING_REF;
spin_unlock(&res->spinlock);
wake_up(&res->wq);
} else
spin_unlock(&res->spinlock);
}
static void dlm_run_purge_list(struct dlm_ctxt *dlm,
int purge_now)
{
unsigned int run_max, unused;
unsigned long purge_jiffies;
struct dlm_lock_resource *lockres;
spin_lock(&dlm->spinlock);
run_max = dlm->purge_count;
while (run_max && !list_empty(&dlm->purge_list)) {
run_max--;
lockres = list_entry(dlm->purge_list.next,
struct dlm_lock_resource, purge);
spin_lock(&lockres->spinlock);
purge_jiffies = lockres->last_used +
msecs_to_jiffies(DLM_PURGE_INTERVAL_MS);
/* Make sure that we want to be processing this guy at
* this time. */
if (!purge_now && time_after(purge_jiffies, jiffies)) {
/* Since resources are added to the purge list
* in tail order, we can stop at the first
* unpurgable resource -- anyone added after
* him will have a greater last_used value */
spin_unlock(&lockres->spinlock);
break;
}
/* Status of the lockres *might* change so double
* check. If the lockres is unused, holding the dlm
* spinlock will prevent people from getting any more
* refs on it. */
unused = __dlm_lockres_unused(lockres);
if (!unused ||
(lockres->state & DLM_LOCK_RES_MIGRATING)) {
mlog(0, "%s: res %.*s is in use or being remastered, "
"used %d, state %d\n", dlm->name,
lockres->lockname.len, lockres->lockname.name,
!unused, lockres->state);
list_move_tail(&lockres->purge, &dlm->purge_list);
spin_unlock(&lockres->spinlock);
continue;
}
dlm_lockres_get(lockres);
dlm_purge_lockres(dlm, lockres);
dlm_lockres_put(lockres);
/* Avoid adding any scheduling latencies */
cond_resched_lock(&dlm->spinlock);
}
spin_unlock(&dlm->spinlock);
}
static void dlm_shuffle_lists(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
struct dlm_lock *lock, *target;
struct list_head *iter;
struct list_head *head;
int can_grant = 1;
/*
* Because this function is called with the lockres
* spinlock, and because we know that it is not migrating/
* recovering/in-progress, it is fine to reserve asts and
* basts right before queueing them all throughout
*/
assert_spin_locked(&dlm->ast_lock);
assert_spin_locked(&res->spinlock);
BUG_ON((res->state & (DLM_LOCK_RES_MIGRATING|
DLM_LOCK_RES_RECOVERING|
DLM_LOCK_RES_IN_PROGRESS)));
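/*
 * Conversions are granted strictly from the head of the converting
 * queue: the head convert is granted only if its target mode is
 * compatible with every other lock on the granted and converting
 * queues; otherwise BASTs are queued against the blockers. Because
 * can_grant is deliberately not reset between the two phases below, a
 * blocked lock can only be granted once the converting queue has
 * drained.
 */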
converting:
if (list_empty(&res->converting))
goto blocked;
mlog(0, "%s: res %.*s has locks on the convert queue\n", dlm->name,
res->lockname.len, res->lockname.name);
target = list_entry(res->converting.next, struct dlm_lock, list);
if (target->ml.convert_type == LKM_IVMODE) {
mlog(ML_ERROR, "%s: res %.*s converting lock to invalid mode\n",
dlm->name, res->lockname.len, res->lockname.name);
BUG();
}
head = &res->granted;
list_for_each(iter, head) {
lock = list_entry(iter, struct dlm_lock, list);
if (lock == target)
continue;
if (!dlm_lock_compatible(lock->ml.type,
target->ml.convert_type)) {
can_grant = 0;
/* queue the BAST if not already */
if (lock->ml.highest_blocked == LKM_IVMODE) {
__dlm_lockres_reserve_ast(res);
__dlm_queue_bast(dlm, lock);
}
/* update the highest_blocked if needed */
if (lock->ml.highest_blocked < target->ml.convert_type)
lock->ml.highest_blocked =
target->ml.convert_type;
}
}
head = &res->converting;
list_for_each(iter, head) {
lock = list_entry(iter, struct dlm_lock, list);
if (lock == target)
continue;
if (!dlm_lock_compatible(lock->ml.type,
target->ml.convert_type)) {
can_grant = 0;
if (lock->ml.highest_blocked == LKM_IVMODE) {
__dlm_lockres_reserve_ast(res);
__dlm_queue_bast(dlm, lock);
}
if (lock->ml.highest_blocked < target->ml.convert_type)
lock->ml.highest_blocked =
target->ml.convert_type;
}
}
/* we can convert the lock */
if (can_grant) {
spin_lock(&target->spinlock);
BUG_ON(target->ml.highest_blocked != LKM_IVMODE);
mlog(0, "%s: res %.*s, AST for Converting lock %u:%llu, type "
"%d => %d, node %u\n", dlm->name, res->lockname.len,
res->lockname.name,
dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
target->ml.type,
target->ml.convert_type, target->ml.node);
target->ml.type = target->ml.convert_type;
target->ml.convert_type = LKM_IVMODE;
list_move_tail(&target->list, &res->granted);
BUG_ON(!target->lksb);
target->lksb->status = DLM_NORMAL;
spin_unlock(&target->spinlock);
__dlm_lockres_reserve_ast(res);
__dlm_queue_ast(dlm, target);
/* go back and check for more */
goto converting;
}
blocked:
if (list_empty(&res->blocked))
goto leave;
target = list_entry(res->blocked.next, struct dlm_lock, list);
head = &res->granted;
list_for_each(iter, head) {
lock = list_entry(iter, struct dlm_lock, list);
if (lock == target)
continue;
if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
can_grant = 0;
if (lock->ml.highest_blocked == LKM_IVMODE) {
__dlm_lockres_reserve_ast(res);
__dlm_queue_bast(dlm, lock);
}
if (lock->ml.highest_blocked < target->ml.type)
lock->ml.highest_blocked = target->ml.type;
}
}
head = &res->converting;
list_for_each(iter, head) {
lock = list_entry(iter, struct dlm_lock, list);
if (lock == target)
continue;
if (!dlm_lock_compatible(lock->ml.type, target->ml.type)) {
can_grant = 0;
if (lock->ml.highest_blocked == LKM_IVMODE) {
__dlm_lockres_reserve_ast(res);
__dlm_queue_bast(dlm, lock);
}
if (lock->ml.highest_blocked < target->ml.type)
lock->ml.highest_blocked = target->ml.type;
}
}
/* we can grant the blocked lock (only
* possible if converting list empty) */
if (can_grant) {
spin_lock(&target->spinlock);
BUG_ON(target->ml.highest_blocked != LKM_IVMODE);
mlog(0, "%s: res %.*s, AST for Blocked lock %u:%llu, type %d, "
"node %u\n", dlm->name, res->lockname.len,
res->lockname.name,
dlm_get_lock_cookie_node(be64_to_cpu(target->ml.cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(target->ml.cookie)),
target->ml.type, target->ml.node);
/* target->ml.type is already correct */
list_move_tail(&target->list, &res->granted);
BUG_ON(!target->lksb);
target->lksb->status = DLM_NORMAL;
spin_unlock(&target->spinlock);
__dlm_lockres_reserve_ast(res);
__dlm_queue_ast(dlm, target);
/* go back and check for more */
goto converting;
}
leave:
return;
}
/* must have NO locks when calling this with res != NULL */
void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
if (res) {
spin_lock(&dlm->spinlock);
spin_lock(&res->spinlock);
__dlm_dirty_lockres(dlm, res);
spin_unlock(&res->spinlock);
spin_unlock(&dlm->spinlock);
}
wake_up(&dlm->dlm_thread_wq);
}
void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
assert_spin_locked(&dlm->spinlock);
assert_spin_locked(&res->spinlock);
/* don't shuffle secondary queues */
if (res->owner == dlm->node_num) {
if (res->state & (DLM_LOCK_RES_MIGRATING |
DLM_LOCK_RES_BLOCK_DIRTY))
return;
if (list_empty(&res->dirty)) {
/* ref for dirty_list */
dlm_lockres_get(res);
list_add_tail(&res->dirty, &dlm->dirty_list);
res->state |= DLM_LOCK_RES_DIRTY;
}
}
mlog(0, "%s: res %.*s\n", dlm->name, res->lockname.len,
res->lockname.name);
}
/* Launch the NM thread for the mounted volume */
int dlm_launch_thread(struct dlm_ctxt *dlm)
{
mlog(0, "Starting dlm_thread...\n");
dlm->dlm_thread_task = kthread_run(dlm_thread, dlm, "dlm_thread");
if (IS_ERR(dlm->dlm_thread_task)) {
mlog_errno(PTR_ERR(dlm->dlm_thread_task));
dlm->dlm_thread_task = NULL;
return -EINVAL;
}
return 0;
}
void dlm_complete_thread(struct dlm_ctxt *dlm)
{
if (dlm->dlm_thread_task) {
mlog(ML_KTHREAD, "Waiting for dlm thread to exit\n");
kthread_stop(dlm->dlm_thread_task);
dlm->dlm_thread_task = NULL;
}
}
static int dlm_dirty_list_empty(struct dlm_ctxt *dlm)
{
int empty;
spin_lock(&dlm->spinlock);
empty = list_empty(&dlm->dirty_list);
spin_unlock(&dlm->spinlock);
return empty;
}
static void dlm_flush_asts(struct dlm_ctxt *dlm)
{
int ret;
struct dlm_lock *lock;
struct dlm_lock_resource *res;
u8 hi;
spin_lock(&dlm->ast_lock);
while (!list_empty(&dlm->pending_asts)) {
lock = list_entry(dlm->pending_asts.next,
struct dlm_lock, ast_list);
/* get an extra ref on lock */
dlm_lock_get(lock);
res = lock->lockres;
mlog(0, "%s: res %.*s, Flush AST for lock %u:%llu, type %d, "
"node %u\n", dlm->name, res->lockname.len,
res->lockname.name,
dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
lock->ml.type, lock->ml.node);
BUG_ON(!lock->ast_pending);
/* remove from list (including ref) */
list_del_init(&lock->ast_list);
dlm_lock_put(lock);
spin_unlock(&dlm->ast_lock);
if (lock->ml.node != dlm->node_num) {
ret = dlm_do_remote_ast(dlm, res, lock);
if (ret < 0)
mlog_errno(ret);
} else
dlm_do_local_ast(dlm, res, lock);
spin_lock(&dlm->ast_lock);
/* possible that another ast was queued while
* we were delivering the last one */
if (!list_empty(&lock->ast_list)) {
mlog(0, "%s: res %.*s, AST queued while flushing last "
"one\n", dlm->name, res->lockname.len,
res->lockname.name);
} else
lock->ast_pending = 0;
/* drop the extra ref.
* this may drop it completely. */
dlm_lock_put(lock);
dlm_lockres_release_ast(dlm, res);
}
while (!list_empty(&dlm->pending_basts)) {
lock = list_entry(dlm->pending_basts.next,
struct dlm_lock, bast_list);
/* get an extra ref on lock */
dlm_lock_get(lock);
res = lock->lockres;
BUG_ON(!lock->bast_pending);
/* get the highest blocked lock, and reset */
spin_lock(&lock->spinlock);
BUG_ON(lock->ml.highest_blocked <= LKM_IVMODE);
hi = lock->ml.highest_blocked;
lock->ml.highest_blocked = LKM_IVMODE;
spin_unlock(&lock->spinlock);
/* remove from list (including ref) */
list_del_init(&lock->bast_list);
dlm_lock_put(lock);
spin_unlock(&dlm->ast_lock);
mlog(0, "%s: res %.*s, Flush BAST for lock %u:%llu, "
"blocked %d, node %u\n",
dlm->name, res->lockname.len, res->lockname.name,
dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
hi, lock->ml.node);
if (lock->ml.node != dlm->node_num) {
ret = dlm_send_proxy_bast(dlm, res, lock, hi);
if (ret < 0)
mlog_errno(ret);
} else
dlm_do_local_bast(dlm, res, lock, hi);
spin_lock(&dlm->ast_lock);
/* possible that another bast was queued while
* we were delivering the last one */
if (!list_empty(&lock->bast_list)) {
mlog(0, "%s: res %.*s, BAST queued while flushing last "
"one\n", dlm->name, res->lockname.len,
res->lockname.name);
} else
lock->bast_pending = 0;
/* drop the extra ref.
* this may drop it completely. */
dlm_lock_put(lock);
dlm_lockres_release_ast(dlm, res);
}
wake_up(&dlm->ast_wq);
spin_unlock(&dlm->ast_lock);
}
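/*
 * dlm_thread tuning: the thread wakes at least every
 * DLM_THREAD_TIMEOUT_MS even if nothing is queued, and shuffles at most
 * DLM_THREAD_MAX_DIRTY dirty lockres entries per pass before yielding
 * the CPU.
 */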
#define DLM_THREAD_TIMEOUT_MS (4 * 1000)
#define DLM_THREAD_MAX_DIRTY 100
#define DLM_THREAD_MAX_ASTS 10
static int dlm_thread(void *data)
{
struct dlm_lock_resource *res;
struct dlm_ctxt *dlm = data;
unsigned long timeout = msecs_to_jiffies(DLM_THREAD_TIMEOUT_MS);
mlog(0, "dlm thread running for %s...\n", dlm->name);
while (!kthread_should_stop()) {
int n = DLM_THREAD_MAX_DIRTY;
/* dlm_shutting_down is very point-in-time, but that
* doesn't matter as we'll just loop back around if we
* get false on the leading edge of a state
* transition. */
dlm_run_purge_list(dlm, dlm_shutting_down(dlm));
/* We really don't want to hold dlm->spinlock while
* calling dlm_shuffle_lists on each lockres that
* needs to have its queues adjusted and AST/BASTs
* run. So let's pull each entry off the dirty_list
* and drop dlm->spinlock ASAP. Once off the list,
* res->spinlock needs to be taken again to protect
* the queues while calling dlm_shuffle_lists. */
spin_lock(&dlm->spinlock);
while (!list_empty(&dlm->dirty_list)) {
int delay = 0;
res = list_entry(dlm->dirty_list.next,
struct dlm_lock_resource, dirty);
/* peel a lockres off, remove it from the list,
* unset the dirty flag and drop the dlm lock */
BUG_ON(!res);
dlm_lockres_get(res);
spin_lock(&res->spinlock);
/* We clear the DLM_LOCK_RES_DIRTY state once we shuffle lists below */
list_del_init(&res->dirty);
spin_unlock(&res->spinlock);
spin_unlock(&dlm->spinlock);
/* Drop dirty_list ref */
dlm_lockres_put(res);
/* lockres can be re-dirtied/re-added to the
* dirty_list in this gap, but that is ok */
spin_lock(&dlm->ast_lock);
spin_lock(&res->spinlock);
if (res->owner != dlm->node_num) {
__dlm_print_one_lock_resource(res);
mlog(ML_ERROR, "%s: inprog %d, mig %d, reco %d,"
" dirty %d\n", dlm->name,
!!(res->state & DLM_LOCK_RES_IN_PROGRESS),
!!(res->state & DLM_LOCK_RES_MIGRATING),
!!(res->state & DLM_LOCK_RES_RECOVERING),
!!(res->state & DLM_LOCK_RES_DIRTY));
}
BUG_ON(res->owner != dlm->node_num);
/* it is now ok to move lockreses in these states
* to the dirty list, assuming that they will only be
* dirty for a short while. */
BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
if (res->state & (DLM_LOCK_RES_IN_PROGRESS |
DLM_LOCK_RES_RECOVERING)) {
/* move it to the tail and keep going */
res->state &= ~DLM_LOCK_RES_DIRTY;
spin_unlock(&res->spinlock);
spin_unlock(&dlm->ast_lock);
mlog(0, "%s: res %.*s, inprogress, delay list "
"shuffle, state %d\n", dlm->name,
res->lockname.len, res->lockname.name,
res->state);
delay = 1;
goto in_progress;
}
/* at this point the lockres is not migrating/
* recovering/in-progress. we have the lockres
* spinlock and do NOT have the dlm lock.
* safe to reserve/queue asts and run the lists. */
/* called while holding lockres lock */
dlm_shuffle_lists(dlm, res);
res->state &= ~DLM_LOCK_RES_DIRTY;
spin_unlock(&res->spinlock);
spin_unlock(&dlm->ast_lock);
dlm_lockres_calc_usage(dlm, res);
in_progress:
spin_lock(&dlm->spinlock);
/* if the lock was in-progress, stick
* it on the back of the list */
if (delay) {
spin_lock(&res->spinlock);
__dlm_dirty_lockres(dlm, res);
spin_unlock(&res->spinlock);
}
dlm_lockres_put(res);
/* unlikely, but we may need to give time to
* other tasks */
if (!--n) {
mlog(0, "%s: Throttling dlm thread\n",
dlm->name);
break;
}
}
spin_unlock(&dlm->spinlock);
dlm_flush_asts(dlm);
/* yield and continue right away if there is more work to do */
if (!n) {
cond_resched();
continue;
}
wait_event_interruptible_timeout(dlm->dlm_thread_wq,
!dlm_dirty_list_empty(dlm) ||
kthread_should_stop(),
timeout);
}
mlog(0, "quitting DLM thread\n");
return 0;
}
|
gpl-2.0
|
project-voodoo/android_kernel_samsung
|
drivers/char/hvc_rtas.c
|
4344
|
3672
|
/*
* IBM RTAS driver interface to hvc_console.c
*
* (C) Copyright IBM Corporation 2001-2005
* (C) Copyright Red Hat, Inc. 2005
*
* Author(s): Maximino Augilar <IBM STI Design Center>
* : Ryan S. Arnold <rsa@us.ibm.com>
* : Utz Bacher <utz.bacher@de.ibm.com>
* : David Woodhouse <dwmw2@infradead.org>
*
* inspired by drivers/char/hvc_console.c
* written by Anton Blanchard and Paul Mackerras
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <asm/irq.h>
#include <asm/rtas.h>
#include "hvc_console.h"
#define hvc_rtas_cookie 0x67781e15
struct hvc_struct *hvc_rtas_dev;
static int rtascons_put_char_token = RTAS_UNKNOWN_SERVICE;
static int rtascons_get_char_token = RTAS_UNKNOWN_SERVICE;
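/* RTAS service tokens for the console character calls, resolved by name
* from the device tree via rtas_token() at init time */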
static inline int hvc_rtas_write_console(uint32_t vtermno, const char *buf,
int count)
{
int i;
for (i = 0; i < count; i++) {
if (rtas_call(rtascons_put_char_token, 1, 1, NULL, buf[i]))
break;
}
return i;
}
static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
{
int i, c;
for (i = 0; i < count; i++) {
if (rtas_call(rtascons_get_char_token, 0, 2, &c))
break;
buf[i] = c;
}
return i;
}
static const struct hv_ops hvc_rtas_get_put_ops = {
.get_chars = hvc_rtas_read_console,
.put_chars = hvc_rtas_write_console,
};
static int __init hvc_rtas_init(void)
{
struct hvc_struct *hp;
if (rtascons_put_char_token == RTAS_UNKNOWN_SERVICE)
rtascons_put_char_token = rtas_token("put-term-char");
if (rtascons_put_char_token == RTAS_UNKNOWN_SERVICE)
return -EIO;
if (rtascons_get_char_token == RTAS_UNKNOWN_SERVICE)
rtascons_get_char_token = rtas_token("get-term-char");
if (rtascons_get_char_token == RTAS_UNKNOWN_SERVICE)
return -EIO;
BUG_ON(hvc_rtas_dev);
/* Allocate an hvc_struct for the console device we instantiated
* earlier. Save off hp so that we can return it on exit */
hp = hvc_alloc(hvc_rtas_cookie, NO_IRQ, &hvc_rtas_get_put_ops, 16);
if (IS_ERR(hp))
return PTR_ERR(hp);
hvc_rtas_dev = hp;
return 0;
}
module_init(hvc_rtas_init);
/* This will tear down the tty portion of the driver */
static void __exit hvc_rtas_exit(void)
{
/* Really the fun isn't over until the worker thread breaks down and
* the tty cleans up */
if (hvc_rtas_dev)
hvc_remove(hvc_rtas_dev);
}
module_exit(hvc_rtas_exit);
/* This will happen prior to module init. There is no tty at this time? */
static int __init hvc_rtas_console_init(void)
{
rtascons_put_char_token = rtas_token("put-term-char");
if (rtascons_put_char_token == RTAS_UNKNOWN_SERVICE)
return -EIO;
rtascons_get_char_token = rtas_token("get-term-char");
if (rtascons_get_char_token == RTAS_UNKNOWN_SERVICE)
return -EIO;
hvc_instantiate(hvc_rtas_cookie, 0, &hvc_rtas_get_put_ops);
add_preferred_console("hvc", 0, NULL);
return 0;
}
console_initcall(hvc_rtas_console_init);
|
gpl-2.0
|
ErcOne/kernel-3-4-projek-n7000
|
arch/arm/mach-omap2/voltage.c
|
4856
|
11497
|
/*
* OMAP3/OMAP4 Voltage Management Routines
*
* Author: Thara Gopinath <thara@ti.com>
*
* Copyright (C) 2007 Texas Instruments, Inc.
* Rajendra Nayak <rnayak@ti.com>
* Lesly A M <x0080970@ti.com>
*
* Copyright (C) 2008, 2011 Nokia Corporation
* Kalle Jokiniemi
* Paul Walmsley
*
* Copyright (C) 2010 Texas Instruments, Inc.
* Thara Gopinath <thara@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include "common.h"
#include "prm-regbits-34xx.h"
#include "prm-regbits-44xx.h"
#include "prm44xx.h"
#include "prcm44xx.h"
#include "prminst44xx.h"
#include "control.h"
#include "voltage.h"
#include "powerdomain.h"
#include "vc.h"
#include "vp.h"
static LIST_HEAD(voltdm_list);
/* Public functions */
/**
* voltdm_get_voltage() - Gets the current non-auto-compensated voltage
* @voltdm: pointer to the voltdm for which current voltage info is needed
*
* API to get the current non-auto-compensated voltage for a voltage domain.
* Returns 0 in case of error else returns the current voltage.
*/
unsigned long voltdm_get_voltage(struct voltagedomain *voltdm)
{
if (!voltdm || IS_ERR(voltdm)) {
pr_warning("%s: VDD specified does not exist!\n", __func__);
return 0;
}
return voltdm->nominal_volt;
}
/**
* voltdm_scale() - API to scale voltage of a particular voltage domain.
* @voltdm: pointer to the voltage domain which is to be scaled.
* @target_volt: The target voltage of the voltage domain
*
* This API should be called by the kernel to do the voltage scaling
* for a particular voltage domain during DVFS.
*/
int voltdm_scale(struct voltagedomain *voltdm,
unsigned long target_volt)
{
int ret;
if (!voltdm || IS_ERR(voltdm)) {
pr_warning("%s: VDD specified does not exist!\n", __func__);
return -EINVAL;
}
if (!voltdm->scale) {
pr_err("%s: No voltage scale API registered for vdd_%s\n",
__func__, voltdm->name);
return -ENODATA;
}
ret = voltdm->scale(voltdm, target_volt);
if (!ret)
voltdm->nominal_volt = target_volt;
return ret;
}
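/*
 * Illustrative call sequence (the domain name and target value here are
 * hypothetical; target_volt is expressed in microvolts):
 *
 *	struct voltagedomain *voltdm = voltdm_lookup("mpu");
 *	if (voltdm)
 *		voltdm_scale(voltdm, 1200000);
 */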
/**
* voltdm_reset() - Resets the voltage of a particular voltage domain
* to that of the current OPP.
* @voltdm: pointer to the voltage domain whose voltage is to be reset.
*
* This API finds out the correct voltage the voltage domain is supposed
* to be at and resets the voltage to that level. Should be used especially
* while disabling any voltage compensation modules.
*/
void voltdm_reset(struct voltagedomain *voltdm)
{
unsigned long target_volt;
if (!voltdm || IS_ERR(voltdm)) {
pr_warning("%s: VDD specified does not exist!\n", __func__);
return;
}
target_volt = voltdm_get_voltage(voltdm);
if (!target_volt) {
pr_err("%s: unable to find current voltage for vdd_%s\n",
__func__, voltdm->name);
return;
}
voltdm_scale(voltdm, target_volt);
}
/**
* omap_voltage_get_volttable() - API to get the voltage table associated with a
* particular voltage domain.
* @voltdm: pointer to the VDD for which the voltage table is required
* @volt_data: the voltage table for the particular vdd which is to be
* populated by this API
*
* This API populates the voltage table associated with a VDD into the
* passed parameter pointer. The function itself returns nothing; the
* result is read back through @volt_data.
*
*/
void omap_voltage_get_volttable(struct voltagedomain *voltdm,
struct omap_volt_data **volt_data)
{
if (!voltdm || IS_ERR(voltdm)) {
pr_warning("%s: VDD specified does not exist!\n", __func__);
return;
}
*volt_data = voltdm->volt_data;
}
/**
* omap_voltage_get_voltdata() - API to get the voltage table entry for a
* particular voltage
* @voltdm: pointer to the VDD whose voltage table has to be searched
* @volt: the voltage to be searched in the voltage table
*
* This API searches through the voltage table for the required voltage
* domain and tries to find a matching entry for the passed voltage volt.
* If a matching entry is found volt_data is populated with that entry.
* This API searches only through the non-compensated voltages in the
* voltage table.
* Returns pointer to the voltage table entry corresponding to volt on
* success. Returns -ENODATA if no voltage table exists for the passed voltage
* domain or if there is no matching entry.
*/
struct omap_volt_data *omap_voltage_get_voltdata(struct voltagedomain *voltdm,
unsigned long volt)
{
int i;
if (!voltdm || IS_ERR(voltdm)) {
pr_warning("%s: VDD specified does not exist!\n", __func__);
return ERR_PTR(-EINVAL);
}
if (!voltdm->volt_data) {
pr_warning("%s: voltage table does not exist for vdd_%s\n",
__func__, voltdm->name);
return ERR_PTR(-ENODATA);
}
for (i = 0; voltdm->volt_data[i].volt_nominal != 0; i++) {
if (voltdm->volt_data[i].volt_nominal == volt)
return &voltdm->volt_data[i];
}
pr_notice("%s: Unable to match the current voltage with the voltage "
"table for vdd_%s\n", __func__, voltdm->name);
return ERR_PTR(-ENODATA);
}
/**
* omap_voltage_register_pmic() - API to register PMIC specific data
* @voltdm: pointer to the VDD for which the PMIC specific data is
* to be registered
* @pmic: the structure containing pmic info
*
* This API is to be called by the SOC/PMIC file to specify the
* pmic specific info as present in omap_voltdm_pmic structure.
*/
int omap_voltage_register_pmic(struct voltagedomain *voltdm,
struct omap_voltdm_pmic *pmic)
{
if (!voltdm || IS_ERR(voltdm)) {
pr_warning("%s: VDD specified does not exist!\n", __func__);
return -EINVAL;
}
voltdm->pmic = pmic;
return 0;
}
/**
* omap_change_voltscale_method() - API to change the voltage scaling method.
* @voltdm: pointer to the VDD whose voltage scaling method
* has to be changed.
* @voltscale_method: the method to be used for voltage scaling.
*
* This API can be used by the board files to change the method of voltage
* scaling between vpforceupdate and vcbypass. The parameter values are
* defined in voltage.h
*/
void omap_change_voltscale_method(struct voltagedomain *voltdm,
int voltscale_method)
{
if (!voltdm || IS_ERR(voltdm)) {
pr_warning("%s: VDD specified does not exist!\n", __func__);
return;
}
switch (voltscale_method) {
case VOLTSCALE_VPFORCEUPDATE:
voltdm->scale = omap_vp_forceupdate_scale;
return;
case VOLTSCALE_VCBYPASS:
voltdm->scale = omap_vc_bypass_scale;
return;
default:
pr_warning("%s: Trying to change the method of voltage scaling"
"to an unsupported one!\n", __func__);
}
}
/**
* omap_voltage_late_init() - Init the various voltage parameters
*
* This API is to be called in the later stages of the
* system boot to init the voltage controller and
* voltage processors.
*/
int __init omap_voltage_late_init(void)
{
struct voltagedomain *voltdm;
if (list_empty(&voltdm_list)) {
pr_err("%s: Voltage driver support not added\n",
__func__);
return -EINVAL;
}
list_for_each_entry(voltdm, &voltdm_list, node) {
struct clk *sys_ck;
if (!voltdm->scalable)
continue;
sys_ck = clk_get(NULL, voltdm->sys_clk.name);
if (IS_ERR(sys_ck)) {
pr_warning("%s: Could not get sys clk.\n", __func__);
return -EINVAL;
}
voltdm->sys_clk.rate = clk_get_rate(sys_ck);
WARN_ON(!voltdm->sys_clk.rate);
clk_put(sys_ck);
if (voltdm->vc) {
voltdm->scale = omap_vc_bypass_scale;
omap_vc_init_channel(voltdm);
}
if (voltdm->vp) {
voltdm->scale = omap_vp_forceupdate_scale;
omap_vp_init(voltdm);
}
}
return 0;
}
static struct voltagedomain *_voltdm_lookup(const char *name)
{
struct voltagedomain *voltdm, *temp_voltdm;
voltdm = NULL;
list_for_each_entry(temp_voltdm, &voltdm_list, node) {
if (!strcmp(name, temp_voltdm->name)) {
voltdm = temp_voltdm;
break;
}
}
return voltdm;
}
/**
* voltdm_add_pwrdm - add a powerdomain to a voltagedomain
* @voltdm: struct voltagedomain * to add the powerdomain to
* @pwrdm: struct powerdomain * to associate with a voltagedomain
*
* Associate the powerdomain @pwrdm with a voltagedomain @voltdm. This
* enables the use of voltdm_for_each_pwrdm(). Returns -EINVAL if
* presented with invalid pointers, or 0 upon success.
*/
int voltdm_add_pwrdm(struct voltagedomain *voltdm, struct powerdomain *pwrdm)
{
if (!voltdm || !pwrdm)
return -EINVAL;
pr_debug("voltagedomain: associating powerdomain %s with voltagedomain "
"%s\n", pwrdm->name, voltdm->name);
list_add(&pwrdm->voltdm_node, &voltdm->pwrdm_list);
return 0;
}
/**
* voltdm_for_each_pwrdm - call function for each pwrdm in a voltdm
* @voltdm: struct voltagedomain * to iterate over
* @fn: callback function *
*
* Call the supplied function @fn for each powerdomain in the
* voltagedomain @voltdm. Returns -EINVAL if presented with invalid
* pointers; or passes along the last return value of the callback
* function, which should be 0 for success or anything else to
* indicate failure.
*/
int voltdm_for_each_pwrdm(struct voltagedomain *voltdm,
int (*fn)(struct voltagedomain *voltdm,
struct powerdomain *pwrdm))
{
struct powerdomain *pwrdm;
int ret = 0;
if (!fn)
return -EINVAL;
list_for_each_entry(pwrdm, &voltdm->pwrdm_list, voltdm_node)
ret = (*fn)(voltdm, pwrdm);
return ret;
}
/**
* voltdm_for_each - call function on each registered voltagedomain
* @fn: callback function *
*
* Call the supplied function @fn for each registered voltagedomain.
* The callback function @fn can return anything but 0 to bail out
* early from the iterator. Returns the last return value of the
* callback function, which should be 0 for success or anything else
* to indicate failure; or -EINVAL if the function pointer is null.
*/
int voltdm_for_each(int (*fn)(struct voltagedomain *voltdm, void *user),
void *user)
{
struct voltagedomain *temp_voltdm;
int ret = 0;
if (!fn)
return -EINVAL;
list_for_each_entry(temp_voltdm, &voltdm_list, node) {
ret = (*fn)(temp_voltdm, user);
if (ret)
break;
}
return ret;
}
static int _voltdm_register(struct voltagedomain *voltdm)
{
if (!voltdm || !voltdm->name)
return -EINVAL;
INIT_LIST_HEAD(&voltdm->pwrdm_list);
list_add(&voltdm->node, &voltdm_list);
pr_debug("voltagedomain: registered %s\n", voltdm->name);
return 0;
}
/**
* voltdm_lookup - look up a voltagedomain by name, return a pointer
* @name: name of voltagedomain
*
* Find a registered voltagedomain by its name @name. Returns a pointer
* to the struct voltagedomain if found, or NULL otherwise.
*/
struct voltagedomain *voltdm_lookup(const char *name)
{
struct voltagedomain *voltdm;
if (!name)
return NULL;
voltdm = _voltdm_lookup(name);
return voltdm;
}
/**
* voltdm_init - set up the voltagedomain layer
* @voltdms: array of struct voltagedomain pointers to register
*
* Loop through the array of voltagedomains @voltdms, registering all
* that are available on the current CPU. If @voltdms is supplied
* and not null, all of the referenced voltagedomains will be
* registered. No return value.
*/
void voltdm_init(struct voltagedomain **voltdms)
{
struct voltagedomain **v;
if (voltdms) {
for (v = voltdms; *v; v++)
_voltdm_register(*v);
}
}
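/*
 * Typical boot-time flow (illustrative; the array name is hypothetical):
 * SoC init code registers its domains early with
 * voltdm_init(omapxxxx_voltagedomains), board/PMIC code calls
 * omap_voltage_register_pmic() for each VDD, and
 * omap_voltage_late_init() then wires up the scaling method for every
 * scalable domain.
 */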
|
gpl-2.0
|
DJSteve/g800h_custom_kernel
|
drivers/hwmon/lm63.c
|
4856
|
38142
|
/*
* lm63.c - driver for the National Semiconductor LM63 temperature sensor
* with integrated fan control
* Copyright (C) 2004-2008 Jean Delvare <khali@linux-fr.org>
* Based on the lm90 driver.
*
* The LM63 is a sensor chip made by National Semiconductor. It measures
* two temperatures (its own and one external one) and the speed of one
* fan, whose speed it can additionally control. Complete datasheet can be
* obtained from National's website at:
* http://www.national.com/pf/LM/LM63.html
*
* The LM63 is basically an LM86 with fan speed monitoring and control
* capabilities added. It misses some of the LM86 features though:
* - No low limit for local temperature.
* - No critical limit for local temperature.
* - Critical limit for remote temperature can be changed only once. We
* will consider that the critical limit is read-only.
*
* The datasheet isn't very clear about what the tachometer reading is.
* I got an explanation from National Semiconductor though. The two lower
* bits of the read value have to be masked out. The value is still 16 bit
* in width.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon-sysfs.h>
#include <linux/hwmon.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
#include <linux/types.h>
/*
* Addresses to scan
* Address is fully defined internally and cannot be changed except for
* LM64 which has one pin dedicated to address selection.
* LM63 and LM96163 have address 0x4c.
* LM64 can have address 0x18 or 0x4e.
*/
static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
/*
* The LM63 registers
*/
#define LM63_REG_CONFIG1 0x03
#define LM63_REG_CONVRATE 0x04
#define LM63_REG_CONFIG2 0xBF
#define LM63_REG_CONFIG_FAN 0x4A
#define LM63_REG_TACH_COUNT_MSB 0x47
#define LM63_REG_TACH_COUNT_LSB 0x46
#define LM63_REG_TACH_LIMIT_MSB 0x49
#define LM63_REG_TACH_LIMIT_LSB 0x48
#define LM63_REG_PWM_VALUE 0x4C
#define LM63_REG_PWM_FREQ 0x4D
#define LM63_REG_LUT_TEMP_HYST 0x4F
#define LM63_REG_LUT_TEMP(nr) (0x50 + 2 * (nr))
#define LM63_REG_LUT_PWM(nr) (0x51 + 2 * (nr))
#define LM63_REG_LOCAL_TEMP 0x00
#define LM63_REG_LOCAL_HIGH 0x05
#define LM63_REG_REMOTE_TEMP_MSB 0x01
#define LM63_REG_REMOTE_TEMP_LSB 0x10
#define LM63_REG_REMOTE_OFFSET_MSB 0x11
#define LM63_REG_REMOTE_OFFSET_LSB 0x12
#define LM63_REG_REMOTE_HIGH_MSB 0x07
#define LM63_REG_REMOTE_HIGH_LSB 0x13
#define LM63_REG_REMOTE_LOW_MSB 0x08
#define LM63_REG_REMOTE_LOW_LSB 0x14
#define LM63_REG_REMOTE_TCRIT 0x19
#define LM63_REG_REMOTE_TCRIT_HYST 0x21
#define LM63_REG_ALERT_STATUS 0x02
#define LM63_REG_ALERT_MASK 0x16
#define LM63_REG_MAN_ID 0xFE
#define LM63_REG_CHIP_ID 0xFF
#define LM96163_REG_TRUTHERM 0x30
#define LM96163_REG_REMOTE_TEMP_U_MSB 0x31
#define LM96163_REG_REMOTE_TEMP_U_LSB 0x32
#define LM96163_REG_CONFIG_ENHANCED 0x45
#define LM63_MAX_CONVRATE 9
#define LM63_MAX_CONVRATE_HZ 32
#define LM96163_MAX_CONVRATE_HZ 26
/*
* Conversions and various macros
* For tachometer counts, the LM63 uses 16-bit values.
* For local temperature and high limit, remote critical limit and hysteresis
* value, it uses signed 8-bit values with LSB = 1 degree Celsius.
* For remote temperature, low and high limits, it uses signed 11-bit values
* with LSB = 0.125 degree Celsius, left-justified in 16-bit registers.
* For LM64 the actual remote diode temperature is 16 degree Celsius higher
* than the register reading. Remote temperature setpoints have to be
* adapted accordingly.
*/
#define FAN_FROM_REG(reg) ((reg) == 0xFFFC || (reg) == 0 ? 0 : \
5400000 / (reg))
#define FAN_TO_REG(val) ((val) <= 82 ? 0xFFFC : \
(5400000 / (val)) & 0xFFFC)
#define TEMP8_FROM_REG(reg) ((reg) * 1000)
#define TEMP8_TO_REG(val) ((val) <= -128000 ? -128 : \
(val) >= 127000 ? 127 : \
(val) < 0 ? ((val) - 500) / 1000 : \
((val) + 500) / 1000)
#define TEMP8U_TO_REG(val) ((val) <= 0 ? 0 : \
(val) >= 255000 ? 255 : \
((val) + 500) / 1000)
#define TEMP11_FROM_REG(reg) ((reg) / 32 * 125)
#define TEMP11_TO_REG(val) ((val) <= -128000 ? 0x8000 : \
(val) >= 127875 ? 0x7FE0 : \
(val) < 0 ? ((val) - 62) / 125 * 32 : \
((val) + 62) / 125 * 32)
#define TEMP11U_TO_REG(val) ((val) <= 0 ? 0 : \
(val) >= 255875 ? 0xFFE0 : \
((val) + 62) / 125 * 32)
#define HYST_TO_REG(val) ((val) <= 0 ? 0 : \
(val) >= 127000 ? 127 : \
((val) + 500) / 1000)
#define UPDATE_INTERVAL(max, rate) \
((1000 << (LM63_MAX_CONVRATE - (rate))) / (max))
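/*
 * Illustrative round trips derived from the macros above (not datasheet
 * values): a tach count of 5400 gives 5400000/5400 = 1000 RPM, and
 * FAN_TO_REG(1000) = 5400 again; TEMP8_TO_REG(26000) = 26 and
 * TEMP8_FROM_REG(26) = 26000 millidegrees; TEMP11_TO_REG(25125) =
 * 6432 (0x1920) and TEMP11_FROM_REG(6432) = 25125. With the LM63's
 * 32 Hz maximum conversion rate, UPDATE_INTERVAL(32, 9) = 31 ms and
 * UPDATE_INTERVAL(32, 4) = 1000 ms.
 */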
enum chips { lm63, lm64, lm96163 };
/*
* Client data (each client gets its own)
*/
struct lm63_data {
struct device *hwmon_dev;
struct mutex update_lock;
char valid; /* zero until following fields are valid */
char lut_valid; /* zero until lut fields are valid */
unsigned long last_updated; /* in jiffies */
unsigned long lut_last_updated; /* in jiffies */
enum chips kind;
int temp2_offset;
int update_interval; /* in milliseconds */
int max_convrate_hz;
int lut_size; /* 8 or 12 */
/* registers values */
u8 config, config_fan;
u16 fan[2]; /* 0: input
1: low limit */
u8 pwm1_freq;
u8 pwm1[13]; /* 0: current output
1-12: lookup table */
s8 temp8[15]; /* 0: local input
1: local high limit
2: remote critical limit
3-14: lookup table */
s16 temp11[4]; /* 0: remote input
1: remote low limit
2: remote high limit
3: remote offset */
u16 temp11u; /* remote input (unsigned) */
u8 temp2_crit_hyst;
u8 lut_temp_hyst;
u8 alarms;
bool pwm_highres;
bool lut_temp_highres;
bool remote_unsigned; /* true if unsigned remote upper limits */
bool trutherm;
};
static inline int temp8_from_reg(struct lm63_data *data, int nr)
{
if (data->remote_unsigned)
return TEMP8_FROM_REG((u8)data->temp8[nr]);
return TEMP8_FROM_REG(data->temp8[nr]);
}
static inline int lut_temp_from_reg(struct lm63_data *data, int nr)
{
return data->temp8[nr] * (data->lut_temp_highres ? 500 : 1000);
}
static inline int lut_temp_to_reg(struct lm63_data *data, long val)
{
val -= data->temp2_offset;
if (data->lut_temp_highres)
return DIV_ROUND_CLOSEST(SENSORS_LIMIT(val, 0, 127500), 500);
else
return DIV_ROUND_CLOSEST(SENSORS_LIMIT(val, 0, 127000), 1000);
}
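/*
 * Worked example (derived from the helper above, assuming
 * temp2_offset == 0): 60000 millidegrees maps to register value 120 in
 * high-resolution mode (0.5 degree LSB) and to 60 in standard mode
 * (1 degree LSB).
 */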
/*
* Update the lookup table register cache.
* client->update_lock must be held when calling this function.
*/
static void lm63_update_lut(struct i2c_client *client)
{
struct lm63_data *data = i2c_get_clientdata(client);
int i;
if (time_after(jiffies, data->lut_last_updated + 5 * HZ) ||
!data->lut_valid) {
for (i = 0; i < data->lut_size; i++) {
data->pwm1[1 + i] = i2c_smbus_read_byte_data(client,
LM63_REG_LUT_PWM(i));
data->temp8[3 + i] = i2c_smbus_read_byte_data(client,
LM63_REG_LUT_TEMP(i));
}
data->lut_temp_hyst = i2c_smbus_read_byte_data(client,
LM63_REG_LUT_TEMP_HYST);
data->lut_last_updated = jiffies;
data->lut_valid = 1;
}
}
static struct lm63_data *lm63_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm63_data *data = i2c_get_clientdata(client);
unsigned long next_update;
mutex_lock(&data->update_lock);
next_update = data->last_updated
+ msecs_to_jiffies(data->update_interval) + 1;
if (time_after(jiffies, next_update) || !data->valid) {
if (data->config & 0x04) { /* tachometer enabled */
/* order matters for fan1_input */
data->fan[0] = i2c_smbus_read_byte_data(client,
LM63_REG_TACH_COUNT_LSB) & 0xFC;
data->fan[0] |= i2c_smbus_read_byte_data(client,
LM63_REG_TACH_COUNT_MSB) << 8;
data->fan[1] = (i2c_smbus_read_byte_data(client,
LM63_REG_TACH_LIMIT_LSB) & 0xFC)
| (i2c_smbus_read_byte_data(client,
LM63_REG_TACH_LIMIT_MSB) << 8);
}
data->pwm1_freq = i2c_smbus_read_byte_data(client,
LM63_REG_PWM_FREQ);
if (data->pwm1_freq == 0)
data->pwm1_freq = 1;
data->pwm1[0] = i2c_smbus_read_byte_data(client,
LM63_REG_PWM_VALUE);
data->temp8[0] = i2c_smbus_read_byte_data(client,
LM63_REG_LOCAL_TEMP);
data->temp8[1] = i2c_smbus_read_byte_data(client,
LM63_REG_LOCAL_HIGH);
/* order matters for temp2_input */
data->temp11[0] = i2c_smbus_read_byte_data(client,
LM63_REG_REMOTE_TEMP_MSB) << 8;
data->temp11[0] |= i2c_smbus_read_byte_data(client,
LM63_REG_REMOTE_TEMP_LSB);
data->temp11[1] = (i2c_smbus_read_byte_data(client,
LM63_REG_REMOTE_LOW_MSB) << 8)
| i2c_smbus_read_byte_data(client,
LM63_REG_REMOTE_LOW_LSB);
data->temp11[2] = (i2c_smbus_read_byte_data(client,
LM63_REG_REMOTE_HIGH_MSB) << 8)
| i2c_smbus_read_byte_data(client,
LM63_REG_REMOTE_HIGH_LSB);
data->temp11[3] = (i2c_smbus_read_byte_data(client,
LM63_REG_REMOTE_OFFSET_MSB) << 8)
| i2c_smbus_read_byte_data(client,
LM63_REG_REMOTE_OFFSET_LSB);
if (data->kind == lm96163)
data->temp11u = (i2c_smbus_read_byte_data(client,
LM96163_REG_REMOTE_TEMP_U_MSB) << 8)
| i2c_smbus_read_byte_data(client,
LM96163_REG_REMOTE_TEMP_U_LSB);
data->temp8[2] = i2c_smbus_read_byte_data(client,
LM63_REG_REMOTE_TCRIT);
data->temp2_crit_hyst = i2c_smbus_read_byte_data(client,
LM63_REG_REMOTE_TCRIT_HYST);
data->alarms = i2c_smbus_read_byte_data(client,
LM63_REG_ALERT_STATUS) & 0x7F;
data->last_updated = jiffies;
data->valid = 1;
}
lm63_update_lut(client);
mutex_unlock(&data->update_lock);
return data;
}
/*
* Trip points in the lookup table should be in ascending order for both
* temperatures and PWM output values.
*/
static int lm63_lut_looks_bad(struct i2c_client *client)
{
struct lm63_data *data = i2c_get_clientdata(client);
int i;
mutex_lock(&data->update_lock);
lm63_update_lut(client);
for (i = 1; i < data->lut_size; i++) {
if (data->pwm1[1 + i - 1] > data->pwm1[1 + i]
|| data->temp8[3 + i - 1] > data->temp8[3 + i]) {
dev_warn(&client->dev,
"Lookup table doesn't look sane (check entries %d and %d)\n",
i, i + 1);
break;
}
}
mutex_unlock(&data->update_lock);
return i == data->lut_size ? 0 : 1;
}
/*
* Sysfs callback functions and files
*/
static ssize_t show_fan(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm63_data *data = lm63_update_device(dev);
return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[attr->index]));
}
static ssize_t set_fan(struct device *dev, struct device_attribute *dummy,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm63_data *data = i2c_get_clientdata(client);
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
data->fan[1] = FAN_TO_REG(val);
i2c_smbus_write_byte_data(client, LM63_REG_TACH_LIMIT_LSB,
data->fan[1] & 0xFF);
i2c_smbus_write_byte_data(client, LM63_REG_TACH_LIMIT_MSB,
data->fan[1] >> 8);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_pwm1(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm63_data *data = lm63_update_device(dev);
int nr = attr->index;
int pwm;
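/*
 * In low-resolution mode the register holds 0..2*pwm1_freq; rescale
 * it (with rounding) to the 0..255 range user-space expects.
 */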
if (data->pwm_highres)
pwm = data->pwm1[nr];
else
pwm = data->pwm1[nr] >= 2 * data->pwm1_freq ?
255 : (data->pwm1[nr] * 255 + data->pwm1_freq) /
(2 * data->pwm1_freq);
return sprintf(buf, "%d\n", pwm);
}
static ssize_t set_pwm1(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct lm63_data *data = i2c_get_clientdata(client);
int nr = attr->index;
unsigned long val;
int err;
u8 reg;
if (!(data->config_fan & 0x20)) /* register is read-only */
return -EPERM;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
reg = nr ? LM63_REG_LUT_PWM(nr - 1) : LM63_REG_PWM_VALUE;
val = SENSORS_LIMIT(val, 0, 255);
mutex_lock(&data->update_lock);
data->pwm1[nr] = data->pwm_highres ? val :
(val * data->pwm1_freq * 2 + 127) / 255;
i2c_smbus_write_byte_data(client, reg, data->pwm1[nr]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_pwm1_enable(struct device *dev,
struct device_attribute *dummy, char *buf)
{
struct lm63_data *data = lm63_update_device(dev);
return sprintf(buf, "%d\n", data->config_fan & 0x20 ? 1 : 2);
}
static ssize_t set_pwm1_enable(struct device *dev,
struct device_attribute *dummy,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm63_data *data = i2c_get_clientdata(client);
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
if (val < 1 || val > 2)
return -EINVAL;
/*
* Only let the user switch to automatic mode if the lookup table
* looks sane.
*/
if (val == 2 && lm63_lut_looks_bad(client))
return -EPERM;
mutex_lock(&data->update_lock);
data->config_fan = i2c_smbus_read_byte_data(client,
LM63_REG_CONFIG_FAN);
if (val == 1)
data->config_fan |= 0x20;
else
data->config_fan &= ~0x20;
i2c_smbus_write_byte_data(client, LM63_REG_CONFIG_FAN,
data->config_fan);
mutex_unlock(&data->update_lock);
return count;
}
/*
* There are 8bit registers for both local(temp1) and remote(temp2) sensor.
* For remote sensor registers temp2_offset has to be considered,
* for local sensor it must not.
* So we need separate 8bit accessors for local and remote sensor.
*/
static ssize_t show_local_temp8(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm63_data *data = lm63_update_device(dev);
return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index]));
}
static ssize_t show_remote_temp8(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm63_data *data = lm63_update_device(dev);
return sprintf(buf, "%d\n", temp8_from_reg(data, attr->index)
+ data->temp2_offset);
}
static ssize_t show_lut_temp(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm63_data *data = lm63_update_device(dev);
return sprintf(buf, "%d\n", lut_temp_from_reg(data, attr->index)
+ data->temp2_offset);
}
static ssize_t set_temp8(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct lm63_data *data = i2c_get_clientdata(client);
int nr = attr->index;
long val;
int err;
int temp;
u8 reg;
err = kstrtol(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
switch (nr) {
case 2:
reg = LM63_REG_REMOTE_TCRIT;
if (data->remote_unsigned)
temp = TEMP8U_TO_REG(val - data->temp2_offset);
else
temp = TEMP8_TO_REG(val - data->temp2_offset);
break;
case 1:
reg = LM63_REG_LOCAL_HIGH;
temp = TEMP8_TO_REG(val);
break;
default: /* lookup table */
reg = LM63_REG_LUT_TEMP(nr - 3);
temp = lut_temp_to_reg(data, val);
}
data->temp8[nr] = temp;
i2c_smbus_write_byte_data(client, reg, temp);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm63_data *data = lm63_update_device(dev);
int nr = attr->index;
int temp;
if (!nr) {
/*
* Use unsigned temperature unless its value is zero.
* If it is zero, use signed temperature.
*/
if (data->temp11u)
temp = TEMP11_FROM_REG(data->temp11u);
else
temp = TEMP11_FROM_REG(data->temp11[nr]);
} else {
if (data->remote_unsigned && nr == 2)
temp = TEMP11_FROM_REG((u16)data->temp11[nr]);
else
temp = TEMP11_FROM_REG(data->temp11[nr]);
}
return sprintf(buf, "%d\n", temp + data->temp2_offset);
}
static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
static const u8 reg[6] = {
LM63_REG_REMOTE_LOW_MSB,
LM63_REG_REMOTE_LOW_LSB,
LM63_REG_REMOTE_HIGH_MSB,
LM63_REG_REMOTE_HIGH_LSB,
LM63_REG_REMOTE_OFFSET_MSB,
LM63_REG_REMOTE_OFFSET_LSB,
};
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct lm63_data *data = i2c_get_clientdata(client);
long val;
int err;
int nr = attr->index;
err = kstrtol(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
if (data->remote_unsigned && nr == 2)
data->temp11[nr] = TEMP11U_TO_REG(val - data->temp2_offset);
else
data->temp11[nr] = TEMP11_TO_REG(val - data->temp2_offset);
i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2],
data->temp11[nr] >> 8);
i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2 + 1],
data->temp11[nr] & 0xff);
mutex_unlock(&data->update_lock);
return count;
}
/*
* Hysteresis register holds a relative value, while we want to present
* an absolute to user-space
*/
static ssize_t show_temp2_crit_hyst(struct device *dev,
struct device_attribute *dummy, char *buf)
{
struct lm63_data *data = lm63_update_device(dev);
return sprintf(buf, "%d\n", temp8_from_reg(data, 2)
+ data->temp2_offset
- TEMP8_FROM_REG(data->temp2_crit_hyst));
}
static ssize_t show_lut_temp_hyst(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm63_data *data = lm63_update_device(dev);
return sprintf(buf, "%d\n", lut_temp_from_reg(data, attr->index)
+ data->temp2_offset
- TEMP8_FROM_REG(data->lut_temp_hyst));
}
/*
* And now the other way around, user-space provides an absolute
* hysteresis value and we have to store a relative one
*/
static ssize_t set_temp2_crit_hyst(struct device *dev,
struct device_attribute *dummy,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm63_data *data = i2c_get_clientdata(client);
long val;
int err;
long hyst;
err = kstrtol(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
hyst = temp8_from_reg(data, 2) + data->temp2_offset - val;
i2c_smbus_write_byte_data(client, LM63_REG_REMOTE_TCRIT_HYST,
HYST_TO_REG(hyst));
mutex_unlock(&data->update_lock);
return count;
}
/*
* Set conversion rate.
* client->update_lock must be held when calling this function.
*/
static void lm63_set_convrate(struct i2c_client *client, struct lm63_data *data,
unsigned int interval)
{
int i;
unsigned int update_interval;
/* Shift calculations to avoid rounding errors */
interval <<= 6;
/* find the nearest update rate */
update_interval = (1 << (LM63_MAX_CONVRATE + 6)) * 1000
/ data->max_convrate_hz;
for (i = 0; i < LM63_MAX_CONVRATE; i++, update_interval >>= 1)
if (interval >= update_interval * 3 / 4)
break;
i2c_smbus_write_byte_data(client, LM63_REG_CONVRATE, i);
data->update_interval = UPDATE_INTERVAL(data->max_convrate_hz, i);
}
static ssize_t show_update_interval(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lm63_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", data->update_interval);
}
static ssize_t set_update_interval(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm63_data *data = i2c_get_clientdata(client);
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
lm63_set_convrate(client, data, SENSORS_LIMIT(val, 0, 100000));
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_type(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm63_data *data = i2c_get_clientdata(client);
return sprintf(buf, data->trutherm ? "1\n" : "2\n");
}
static ssize_t set_type(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm63_data *data = i2c_get_clientdata(client);
unsigned long val;
int ret;
u8 reg;
ret = kstrtoul(buf, 10, &val);
if (ret < 0)
return ret;
if (val != 1 && val != 2)
return -EINVAL;
mutex_lock(&data->update_lock);
data->trutherm = val == 1;
reg = i2c_smbus_read_byte_data(client, LM96163_REG_TRUTHERM) & ~0x02;
i2c_smbus_write_byte_data(client, LM96163_REG_TRUTHERM,
reg | (data->trutherm ? 0x02 : 0x00));
data->valid = 0;
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_alarms(struct device *dev, struct device_attribute *dummy,
char *buf)
{
struct lm63_data *data = lm63_update_device(dev);
return sprintf(buf, "%u\n", data->alarms);
}
static ssize_t show_alarm(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm63_data *data = lm63_update_device(dev);
int bitnr = attr->index;
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan,
set_fan, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm1, set_pwm1, 0);
static DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
show_pwm1_enable, set_pwm1_enable);
static SENSOR_DEVICE_ATTR(pwm1_auto_point1_pwm, S_IWUSR | S_IRUGO,
show_pwm1, set_pwm1, 1);
static SENSOR_DEVICE_ATTR(pwm1_auto_point1_temp, S_IWUSR | S_IRUGO,
show_lut_temp, set_temp8, 3);
static SENSOR_DEVICE_ATTR(pwm1_auto_point1_temp_hyst, S_IRUGO,
show_lut_temp_hyst, NULL, 3);
static SENSOR_DEVICE_ATTR(pwm1_auto_point2_pwm, S_IWUSR | S_IRUGO,
show_pwm1, set_pwm1, 2);
static SENSOR_DEVICE_ATTR(pwm1_auto_point2_temp, S_IWUSR | S_IRUGO,
show_lut_temp, set_temp8, 4);
static SENSOR_DEVICE_ATTR(pwm1_auto_point2_temp_hyst, S_IRUGO,
show_lut_temp_hyst, NULL, 4);
static SENSOR_DEVICE_ATTR(pwm1_auto_point3_pwm, S_IWUSR | S_IRUGO,
show_pwm1, set_pwm1, 3);
static SENSOR_DEVICE_ATTR(pwm1_auto_point3_temp, S_IWUSR | S_IRUGO,
show_lut_temp, set_temp8, 5);
static SENSOR_DEVICE_ATTR(pwm1_auto_point3_temp_hyst, S_IRUGO,
show_lut_temp_hyst, NULL, 5);
static SENSOR_DEVICE_ATTR(pwm1_auto_point4_pwm, S_IWUSR | S_IRUGO,
show_pwm1, set_pwm1, 4);
static SENSOR_DEVICE_ATTR(pwm1_auto_point4_temp, S_IWUSR | S_IRUGO,
show_lut_temp, set_temp8, 6);
static SENSOR_DEVICE_ATTR(pwm1_auto_point4_temp_hyst, S_IRUGO,
show_lut_temp_hyst, NULL, 6);
static SENSOR_DEVICE_ATTR(pwm1_auto_point5_pwm, S_IWUSR | S_IRUGO,
show_pwm1, set_pwm1, 5);
static SENSOR_DEVICE_ATTR(pwm1_auto_point5_temp, S_IWUSR | S_IRUGO,
show_lut_temp, set_temp8, 7);
static SENSOR_DEVICE_ATTR(pwm1_auto_point5_temp_hyst, S_IRUGO,
show_lut_temp_hyst, NULL, 7);
static SENSOR_DEVICE_ATTR(pwm1_auto_point6_pwm, S_IWUSR | S_IRUGO,
show_pwm1, set_pwm1, 6);
static SENSOR_DEVICE_ATTR(pwm1_auto_point6_temp, S_IWUSR | S_IRUGO,
show_lut_temp, set_temp8, 8);
static SENSOR_DEVICE_ATTR(pwm1_auto_point6_temp_hyst, S_IRUGO,
show_lut_temp_hyst, NULL, 8);
static SENSOR_DEVICE_ATTR(pwm1_auto_point7_pwm, S_IWUSR | S_IRUGO,
show_pwm1, set_pwm1, 7);
static SENSOR_DEVICE_ATTR(pwm1_auto_point7_temp, S_IWUSR | S_IRUGO,
show_lut_temp, set_temp8, 9);
static SENSOR_DEVICE_ATTR(pwm1_auto_point7_temp_hyst, S_IRUGO,
show_lut_temp_hyst, NULL, 9);
static SENSOR_DEVICE_ATTR(pwm1_auto_point8_pwm, S_IWUSR | S_IRUGO,
show_pwm1, set_pwm1, 8);
static SENSOR_DEVICE_ATTR(pwm1_auto_point8_temp, S_IWUSR | S_IRUGO,
show_lut_temp, set_temp8, 10);
static SENSOR_DEVICE_ATTR(pwm1_auto_point8_temp_hyst, S_IRUGO,
show_lut_temp_hyst, NULL, 10);
static SENSOR_DEVICE_ATTR(pwm1_auto_point9_pwm, S_IWUSR | S_IRUGO,
show_pwm1, set_pwm1, 9);
static SENSOR_DEVICE_ATTR(pwm1_auto_point9_temp, S_IWUSR | S_IRUGO,
show_lut_temp, set_temp8, 11);
static SENSOR_DEVICE_ATTR(pwm1_auto_point9_temp_hyst, S_IRUGO,
show_lut_temp_hyst, NULL, 11);
static SENSOR_DEVICE_ATTR(pwm1_auto_point10_pwm, S_IWUSR | S_IRUGO,
show_pwm1, set_pwm1, 10);
static SENSOR_DEVICE_ATTR(pwm1_auto_point10_temp, S_IWUSR | S_IRUGO,
show_lut_temp, set_temp8, 12);
static SENSOR_DEVICE_ATTR(pwm1_auto_point10_temp_hyst, S_IRUGO,
show_lut_temp_hyst, NULL, 12);
static SENSOR_DEVICE_ATTR(pwm1_auto_point11_pwm, S_IWUSR | S_IRUGO,
show_pwm1, set_pwm1, 11);
static SENSOR_DEVICE_ATTR(pwm1_auto_point11_temp, S_IWUSR | S_IRUGO,
show_lut_temp, set_temp8, 13);
static SENSOR_DEVICE_ATTR(pwm1_auto_point11_temp_hyst, S_IRUGO,
show_lut_temp_hyst, NULL, 13);
static SENSOR_DEVICE_ATTR(pwm1_auto_point12_pwm, S_IWUSR | S_IRUGO,
show_pwm1, set_pwm1, 12);
static SENSOR_DEVICE_ATTR(pwm1_auto_point12_temp, S_IWUSR | S_IRUGO,
show_lut_temp, set_temp8, 14);
static SENSOR_DEVICE_ATTR(pwm1_auto_point12_temp_hyst, S_IRUGO,
show_lut_temp_hyst, NULL, 14);
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_local_temp8, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_local_temp8,
set_temp8, 1);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp11, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp11,
set_temp11, 1);
static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp11,
set_temp11, 2);
static SENSOR_DEVICE_ATTR(temp2_offset, S_IWUSR | S_IRUGO, show_temp11,
set_temp11, 3);
static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_remote_temp8,
set_temp8, 2);
static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp2_crit_hyst,
set_temp2_crit_hyst);
static DEVICE_ATTR(temp2_type, S_IWUSR | S_IRUGO, show_type, set_type);
/* Individual alarm files */
static SENSOR_DEVICE_ATTR(fan1_min_alarm, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_alarm, NULL, 3);
static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4);
static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
/* Raw alarm file for compatibility */
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
static DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, show_update_interval,
set_update_interval);
static struct attribute *lm63_attributes[] = {
&sensor_dev_attr_pwm1.dev_attr.attr,
&dev_attr_pwm1_enable.attr,
&sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point1_temp.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point1_temp_hyst.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point2_temp.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point2_temp_hyst.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point3_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point3_temp.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point3_temp_hyst.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point4_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point4_temp.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point4_temp_hyst.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point5_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point5_temp.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point5_temp_hyst.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point6_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point6_temp.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point6_temp_hyst.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point7_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point7_temp.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point7_temp_hyst.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point8_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point8_temp.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point8_temp_hyst.dev_attr.attr,
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp2_min.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp2_max.dev_attr.attr,
&sensor_dev_attr_temp2_offset.dev_attr.attr,
&sensor_dev_attr_temp2_crit.dev_attr.attr,
&dev_attr_temp2_crit_hyst.attr,
&sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_fault.dev_attr.attr,
&sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
&dev_attr_alarms.attr,
&dev_attr_update_interval.attr,
NULL
};
static struct attribute *lm63_attributes_extra_lut[] = {
&sensor_dev_attr_pwm1_auto_point9_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point9_temp.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point9_temp_hyst.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point10_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point10_temp.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point10_temp_hyst.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point11_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point11_temp.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point11_temp_hyst.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point12_pwm.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point12_temp.dev_attr.attr,
&sensor_dev_attr_pwm1_auto_point12_temp_hyst.dev_attr.attr,
NULL
};
static const struct attribute_group lm63_group_extra_lut = {
.attrs = lm63_attributes_extra_lut,
};
/*
* On LM63, temp2_crit can be set only once, which should be job
* of the bootloader.
* On LM64, temp2_crit can always be set.
* On LM96163, temp2_crit can be set if bit 1 of the configuration
* register is true.
*/
static umode_t lm63_attribute_mode(struct kobject *kobj,
struct attribute *attr, int index)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct i2c_client *client = to_i2c_client(dev);
struct lm63_data *data = i2c_get_clientdata(client);
if (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr
&& (data->kind == lm64 ||
(data->kind == lm96163 && (data->config & 0x02))))
return attr->mode | S_IWUSR;
return attr->mode;
}
static const struct attribute_group lm63_group = {
.is_visible = lm63_attribute_mode,
.attrs = lm63_attributes,
};
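/*
 * Using .is_visible lets a single attribute array serve all three
 * chips: lm63_attribute_mode() upgrades temp2_crit to writable at
 * group creation time where the chip allows it, instead of
 * maintaining a second, nearly identical attribute group.
 */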
static struct attribute *lm63_attributes_fan1[] = {
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_fan1_min.dev_attr.attr,
&sensor_dev_attr_fan1_min_alarm.dev_attr.attr,
NULL
};
static const struct attribute_group lm63_group_fan1 = {
.attrs = lm63_attributes_fan1,
};
/*
* Real code
*/
/* Return 0 if detection is successful, -ENODEV otherwise */
static int lm63_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
u8 man_id, chip_id, reg_config1, reg_config2;
u8 reg_alert_status, reg_alert_mask;
int address = client->addr;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
man_id = i2c_smbus_read_byte_data(client, LM63_REG_MAN_ID);
chip_id = i2c_smbus_read_byte_data(client, LM63_REG_CHIP_ID);
reg_config1 = i2c_smbus_read_byte_data(client, LM63_REG_CONFIG1);
reg_config2 = i2c_smbus_read_byte_data(client, LM63_REG_CONFIG2);
reg_alert_status = i2c_smbus_read_byte_data(client,
LM63_REG_ALERT_STATUS);
reg_alert_mask = i2c_smbus_read_byte_data(client, LM63_REG_ALERT_MASK);
if (man_id != 0x01 /* National Semiconductor */
|| (reg_config1 & 0x18) != 0x00
|| (reg_config2 & 0xF8) != 0x00
|| (reg_alert_status & 0x20) != 0x00
|| (reg_alert_mask & 0xA4) != 0xA4) {
dev_dbg(&adapter->dev,
"Unsupported chip (man_id=0x%02X, chip_id=0x%02X)\n",
man_id, chip_id);
return -ENODEV;
}
if (chip_id == 0x41 && address == 0x4c)
strlcpy(info->type, "lm63", I2C_NAME_SIZE);
else if (chip_id == 0x51 && (address == 0x18 || address == 0x4e))
strlcpy(info->type, "lm64", I2C_NAME_SIZE);
else if (chip_id == 0x49 && address == 0x4c)
strlcpy(info->type, "lm96163", I2C_NAME_SIZE);
else
return -ENODEV;
return 0;
}
/*
* Ideally we shouldn't have to initialize anything, since the BIOS
* should have taken care of everything
*/
static void lm63_init_client(struct i2c_client *client)
{
struct lm63_data *data = i2c_get_clientdata(client);
u8 convrate;
data->config = i2c_smbus_read_byte_data(client, LM63_REG_CONFIG1);
data->config_fan = i2c_smbus_read_byte_data(client,
LM63_REG_CONFIG_FAN);
/* Start converting if needed */
if (data->config & 0x40) { /* standby */
dev_dbg(&client->dev, "Switching to operational mode\n");
data->config &= 0xA7;
i2c_smbus_write_byte_data(client, LM63_REG_CONFIG1,
data->config);
}
/* Tachometer is always enabled on LM64 */
if (data->kind == lm64)
data->config |= 0x04;
/* We may need pwm1_freq before ever updating the client data */
data->pwm1_freq = i2c_smbus_read_byte_data(client, LM63_REG_PWM_FREQ);
if (data->pwm1_freq == 0)
data->pwm1_freq = 1;
switch (data->kind) {
case lm63:
case lm64:
data->max_convrate_hz = LM63_MAX_CONVRATE_HZ;
data->lut_size = 8;
break;
case lm96163:
data->max_convrate_hz = LM96163_MAX_CONVRATE_HZ;
data->lut_size = 12;
data->trutherm
= i2c_smbus_read_byte_data(client,
LM96163_REG_TRUTHERM) & 0x02;
break;
}
convrate = i2c_smbus_read_byte_data(client, LM63_REG_CONVRATE);
if (unlikely(convrate > LM63_MAX_CONVRATE))
convrate = LM63_MAX_CONVRATE;
data->update_interval = UPDATE_INTERVAL(data->max_convrate_hz,
convrate);
/*
* For LM96163, check if high resolution PWM
* and unsigned temperature format is enabled.
*/
if (data->kind == lm96163) {
u8 config_enhanced
= i2c_smbus_read_byte_data(client,
LM96163_REG_CONFIG_ENHANCED);
if (config_enhanced & 0x20)
data->lut_temp_highres = true;
if ((config_enhanced & 0x10)
&& !(data->config_fan & 0x08) && data->pwm1_freq == 8)
data->pwm_highres = true;
if (config_enhanced & 0x08)
data->remote_unsigned = true;
}
/* Show some debug info about the LM63 configuration */
if (data->kind == lm63)
dev_dbg(&client->dev, "Alert/tach pin configured for %s\n",
(data->config & 0x04) ? "tachometer input" :
"alert output");
dev_dbg(&client->dev, "PWM clock %s kHz, output frequency %u Hz\n",
(data->config_fan & 0x08) ? "1.4" : "360",
((data->config_fan & 0x08) ? 700 : 180000) / data->pwm1_freq);
dev_dbg(&client->dev, "PWM output active %s, %s mode\n",
(data->config_fan & 0x10) ? "low" : "high",
(data->config_fan & 0x20) ? "manual" : "auto");
}
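/*
 * Note on the frequency printed above: the PWM output runs at
 * clock / (2 * pwm1_freq), i.e. 700 Hz or 180 kHz divided by the
 * register value. With the 360 kHz clock and pwm1_freq = 32, for
 * example, the output runs at 180000 / 32 = 5625 Hz.
 */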
static int lm63_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct lm63_data *data;
int err;
data = kzalloc(sizeof(struct lm63_data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto exit;
}
i2c_set_clientdata(client, data);
data->valid = 0;
mutex_init(&data->update_lock);
/* Set the device type */
data->kind = id->driver_data;
if (data->kind == lm64)
data->temp2_offset = 16000;
/* Initialize chip */
lm63_init_client(client);
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &lm63_group);
if (err)
goto exit_free;
if (data->config & 0x04) { /* tachometer enabled */
err = sysfs_create_group(&client->dev.kobj, &lm63_group_fan1);
if (err)
goto exit_remove_files;
}
if (data->kind == lm96163) {
err = device_create_file(&client->dev, &dev_attr_temp2_type);
if (err)
goto exit_remove_files;
err = sysfs_create_group(&client->dev.kobj,
&lm63_group_extra_lut);
if (err)
goto exit_remove_files;
}
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
goto exit_remove_files;
}
return 0;
exit_remove_files:
sysfs_remove_group(&client->dev.kobj, &lm63_group);
sysfs_remove_group(&client->dev.kobj, &lm63_group_fan1);
if (data->kind == lm96163) {
device_remove_file(&client->dev, &dev_attr_temp2_type);
sysfs_remove_group(&client->dev.kobj, &lm63_group_extra_lut);
}
exit_free:
kfree(data);
exit:
return err;
}
static int lm63_remove(struct i2c_client *client)
{
struct lm63_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm63_group);
sysfs_remove_group(&client->dev.kobj, &lm63_group_fan1);
if (data->kind == lm96163) {
device_remove_file(&client->dev, &dev_attr_temp2_type);
sysfs_remove_group(&client->dev.kobj, &lm63_group_extra_lut);
}
kfree(data);
return 0;
}
/*
* Driver data (common to all clients)
*/
static const struct i2c_device_id lm63_id[] = {
{ "lm63", lm63 },
{ "lm64", lm64 },
{ "lm96163", lm96163 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm63_id);
static struct i2c_driver lm63_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "lm63",
},
.probe = lm63_probe,
.remove = lm63_remove,
.id_table = lm63_id,
.detect = lm63_detect,
.address_list = normal_i2c,
};
module_i2c_driver(lm63_driver);
MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
MODULE_DESCRIPTION("LM63 driver");
MODULE_LICENSE("GPL");
|
gpl-2.0
|
k2wl/caf
|
drivers/hwmon/w83795.c
|
4856
|
63764
|
/*
* w83795.c - Linux kernel driver for hardware monitoring
* Copyright (C) 2008 Nuvoton Technology Corp.
* Wei Song
* Copyright (C) 2010 Jean Delvare <khali@linux-fr.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation - version 2.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301 USA.
*
* Supports following chips:
*
* Chip #vin #fanin #pwm #temp #dts wchipid vendid i2c ISA
* w83795g 21 14 8 6 8 0x79 0x5ca3 yes no
* w83795adg 18 14 2 6 8 0x79 0x5ca3 yes no
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/delay.h>
/* Addresses to scan */
static const unsigned short normal_i2c[] = {
0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END
};
static bool reset;
module_param(reset, bool, 0);
MODULE_PARM_DESC(reset, "Set to 1 to reset chip, not recommended");
#define W83795_REG_BANKSEL 0x00
#define W83795_REG_VENDORID 0xfd
#define W83795_REG_CHIPID 0xfe
#define W83795_REG_DEVICEID 0xfb
#define W83795_REG_DEVICEID_A 0xff
#define W83795_REG_I2C_ADDR 0xfc
#define W83795_REG_CONFIG 0x01
#define W83795_REG_CONFIG_CONFIG48 0x04
#define W83795_REG_CONFIG_START 0x01
/* Multi-Function Pin Ctrl Registers */
#define W83795_REG_VOLT_CTRL1 0x02
#define W83795_REG_VOLT_CTRL2 0x03
#define W83795_REG_TEMP_CTRL1 0x04
#define W83795_REG_TEMP_CTRL2 0x05
#define W83795_REG_FANIN_CTRL1 0x06
#define W83795_REG_FANIN_CTRL2 0x07
#define W83795_REG_VMIGB_CTRL 0x08
#define TEMP_READ 0
#define TEMP_CRIT 1
#define TEMP_CRIT_HYST 2
#define TEMP_WARN 3
#define TEMP_WARN_HYST 4
/*
* only crit and crit_hyst affect real-time alarm status
* current crit crit_hyst warn warn_hyst
*/
static const u16 W83795_REG_TEMP[][5] = {
{0x21, 0x96, 0x97, 0x98, 0x99}, /* TD1/TR1 */
{0x22, 0x9a, 0x9b, 0x9c, 0x9d}, /* TD2/TR2 */
{0x23, 0x9e, 0x9f, 0xa0, 0xa1}, /* TD3/TR3 */
{0x24, 0xa2, 0xa3, 0xa4, 0xa5}, /* TD4/TR4 */
{0x1f, 0xa6, 0xa7, 0xa8, 0xa9}, /* TR5 */
{0x20, 0xaa, 0xab, 0xac, 0xad}, /* TR6 */
};
#define IN_READ 0
#define IN_MAX 1
#define IN_LOW 2
static const u16 W83795_REG_IN[][3] = {
/* Current, HL, LL */
{0x10, 0x70, 0x71}, /* VSEN1 */
{0x11, 0x72, 0x73}, /* VSEN2 */
{0x12, 0x74, 0x75}, /* VSEN3 */
{0x13, 0x76, 0x77}, /* VSEN4 */
{0x14, 0x78, 0x79}, /* VSEN5 */
{0x15, 0x7a, 0x7b}, /* VSEN6 */
{0x16, 0x7c, 0x7d}, /* VSEN7 */
{0x17, 0x7e, 0x7f}, /* VSEN8 */
{0x18, 0x80, 0x81}, /* VSEN9 */
{0x19, 0x82, 0x83}, /* VSEN10 */
{0x1A, 0x84, 0x85}, /* VSEN11 */
{0x1B, 0x86, 0x87}, /* VTT */
{0x1C, 0x88, 0x89}, /* 3VDD */
{0x1D, 0x8a, 0x8b}, /* 3VSB */
{0x1E, 0x8c, 0x8d}, /* VBAT */
{0x1F, 0xa6, 0xa7}, /* VSEN12 */
{0x20, 0xaa, 0xab}, /* VSEN13 */
{0x21, 0x96, 0x97}, /* VSEN14 */
{0x22, 0x9a, 0x9b}, /* VSEN15 */
{0x23, 0x9e, 0x9f}, /* VSEN16 */
{0x24, 0xa2, 0xa3}, /* VSEN17 */
};
#define W83795_REG_VRLSB 0x3C
static const u8 W83795_REG_IN_HL_LSB[] = {
0x8e, /* VSEN1-4 */
0x90, /* VSEN5-8 */
0x92, /* VSEN9-11 */
0x94, /* VTT, 3VDD, 3VSB, VBAT */
0xa8, /* VSEN12 */
0xac, /* VSEN13 */
0x98, /* VSEN14 */
0x9c, /* VSEN15 */
0xa0, /* VSEN16 */
0xa4, /* VSEN17 */
};
#define IN_LSB_REG(index, type) \
(((type) == 1) ? W83795_REG_IN_HL_LSB[(index)] \
: (W83795_REG_IN_HL_LSB[(index)] + 1))
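/*
 * IN_LSB_REG(index, IN_MAX) yields the high-limit LSB register for a
 * group of inputs; the matching low-limit LSB register always sits at
 * the next address, hence the "+ 1" for any other type.
 */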
#define IN_LSB_SHIFT 0
#define IN_LSB_IDX 1
static const u8 IN_LSB_SHIFT_IDX[][2] = {
/* High/Low LSB shift, LSB No. */
{0x00, 0x00}, /* VSEN1 */
{0x02, 0x00}, /* VSEN2 */
{0x04, 0x00}, /* VSEN3 */
{0x06, 0x00}, /* VSEN4 */
{0x00, 0x01}, /* VSEN5 */
{0x02, 0x01}, /* VSEN6 */
{0x04, 0x01}, /* VSEN7 */
{0x06, 0x01}, /* VSEN8 */
{0x00, 0x02}, /* VSEN9 */
{0x02, 0x02}, /* VSEN10 */
{0x04, 0x02}, /* VSEN11 */
{0x00, 0x03}, /* VTT */
{0x02, 0x03}, /* 3VDD */
{0x04, 0x03}, /* 3VSB */
{0x06, 0x03}, /* VBAT */
{0x06, 0x04}, /* VSEN12 */
{0x06, 0x05}, /* VSEN13 */
{0x06, 0x06}, /* VSEN14 */
{0x06, 0x07}, /* VSEN15 */
{0x06, 0x08}, /* VSEN16 */
{0x06, 0x09}, /* VSEN17 */
};
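/*
 * Voltage limits are 10 bits wide: the upper 8 bits live in the
 * W83795_REG_IN[][] registers, and the bottom 2 bits are packed four
 * per byte into the LSB registers above. For each input,
 * IN_LSB_SHIFT_IDX gives the bit position within that byte and the
 * index into W83795_REG_IN_HL_LSB (see show_in() and store_in()).
 */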
#define W83795_REG_FAN(index) (0x2E + (index))
#define W83795_REG_FAN_MIN_HL(index) (0xB6 + (index))
#define W83795_REG_FAN_MIN_LSB(index) (0xC4 + (index) / 2)
#define W83795_REG_FAN_MIN_LSB_SHIFT(index) \
(((index) & 1) ? 4 : 0)
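/*
 * Fan minimum limits are 12 bits: the upper 8 bits are in the HL
 * register, and the low nibbles of two consecutive fans share one
 * LSB register. For example, fan index 3 uses the high nibble
 * (shift 4) of register 0xC5.
 */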
#define W83795_REG_VID_CTRL 0x6A
#define W83795_REG_ALARM_CTRL 0x40
#define ALARM_CTRL_RTSACS (1 << 7)
#define W83795_REG_ALARM(index) (0x41 + (index))
#define W83795_REG_CLR_CHASSIS 0x4D
#define W83795_REG_BEEP(index) (0x50 + (index))
#define W83795_REG_OVT_CFG 0x58
#define OVT_CFG_SEL (1 << 7)
#define W83795_REG_FCMS1 0x201
#define W83795_REG_FCMS2 0x208
#define W83795_REG_TFMR(index) (0x202 + (index))
#define W83795_REG_FOMC 0x20F
#define W83795_REG_TSS(index) (0x209 + (index))
#define TSS_MAP_RESERVED 0xff
static const u8 tss_map[4][6] = {
{ 0, 1, 2, 3, 4, 5},
{ 6, 7, 8, 9, 0, 1},
{10, 11, 12, 13, 2, 3},
{ 4, 5, 4, 5, TSS_MAP_RESERVED, TSS_MAP_RESERVED},
};
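/*
 * tss_map[sel][channel] translates the 2-bit source-select value of a
 * TSS nibble into an actual monitoring channel: values 0-5 are the
 * analog inputs temp1-6, values 6-13 map to the digital (DTS)
 * sensors temp7-14 (see show_temp_src() and w83795_tss_useful()).
 */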
#define PWM_OUTPUT 0
#define PWM_FREQ 1
#define PWM_START 2
#define PWM_NONSTOP 3
#define PWM_STOP_TIME 4
#define W83795_REG_PWM(index, nr) (0x210 + (nr) * 8 + (index))
#define W83795_REG_FTSH(index) (0x240 + (index) * 2)
#define W83795_REG_FTSL(index) (0x241 + (index) * 2)
#define W83795_REG_TFTS 0x250
#define TEMP_PWM_TTTI 0
#define TEMP_PWM_CTFS 1
#define TEMP_PWM_HCT 2
#define TEMP_PWM_HOT 3
#define W83795_REG_TTTI(index) (0x260 + (index))
#define W83795_REG_CTFS(index) (0x268 + (index))
#define W83795_REG_HT(index) (0x270 + (index))
#define SF4_TEMP 0
#define SF4_PWM 1
#define W83795_REG_SF4_TEMP(temp_num, index) \
(0x280 + 0x10 * (temp_num) + (index))
#define W83795_REG_SF4_PWM(temp_num, index) \
(0x288 + 0x10 * (temp_num) + (index))
#define W83795_REG_DTSC 0x301
#define W83795_REG_DTSE 0x302
#define W83795_REG_DTS(index) (0x26 + (index))
#define W83795_REG_PECI_TBASE(index) (0x320 + (index))
#define DTS_CRIT 0
#define DTS_CRIT_HYST 1
#define DTS_WARN 2
#define DTS_WARN_HYST 3
#define W83795_REG_DTS_EXT(index) (0xB2 + (index))
#define SETUP_PWM_DEFAULT 0
#define SETUP_PWM_UPTIME 1
#define SETUP_PWM_DOWNTIME 2
#define W83795_REG_SETUP_PWM(index) (0x20C + (index))
static inline u16 in_from_reg(u8 index, u16 val)
{
/* 3VDD, 3VSB and VBAT: 6 mV/bit; other inputs: 2 mV/bit */
if (index >= 12 && index <= 14)
return val * 6;
else
return val * 2;
}
static inline u16 in_to_reg(u8 index, u16 val)
{
if (index >= 12 && index <= 14)
return val / 6;
else
return val / 2;
}
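/*
 * Worked example: a raw reading of 512 reports 1024 mV on a 2 mV/bit
 * input, but 3072 mV on 3VDD/3VSB/VBAT (indices 12-14, 6 mV/bit).
 * in_to_reg() is the inverse, truncating to the register step.
 */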
static inline unsigned long fan_from_reg(u16 val)
{
if ((val == 0xfff) || (val == 0))
return 0;
return 1350000UL / val;
}
static inline u16 fan_to_reg(long rpm)
{
if (rpm <= 0)
return 0x0fff;
return SENSORS_LIMIT((1350000 + (rpm >> 1)) / rpm, 1, 0xffe);
}
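/*
 * The 1350000 constant is the conversion factor used across
 * Winbond/Nuvoton monitoring chips: a register count of 675, for
 * instance, reads back as 1350000 / 675 = 2000 RPM, while 0 and
 * 0xfff both mean "no valid reading" and are reported as 0 RPM.
 * fan_to_reg() adds half the divisor (rpm >> 1) before dividing so
 * the result is rounded to the nearest count.
 */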
static inline unsigned long time_from_reg(u8 reg)
{
return reg * 100;
}
static inline u8 time_to_reg(unsigned long val)
{
return SENSORS_LIMIT((val + 50) / 100, 0, 0xff);
}
static inline long temp_from_reg(s8 reg)
{
return reg * 1000;
}
static inline s8 temp_to_reg(long val, s8 min, s8 max)
{
return SENSORS_LIMIT(val / 1000, min, max);
}
static const u16 pwm_freq_cksel0[16] = {
1024, 512, 341, 256, 205, 171, 146, 128,
85, 64, 32, 16, 8, 4, 2, 1
};
static unsigned int pwm_freq_from_reg(u8 reg, u16 clkin)
{
unsigned long base_clock;
if (reg & 0x80) {
base_clock = clkin * 1000 / ((clkin == 48000) ? 384 : 256);
return base_clock / ((reg & 0x7f) + 1);
} else
return pwm_freq_cksel0[reg & 0x0f];
}
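/*
 * Worked example: with clkin = 48000 (kHz) and bit 7 set, base_clock
 * is 48000000 / 384 = 125000 Hz, so a register value of 0x80 | 124
 * selects 125000 / 125 = 1000 Hz. With bit 7 clear, the low nibble
 * indexes the fixed pwm_freq_cksel0[] table instead.
 */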
static u8 pwm_freq_to_reg(unsigned long val, u16 clkin)
{
unsigned long base_clock;
u8 reg0, reg1;
unsigned long best0, best1;
/* Best fit for cksel = 0 */
for (reg0 = 0; reg0 < ARRAY_SIZE(pwm_freq_cksel0) - 1; reg0++) {
if (val > (pwm_freq_cksel0[reg0] +
pwm_freq_cksel0[reg0 + 1]) / 2)
break;
}
if (val < 375) /* cksel = 1 can't beat this */
return reg0;
best0 = pwm_freq_cksel0[reg0];
/* Best fit for cksel = 1 */
base_clock = clkin * 1000 / ((clkin == 48000) ? 384 : 256);
reg1 = SENSORS_LIMIT(DIV_ROUND_CLOSEST(base_clock, val), 1, 128);
best1 = base_clock / reg1;
reg1 = 0x80 | (reg1 - 1);
/* Choose the closest one */
if (abs(val - best0) > abs(val - best1))
return reg1;
else
return reg0;
}
enum chip_types {w83795g, w83795adg};
struct w83795_data {
struct device *hwmon_dev;
struct mutex update_lock;
unsigned long last_updated; /* In jiffies */
enum chip_types chip_type;
u8 bank;
u32 has_in; /* Enable monitor VIN or not */
u8 has_dyn_in; /* Only in2-0 can have this */
u16 in[21][3]; /* Register value, read/high/low */
u8 in_lsb[10][3]; /* LSB Register value, high/low */
u8 has_gain; /* has gain: in17-20 * 8 */
u16 has_fan; /* Enable fan14-1 or not */
u16 fan[14]; /* Register value combine */
u16 fan_min[14]; /* Register value combine */
u8 has_temp; /* Enable monitor temp6-1 or not */
s8 temp[6][5]; /* current, crit, crit_hyst, warn, warn_hyst */
u8 temp_read_vrlsb[6];
u8 temp_mode; /* Bit vector, 0 = TR, 1 = TD */
u8 temp_src[3]; /* Register value */
u8 enable_dts; /*
* Enable PECI and SB-TSI,
* bit 0: =1 enable, =0 disable,
* bit 1: =1 AMD SB-TSI, =0 Intel PECI
*/
u8 has_dts; /* Enable monitor DTS temp */
s8 dts[8]; /* Register value */
u8 dts_read_vrlsb[8]; /* Register value */
s8 dts_ext[4]; /* Register value */
u8 has_pwm; /*
* 795g supports 8 pwm, 795adg only supports 2,
* no config register, only affected by chip
* type
*/
u8 pwm[8][5]; /*
* Register value, output, freq, start,
* non stop, stop time
*/
u16 clkin; /* CLKIN frequency in kHz */
u8 pwm_fcms[2]; /* Register value */
u8 pwm_tfmr[6]; /* Register value */
u8 pwm_fomc; /* Register value */
u16 target_speed[8]; /*
* Register value, target speed for speed
* cruise
*/
u8 tol_speed; /* tolerance of target speed */
u8 pwm_temp[6][4]; /* TTTI, CTFS, HCT, HOT */
u8 sf4_reg[6][2][7]; /* 6 temp, temp/dcpwm, 7 registers */
u8 setup_pwm[3]; /* Register value */
u8 alarms[6]; /* Register value */
u8 enable_beep;
u8 beeps[6]; /* Register value */
char valid;
char valid_limits;
char valid_pwm_config;
};
/*
* Hardware access
* We assume that nobody can change the bank outside the driver.
*/
/* Must be called with data->update_lock held, except during initialization */
static int w83795_set_bank(struct i2c_client *client, u8 bank)
{
struct w83795_data *data = i2c_get_clientdata(client);
int err;
/* If the same bank is already set, nothing to do */
if ((data->bank & 0x07) == bank)
return 0;
/* Change to new bank, preserve all other bits */
bank |= data->bank & ~0x07;
err = i2c_smbus_write_byte_data(client, W83795_REG_BANKSEL, bank);
if (err < 0) {
dev_err(&client->dev,
"Failed to set bank to %d, err %d\n",
(int)bank, err);
return err;
}
data->bank = bank;
return 0;
}
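/*
 * Caching the bank avoids an extra SMBus write on every register
 * access; this is only safe under the assumption above that nothing
 * else touches the bank-select register.
 */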
/* Must be called with data->update_lock held, except during initialization */
static u8 w83795_read(struct i2c_client *client, u16 reg)
{
int err;
err = w83795_set_bank(client, reg >> 8);
if (err < 0)
return 0x00; /* Arbitrary */
err = i2c_smbus_read_byte_data(client, reg & 0xff);
if (err < 0) {
dev_err(&client->dev,
"Failed to read from register 0x%03x, err %d\n",
(int)reg, err);
return 0x00; /* Arbitrary */
}
return err;
}
/* Must be called with data->update_lock held, except during initialization */
static int w83795_write(struct i2c_client *client, u16 reg, u8 value)
{
int err;
err = w83795_set_bank(client, reg >> 8);
if (err < 0)
return err;
err = i2c_smbus_write_byte_data(client, reg & 0xff, value);
if (err < 0)
dev_err(&client->dev,
"Failed to write to register 0x%03x, err %d\n",
(int)reg, err);
return err;
}
static void w83795_update_limits(struct i2c_client *client)
{
struct w83795_data *data = i2c_get_clientdata(client);
int i, limit;
u8 lsb;
/* Read the voltage limits */
for (i = 0; i < ARRAY_SIZE(data->in); i++) {
if (!(data->has_in & (1 << i)))
continue;
data->in[i][IN_MAX] =
w83795_read(client, W83795_REG_IN[i][IN_MAX]);
data->in[i][IN_LOW] =
w83795_read(client, W83795_REG_IN[i][IN_LOW]);
}
for (i = 0; i < ARRAY_SIZE(data->in_lsb); i++) {
if ((i == 2 && data->chip_type == w83795adg) ||
(i >= 4 && !(data->has_in & (1 << (i + 11)))))
continue;
data->in_lsb[i][IN_MAX] =
w83795_read(client, IN_LSB_REG(i, IN_MAX));
data->in_lsb[i][IN_LOW] =
w83795_read(client, IN_LSB_REG(i, IN_LOW));
}
/* Read the fan limits */
lsb = 0; /* Silence a false gcc warning */
for (i = 0; i < ARRAY_SIZE(data->fan); i++) {
/*
* Each register contains LSB for 2 fans, but we want to
* read it only once to save time
*/
if ((i & 1) == 0 && (data->has_fan & (3 << i)))
lsb = w83795_read(client, W83795_REG_FAN_MIN_LSB(i));
if (!(data->has_fan & (1 << i)))
continue;
data->fan_min[i] =
w83795_read(client, W83795_REG_FAN_MIN_HL(i)) << 4;
data->fan_min[i] |=
(lsb >> W83795_REG_FAN_MIN_LSB_SHIFT(i)) & 0x0F;
}
/* Read the temperature limits */
for (i = 0; i < ARRAY_SIZE(data->temp); i++) {
if (!(data->has_temp & (1 << i)))
continue;
for (limit = TEMP_CRIT; limit <= TEMP_WARN_HYST; limit++)
data->temp[i][limit] =
w83795_read(client, W83795_REG_TEMP[i][limit]);
}
/* Read the DTS limits */
if (data->enable_dts) {
for (limit = DTS_CRIT; limit <= DTS_WARN_HYST; limit++)
data->dts_ext[limit] =
w83795_read(client, W83795_REG_DTS_EXT(limit));
}
/* Read beep settings */
if (data->enable_beep) {
for (i = 0; i < ARRAY_SIZE(data->beeps); i++)
data->beeps[i] =
w83795_read(client, W83795_REG_BEEP(i));
}
data->valid_limits = 1;
}
static struct w83795_data *w83795_update_pwm_config(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83795_data *data = i2c_get_clientdata(client);
int i, tmp;
mutex_lock(&data->update_lock);
if (data->valid_pwm_config)
goto END;
/* Read temperature source selection */
for (i = 0; i < ARRAY_SIZE(data->temp_src); i++)
data->temp_src[i] = w83795_read(client, W83795_REG_TSS(i));
/* Read automatic fan speed control settings */
data->pwm_fcms[0] = w83795_read(client, W83795_REG_FCMS1);
data->pwm_fcms[1] = w83795_read(client, W83795_REG_FCMS2);
for (i = 0; i < ARRAY_SIZE(data->pwm_tfmr); i++)
data->pwm_tfmr[i] = w83795_read(client, W83795_REG_TFMR(i));
data->pwm_fomc = w83795_read(client, W83795_REG_FOMC);
for (i = 0; i < data->has_pwm; i++) {
for (tmp = PWM_FREQ; tmp <= PWM_STOP_TIME; tmp++)
data->pwm[i][tmp] =
w83795_read(client, W83795_REG_PWM(i, tmp));
}
for (i = 0; i < ARRAY_SIZE(data->target_speed); i++) {
data->target_speed[i] =
w83795_read(client, W83795_REG_FTSH(i)) << 4;
data->target_speed[i] |=
w83795_read(client, W83795_REG_FTSL(i)) >> 4;
}
data->tol_speed = w83795_read(client, W83795_REG_TFTS) & 0x3f;
for (i = 0; i < ARRAY_SIZE(data->pwm_temp); i++) {
data->pwm_temp[i][TEMP_PWM_TTTI] =
w83795_read(client, W83795_REG_TTTI(i)) & 0x7f;
data->pwm_temp[i][TEMP_PWM_CTFS] =
w83795_read(client, W83795_REG_CTFS(i));
tmp = w83795_read(client, W83795_REG_HT(i));
data->pwm_temp[i][TEMP_PWM_HCT] = tmp >> 4;
data->pwm_temp[i][TEMP_PWM_HOT] = tmp & 0x0f;
}
/* Read SmartFanIV trip points */
for (i = 0; i < ARRAY_SIZE(data->sf4_reg); i++) {
for (tmp = 0; tmp < 7; tmp++) {
data->sf4_reg[i][SF4_TEMP][tmp] =
w83795_read(client,
W83795_REG_SF4_TEMP(i, tmp));
data->sf4_reg[i][SF4_PWM][tmp] =
w83795_read(client, W83795_REG_SF4_PWM(i, tmp));
}
}
/* Read setup PWM */
for (i = 0; i < ARRAY_SIZE(data->setup_pwm); i++)
data->setup_pwm[i] =
w83795_read(client, W83795_REG_SETUP_PWM(i));
data->valid_pwm_config = 1;
END:
mutex_unlock(&data->update_lock);
return data;
}
static struct w83795_data *w83795_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83795_data *data = i2c_get_clientdata(client);
u16 tmp;
u8 intrusion;
int i;
mutex_lock(&data->update_lock);
if (!data->valid_limits)
w83795_update_limits(client);
if (!(time_after(jiffies, data->last_updated + HZ * 2)
|| !data->valid))
goto END;
/* Update the voltages value */
for (i = 0; i < ARRAY_SIZE(data->in); i++) {
if (!(data->has_in & (1 << i)))
continue;
tmp = w83795_read(client, W83795_REG_IN[i][IN_READ]) << 2;
tmp |= w83795_read(client, W83795_REG_VRLSB) >> 6;
data->in[i][IN_READ] = tmp;
}
/* in0-2 can have dynamic limits (W83795G only) */
if (data->has_dyn_in) {
u8 lsb_max = w83795_read(client, IN_LSB_REG(0, IN_MAX));
u8 lsb_low = w83795_read(client, IN_LSB_REG(0, IN_LOW));
for (i = 0; i < 3; i++) {
if (!(data->has_dyn_in & (1 << i)))
continue;
data->in[i][IN_MAX] =
w83795_read(client, W83795_REG_IN[i][IN_MAX]);
data->in[i][IN_LOW] =
w83795_read(client, W83795_REG_IN[i][IN_LOW]);
data->in_lsb[i][IN_MAX] = (lsb_max >> (2 * i)) & 0x03;
data->in_lsb[i][IN_LOW] = (lsb_low >> (2 * i)) & 0x03;
}
}
/* Update fan */
for (i = 0; i < ARRAY_SIZE(data->fan); i++) {
if (!(data->has_fan & (1 << i)))
continue;
data->fan[i] = w83795_read(client, W83795_REG_FAN(i)) << 4;
data->fan[i] |= w83795_read(client, W83795_REG_VRLSB) >> 4;
}
/* Update temperature */
for (i = 0; i < ARRAY_SIZE(data->temp); i++) {
data->temp[i][TEMP_READ] =
w83795_read(client, W83795_REG_TEMP[i][TEMP_READ]);
data->temp_read_vrlsb[i] =
w83795_read(client, W83795_REG_VRLSB);
}
/* Update dts temperature */
if (data->enable_dts) {
for (i = 0; i < ARRAY_SIZE(data->dts); i++) {
if (!(data->has_dts & (1 << i)))
continue;
data->dts[i] =
w83795_read(client, W83795_REG_DTS(i));
data->dts_read_vrlsb[i] =
w83795_read(client, W83795_REG_VRLSB);
}
}
/* Update pwm output */
for (i = 0; i < data->has_pwm; i++) {
data->pwm[i][PWM_OUTPUT] =
w83795_read(client, W83795_REG_PWM(i, PWM_OUTPUT));
}
/*
* Update intrusion and alarms
* It is important to read intrusion first, because reading from
* register SMI STS6 clears the interrupt status temporarily.
*/
tmp = w83795_read(client, W83795_REG_ALARM_CTRL);
/* Switch to interrupt status for intrusion if needed */
if (tmp & ALARM_CTRL_RTSACS)
w83795_write(client, W83795_REG_ALARM_CTRL,
tmp & ~ALARM_CTRL_RTSACS);
intrusion = w83795_read(client, W83795_REG_ALARM(5)) & (1 << 6);
/* Switch to real-time alarms */
w83795_write(client, W83795_REG_ALARM_CTRL, tmp | ALARM_CTRL_RTSACS);
for (i = 0; i < ARRAY_SIZE(data->alarms); i++)
data->alarms[i] = w83795_read(client, W83795_REG_ALARM(i));
data->alarms[5] |= intrusion;
/* Restore original configuration if needed */
if (!(tmp & ALARM_CTRL_RTSACS))
w83795_write(client, W83795_REG_ALARM_CTRL,
tmp & ~ALARM_CTRL_RTSACS);
data->last_updated = jiffies;
data->valid = 1;
END:
mutex_unlock(&data->update_lock);
return data;
}
/*
* Sysfs attributes
*/
#define ALARM_STATUS 0
#define BEEP_ENABLE 1
static ssize_t
show_alarm_beep(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83795_data *data = w83795_update_device(dev);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index >> 3;
int bit = sensor_attr->index & 0x07;
u8 val;
if (nr == ALARM_STATUS)
val = (data->alarms[index] >> bit) & 1;
else /* BEEP_ENABLE */
val = (data->beeps[index] >> bit) & 1;
return sprintf(buf, "%u\n", val);
}
static ssize_t
store_beep(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83795_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int index = sensor_attr->index >> 3;
int shift = sensor_attr->index & 0x07;
u8 beep_bit = 1 << shift;
unsigned long val;
if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
if (val != 0 && val != 1)
return -EINVAL;
mutex_lock(&data->update_lock);
data->beeps[index] = w83795_read(client, W83795_REG_BEEP(index));
data->beeps[index] &= ~beep_bit;
data->beeps[index] |= val << shift;
w83795_write(client, W83795_REG_BEEP(index), data->beeps[index]);
mutex_unlock(&data->update_lock);
return count;
}
/* Write 0 to clear chassis alarm */
static ssize_t
store_chassis_clear(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83795_data *data = i2c_get_clientdata(client);
unsigned long val;
if (kstrtoul(buf, 10, &val) < 0 || val != 0)
return -EINVAL;
mutex_lock(&data->update_lock);
val = w83795_read(client, W83795_REG_CLR_CHASSIS);
val |= 0x80;
w83795_write(client, W83795_REG_CLR_CHASSIS, val);
/* Clear status and force cache refresh */
w83795_read(client, W83795_REG_ALARM(5));
data->valid = 0;
mutex_unlock(&data->update_lock);
return count;
}
#define FAN_INPUT 0
#define FAN_MIN 1
static ssize_t
show_fan(struct device *dev, struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct w83795_data *data = w83795_update_device(dev);
u16 val;
if (nr == FAN_INPUT)
val = data->fan[index] & 0x0fff;
else
val = data->fan_min[index] & 0x0fff;
return sprintf(buf, "%lu\n", fan_from_reg(val));
}
static ssize_t
store_fan_min(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int index = sensor_attr->index;
struct i2c_client *client = to_i2c_client(dev);
struct w83795_data *data = i2c_get_clientdata(client);
unsigned long val;
if (kstrtoul(buf, 10, &val))
return -EINVAL;
val = fan_to_reg(val);
mutex_lock(&data->update_lock);
data->fan_min[index] = val;
w83795_write(client, W83795_REG_FAN_MIN_HL(index), (val >> 4) & 0xff);
val &= 0x0f;
if (index & 1) {
val <<= 4;
val |= w83795_read(client, W83795_REG_FAN_MIN_LSB(index))
& 0x0f;
} else {
val |= w83795_read(client, W83795_REG_FAN_MIN_LSB(index))
& 0xf0;
}
w83795_write(client, W83795_REG_FAN_MIN_LSB(index), val & 0xff);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_pwm(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83795_data *data;
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
unsigned int val;
data = nr == PWM_OUTPUT ? w83795_update_device(dev)
: w83795_update_pwm_config(dev);
switch (nr) {
case PWM_STOP_TIME:
val = time_from_reg(data->pwm[index][nr]);
break;
case PWM_FREQ:
val = pwm_freq_from_reg(data->pwm[index][nr], data->clkin);
break;
default:
val = data->pwm[index][nr];
break;
}
return sprintf(buf, "%u\n", val);
}
static ssize_t
store_pwm(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83795_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
unsigned long val;
if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
switch (nr) {
case PWM_STOP_TIME:
val = time_to_reg(val);
break;
case PWM_FREQ:
val = pwm_freq_to_reg(val, data->clkin);
break;
default:
val = SENSORS_LIMIT(val, 0, 0xff);
break;
}
w83795_write(client, W83795_REG_PWM(index, nr), val);
data->pwm[index][nr] = val;
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_pwm_enable(struct device *dev, struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
struct w83795_data *data = w83795_update_pwm_config(dev);
int index = sensor_attr->index;
u8 tmp;
/* Speed cruise mode */
if (data->pwm_fcms[0] & (1 << index)) {
tmp = 2;
goto out;
}
/* Thermal cruise or SmartFan IV mode */
for (tmp = 0; tmp < 6; tmp++) {
if (data->pwm_tfmr[tmp] & (1 << index)) {
tmp = 3;
goto out;
}
}
/* Manual mode */
tmp = 1;
out:
return sprintf(buf, "%u\n", tmp);
}
static ssize_t
store_pwm_enable(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83795_data *data = w83795_update_pwm_config(dev);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int index = sensor_attr->index;
unsigned long val;
int i;
if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
if (val < 1 || val > 2)
return -EINVAL;
#ifndef CONFIG_SENSORS_W83795_FANCTRL
if (val > 1) {
dev_warn(dev, "Automatic fan speed control support disabled\n");
dev_warn(dev, "Build with CONFIG_SENSORS_W83795_FANCTRL=y if you want it\n");
return -EOPNOTSUPP;
}
#endif
mutex_lock(&data->update_lock);
switch (val) {
case 1:
/* Clear speed cruise mode bits */
data->pwm_fcms[0] &= ~(1 << index);
w83795_write(client, W83795_REG_FCMS1, data->pwm_fcms[0]);
/* Clear thermal cruise mode bits */
for (i = 0; i < 6; i++) {
data->pwm_tfmr[i] &= ~(1 << index);
w83795_write(client, W83795_REG_TFMR(i),
data->pwm_tfmr[i]);
}
break;
case 2:
data->pwm_fcms[0] |= (1 << index);
w83795_write(client, W83795_REG_FCMS1, data->pwm_fcms[0]);
break;
}
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_pwm_mode(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83795_data *data = w83795_update_pwm_config(dev);
int index = to_sensor_dev_attr_2(attr)->index;
unsigned int mode;
if (data->pwm_fomc & (1 << index))
mode = 0; /* DC */
else
mode = 1; /* PWM */
return sprintf(buf, "%u\n", mode);
}
/*
* Check whether a given temperature source can ever be useful.
* Returns the number of selectable temperature channels which are
* enabled.
*/
static int w83795_tss_useful(const struct w83795_data *data, int tsrc)
{
int useful = 0, i;
for (i = 0; i < 4; i++) {
if (tss_map[i][tsrc] == TSS_MAP_RESERVED)
continue;
if (tss_map[i][tsrc] < 6) /* Analog */
useful += (data->has_temp >> tss_map[i][tsrc]) & 1;
else /* Digital */
useful += (data->has_dts >> (tss_map[i][tsrc] - 6)) & 1;
}
return useful;
}
static ssize_t
show_temp_src(struct device *dev, struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
struct w83795_data *data = w83795_update_pwm_config(dev);
int index = sensor_attr->index;
u8 tmp = data->temp_src[index / 2];
if (index & 1)
tmp >>= 4; /* Pick high nibble */
else
tmp &= 0x0f; /* Pick low nibble */
/* Look-up the actual temperature channel number */
if (tmp >= 4 || tss_map[tmp][index] == TSS_MAP_RESERVED)
return -EINVAL; /* Shouldn't happen */
return sprintf(buf, "%u\n", (unsigned int)tss_map[tmp][index] + 1);
}
static ssize_t
store_temp_src(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83795_data *data = w83795_update_pwm_config(dev);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int index = sensor_attr->index;
int tmp;
unsigned long channel;
u8 val = index / 2;
if (kstrtoul(buf, 10, &channel) < 0 ||
channel < 1 || channel > 14)
return -EINVAL;
/* Check if request can be fulfilled */
for (tmp = 0; tmp < 4; tmp++) {
if (tss_map[tmp][index] == channel - 1)
break;
}
if (tmp == 4) /* No match */
return -EINVAL;
mutex_lock(&data->update_lock);
if (index & 1) {
tmp <<= 4;
data->temp_src[val] &= 0x0f;
} else {
data->temp_src[val] &= 0xf0;
}
data->temp_src[val] |= tmp;
w83795_write(client, W83795_REG_TSS(val), data->temp_src[val]);
mutex_unlock(&data->update_lock);
return count;
}
#define TEMP_PWM_ENABLE 0
#define TEMP_PWM_FAN_MAP 1
static ssize_t
show_temp_pwm_enable(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct w83795_data *data = w83795_update_pwm_config(dev);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
u8 tmp = 0xff;
switch (nr) {
case TEMP_PWM_ENABLE:
tmp = (data->pwm_fcms[1] >> index) & 1;
if (tmp)
tmp = 4;
else
tmp = 3;
break;
case TEMP_PWM_FAN_MAP:
tmp = data->pwm_tfmr[index];
break;
}
return sprintf(buf, "%u\n", tmp);
}
static ssize_t
store_temp_pwm_enable(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83795_data *data = w83795_update_pwm_config(dev);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
unsigned long tmp;
if (kstrtoul(buf, 10, &tmp) < 0)
return -EINVAL;
switch (nr) {
case TEMP_PWM_ENABLE:
if (tmp != 3 && tmp != 4)
return -EINVAL;
tmp -= 3;
mutex_lock(&data->update_lock);
data->pwm_fcms[1] &= ~(1 << index);
data->pwm_fcms[1] |= tmp << index;
w83795_write(client, W83795_REG_FCMS2, data->pwm_fcms[1]);
mutex_unlock(&data->update_lock);
break;
case TEMP_PWM_FAN_MAP:
mutex_lock(&data->update_lock);
tmp = SENSORS_LIMIT(tmp, 0, 0xff);
w83795_write(client, W83795_REG_TFMR(index), tmp);
data->pwm_tfmr[index] = tmp;
mutex_unlock(&data->update_lock);
break;
}
return count;
}
#define FANIN_TARGET 0
#define FANIN_TOL 1
static ssize_t
show_fanin(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83795_data *data = w83795_update_pwm_config(dev);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
u16 tmp = 0;
switch (nr) {
case FANIN_TARGET:
tmp = fan_from_reg(data->target_speed[index]);
break;
case FANIN_TOL:
tmp = data->tol_speed;
break;
}
return sprintf(buf, "%u\n", tmp);
}
static ssize_t
store_fanin(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83795_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
unsigned long val;
if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
switch (nr) {
case FANIN_TARGET:
val = fan_to_reg(SENSORS_LIMIT(val, 0, 0xfff));
w83795_write(client, W83795_REG_FTSH(index), val >> 4);
w83795_write(client, W83795_REG_FTSL(index), (val << 4) & 0xf0);
data->target_speed[index] = val;
break;
case FANIN_TOL:
val = SENSORS_LIMIT(val, 0, 0x3f);
w83795_write(client, W83795_REG_TFTS, val);
data->tol_speed = val;
break;
}
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_temp_pwm(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83795_data *data = w83795_update_pwm_config(dev);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
long tmp = temp_from_reg(data->pwm_temp[index][nr]);
return sprintf(buf, "%ld\n", tmp);
}
static ssize_t
store_temp_pwm(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83795_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
unsigned long val;
u8 tmp;
if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
val /= 1000;
mutex_lock(&data->update_lock);
switch (nr) {
case TEMP_PWM_TTTI:
val = SENSORS_LIMIT(val, 0, 0x7f);
w83795_write(client, W83795_REG_TTTI(index), val);
break;
case TEMP_PWM_CTFS:
val = SENSORS_LIMIT(val, 0, 0x7f);
w83795_write(client, W83795_REG_CTFS(index), val);
break;
case TEMP_PWM_HCT:
val = SENSORS_LIMIT(val, 0, 0x0f);
tmp = w83795_read(client, W83795_REG_HT(index));
tmp &= 0x0f;
tmp |= (val << 4) & 0xf0;
w83795_write(client, W83795_REG_HT(index), tmp);
break;
case TEMP_PWM_HOT:
val = SENSORS_LIMIT(val, 0, 0x0f);
tmp = w83795_read(client, W83795_REG_HT(index));
tmp &= 0xf0;
tmp |= val & 0x0f;
w83795_write(client, W83795_REG_HT(index), tmp);
break;
}
data->pwm_temp[index][nr] = val;
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_sf4_pwm(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83795_data *data = w83795_update_pwm_config(dev);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
return sprintf(buf, "%u\n", data->sf4_reg[index][SF4_PWM][nr]);
}
static ssize_t
store_sf4_pwm(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83795_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
unsigned long val;
if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
w83795_write(client, W83795_REG_SF4_PWM(index, nr), val);
data->sf4_reg[index][SF4_PWM][nr] = val;
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_sf4_temp(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83795_data *data = w83795_update_pwm_config(dev);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
return sprintf(buf, "%u\n",
(data->sf4_reg[index][SF4_TEMP][nr]) * 1000);
}
static ssize_t
store_sf4_temp(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83795_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
unsigned long val;
if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
val /= 1000;
mutex_lock(&data->update_lock);
w83795_write(client, W83795_REG_SF4_TEMP(index, nr), val);
data->sf4_reg[index][SF4_TEMP][nr] = val;
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_temp(struct device *dev, struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct w83795_data *data = w83795_update_device(dev);
long temp = temp_from_reg(data->temp[index][nr]);
if (nr == TEMP_READ)
temp += (data->temp_read_vrlsb[index] >> 6) * 250;
return sprintf(buf, "%ld\n", temp);
}
static ssize_t
store_temp(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct i2c_client *client = to_i2c_client(dev);
struct w83795_data *data = i2c_get_clientdata(client);
long tmp;
if (kstrtol(buf, 10, &tmp) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
data->temp[index][nr] = temp_to_reg(tmp, -128, 127);
w83795_write(client, W83795_REG_TEMP[index][nr], data->temp[index][nr]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_dts_mode(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83795_data *data = dev_get_drvdata(dev);
int tmp;
if (data->enable_dts & 2)
tmp = 5;
else
tmp = 6;
return sprintf(buf, "%d\n", tmp);
}
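/*
 * The exported type values follow the hwmon sysfs convention: 5 for
 * AMD SB-TSI (enable_dts bit 1 set, per the struct w83795_data
 * field comment), 6 for Intel PECI.
 */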
static ssize_t
show_dts(struct device *dev, struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int index = sensor_attr->index;
struct w83795_data *data = w83795_update_device(dev);
long temp = temp_from_reg(data->dts[index]);
temp += (data->dts_read_vrlsb[index] >> 6) * 250;
return sprintf(buf, "%ld\n", temp);
}
static ssize_t
show_dts_ext(struct device *dev, struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
struct w83795_data *data = dev_get_drvdata(dev);
long temp = temp_from_reg(data->dts_ext[nr]);
return sprintf(buf, "%ld\n", temp);
}
static ssize_t
store_dts_ext(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
struct i2c_client *client = to_i2c_client(dev);
struct w83795_data *data = i2c_get_clientdata(client);
long tmp;
if (kstrtol(buf, 10, &tmp) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
data->dts_ext[nr] = temp_to_reg(tmp, -128, 127);
w83795_write(client, W83795_REG_DTS_EXT(nr), data->dts_ext[nr]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t
show_temp_mode(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w83795_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int index = sensor_attr->index;
int tmp;
if (data->temp_mode & (1 << index))
tmp = 3; /* Thermal diode */
else
tmp = 4; /* Thermistor */
return sprintf(buf, "%d\n", tmp);
}
/* Only for temp1-4 (temp5-6 can only be thermistor) */
static ssize_t
store_temp_mode(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct w83795_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int index = sensor_attr->index;
int reg_shift;
unsigned long val;
u8 tmp;
if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
if ((val != 4) && (val != 3))
return -EINVAL;
mutex_lock(&data->update_lock);
if (val == 3) {
/* Thermal diode */
val = 0x01;
data->temp_mode |= 1 << index;
} else if (val == 4) {
/* Thermistor */
val = 0x03;
data->temp_mode &= ~(1 << index);
}
reg_shift = 2 * index;
tmp = w83795_read(client, W83795_REG_TEMP_CTRL2);
tmp &= ~(0x03 << reg_shift);
tmp |= val << reg_shift;
w83795_write(client, W83795_REG_TEMP_CTRL2, tmp);
mutex_unlock(&data->update_lock);
return count;
}
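/*
 * TEMP_CTRL2 carries one 2-bit mode field per channel (hence
 * reg_shift = 2 * index above): 0x01 selects the thermal diode
 * input, 0x03 the thermistor input.
 */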
/* show/store VIN */
static ssize_t
show_in(struct device *dev, struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct w83795_data *data = w83795_update_device(dev);
u16 val = data->in[index][nr];
u8 lsb_idx;
switch (nr) {
case IN_READ:
/* Apply the x8 scaling for in17-in20 here rather than via sensors3.conf */
if ((index >= 17) &&
!((data->has_gain >> (index - 17)) & 1))
val *= 8;
break;
case IN_MAX:
case IN_LOW:
lsb_idx = IN_LSB_SHIFT_IDX[index][IN_LSB_IDX];
val <<= 2;
val |= (data->in_lsb[lsb_idx][nr] >>
IN_LSB_SHIFT_IDX[index][IN_LSB_SHIFT]) & 0x03;
if ((index >= 17) &&
!((data->has_gain >> (index - 17)) & 1))
val *= 8;
break;
}
val = in_from_reg(index, val);
return sprintf(buf, "%d\n", val);
}
static ssize_t
store_in(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
int index = sensor_attr->index;
struct i2c_client *client = to_i2c_client(dev);
struct w83795_data *data = i2c_get_clientdata(client);
unsigned long val;
u8 tmp;
u8 lsb_idx;
if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
val = in_to_reg(index, val);
if ((index >= 17) &&
!((data->has_gain >> (index - 17)) & 1))
val /= 8;
val = SENSORS_LIMIT(val, 0, 0x3FF);
mutex_lock(&data->update_lock);
lsb_idx = IN_LSB_SHIFT_IDX[index][IN_LSB_IDX];
tmp = w83795_read(client, IN_LSB_REG(lsb_idx, nr));
tmp &= ~(0x03 << IN_LSB_SHIFT_IDX[index][IN_LSB_SHIFT]);
tmp |= (val & 0x03) << IN_LSB_SHIFT_IDX[index][IN_LSB_SHIFT];
w83795_write(client, IN_LSB_REG(lsb_idx, nr), tmp);
data->in_lsb[lsb_idx][nr] = tmp;
tmp = (val >> 2) & 0xff;
w83795_write(client, W83795_REG_IN[index][nr], tmp);
data->in[index][nr] = tmp;
mutex_unlock(&data->update_lock);
return count;
}
#ifdef CONFIG_SENSORS_W83795_FANCTRL
static ssize_t
show_sf_setup(struct device *dev, struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
struct w83795_data *data = w83795_update_pwm_config(dev);
u16 val = data->setup_pwm[nr];
switch (nr) {
case SETUP_PWM_UPTIME:
case SETUP_PWM_DOWNTIME:
val = time_from_reg(val);
break;
}
return sprintf(buf, "%d\n", val);
}
static ssize_t
store_sf_setup(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *sensor_attr =
to_sensor_dev_attr_2(attr);
int nr = sensor_attr->nr;
struct i2c_client *client = to_i2c_client(dev);
struct w83795_data *data = i2c_get_clientdata(client);
unsigned long val;
if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
switch (nr) {
case SETUP_PWM_DEFAULT:
val = SENSORS_LIMIT(val, 0, 0xff);
break;
case SETUP_PWM_UPTIME:
case SETUP_PWM_DOWNTIME:
val = time_to_reg(val);
if (val == 0)
return -EINVAL;
break;
}
mutex_lock(&data->update_lock);
data->setup_pwm[nr] = val;
w83795_write(client, W83795_REG_SETUP_PWM(nr), val);
mutex_unlock(&data->update_lock);
return count;
}
#endif
#define NOT_USED -1
/*
* Don't change the attribute order, _max, _min and _beep are accessed by index
* somewhere else in the code
*/
#define SENSOR_ATTR_IN(index) { \
SENSOR_ATTR_2(in##index##_input, S_IRUGO, show_in, NULL, \
IN_READ, index), \
SENSOR_ATTR_2(in##index##_max, S_IRUGO | S_IWUSR, show_in, \
store_in, IN_MAX, index), \
SENSOR_ATTR_2(in##index##_min, S_IRUGO | S_IWUSR, show_in, \
store_in, IN_LOW, index), \
SENSOR_ATTR_2(in##index##_alarm, S_IRUGO, show_alarm_beep, \
NULL, ALARM_STATUS, index + ((index > 14) ? 1 : 0)), \
SENSOR_ATTR_2(in##index##_beep, S_IWUSR | S_IRUGO, \
show_alarm_beep, store_beep, BEEP_ENABLE, \
index + ((index > 14) ? 1 : 0)) }
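/*
 * The "+ 1" above accounts for a gap in the alarm/beep bitmap:
 * inputs above in14 skip one bit position, so in15-in20 map to
 * bits 16-21.
 */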
/*
* Don't change the attribute order, _beep is accessed by index
* somewhere else in the code
*/
#define SENSOR_ATTR_FAN(index) { \
SENSOR_ATTR_2(fan##index##_input, S_IRUGO, show_fan, \
NULL, FAN_INPUT, index - 1), \
SENSOR_ATTR_2(fan##index##_min, S_IWUSR | S_IRUGO, \
show_fan, store_fan_min, FAN_MIN, index - 1), \
SENSOR_ATTR_2(fan##index##_alarm, S_IRUGO, show_alarm_beep, \
NULL, ALARM_STATUS, index + 31), \
SENSOR_ATTR_2(fan##index##_beep, S_IWUSR | S_IRUGO, \
show_alarm_beep, store_beep, BEEP_ENABLE, index + 31) }
#define SENSOR_ATTR_PWM(index) { \
SENSOR_ATTR_2(pwm##index, S_IWUSR | S_IRUGO, show_pwm, \
store_pwm, PWM_OUTPUT, index - 1), \
SENSOR_ATTR_2(pwm##index##_enable, S_IWUSR | S_IRUGO, \
show_pwm_enable, store_pwm_enable, NOT_USED, index - 1), \
SENSOR_ATTR_2(pwm##index##_mode, S_IRUGO, \
show_pwm_mode, NULL, NOT_USED, index - 1), \
SENSOR_ATTR_2(pwm##index##_freq, S_IWUSR | S_IRUGO, \
show_pwm, store_pwm, PWM_FREQ, index - 1), \
SENSOR_ATTR_2(pwm##index##_nonstop, S_IWUSR | S_IRUGO, \
show_pwm, store_pwm, PWM_NONSTOP, index - 1), \
SENSOR_ATTR_2(pwm##index##_start, S_IWUSR | S_IRUGO, \
show_pwm, store_pwm, PWM_START, index - 1), \
SENSOR_ATTR_2(pwm##index##_stop_time, S_IWUSR | S_IRUGO, \
show_pwm, store_pwm, PWM_STOP_TIME, index - 1), \
SENSOR_ATTR_2(fan##index##_target, S_IWUSR | S_IRUGO, \
show_fanin, store_fanin, FANIN_TARGET, index - 1) }
/*
* Don't change the attribute order, _beep is accessed by index
* somewhere else in the code
*/
#define SENSOR_ATTR_DTS(index) { \
SENSOR_ATTR_2(temp##index##_type, S_IRUGO, \
show_dts_mode, NULL, NOT_USED, index - 7), \
SENSOR_ATTR_2(temp##index##_input, S_IRUGO, show_dts, \
NULL, NOT_USED, index - 7), \
SENSOR_ATTR_2(temp##index##_crit, S_IRUGO | S_IWUSR, show_dts_ext, \
store_dts_ext, DTS_CRIT, NOT_USED), \
SENSOR_ATTR_2(temp##index##_crit_hyst, S_IRUGO | S_IWUSR, \
show_dts_ext, store_dts_ext, DTS_CRIT_HYST, NOT_USED), \
SENSOR_ATTR_2(temp##index##_max, S_IRUGO | S_IWUSR, show_dts_ext, \
store_dts_ext, DTS_WARN, NOT_USED), \
SENSOR_ATTR_2(temp##index##_max_hyst, S_IRUGO | S_IWUSR, \
show_dts_ext, store_dts_ext, DTS_WARN_HYST, NOT_USED), \
SENSOR_ATTR_2(temp##index##_alarm, S_IRUGO, \
show_alarm_beep, NULL, ALARM_STATUS, index + 17), \
SENSOR_ATTR_2(temp##index##_beep, S_IWUSR | S_IRUGO, \
show_alarm_beep, store_beep, BEEP_ENABLE, index + 17) }
/*
* Don't change the attribute order, _beep is accessed by index
* somewhere else in the code
*/
#define SENSOR_ATTR_TEMP(index) { \
SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 4 ? S_IWUSR : 0), \
show_temp_mode, store_temp_mode, NOT_USED, index - 1), \
SENSOR_ATTR_2(temp##index##_input, S_IRUGO, show_temp, \
NULL, TEMP_READ, index - 1), \
SENSOR_ATTR_2(temp##index##_crit, S_IRUGO | S_IWUSR, show_temp, \
store_temp, TEMP_CRIT, index - 1), \
SENSOR_ATTR_2(temp##index##_crit_hyst, S_IRUGO | S_IWUSR, \
show_temp, store_temp, TEMP_CRIT_HYST, index - 1), \
SENSOR_ATTR_2(temp##index##_max, S_IRUGO | S_IWUSR, show_temp, \
store_temp, TEMP_WARN, index - 1), \
SENSOR_ATTR_2(temp##index##_max_hyst, S_IRUGO | S_IWUSR, \
show_temp, store_temp, TEMP_WARN_HYST, index - 1), \
SENSOR_ATTR_2(temp##index##_alarm, S_IRUGO, \
show_alarm_beep, NULL, ALARM_STATUS, \
index + (index > 4 ? 11 : 17)), \
SENSOR_ATTR_2(temp##index##_beep, S_IWUSR | S_IRUGO, \
show_alarm_beep, store_beep, BEEP_ENABLE, \
index + (index > 4 ? 11 : 17)), \
SENSOR_ATTR_2(temp##index##_pwm_enable, S_IWUSR | S_IRUGO, \
show_temp_pwm_enable, store_temp_pwm_enable, \
TEMP_PWM_ENABLE, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_channels_pwm, S_IWUSR | S_IRUGO, \
show_temp_pwm_enable, store_temp_pwm_enable, \
TEMP_PWM_FAN_MAP, index - 1), \
SENSOR_ATTR_2(thermal_cruise##index, S_IWUSR | S_IRUGO, \
show_temp_pwm, store_temp_pwm, TEMP_PWM_TTTI, index - 1), \
SENSOR_ATTR_2(temp##index##_warn, S_IWUSR | S_IRUGO, \
show_temp_pwm, store_temp_pwm, TEMP_PWM_CTFS, index - 1), \
SENSOR_ATTR_2(temp##index##_warn_hyst, S_IWUSR | S_IRUGO, \
show_temp_pwm, store_temp_pwm, TEMP_PWM_HCT, index - 1), \
SENSOR_ATTR_2(temp##index##_operation_hyst, S_IWUSR | S_IRUGO, \
show_temp_pwm, store_temp_pwm, TEMP_PWM_HOT, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point1_pwm, S_IRUGO | S_IWUSR, \
show_sf4_pwm, store_sf4_pwm, 0, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point2_pwm, S_IRUGO | S_IWUSR, \
show_sf4_pwm, store_sf4_pwm, 1, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point3_pwm, S_IRUGO | S_IWUSR, \
show_sf4_pwm, store_sf4_pwm, 2, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point4_pwm, S_IRUGO | S_IWUSR, \
show_sf4_pwm, store_sf4_pwm, 3, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point5_pwm, S_IRUGO | S_IWUSR, \
show_sf4_pwm, store_sf4_pwm, 4, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point6_pwm, S_IRUGO | S_IWUSR, \
show_sf4_pwm, store_sf4_pwm, 5, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point7_pwm, S_IRUGO | S_IWUSR, \
show_sf4_pwm, store_sf4_pwm, 6, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point1_temp, S_IRUGO | S_IWUSR,\
show_sf4_temp, store_sf4_temp, 0, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point2_temp, S_IRUGO | S_IWUSR,\
show_sf4_temp, store_sf4_temp, 1, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point3_temp, S_IRUGO | S_IWUSR,\
show_sf4_temp, store_sf4_temp, 2, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point4_temp, S_IRUGO | S_IWUSR,\
show_sf4_temp, store_sf4_temp, 3, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point5_temp, S_IRUGO | S_IWUSR,\
show_sf4_temp, store_sf4_temp, 4, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point6_temp, S_IRUGO | S_IWUSR,\
show_sf4_temp, store_sf4_temp, 5, index - 1), \
SENSOR_ATTR_2(temp##index##_auto_point7_temp, S_IRUGO | S_IWUSR,\
show_sf4_temp, store_sf4_temp, 6, index - 1) }
static struct sensor_device_attribute_2 w83795_in[][5] = {
SENSOR_ATTR_IN(0),
SENSOR_ATTR_IN(1),
SENSOR_ATTR_IN(2),
SENSOR_ATTR_IN(3),
SENSOR_ATTR_IN(4),
SENSOR_ATTR_IN(5),
SENSOR_ATTR_IN(6),
SENSOR_ATTR_IN(7),
SENSOR_ATTR_IN(8),
SENSOR_ATTR_IN(9),
SENSOR_ATTR_IN(10),
SENSOR_ATTR_IN(11),
SENSOR_ATTR_IN(12),
SENSOR_ATTR_IN(13),
SENSOR_ATTR_IN(14),
SENSOR_ATTR_IN(15),
SENSOR_ATTR_IN(16),
SENSOR_ATTR_IN(17),
SENSOR_ATTR_IN(18),
SENSOR_ATTR_IN(19),
SENSOR_ATTR_IN(20),
};
static const struct sensor_device_attribute_2 w83795_fan[][4] = {
SENSOR_ATTR_FAN(1),
SENSOR_ATTR_FAN(2),
SENSOR_ATTR_FAN(3),
SENSOR_ATTR_FAN(4),
SENSOR_ATTR_FAN(5),
SENSOR_ATTR_FAN(6),
SENSOR_ATTR_FAN(7),
SENSOR_ATTR_FAN(8),
SENSOR_ATTR_FAN(9),
SENSOR_ATTR_FAN(10),
SENSOR_ATTR_FAN(11),
SENSOR_ATTR_FAN(12),
SENSOR_ATTR_FAN(13),
SENSOR_ATTR_FAN(14),
};
static const struct sensor_device_attribute_2 w83795_temp[][28] = {
SENSOR_ATTR_TEMP(1),
SENSOR_ATTR_TEMP(2),
SENSOR_ATTR_TEMP(3),
SENSOR_ATTR_TEMP(4),
SENSOR_ATTR_TEMP(5),
SENSOR_ATTR_TEMP(6),
};
static const struct sensor_device_attribute_2 w83795_dts[][8] = {
SENSOR_ATTR_DTS(7),
SENSOR_ATTR_DTS(8),
SENSOR_ATTR_DTS(9),
SENSOR_ATTR_DTS(10),
SENSOR_ATTR_DTS(11),
SENSOR_ATTR_DTS(12),
SENSOR_ATTR_DTS(13),
SENSOR_ATTR_DTS(14),
};
static const struct sensor_device_attribute_2 w83795_pwm[][8] = {
SENSOR_ATTR_PWM(1),
SENSOR_ATTR_PWM(2),
SENSOR_ATTR_PWM(3),
SENSOR_ATTR_PWM(4),
SENSOR_ATTR_PWM(5),
SENSOR_ATTR_PWM(6),
SENSOR_ATTR_PWM(7),
SENSOR_ATTR_PWM(8),
};
static const struct sensor_device_attribute_2 w83795_tss[6] = {
SENSOR_ATTR_2(temp1_source_sel, S_IWUSR | S_IRUGO,
show_temp_src, store_temp_src, NOT_USED, 0),
SENSOR_ATTR_2(temp2_source_sel, S_IWUSR | S_IRUGO,
show_temp_src, store_temp_src, NOT_USED, 1),
SENSOR_ATTR_2(temp3_source_sel, S_IWUSR | S_IRUGO,
show_temp_src, store_temp_src, NOT_USED, 2),
SENSOR_ATTR_2(temp4_source_sel, S_IWUSR | S_IRUGO,
show_temp_src, store_temp_src, NOT_USED, 3),
SENSOR_ATTR_2(temp5_source_sel, S_IWUSR | S_IRUGO,
show_temp_src, store_temp_src, NOT_USED, 4),
SENSOR_ATTR_2(temp6_source_sel, S_IWUSR | S_IRUGO,
show_temp_src, store_temp_src, NOT_USED, 5),
};
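/*
 * Usage note: each entry above typically surfaces as a sysfs file such as
 * temp1_source_sel on the hwmon/i2c device; reads go through
 * show_temp_src() and writes through store_temp_src(), with nr == NOT_USED
 * and index 0..5 selecting the channel.
 */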
static const struct sensor_device_attribute_2 sda_single_files[] = {
SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_alarm_beep,
store_chassis_clear, ALARM_STATUS, 46),
#ifdef CONFIG_SENSORS_W83795_FANCTRL
SENSOR_ATTR_2(speed_cruise_tolerance, S_IWUSR | S_IRUGO, show_fanin,
store_fanin, FANIN_TOL, NOT_USED),
SENSOR_ATTR_2(pwm_default, S_IWUSR | S_IRUGO, show_sf_setup,
store_sf_setup, SETUP_PWM_DEFAULT, NOT_USED),
SENSOR_ATTR_2(pwm_uptime, S_IWUSR | S_IRUGO, show_sf_setup,
store_sf_setup, SETUP_PWM_UPTIME, NOT_USED),
SENSOR_ATTR_2(pwm_downtime, S_IWUSR | S_IRUGO, show_sf_setup,
store_sf_setup, SETUP_PWM_DOWNTIME, NOT_USED),
#endif
};
static const struct sensor_device_attribute_2 sda_beep_files[] = {
SENSOR_ATTR_2(intrusion0_beep, S_IWUSR | S_IRUGO, show_alarm_beep,
store_beep, BEEP_ENABLE, 46),
SENSOR_ATTR_2(beep_enable, S_IWUSR | S_IRUGO, show_alarm_beep,
store_beep, BEEP_ENABLE, 47),
};
/*
* Driver interface
*/
static void w83795_init_client(struct i2c_client *client)
{
struct w83795_data *data = i2c_get_clientdata(client);
static const u16 clkin[4] = { /* in kHz */
14318, 24000, 33333, 48000
};
u8 config;
if (reset)
w83795_write(client, W83795_REG_CONFIG, 0x80);
/* Start monitoring if needed */
config = w83795_read(client, W83795_REG_CONFIG);
if (!(config & W83795_REG_CONFIG_START)) {
dev_info(&client->dev, "Enabling monitoring operations\n");
w83795_write(client, W83795_REG_CONFIG,
config | W83795_REG_CONFIG_START);
}
data->clkin = clkin[(config >> 3) & 0x3];
dev_dbg(&client->dev, "clkin = %u kHz\n", data->clkin);
}
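/*
 * Worked example of the clock decode above: bits [4:3] of
 * W83795_REG_CONFIG select the monitoring clock, so config == 0x48 gives
 * (0x48 >> 3) & 0x3 == 1 and therefore clkin == 24000 kHz.
 */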
static int w83795_get_device_id(struct i2c_client *client)
{
int device_id;
device_id = i2c_smbus_read_byte_data(client, W83795_REG_DEVICEID);
/*
* Special case for rev. A chips; can't be checked first because later
* revisions emulate this for compatibility
*/
if (device_id < 0 || (device_id & 0xf0) != 0x50) {
int alt_id;
alt_id = i2c_smbus_read_byte_data(client,
W83795_REG_DEVICEID_A);
if (alt_id == 0x50)
device_id = alt_id;
}
return device_id;
}
/* Return 0 if detection is successful, -ENODEV otherwise */
static int w83795_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
int bank, vendor_id, device_id, expected, i2c_addr, config;
struct i2c_adapter *adapter = client->adapter;
unsigned short address = client->addr;
const char *chip_name;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
bank = i2c_smbus_read_byte_data(client, W83795_REG_BANKSEL);
if (bank < 0 || (bank & 0x7c)) {
dev_dbg(&adapter->dev,
"w83795: Detection failed at addr 0x%02hx, check %s\n",
address, "bank");
return -ENODEV;
}
/* Check Nuvoton vendor ID */
vendor_id = i2c_smbus_read_byte_data(client, W83795_REG_VENDORID);
expected = bank & 0x80 ? 0x5c : 0xa3;
if (vendor_id != expected) {
dev_dbg(&adapter->dev,
"w83795: Detection failed at addr 0x%02hx, check %s\n",
address, "vendor id");
return -ENODEV;
}
/* Check device ID */
device_id = w83795_get_device_id(client) |
(i2c_smbus_read_byte_data(client, W83795_REG_CHIPID) << 8);
if ((device_id >> 4) != 0x795) {
dev_dbg(&adapter->dev,
"w83795: Detection failed at addr 0x%02hx, check %s\n",
address, "device id\n");
return -ENODEV;
}
/*
* If Nuvoton chip, address of chip and W83795_REG_I2C_ADDR
* should match
*/
if ((bank & 0x07) == 0) {
i2c_addr = i2c_smbus_read_byte_data(client,
W83795_REG_I2C_ADDR);
if ((i2c_addr & 0x7f) != address) {
dev_dbg(&adapter->dev,
"w83795: Detection failed at addr 0x%02hx, "
"check %s\n", address, "i2c addr");
return -ENODEV;
}
}
/*
* Check 795 chip type: 795G or 795ADG
* Usually we don't write to chips during detection, but here we don't
* quite have the choice; hopefully it's OK, we are about to return
* success anyway
*/
if ((bank & 0x07) != 0)
i2c_smbus_write_byte_data(client, W83795_REG_BANKSEL,
bank & ~0x07);
config = i2c_smbus_read_byte_data(client, W83795_REG_CONFIG);
if (config & W83795_REG_CONFIG_CONFIG48)
chip_name = "w83795adg";
else
chip_name = "w83795g";
strlcpy(info->type, chip_name, I2C_NAME_SIZE);
dev_info(&adapter->dev, "Found %s rev. %c at 0x%02hx\n", chip_name,
'A' + (device_id & 0xf), address);
return 0;
}
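/*
 * Worked example of the ID check above: with W83795_REG_CHIPID reading
 * 0x79 and w83795_get_device_id() returning 0x51, device_id is 0x7951,
 * (device_id >> 4) == 0x795 passes, and the reported revision is
 * 'A' + (0x7951 & 0xf) == 'B'.
 */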
#ifdef CONFIG_SENSORS_W83795_FANCTRL
#define NUM_PWM_ATTRIBUTES ARRAY_SIZE(w83795_pwm[0])
#define NUM_TEMP_ATTRIBUTES ARRAY_SIZE(w83795_temp[0])
#else
#define NUM_PWM_ATTRIBUTES 4
#define NUM_TEMP_ATTRIBUTES 8
#endif
static int w83795_handle_files(struct device *dev, int (*fn)(struct device *,
const struct device_attribute *))
{
struct w83795_data *data = dev_get_drvdata(dev);
int err, i, j;
for (i = 0; i < ARRAY_SIZE(w83795_in); i++) {
if (!(data->has_in & (1 << i)))
continue;
for (j = 0; j < ARRAY_SIZE(w83795_in[0]); j++) {
if (j == 4 && !data->enable_beep)
continue;
err = fn(dev, &w83795_in[i][j].dev_attr);
if (err)
return err;
}
}
for (i = 0; i < ARRAY_SIZE(w83795_fan); i++) {
if (!(data->has_fan & (1 << i)))
continue;
for (j = 0; j < ARRAY_SIZE(w83795_fan[0]); j++) {
if (j == 3 && !data->enable_beep)
continue;
err = fn(dev, &w83795_fan[i][j].dev_attr);
if (err)
return err;
}
}
for (i = 0; i < ARRAY_SIZE(w83795_tss); i++) {
j = w83795_tss_useful(data, i);
if (!j)
continue;
err = fn(dev, &w83795_tss[i].dev_attr);
if (err)
return err;
}
for (i = 0; i < ARRAY_SIZE(sda_single_files); i++) {
err = fn(dev, &sda_single_files[i].dev_attr);
if (err)
return err;
}
if (data->enable_beep) {
for (i = 0; i < ARRAY_SIZE(sda_beep_files); i++) {
err = fn(dev, &sda_beep_files[i].dev_attr);
if (err)
return err;
}
}
for (i = 0; i < data->has_pwm; i++) {
for (j = 0; j < NUM_PWM_ATTRIBUTES; j++) {
err = fn(dev, &w83795_pwm[i][j].dev_attr);
if (err)
return err;
}
}
for (i = 0; i < ARRAY_SIZE(w83795_temp); i++) {
if (!(data->has_temp & (1 << i)))
continue;
for (j = 0; j < NUM_TEMP_ATTRIBUTES; j++) {
if (j == 7 && !data->enable_beep)
continue;
err = fn(dev, &w83795_temp[i][j].dev_attr);
if (err)
return err;
}
}
if (data->enable_dts) {
for (i = 0; i < ARRAY_SIZE(w83795_dts); i++) {
if (!(data->has_dts & (1 << i)))
continue;
for (j = 0; j < ARRAY_SIZE(w83795_dts[0]); j++) {
if (j == 7 && !data->enable_beep)
continue;
err = fn(dev, &w83795_dts[i][j].dev_attr);
if (err)
return err;
}
}
}
return 0;
}
/* We need a wrapper that fits in w83795_handle_files */
static int device_remove_file_wrapper(struct device *dev,
const struct device_attribute *attr)
{
device_remove_file(dev, attr);
return 0;
}
static void w83795_check_dynamic_in_limits(struct i2c_client *client)
{
struct w83795_data *data = i2c_get_clientdata(client);
u8 vid_ctl;
int i, err_max, err_min;
vid_ctl = w83795_read(client, W83795_REG_VID_CTRL);
/* Return immediately if VRM isn't configured */
if ((vid_ctl & 0x07) == 0x00 || (vid_ctl & 0x07) == 0x07)
return;
data->has_dyn_in = (vid_ctl >> 3) & 0x07;
for (i = 0; i < 2; i++) {
if (!(data->has_dyn_in & (1 << i)))
continue;
/* Voltage limits in dynamic mode, switch to read-only */
err_max = sysfs_chmod_file(&client->dev.kobj,
&w83795_in[i][2].dev_attr.attr,
S_IRUGO);
err_min = sysfs_chmod_file(&client->dev.kobj,
&w83795_in[i][3].dev_attr.attr,
S_IRUGO);
if (err_max || err_min)
dev_warn(&client->dev, "Failed to set in%d limits "
"read-only (%d, %d)\n", i, err_max, err_min);
else
dev_info(&client->dev, "in%d limits set dynamically "
"from VID\n", i);
}
}
/* Check pins that can be used for either temperature or voltage monitoring */
static void w83795_apply_temp_config(struct w83795_data *data, u8 config,
int temp_chan, int in_chan)
{
/* config is a 2-bit value */
switch (config) {
case 0x2: /* Voltage monitoring */
data->has_in |= 1 << in_chan;
break;
case 0x1: /* Thermal diode */
if (temp_chan >= 4)
break;
data->temp_mode |= 1 << temp_chan;
/* fall through */
case 0x3: /* Thermistor */
data->has_temp |= 1 << temp_chan;
break;
}
}
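/*
 * Worked example: with W83795_REG_TEMP_CTRL2 == 0x9b, the probe code
 * below decodes four 2-bit fields: tmp >> 6 == 0x2 (voltage on in20),
 * (tmp >> 4) & 0x3 == 0x1 (thermal diode on temp3),
 * (tmp >> 2) & 0x3 == 0x2 (voltage on in18), and
 * tmp & 0x3 == 0x3 (thermistor on temp1).
 */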
static int w83795_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int i;
u8 tmp;
struct device *dev = &client->dev;
struct w83795_data *data;
int err;
data = kzalloc(sizeof(struct w83795_data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto exit;
}
i2c_set_clientdata(client, data);
data->chip_type = id->driver_data;
data->bank = i2c_smbus_read_byte_data(client, W83795_REG_BANKSEL);
mutex_init(&data->update_lock);
/* Initialize the chip */
w83795_init_client(client);
/* Check which voltages and fans are present */
data->has_in = w83795_read(client, W83795_REG_VOLT_CTRL1)
| (w83795_read(client, W83795_REG_VOLT_CTRL2) << 8);
data->has_fan = w83795_read(client, W83795_REG_FANIN_CTRL1)
| (w83795_read(client, W83795_REG_FANIN_CTRL2) << 8);
/* Check which analog temperatures and extra voltages are present */
tmp = w83795_read(client, W83795_REG_TEMP_CTRL1);
if (tmp & 0x20)
data->enable_dts = 1;
w83795_apply_temp_config(data, (tmp >> 2) & 0x3, 5, 16);
w83795_apply_temp_config(data, tmp & 0x3, 4, 15);
tmp = w83795_read(client, W83795_REG_TEMP_CTRL2);
w83795_apply_temp_config(data, tmp >> 6, 3, 20);
w83795_apply_temp_config(data, (tmp >> 4) & 0x3, 2, 19);
w83795_apply_temp_config(data, (tmp >> 2) & 0x3, 1, 18);
w83795_apply_temp_config(data, tmp & 0x3, 0, 17);
/* Check DTS enable status */
if (data->enable_dts) {
if (1 & w83795_read(client, W83795_REG_DTSC))
data->enable_dts |= 2;
data->has_dts = w83795_read(client, W83795_REG_DTSE);
}
/* Report PECI Tbase values */
if (data->enable_dts == 1) {
for (i = 0; i < 8; i++) {
if (!(data->has_dts & (1 << i)))
continue;
tmp = w83795_read(client, W83795_REG_PECI_TBASE(i));
dev_info(&client->dev,
"PECI agent %d Tbase temperature: %u\n",
i + 1, (unsigned int)tmp & 0x7f);
}
}
data->has_gain = w83795_read(client, W83795_REG_VMIGB_CTRL) & 0x0f;
/* pwm and smart fan */
if (data->chip_type == w83795g)
data->has_pwm = 8;
else
data->has_pwm = 2;
/* Check if BEEP pin is available */
if (data->chip_type == w83795g) {
/* The W83795G has a dedicated BEEP pin */
data->enable_beep = 1;
} else {
/*
* The W83795ADG has a shared pin for OVT# and BEEP, so you
* can't have both
*/
tmp = w83795_read(client, W83795_REG_OVT_CFG);
if ((tmp & OVT_CFG_SEL) == 0)
data->enable_beep = 1;
}
err = w83795_handle_files(dev, device_create_file);
if (err)
goto exit_remove;
if (data->chip_type == w83795g)
w83795_check_dynamic_in_limits(client);
data->hwmon_dev = hwmon_device_register(dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
goto exit_remove;
}
return 0;
exit_remove:
w83795_handle_files(dev, device_remove_file_wrapper);
kfree(data);
exit:
return err;
}
static int w83795_remove(struct i2c_client *client)
{
struct w83795_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
w83795_handle_files(&client->dev, device_remove_file_wrapper);
kfree(data);
return 0;
}
static const struct i2c_device_id w83795_id[] = {
{ "w83795g", w83795g },
{ "w83795adg", w83795adg },
{ }
};
MODULE_DEVICE_TABLE(i2c, w83795_id);
static struct i2c_driver w83795_driver = {
.driver = {
.name = "w83795",
},
.probe = w83795_probe,
.remove = w83795_remove,
.id_table = w83795_id,
.class = I2C_CLASS_HWMON,
.detect = w83795_detect,
.address_list = normal_i2c,
};
module_i2c_driver(w83795_driver);
MODULE_AUTHOR("Wei Song, Jean Delvare <khali@linux-fr.org>");
MODULE_DESCRIPTION("W83795G/ADG hardware monitoring driver");
MODULE_LICENSE("GPL");
|
gpl-2.0
|
chrisk44/android_kernel_lge_hammerhead
|
arch/arm/mach-s3c24xx/irq-s3c2440.c
|
5112
|
3035
|
/* linux/arch/arm/mach-s3c2440/irq.c
*
* Copyright (c) 2003-2004 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <mach/regs-irq.h>
#include <mach/regs-gpio.h>
#include <plat/cpu.h>
#include <plat/pm.h>
#include <plat/irq.h>
/* WDT/AC97 */
static void s3c_irq_demux_wdtac97(unsigned int irq,
struct irq_desc *desc)
{
unsigned int subsrc, submsk;
	/* read the current pending sub-interrupts, and filter
	 * out the ones that are masked */
subsrc = __raw_readl(S3C2410_SUBSRCPND);
submsk = __raw_readl(S3C2410_INTSUBMSK);
subsrc &= ~submsk;
subsrc >>= 13;
subsrc &= 3;
if (subsrc != 0) {
if (subsrc & 1) {
generic_handle_irq(IRQ_S3C2440_WDT);
}
if (subsrc & 2) {
generic_handle_irq(IRQ_S3C2440_AC97);
}
}
}
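/*
 * Worked example: if only the AC97 sub-interrupt is pending and unmasked,
 * SUBSRCPND has bit 14 set, so (subsrc >> 13) & 3 == 2 and only
 * IRQ_S3C2440_AC97 is demuxed; bit 13 alone (WDT) would yield 1 instead.
 */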
#define INTMSK_WDT (1UL << (IRQ_WDT - IRQ_EINT0))
static void
s3c_irq_wdtac97_mask(struct irq_data *data)
{
s3c_irqsub_mask(data->irq, INTMSK_WDT, 3 << 13);
}
static void
s3c_irq_wdtac97_unmask(struct irq_data *data)
{
s3c_irqsub_unmask(data->irq, INTMSK_WDT);
}
static void
s3c_irq_wdtac97_ack(struct irq_data *data)
{
s3c_irqsub_maskack(data->irq, INTMSK_WDT, 3 << 13);
}
static struct irq_chip s3c_irq_wdtac97 = {
.irq_mask = s3c_irq_wdtac97_mask,
.irq_unmask = s3c_irq_wdtac97_unmask,
.irq_ack = s3c_irq_wdtac97_ack,
};
static int s3c2440_irq_add(struct device *dev, struct subsys_interface *sif)
{
unsigned int irqno;
printk("S3C2440: IRQ Support\n");
	/* add new chained handler for wdt, ac97 */
irq_set_chip_and_handler(IRQ_WDT, &s3c_irq_level_chip,
handle_level_irq);
irq_set_chained_handler(IRQ_WDT, s3c_irq_demux_wdtac97);
for (irqno = IRQ_S3C2440_WDT; irqno <= IRQ_S3C2440_AC97; irqno++) {
irq_set_chip_and_handler(irqno, &s3c_irq_wdtac97,
handle_level_irq);
set_irq_flags(irqno, IRQF_VALID);
}
return 0;
}
static struct subsys_interface s3c2440_irq_interface = {
.name = "s3c2440_irq",
.subsys = &s3c2440_subsys,
.add_dev = s3c2440_irq_add,
};
static int s3c2440_irq_init(void)
{
return subsys_interface_register(&s3c2440_irq_interface);
}
arch_initcall(s3c2440_irq_init);
|
gpl-2.0
|
kundancool/android_kernel_xiaomi_msm8974
|
drivers/video/leo.c
|
8184
|
15812
|
/* leo.c: LEO frame buffer driver
*
* Copyright (C) 2003, 2006 David S. Miller (davem@davemloft.net)
* Copyright (C) 1996-1999 Jakub Jelinek (jj@ultra.linux.cz)
* Copyright (C) 1997 Michal Rehacek (Michal.Rehacek@st.mff.cuni.cz)
*
* Driver layout based loosely on tgafb.c, see that file for credits.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/fb.h>
#include <linux/mm.h>
#include <linux/of_device.h>
#include <linux/io.h>
#include <asm/fbio.h>
#include "sbuslib.h"
/*
* Local functions.
*/
static int leo_setcolreg(unsigned, unsigned, unsigned, unsigned,
unsigned, struct fb_info *);
static int leo_blank(int, struct fb_info *);
static int leo_mmap(struct fb_info *, struct vm_area_struct *);
static int leo_ioctl(struct fb_info *, unsigned int, unsigned long);
static int leo_pan_display(struct fb_var_screeninfo *, struct fb_info *);
/*
* Frame buffer operations
*/
static struct fb_ops leo_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = leo_setcolreg,
.fb_blank = leo_blank,
.fb_pan_display = leo_pan_display,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_mmap = leo_mmap,
.fb_ioctl = leo_ioctl,
#ifdef CONFIG_COMPAT
.fb_compat_ioctl = sbusfb_compat_ioctl,
#endif
};
#define LEO_OFF_LC_SS0_KRN 0x00200000UL
#define LEO_OFF_LC_SS0_USR 0x00201000UL
#define LEO_OFF_LC_SS1_KRN 0x01200000UL
#define LEO_OFF_LC_SS1_USR 0x01201000UL
#define LEO_OFF_LD_SS0 0x00400000UL
#define LEO_OFF_LD_SS1 0x01400000UL
#define LEO_OFF_LD_GBL 0x00401000UL
#define LEO_OFF_LX_KRN 0x00600000UL
#define LEO_OFF_LX_CURSOR 0x00601000UL
#define LEO_OFF_SS0 0x00800000UL
#define LEO_OFF_SS1 0x01800000UL
#define LEO_OFF_UNK 0x00602000UL
#define LEO_OFF_UNK2 0x00000000UL
#define LEO_CUR_ENABLE 0x00000080
#define LEO_CUR_UPDATE 0x00000030
#define LEO_CUR_PROGRESS 0x00000006
#define LEO_CUR_UPDATECMAP 0x00000003
#define LEO_CUR_TYPE_MASK 0x00000000
#define LEO_CUR_TYPE_IMAGE 0x00000020
#define LEO_CUR_TYPE_CMAP 0x00000050
struct leo_cursor {
u8 xxx0[16];
u32 cur_type;
u32 cur_misc;
u32 cur_cursxy;
u32 cur_data;
};
#define LEO_KRN_TYPE_CLUT0 0x00001000
#define LEO_KRN_TYPE_CLUT1 0x00001001
#define LEO_KRN_TYPE_CLUT2 0x00001002
#define LEO_KRN_TYPE_WID 0x00001003
#define LEO_KRN_TYPE_UNK 0x00001006
#define LEO_KRN_TYPE_VIDEO 0x00002003
#define LEO_KRN_TYPE_CLUTDATA 0x00004000
#define LEO_KRN_CSR_ENABLE 0x00000008
#define LEO_KRN_CSR_PROGRESS 0x00000004
#define LEO_KRN_CSR_UNK 0x00000002
#define LEO_KRN_CSR_UNK2 0x00000001
struct leo_lx_krn {
u32 krn_type;
u32 krn_csr;
u32 krn_value;
};
struct leo_lc_ss0_krn {
u32 misc;
u8 xxx0[0x800-4];
u32 rev;
};
struct leo_lc_ss0_usr {
u32 csr;
u32 addrspace;
u32 fontmsk;
u32 fontt;
u32 extent;
u32 src;
u32 dst;
u32 copy;
u32 fill;
};
struct leo_lc_ss1_krn {
u8 unknown;
};
struct leo_lc_ss1_usr {
u8 unknown;
};
struct leo_ld_ss0 {
u8 xxx0[0xe00];
u32 csr;
u32 wid;
u32 wmask;
u32 widclip;
u32 vclipmin;
u32 vclipmax;
u32 pickmin; /* SS1 only */
u32 pickmax; /* SS1 only */
u32 fg;
u32 bg;
u32 src; /* Copy/Scroll (SS0 only) */
u32 dst; /* Copy/Scroll/Fill (SS0 only) */
u32 extent; /* Copy/Scroll/Fill size (SS0 only) */
u32 xxx1[3];
u32 setsem; /* SS1 only */
u32 clrsem; /* SS1 only */
u32 clrpick; /* SS1 only */
u32 clrdat; /* SS1 only */
u32 alpha; /* SS1 only */
u8 xxx2[0x2c];
u32 winbg;
u32 planemask;
u32 rop;
u32 z;
u32 dczf; /* SS1 only */
u32 dczb; /* SS1 only */
u32 dcs; /* SS1 only */
u32 dczs; /* SS1 only */
u32 pickfb; /* SS1 only */
u32 pickbb; /* SS1 only */
u32 dcfc; /* SS1 only */
u32 forcecol; /* SS1 only */
u32 door[8]; /* SS1 only */
u32 pick[5]; /* SS1 only */
};
#define LEO_SS1_MISC_ENABLE 0x00000001
#define LEO_SS1_MISC_STEREO 0x00000002
struct leo_ld_ss1 {
u8 xxx0[0xef4];
u32 ss1_misc;
};
struct leo_ld_gbl {
u8 unknown;
};
struct leo_par {
spinlock_t lock;
struct leo_lx_krn __iomem *lx_krn;
struct leo_lc_ss0_usr __iomem *lc_ss0_usr;
struct leo_ld_ss0 __iomem *ld_ss0;
struct leo_ld_ss1 __iomem *ld_ss1;
struct leo_cursor __iomem *cursor;
u32 extent;
u32 clut_data[256];
u32 flags;
#define LEO_FLAG_BLANKED 0x00000001
unsigned long which_io;
};
static void leo_wait(struct leo_lx_krn __iomem *lx_krn)
{
int i;
for (i = 0;
(sbus_readl(&lx_krn->krn_csr) & LEO_KRN_CSR_PROGRESS) &&
i < 300000;
i++)
udelay(1); /* Busy wait at most 0.3 sec */
return;
}
static void leo_switch_from_graph(struct fb_info *info)
{
struct leo_par *par = (struct leo_par *) info->par;
struct leo_ld_ss0 __iomem *ss = par->ld_ss0;
struct leo_cursor __iomem *cursor = par->cursor;
unsigned long flags;
u32 val;
spin_lock_irqsave(&par->lock, flags);
par->extent = ((info->var.xres - 1) |
((info->var.yres - 1) << 16));
sbus_writel(0xffffffff, &ss->wid);
sbus_writel(0xffff, &ss->wmask);
sbus_writel(0, &ss->vclipmin);
sbus_writel(par->extent, &ss->vclipmax);
sbus_writel(0, &ss->fg);
sbus_writel(0xff000000, &ss->planemask);
sbus_writel(0x310850, &ss->rop);
sbus_writel(0, &ss->widclip);
sbus_writel((info->var.xres-1) | ((info->var.yres-1) << 11),
&par->lc_ss0_usr->extent);
sbus_writel(4, &par->lc_ss0_usr->addrspace);
sbus_writel(0x80000000, &par->lc_ss0_usr->fill);
sbus_writel(0, &par->lc_ss0_usr->fontt);
do {
val = sbus_readl(&par->lc_ss0_usr->csr);
} while (val & 0x20000000);
/* setup screen buffer for cfb_* functions */
sbus_writel(1, &ss->wid);
sbus_writel(0x00ffffff, &ss->planemask);
sbus_writel(0x310b90, &ss->rop);
sbus_writel(0, &par->lc_ss0_usr->addrspace);
/* hide cursor */
sbus_writel(sbus_readl(&cursor->cur_misc) & ~LEO_CUR_ENABLE, &cursor->cur_misc);
spin_unlock_irqrestore(&par->lock, flags);
}
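/*
 * Worked example of the two extent encodings above: for a 1280x1024 mode,
 * par->extent == 1279 | (1023 << 16) == 0x03ff04ff, while the blitter
 * extent written to lc_ss0_usr packs the same pair with an 11-bit shift:
 * 1279 | (1023 << 11) == 0x001ffcff.
 */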
static int leo_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
{
/* We just use this to catch switches out of
* graphics mode.
*/
leo_switch_from_graph(info);
if (var->xoffset || var->yoffset || var->vmode)
return -EINVAL;
return 0;
}
/**
* leo_setcolreg - Optional function. Sets a color register.
 * @regno: Which register in the CLUT we are programming
 * @red: The red value which can be up to 16 bits wide
* @green: The green value which can be up to 16 bits wide
* @blue: The blue value which can be up to 16 bits wide.
* @transp: If supported the alpha value which can be up to 16 bits wide.
* @info: frame buffer info structure
*/
static int leo_setcolreg(unsigned regno,
unsigned red, unsigned green, unsigned blue,
unsigned transp, struct fb_info *info)
{
struct leo_par *par = (struct leo_par *) info->par;
struct leo_lx_krn __iomem *lx_krn = par->lx_krn;
unsigned long flags;
u32 val;
int i;
if (regno >= 256)
return 1;
red >>= 8;
green >>= 8;
blue >>= 8;
par->clut_data[regno] = red | (green << 8) | (blue << 16);
spin_lock_irqsave(&par->lock, flags);
leo_wait(lx_krn);
sbus_writel(LEO_KRN_TYPE_CLUTDATA, &lx_krn->krn_type);
for (i = 0; i < 256; i++)
sbus_writel(par->clut_data[i], &lx_krn->krn_value);
sbus_writel(LEO_KRN_TYPE_CLUT0, &lx_krn->krn_type);
val = sbus_readl(&lx_krn->krn_csr);
val |= (LEO_KRN_CSR_UNK | LEO_KRN_CSR_UNK2);
sbus_writel(val, &lx_krn->krn_csr);
spin_unlock_irqrestore(&par->lock, flags);
return 0;
}
/**
* leo_blank - Optional function. Blanks the display.
 * @blank: the blank mode we want.
* @info: frame buffer structure that represents a single frame buffer
*/
static int leo_blank(int blank, struct fb_info *info)
{
struct leo_par *par = (struct leo_par *) info->par;
struct leo_lx_krn __iomem *lx_krn = par->lx_krn;
unsigned long flags;
u32 val;
spin_lock_irqsave(&par->lock, flags);
switch (blank) {
case FB_BLANK_UNBLANK: /* Unblanking */
val = sbus_readl(&lx_krn->krn_csr);
val |= LEO_KRN_CSR_ENABLE;
sbus_writel(val, &lx_krn->krn_csr);
par->flags &= ~LEO_FLAG_BLANKED;
break;
case FB_BLANK_NORMAL: /* Normal blanking */
case FB_BLANK_VSYNC_SUSPEND: /* VESA blank (vsync off) */
case FB_BLANK_HSYNC_SUSPEND: /* VESA blank (hsync off) */
case FB_BLANK_POWERDOWN: /* Poweroff */
val = sbus_readl(&lx_krn->krn_csr);
val &= ~LEO_KRN_CSR_ENABLE;
sbus_writel(val, &lx_krn->krn_csr);
par->flags |= LEO_FLAG_BLANKED;
break;
}
spin_unlock_irqrestore(&par->lock, flags);
return 0;
}
static struct sbus_mmap_map leo_mmap_map[] = {
{
.voff = LEO_SS0_MAP,
.poff = LEO_OFF_SS0,
.size = 0x800000
},
{
.voff = LEO_LC_SS0_USR_MAP,
.poff = LEO_OFF_LC_SS0_USR,
.size = 0x1000
},
{
.voff = LEO_LD_SS0_MAP,
.poff = LEO_OFF_LD_SS0,
.size = 0x1000
},
{
.voff = LEO_LX_CURSOR_MAP,
.poff = LEO_OFF_LX_CURSOR,
.size = 0x1000
},
{
.voff = LEO_SS1_MAP,
.poff = LEO_OFF_SS1,
.size = 0x800000
},
{
.voff = LEO_LC_SS1_USR_MAP,
.poff = LEO_OFF_LC_SS1_USR,
.size = 0x1000
},
{
.voff = LEO_LD_SS1_MAP,
.poff = LEO_OFF_LD_SS1,
.size = 0x1000
},
{
.voff = LEO_UNK_MAP,
.poff = LEO_OFF_UNK,
.size = 0x1000
},
{
.voff = LEO_LX_KRN_MAP,
.poff = LEO_OFF_LX_KRN,
.size = 0x1000
},
{
.voff = LEO_LC_SS0_KRN_MAP,
.poff = LEO_OFF_LC_SS0_KRN,
.size = 0x1000
},
{
.voff = LEO_LC_SS1_KRN_MAP,
.poff = LEO_OFF_LC_SS1_KRN,
.size = 0x1000
},
{
.voff = LEO_LD_GBL_MAP,
.poff = LEO_OFF_LD_GBL,
.size = 0x1000
},
{
.voff = LEO_UNK2_MAP,
.poff = LEO_OFF_UNK2,
.size = 0x100000
},
{ .size = 0 }
};
static int leo_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct leo_par *par = (struct leo_par *)info->par;
return sbusfb_mmap_helper(leo_mmap_map,
info->fix.smem_start, info->fix.smem_len,
par->which_io, vma);
}
static int leo_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
{
return sbusfb_ioctl_helper(cmd, arg, info,
FBTYPE_SUNLEO, 32, info->fix.smem_len);
}
/*
* Initialisation
*/
static void
leo_init_fix(struct fb_info *info, struct device_node *dp)
{
strlcpy(info->fix.id, dp->name, sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_TRUECOLOR;
info->fix.line_length = 8192;
info->fix.accel = FB_ACCEL_SUN_LEO;
}
static void leo_wid_put(struct fb_info *info, struct fb_wid_list *wl)
{
struct leo_par *par = (struct leo_par *) info->par;
struct leo_lx_krn __iomem *lx_krn = par->lx_krn;
struct fb_wid_item *wi;
unsigned long flags;
u32 val;
int i, j;
spin_lock_irqsave(&par->lock, flags);
leo_wait(lx_krn);
for (i = 0, wi = wl->wl_list; i < wl->wl_count; i++, wi++) {
switch (wi->wi_type) {
case FB_WID_DBL_8:
j = (wi->wi_index & 0xf) + 0x40;
break;
case FB_WID_DBL_24:
j = wi->wi_index & 0x3f;
break;
default:
continue;
		}
sbus_writel(0x5800 + j, &lx_krn->krn_type);
sbus_writel(wi->wi_values[0], &lx_krn->krn_value);
}
sbus_writel(LEO_KRN_TYPE_WID, &lx_krn->krn_type);
val = sbus_readl(&lx_krn->krn_csr);
val |= (LEO_KRN_CSR_UNK | LEO_KRN_CSR_UNK2);
sbus_writel(val, &lx_krn->krn_csr);
spin_unlock_irqrestore(&par->lock, flags);
}
static void leo_init_wids(struct fb_info *info)
{
struct fb_wid_item wi;
struct fb_wid_list wl;
wl.wl_count = 1;
wl.wl_list = &wi;
wi.wi_type = FB_WID_DBL_8;
wi.wi_index = 0;
wi.wi_values [0] = 0x2c0;
leo_wid_put(info, &wl);
wi.wi_index = 1;
wi.wi_values [0] = 0x30;
leo_wid_put(info, &wl);
wi.wi_index = 2;
wi.wi_values [0] = 0x20;
leo_wid_put(info, &wl);
wi.wi_type = FB_WID_DBL_24;
wi.wi_index = 1;
wi.wi_values [0] = 0x30;
leo_wid_put(info, &wl);
}
static void leo_init_hw(struct fb_info *info)
{
struct leo_par *par = (struct leo_par *) info->par;
u32 val;
val = sbus_readl(&par->ld_ss1->ss1_misc);
val |= LEO_SS1_MISC_ENABLE;
sbus_writel(val, &par->ld_ss1->ss1_misc);
leo_switch_from_graph(info);
}
static void leo_fixup_var_rgb(struct fb_var_screeninfo *var)
{
var->red.offset = 0;
var->red.length = 8;
var->green.offset = 8;
var->green.length = 8;
var->blue.offset = 16;
var->blue.length = 8;
var->transp.offset = 0;
var->transp.length = 0;
}
static void leo_unmap_regs(struct platform_device *op, struct fb_info *info,
struct leo_par *par)
{
if (par->lc_ss0_usr)
of_iounmap(&op->resource[0], par->lc_ss0_usr, 0x1000);
if (par->ld_ss0)
of_iounmap(&op->resource[0], par->ld_ss0, 0x1000);
if (par->ld_ss1)
of_iounmap(&op->resource[0], par->ld_ss1, 0x1000);
if (par->lx_krn)
of_iounmap(&op->resource[0], par->lx_krn, 0x1000);
if (par->cursor)
of_iounmap(&op->resource[0],
par->cursor, sizeof(struct leo_cursor));
if (info->screen_base)
of_iounmap(&op->resource[0], info->screen_base, 0x800000);
}
static int __devinit leo_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct fb_info *info;
struct leo_par *par;
int linebytes, err;
info = framebuffer_alloc(sizeof(struct leo_par), &op->dev);
err = -ENOMEM;
if (!info)
goto out_err;
par = info->par;
spin_lock_init(&par->lock);
info->fix.smem_start = op->resource[0].start;
par->which_io = op->resource[0].flags & IORESOURCE_BITS;
sbusfb_fill_var(&info->var, dp, 32);
leo_fixup_var_rgb(&info->var);
linebytes = of_getintprop_default(dp, "linebytes",
info->var.xres);
info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres);
par->lc_ss0_usr =
of_ioremap(&op->resource[0], LEO_OFF_LC_SS0_USR,
0x1000, "leolc ss0usr");
par->ld_ss0 =
of_ioremap(&op->resource[0], LEO_OFF_LD_SS0,
0x1000, "leold ss0");
par->ld_ss1 =
of_ioremap(&op->resource[0], LEO_OFF_LD_SS1,
0x1000, "leold ss1");
par->lx_krn =
of_ioremap(&op->resource[0], LEO_OFF_LX_KRN,
0x1000, "leolx krn");
par->cursor =
of_ioremap(&op->resource[0], LEO_OFF_LX_CURSOR,
sizeof(struct leo_cursor), "leolx cursor");
info->screen_base =
of_ioremap(&op->resource[0], LEO_OFF_SS0,
0x800000, "leo ram");
if (!par->lc_ss0_usr ||
!par->ld_ss0 ||
!par->ld_ss1 ||
!par->lx_krn ||
!par->cursor ||
!info->screen_base)
goto out_unmap_regs;
info->flags = FBINFO_DEFAULT;
info->fbops = &leo_ops;
info->pseudo_palette = par->clut_data;
leo_init_wids(info);
leo_init_hw(info);
leo_blank(FB_BLANK_UNBLANK, info);
if (fb_alloc_cmap(&info->cmap, 256, 0))
goto out_unmap_regs;
leo_init_fix(info, dp);
err = register_framebuffer(info);
if (err < 0)
goto out_dealloc_cmap;
dev_set_drvdata(&op->dev, info);
printk(KERN_INFO "%s: leo at %lx:%lx\n",
dp->full_name,
par->which_io, info->fix.smem_start);
return 0;
out_dealloc_cmap:
fb_dealloc_cmap(&info->cmap);
out_unmap_regs:
leo_unmap_regs(op, info, par);
framebuffer_release(info);
out_err:
return err;
}
static int __devexit leo_remove(struct platform_device *op)
{
struct fb_info *info = dev_get_drvdata(&op->dev);
struct leo_par *par = info->par;
unregister_framebuffer(info);
fb_dealloc_cmap(&info->cmap);
leo_unmap_regs(op, info, par);
framebuffer_release(info);
dev_set_drvdata(&op->dev, NULL);
return 0;
}
static const struct of_device_id leo_match[] = {
{
.name = "SUNW,leo",
},
{},
};
MODULE_DEVICE_TABLE(of, leo_match);
static struct platform_driver leo_driver = {
.driver = {
.name = "leo",
.owner = THIS_MODULE,
.of_match_table = leo_match,
},
.probe = leo_probe,
.remove = __devexit_p(leo_remove),
};
static int __init leo_init(void)
{
if (fb_get_options("leofb", NULL))
return -ENODEV;
return platform_driver_register(&leo_driver);
}
static void __exit leo_exit(void)
{
platform_driver_unregister(&leo_driver);
}
module_init(leo_init);
module_exit(leo_exit);
MODULE_DESCRIPTION("framebuffer driver for LEO chipsets");
MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");
|
gpl-2.0
|
OptiPurity/kernel_lge_hammerhead
|
drivers/video/tcx.c
|
8184
|
12359
|
/* tcx.c: TCX frame buffer driver
*
* Copyright (C) 2003, 2006 David S. Miller (davem@davemloft.net)
* Copyright (C) 1996,1998 Jakub Jelinek (jj@ultra.linux.cz)
* Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
*
* Driver layout based loosely on tgafb.c, see that file for credits.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/fb.h>
#include <linux/mm.h>
#include <linux/of_device.h>
#include <asm/io.h>
#include <asm/fbio.h>
#include "sbuslib.h"
/*
* Local functions.
*/
static int tcx_setcolreg(unsigned, unsigned, unsigned, unsigned,
unsigned, struct fb_info *);
static int tcx_blank(int, struct fb_info *);
static int tcx_mmap(struct fb_info *, struct vm_area_struct *);
static int tcx_ioctl(struct fb_info *, unsigned int, unsigned long);
static int tcx_pan_display(struct fb_var_screeninfo *, struct fb_info *);
/*
* Frame buffer operations
*/
static struct fb_ops tcx_ops = {
.owner = THIS_MODULE,
.fb_setcolreg = tcx_setcolreg,
.fb_blank = tcx_blank,
.fb_pan_display = tcx_pan_display,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
.fb_mmap = tcx_mmap,
.fb_ioctl = tcx_ioctl,
#ifdef CONFIG_COMPAT
.fb_compat_ioctl = sbusfb_compat_ioctl,
#endif
};
/* THC definitions */
#define TCX_THC_MISC_REV_SHIFT 16
#define TCX_THC_MISC_REV_MASK 15
#define TCX_THC_MISC_VSYNC_DIS (1 << 25)
#define TCX_THC_MISC_HSYNC_DIS (1 << 24)
#define TCX_THC_MISC_RESET (1 << 12)
#define TCX_THC_MISC_VIDEO (1 << 10)
#define TCX_THC_MISC_SYNC (1 << 9)
#define TCX_THC_MISC_VSYNC (1 << 8)
#define TCX_THC_MISC_SYNC_ENAB (1 << 7)
#define TCX_THC_MISC_CURS_RES (1 << 6)
#define TCX_THC_MISC_INT_ENAB (1 << 5)
#define TCX_THC_MISC_INT (1 << 4)
#define TCX_THC_MISC_INIT 0x9f
#define TCX_THC_REV_REV_SHIFT 20
#define TCX_THC_REV_REV_MASK 15
#define TCX_THC_REV_MINREV_SHIFT 28
#define TCX_THC_REV_MINREV_MASK 15
/* The contents are unknown */
struct tcx_tec {
u32 tec_matrix;
u32 tec_clip;
u32 tec_vdc;
};
struct tcx_thc {
u32 thc_rev;
u32 thc_pad0[511];
u32 thc_hs; /* hsync timing */
u32 thc_hsdvs;
u32 thc_hd;
u32 thc_vs; /* vsync timing */
u32 thc_vd;
u32 thc_refresh;
u32 thc_misc;
u32 thc_pad1[56];
u32 thc_cursxy; /* cursor x,y position (16 bits each) */
u32 thc_cursmask[32]; /* cursor mask bits */
u32 thc_cursbits[32]; /* what to show where mask enabled */
};
struct bt_regs {
u32 addr;
u32 color_map;
u32 control;
u32 cursor;
};
#define TCX_MMAP_ENTRIES 14
struct tcx_par {
spinlock_t lock;
struct bt_regs __iomem *bt;
struct tcx_thc __iomem *thc;
struct tcx_tec __iomem *tec;
u32 __iomem *cplane;
u32 flags;
#define TCX_FLAG_BLANKED 0x00000001
unsigned long which_io;
struct sbus_mmap_map mmap_map[TCX_MMAP_ENTRIES];
int lowdepth;
};
/* Reset control plane so that WID is 8-bit plane. */
static void __tcx_set_control_plane(struct fb_info *info)
{
struct tcx_par *par = info->par;
u32 __iomem *p, *pend;
if (par->lowdepth)
return;
p = par->cplane;
if (p == NULL)
return;
for (pend = p + info->fix.smem_len; p < pend; p++) {
u32 tmp = sbus_readl(p);
tmp &= 0xffffff;
sbus_writel(tmp, p);
}
}
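/*
 * Worked example: a control-plane word of 0x05ffffff (WID select in the
 * top byte) is rewritten as 0x00ffffff by the masking loop above, which
 * resets the WID so the 8-bit plane is displayed.
 */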
static void tcx_reset(struct fb_info *info)
{
struct tcx_par *par = (struct tcx_par *) info->par;
unsigned long flags;
spin_lock_irqsave(&par->lock, flags);
__tcx_set_control_plane(info);
spin_unlock_irqrestore(&par->lock, flags);
}
static int tcx_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
{
tcx_reset(info);
return 0;
}
/**
* tcx_setcolreg - Optional function. Sets a color register.
 * @regno: Which register in the CLUT we are programming
 * @red: The red value which can be up to 16 bits wide
* @green: The green value which can be up to 16 bits wide
* @blue: The blue value which can be up to 16 bits wide.
* @transp: If supported the alpha value which can be up to 16 bits wide.
* @info: frame buffer info structure
*/
static int tcx_setcolreg(unsigned regno,
unsigned red, unsigned green, unsigned blue,
unsigned transp, struct fb_info *info)
{
struct tcx_par *par = (struct tcx_par *) info->par;
struct bt_regs __iomem *bt = par->bt;
unsigned long flags;
if (regno >= 256)
return 1;
red >>= 8;
green >>= 8;
blue >>= 8;
spin_lock_irqsave(&par->lock, flags);
sbus_writel(regno << 24, &bt->addr);
sbus_writel(red << 24, &bt->color_map);
sbus_writel(green << 24, &bt->color_map);
sbus_writel(blue << 24, &bt->color_map);
spin_unlock_irqrestore(&par->lock, flags);
return 0;
}
/**
* tcx_blank - Optional function. Blanks the display.
 * @blank: the blank mode we want.
* @info: frame buffer structure that represents a single frame buffer
*/
static int
tcx_blank(int blank, struct fb_info *info)
{
struct tcx_par *par = (struct tcx_par *) info->par;
struct tcx_thc __iomem *thc = par->thc;
unsigned long flags;
u32 val;
spin_lock_irqsave(&par->lock, flags);
val = sbus_readl(&thc->thc_misc);
switch (blank) {
case FB_BLANK_UNBLANK: /* Unblanking */
val &= ~(TCX_THC_MISC_VSYNC_DIS |
TCX_THC_MISC_HSYNC_DIS);
val |= TCX_THC_MISC_VIDEO;
par->flags &= ~TCX_FLAG_BLANKED;
break;
case FB_BLANK_NORMAL: /* Normal blanking */
val &= ~TCX_THC_MISC_VIDEO;
par->flags |= TCX_FLAG_BLANKED;
break;
case FB_BLANK_VSYNC_SUSPEND: /* VESA blank (vsync off) */
val |= TCX_THC_MISC_VSYNC_DIS;
break;
case FB_BLANK_HSYNC_SUSPEND: /* VESA blank (hsync off) */
val |= TCX_THC_MISC_HSYNC_DIS;
break;
case FB_BLANK_POWERDOWN: /* Poweroff */
break;
	}
sbus_writel(val, &thc->thc_misc);
spin_unlock_irqrestore(&par->lock, flags);
return 0;
}
static struct sbus_mmap_map __tcx_mmap_map[TCX_MMAP_ENTRIES] = {
{
.voff = TCX_RAM8BIT,
.size = SBUS_MMAP_FBSIZE(1)
},
{
.voff = TCX_RAM24BIT,
.size = SBUS_MMAP_FBSIZE(4)
},
{
.voff = TCX_UNK3,
.size = SBUS_MMAP_FBSIZE(8)
},
{
.voff = TCX_UNK4,
.size = SBUS_MMAP_FBSIZE(8)
},
{
.voff = TCX_CONTROLPLANE,
.size = SBUS_MMAP_FBSIZE(4)
},
{
.voff = TCX_UNK6,
.size = SBUS_MMAP_FBSIZE(8)
},
{
.voff = TCX_UNK7,
.size = SBUS_MMAP_FBSIZE(8)
},
{
.voff = TCX_TEC,
.size = PAGE_SIZE
},
{
.voff = TCX_BTREGS,
.size = PAGE_SIZE
},
{
.voff = TCX_THC,
.size = PAGE_SIZE
},
{
.voff = TCX_DHC,
.size = PAGE_SIZE
},
{
.voff = TCX_ALT,
.size = PAGE_SIZE
},
{
.voff = TCX_UNK2,
.size = 0x20000
},
{ .size = 0 }
};
static int tcx_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct tcx_par *par = (struct tcx_par *)info->par;
return sbusfb_mmap_helper(par->mmap_map,
info->fix.smem_start, info->fix.smem_len,
par->which_io, vma);
}
static int tcx_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
struct tcx_par *par = (struct tcx_par *) info->par;
return sbusfb_ioctl_helper(cmd, arg, info,
FBTYPE_TCXCOLOR,
(par->lowdepth ? 8 : 24),
info->fix.smem_len);
}
/*
* Initialisation
*/
static void
tcx_init_fix(struct fb_info *info, int linebytes)
{
struct tcx_par *par = (struct tcx_par *)info->par;
const char *tcx_name;
if (par->lowdepth)
tcx_name = "TCX8";
else
tcx_name = "TCX24";
strlcpy(info->fix.id, tcx_name, sizeof(info->fix.id));
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
info->fix.line_length = linebytes;
info->fix.accel = FB_ACCEL_SUN_TCX;
}
static void tcx_unmap_regs(struct platform_device *op, struct fb_info *info,
struct tcx_par *par)
{
if (par->tec)
of_iounmap(&op->resource[7],
par->tec, sizeof(struct tcx_tec));
if (par->thc)
of_iounmap(&op->resource[9],
par->thc, sizeof(struct tcx_thc));
if (par->bt)
of_iounmap(&op->resource[8],
par->bt, sizeof(struct bt_regs));
if (par->cplane)
of_iounmap(&op->resource[4],
par->cplane, info->fix.smem_len * sizeof(u32));
if (info->screen_base)
of_iounmap(&op->resource[0],
info->screen_base, info->fix.smem_len);
}
static int __devinit tcx_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct fb_info *info;
struct tcx_par *par;
int linebytes, i, err;
info = framebuffer_alloc(sizeof(struct tcx_par), &op->dev);
err = -ENOMEM;
if (!info)
goto out_err;
par = info->par;
spin_lock_init(&par->lock);
par->lowdepth =
(of_find_property(dp, "tcx-8-bit", NULL) != NULL);
sbusfb_fill_var(&info->var, dp, 8);
info->var.red.length = 8;
info->var.green.length = 8;
info->var.blue.length = 8;
linebytes = of_getintprop_default(dp, "linebytes",
info->var.xres);
info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres);
par->tec = of_ioremap(&op->resource[7], 0,
sizeof(struct tcx_tec), "tcx tec");
par->thc = of_ioremap(&op->resource[9], 0,
sizeof(struct tcx_thc), "tcx thc");
par->bt = of_ioremap(&op->resource[8], 0,
sizeof(struct bt_regs), "tcx dac");
info->screen_base = of_ioremap(&op->resource[0], 0,
info->fix.smem_len, "tcx ram");
if (!par->tec || !par->thc ||
!par->bt || !info->screen_base)
goto out_unmap_regs;
memcpy(&par->mmap_map, &__tcx_mmap_map, sizeof(par->mmap_map));
if (!par->lowdepth) {
par->cplane = of_ioremap(&op->resource[4], 0,
info->fix.smem_len * sizeof(u32),
"tcx cplane");
if (!par->cplane)
goto out_unmap_regs;
} else {
par->mmap_map[1].size = SBUS_MMAP_EMPTY;
par->mmap_map[4].size = SBUS_MMAP_EMPTY;
par->mmap_map[5].size = SBUS_MMAP_EMPTY;
par->mmap_map[6].size = SBUS_MMAP_EMPTY;
}
info->fix.smem_start = op->resource[0].start;
par->which_io = op->resource[0].flags & IORESOURCE_BITS;
for (i = 0; i < TCX_MMAP_ENTRIES; i++) {
int j;
switch (i) {
case 10:
j = 12;
break;
case 11: case 12:
j = i - 1;
break;
default:
j = i;
break;
		}
par->mmap_map[i].poff = op->resource[j].start;
}
info->flags = FBINFO_DEFAULT;
info->fbops = &tcx_ops;
/* Initialize brooktree DAC. */
sbus_writel(0x04 << 24, &par->bt->addr); /* color planes */
sbus_writel(0xff << 24, &par->bt->control);
sbus_writel(0x05 << 24, &par->bt->addr);
sbus_writel(0x00 << 24, &par->bt->control);
sbus_writel(0x06 << 24, &par->bt->addr); /* overlay plane */
sbus_writel(0x73 << 24, &par->bt->control);
sbus_writel(0x07 << 24, &par->bt->addr);
sbus_writel(0x00 << 24, &par->bt->control);
tcx_reset(info);
tcx_blank(FB_BLANK_UNBLANK, info);
if (fb_alloc_cmap(&info->cmap, 256, 0))
goto out_unmap_regs;
fb_set_cmap(&info->cmap, info);
tcx_init_fix(info, linebytes);
err = register_framebuffer(info);
if (err < 0)
goto out_dealloc_cmap;
dev_set_drvdata(&op->dev, info);
printk(KERN_INFO "%s: TCX at %lx:%lx, %s\n",
dp->full_name,
par->which_io,
info->fix.smem_start,
par->lowdepth ? "8-bit only" : "24-bit depth");
return 0;
out_dealloc_cmap:
fb_dealloc_cmap(&info->cmap);
out_unmap_regs:
tcx_unmap_regs(op, info, par);
framebuffer_release(info);
out_err:
return err;
}
static int __devexit tcx_remove(struct platform_device *op)
{
struct fb_info *info = dev_get_drvdata(&op->dev);
struct tcx_par *par = info->par;
unregister_framebuffer(info);
fb_dealloc_cmap(&info->cmap);
tcx_unmap_regs(op, info, par);
framebuffer_release(info);
dev_set_drvdata(&op->dev, NULL);
return 0;
}
static const struct of_device_id tcx_match[] = {
{
.name = "SUNW,tcx",
},
{},
};
MODULE_DEVICE_TABLE(of, tcx_match);
static struct platform_driver tcx_driver = {
.driver = {
.name = "tcx",
.owner = THIS_MODULE,
.of_match_table = tcx_match,
},
.probe = tcx_probe,
.remove = __devexit_p(tcx_remove),
};
static int __init tcx_init(void)
{
if (fb_get_options("tcxfb", NULL))
return -ENODEV;
return platform_driver_register(&tcx_driver);
}
static void __exit tcx_exit(void)
{
platform_driver_unregister(&tcx_driver);
}
module_init(tcx_init);
module_exit(tcx_exit);
MODULE_DESCRIPTION("framebuffer driver for TCX chipsets");
MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");
|
gpl-2.0
|
tpmullan/android_kernel_asus_tf700
|
drivers/uwb/ie-rcv.c
|
12792
|
1632
|
/*
* Ultra Wide Band
* IE Received notification handling.
*
* Copyright (C) 2008 Cambridge Silicon Radio Ltd.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/bitmap.h>
#include "uwb-internal.h"
/*
* Process an incoming IE Received notification.
*/
int uwbd_evt_handle_rc_ie_rcv(struct uwb_event *evt)
{
int result = -EINVAL;
struct device *dev = &evt->rc->uwb_dev.dev;
struct uwb_rc_evt_ie_rcv *iercv;
size_t iesize;
/* Is there enough data to decode it? */
if (evt->notif.size < sizeof(*iercv)) {
dev_err(dev, "IE Received notification: Not enough data to "
"decode (%zu vs %zu bytes needed)\n",
evt->notif.size, sizeof(*iercv));
goto error;
}
iercv = container_of(evt->notif.rceb, struct uwb_rc_evt_ie_rcv, rceb);
iesize = le16_to_cpu(iercv->wIELength);
dev_dbg(dev, "IE received, element ID=%d\n", iercv->IEData[0]);
if (iercv->IEData[0] == UWB_RELINQUISH_REQUEST_IE) {
dev_warn(dev, "unhandled Relinquish Request IE\n");
}
return 0;
error:
return result;
}
|
gpl-2.0
|
javifo/samsung_stock_kernel_i9300
|
arch/cris/arch-v32/drivers/iop_fw_load.c
|
13816
|
5989
|
/*
* Firmware loader for ETRAX FS IO-Processor
*
* Copyright (C) 2004 Axis Communications AB
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <hwregs/reg_rdwr.h>
#include <hwregs/reg_map.h>
#include <hwregs/iop/iop_reg_space.h>
#include <hwregs/iop/iop_mpu_macros.h>
#include <hwregs/iop/iop_mpu_defs.h>
#include <hwregs/iop/iop_spu_defs.h>
#include <hwregs/iop/iop_sw_cpu_defs.h>
#define IOP_TIMEOUT 100
#error "This driver is broken with regard to its driver core usage."
#error "Please contact <greg@kroah.com> for details on how to fix it properly."
static struct device iop_spu_device[2] = {
{ .init_name = "iop-spu0", },
{ .init_name = "iop-spu1", },
};
static struct device iop_mpu_device = {
.init_name = "iop-mpu",
};
static int wait_mpu_idle(void)
{
reg_iop_mpu_r_stat mpu_stat;
unsigned int timeout = IOP_TIMEOUT;
do {
mpu_stat = REG_RD(iop_mpu, regi_iop_mpu, r_stat);
} while (mpu_stat.instr_reg_busy == regk_iop_mpu_yes && --timeout > 0);
if (timeout == 0) {
printk(KERN_ERR "Timeout waiting for MPU to be idle\n");
return -EBUSY;
}
return 0;
}
int iop_fw_load_spu(const unsigned char *fw_name, unsigned int spu_inst)
{
reg_iop_sw_cpu_rw_mc_ctrl mc_ctrl = {
.wr_spu0_mem = regk_iop_sw_cpu_no,
.wr_spu1_mem = regk_iop_sw_cpu_no,
.size = 4,
.cmd = regk_iop_sw_cpu_reg_copy,
.keep_owner = regk_iop_sw_cpu_yes
};
reg_iop_spu_rw_ctrl spu_ctrl = {
.en = regk_iop_spu_no,
.fsm = regk_iop_spu_no,
};
reg_iop_sw_cpu_r_mc_stat mc_stat;
const struct firmware *fw_entry;
u32 *data;
unsigned int timeout;
int retval, i;
if (spu_inst > 1)
return -ENODEV;
/* get firmware */
retval = request_firmware(&fw_entry,
fw_name,
&iop_spu_device[spu_inst]);
if (retval != 0)
{
printk(KERN_ERR
"iop_load_spu: Failed to load firmware \"%s\"\n",
fw_name);
return retval;
}
data = (u32 *) fw_entry->data;
/* acquire ownership of memory controller */
switch (spu_inst) {
case 0:
mc_ctrl.wr_spu0_mem = regk_iop_sw_cpu_yes;
REG_WR(iop_spu, regi_iop_spu0, rw_ctrl, spu_ctrl);
break;
case 1:
mc_ctrl.wr_spu1_mem = regk_iop_sw_cpu_yes;
REG_WR(iop_spu, regi_iop_spu1, rw_ctrl, spu_ctrl);
break;
}
timeout = IOP_TIMEOUT;
do {
REG_WR(iop_sw_cpu, regi_iop_sw_cpu, rw_mc_ctrl, mc_ctrl);
mc_stat = REG_RD(iop_sw_cpu, regi_iop_sw_cpu, r_mc_stat);
} while (mc_stat.owned_by_cpu == regk_iop_sw_cpu_no && --timeout > 0);
if (timeout == 0) {
printk(KERN_ERR "Timeout waiting to acquire MC\n");
retval = -EBUSY;
goto out;
}
/* write to SPU memory */
for (i = 0; i < (fw_entry->size/4); i++) {
switch (spu_inst) {
case 0:
REG_WR_INT(iop_spu, regi_iop_spu0, rw_seq_pc, (i*4));
break;
case 1:
REG_WR_INT(iop_spu, regi_iop_spu1, rw_seq_pc, (i*4));
break;
}
REG_WR_INT(iop_sw_cpu, regi_iop_sw_cpu, rw_mc_data, *data);
data++;
}
/* release ownership of memory controller */
(void) REG_RD(iop_sw_cpu, regi_iop_sw_cpu, rs_mc_data);
out:
release_firmware(fw_entry);
return retval;
}
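/*
 * Minimal usage sketch (not compiled; "spu0_app.bin" is a hypothetical
 * firmware name): load an image into SPU instance 0 via the firmware
 * loader and propagate the result.
 */
#if 0
static int example_load_spu0(void)
{
	return iop_fw_load_spu((const unsigned char *)"spu0_app.bin", 0);
}
#endif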
int iop_fw_load_mpu(unsigned char *fw_name)
{
const unsigned int start_addr = 0;
reg_iop_mpu_rw_ctrl mpu_ctrl;
const struct firmware *fw_entry;
u32 *data;
int retval, i;
/* get firmware */
retval = request_firmware(&fw_entry, fw_name, &iop_mpu_device);
if (retval != 0)
{
printk(KERN_ERR
"iop_load_spu: Failed to load firmware \"%s\"\n",
fw_name);
return retval;
}
data = (u32 *) fw_entry->data;
/* disable MPU */
mpu_ctrl.en = regk_iop_mpu_no;
REG_WR(iop_mpu, regi_iop_mpu, rw_ctrl, mpu_ctrl);
/* put start address in R0 */
REG_WR_VECT(iop_mpu, regi_iop_mpu, rw_r, 0, start_addr);
/* write to memory by executing 'SWX i, 4, R0' for each word */
if ((retval = wait_mpu_idle()) != 0)
goto out;
REG_WR(iop_mpu, regi_iop_mpu, rw_instr, MPU_SWX_IIR_INSTR(0, 4, 0));
for (i = 0; i < (fw_entry->size / 4); i++) {
REG_WR_INT(iop_mpu, regi_iop_mpu, rw_immediate, *data);
if ((retval = wait_mpu_idle()) != 0)
goto out;
data++;
}
out:
release_firmware(fw_entry);
return retval;
}
int iop_start_mpu(unsigned int start_addr)
{
reg_iop_mpu_rw_ctrl mpu_ctrl = { .en = regk_iop_mpu_yes };
int retval;
	/* halt the MPU */
if ((retval = wait_mpu_idle()) != 0)
goto out;
REG_WR(iop_mpu, regi_iop_mpu, rw_instr, MPU_HALT());
if ((retval = wait_mpu_idle()) != 0)
goto out;
/* set PC and wait for it to bite */
if ((retval = wait_mpu_idle()) != 0)
goto out;
REG_WR_INT(iop_mpu, regi_iop_mpu, rw_instr, MPU_BA_I(start_addr));
if ((retval = wait_mpu_idle()) != 0)
goto out;
/* make sure the MPU starts executing with interrupts disabled */
REG_WR(iop_mpu, regi_iop_mpu, rw_instr, MPU_DI());
if ((retval = wait_mpu_idle()) != 0)
goto out;
/* enable MPU */
REG_WR(iop_mpu, regi_iop_mpu, rw_ctrl, mpu_ctrl);
out:
return retval;
}
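/*
 * Minimal usage sketch (not compiled; "mpu_app.bin" is a hypothetical
 * firmware name): load an MPU image and start executing from address 0,
 * matching the offset iop_fw_load_mpu() writes the image to.
 */
#if 0
static int example_boot_mpu(void)
{
	int err = iop_fw_load_mpu((unsigned char *)"mpu_app.bin");
	if (err)
		return err;
	return iop_start_mpu(0);
}
#endif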
static int __init iop_fw_load_init(void)
{
#if 0
/*
* static struct devices can not be added directly to sysfs by ignoring
* the driver model infrastructure. To fix this properly, please use
* the platform_bus to register these devices to be able to properly
* use the firmware infrastructure.
*/
device_initialize(&iop_spu_device[0]);
kobject_set_name(&iop_spu_device[0].kobj, "iop-spu0");
kobject_add(&iop_spu_device[0].kobj);
device_initialize(&iop_spu_device[1]);
kobject_set_name(&iop_spu_device[1].kobj, "iop-spu1");
kobject_add(&iop_spu_device[1].kobj);
device_initialize(&iop_mpu_device);
kobject_set_name(&iop_mpu_device.kobj, "iop-mpu");
kobject_add(&iop_mpu_device.kobj);
#endif
return 0;
}
static void __exit iop_fw_load_exit(void)
{
}
module_init(iop_fw_load_init);
module_exit(iop_fw_load_exit);
MODULE_DESCRIPTION("ETRAX FS IO-Processor Firmware Loader");
MODULE_LICENSE("GPL");
EXPORT_SYMBOL(iop_fw_load_spu);
EXPORT_SYMBOL(iop_fw_load_mpu);
EXPORT_SYMBOL(iop_start_mpu);
|
gpl-2.0
|
HomerSp/shooter_u-ics
|
drivers/staging/vme/bridges/vme_tsi148.c
|
761
|
71690
|
/*
* Support for the Tundra TSI148 VME-PCI Bridge Chip
*
* Author: Martyn Welch <martyn.welch@ge.com>
* Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
*
* Based on work by Tom Armistead and Ajit Prem
* Copyright 2004 Motorola Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include "../vme.h"
#include "../vme_bridge.h"
#include "vme_tsi148.h"
static int __init tsi148_init(void);
static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
static void tsi148_remove(struct pci_dev *);
static void __exit tsi148_exit(void);
/* Module parameter */
static int err_chk;
static int geoid;
static char driver_name[] = "vme_tsi148";
static const struct pci_device_id tsi148_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
{ },
};
static struct pci_driver tsi148_driver = {
.name = driver_name,
.id_table = tsi148_ids,
.probe = tsi148_probe,
.remove = tsi148_remove,
};
static void reg_join(unsigned int high, unsigned int low,
unsigned long long *variable)
{
*variable = (unsigned long long)high << 32;
*variable |= (unsigned long long)low;
}
static void reg_split(unsigned long long variable, unsigned int *high,
unsigned int *low)
{
*low = (unsigned int)variable & 0xFFFFFFFF;
*high = (unsigned int)(variable >> 32);
}
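/*
 * Worked example: reg_join(0x00000001, 0x80000000, &v) yields
 * v == 0x0000000180000000ULL, and reg_split(v, &hi, &lo) recovers
 * hi == 0x00000001 and lo == 0x80000000.
 */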
/*
* Wakes up DMA queue.
*/
static u32 tsi148_DMA_irqhandler(struct tsi148_driver *bridge,
int channel_mask)
{
u32 serviced = 0;
if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
wake_up(&(bridge->dma_queue[0]));
serviced |= TSI148_LCSR_INTC_DMA0C;
}
if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
wake_up(&(bridge->dma_queue[1]));
serviced |= TSI148_LCSR_INTC_DMA1C;
}
return serviced;
}
/*
* Wake up location monitor queue
*/
static u32 tsi148_LM_irqhandler(struct tsi148_driver *bridge, u32 stat)
{
int i;
u32 serviced = 0;
for (i = 0; i < 4; i++) {
if (stat & TSI148_LCSR_INTS_LMS[i]) {
/* We only enable interrupts if the callback is set */
bridge->lm_callback[i](i);
serviced |= TSI148_LCSR_INTC_LMC[i];
}
}
return serviced;
}
/*
* Wake up mail box queue.
*
 * XXX This functionality is not exposed up through the API.
*/
static u32 tsi148_MB_irqhandler(struct vme_bridge *tsi148_bridge, u32 stat)
{
int i;
u32 val;
u32 serviced = 0;
struct tsi148_driver *bridge;
bridge = tsi148_bridge->driver_priv;
for (i = 0; i < 4; i++) {
if (stat & TSI148_LCSR_INTS_MBS[i]) {
val = ioread32be(bridge->base + TSI148_GCSR_MBOX[i]);
dev_err(tsi148_bridge->parent, "VME Mailbox %d received"
": 0x%x\n", i, val);
serviced |= TSI148_LCSR_INTC_MBC[i];
}
}
return serviced;
}
/*
* Display error & status message when PERR (PCI) exception interrupt occurs.
*/
static u32 tsi148_PERR_irqhandler(struct vme_bridge *tsi148_bridge)
{
struct tsi148_driver *bridge;
bridge = tsi148_bridge->driver_priv;
dev_err(tsi148_bridge->parent, "PCI Exception at address: 0x%08x:%08x, "
"attributes: %08x\n",
ioread32be(bridge->base + TSI148_LCSR_EDPAU),
ioread32be(bridge->base + TSI148_LCSR_EDPAL),
ioread32be(bridge->base + TSI148_LCSR_EDPAT));
dev_err(tsi148_bridge->parent, "PCI-X attribute reg: %08x, PCI-X split "
"completion reg: %08x\n",
ioread32be(bridge->base + TSI148_LCSR_EDPXA),
ioread32be(bridge->base + TSI148_LCSR_EDPXS));
iowrite32be(TSI148_LCSR_EDPAT_EDPCL, bridge->base + TSI148_LCSR_EDPAT);
return TSI148_LCSR_INTC_PERRC;
}
/*
* Save address and status when VME error interrupt occurs.
*/
static u32 tsi148_VERR_irqhandler(struct vme_bridge *tsi148_bridge)
{
unsigned int error_addr_high, error_addr_low;
unsigned long long error_addr;
u32 error_attrib;
struct vme_bus_error *error;
struct tsi148_driver *bridge;
bridge = tsi148_bridge->driver_priv;
error_addr_high = ioread32be(bridge->base + TSI148_LCSR_VEAU);
error_addr_low = ioread32be(bridge->base + TSI148_LCSR_VEAL);
error_attrib = ioread32be(bridge->base + TSI148_LCSR_VEAT);
reg_join(error_addr_high, error_addr_low, &error_addr);
/* Check for exception register overflow (we have lost error data) */
if (error_attrib & TSI148_LCSR_VEAT_VEOF) {
dev_err(tsi148_bridge->parent, "VME Bus Exception Overflow "
"Occurred\n");
}
error = kmalloc(sizeof(struct vme_bus_error), GFP_ATOMIC);
if (error) {
error->address = error_addr;
error->attributes = error_attrib;
list_add_tail(&(error->list), &(tsi148_bridge->vme_errors));
} else {
dev_err(tsi148_bridge->parent, "Unable to alloc memory for "
"VMEbus Error reporting\n");
dev_err(tsi148_bridge->parent, "VME Bus Error at address: "
"0x%llx, attributes: %08x\n", error_addr, error_attrib);
}
/* Clear Status */
iowrite32be(TSI148_LCSR_VEAT_VESCL, bridge->base + TSI148_LCSR_VEAT);
return TSI148_LCSR_INTC_VERRC;
}
/*
* Wake up IACK queue.
*/
static u32 tsi148_IACK_irqhandler(struct tsi148_driver *bridge)
{
wake_up(&(bridge->iack_queue));
return TSI148_LCSR_INTC_IACKC;
}
/*
 * Call the VME bus interrupt callback if one is provided.
*/
static u32 tsi148_VIRQ_irqhandler(struct vme_bridge *tsi148_bridge,
u32 stat)
{
int vec, i, serviced = 0;
struct tsi148_driver *bridge;
bridge = tsi148_bridge->driver_priv;
for (i = 7; i > 0; i--) {
if (stat & (1 << i)) {
/*
* Note: Even though the registers are defined as
* 32-bits in the spec, we only want to issue 8-bit
* IACK cycles on the bus, read from offset 3.
*/
vec = ioread8(bridge->base + TSI148_LCSR_VIACK[i] + 3);
vme_irq_handler(tsi148_bridge, i, vec);
serviced |= (1 << i);
}
}
return serviced;
}
/*
* Top level interrupt handler. Clears appropriate interrupt status bits and
* then calls appropriate sub handler(s).
*/
static irqreturn_t tsi148_irqhandler(int irq, void *ptr)
{
u32 stat, enable, serviced = 0;
struct vme_bridge *tsi148_bridge;
struct tsi148_driver *bridge;
tsi148_bridge = ptr;
bridge = tsi148_bridge->driver_priv;
/* Determine which interrupts are unmasked and set */
enable = ioread32be(bridge->base + TSI148_LCSR_INTEO);
stat = ioread32be(bridge->base + TSI148_LCSR_INTS);
/* Only look at unmasked interrupts */
stat &= enable;
if (unlikely(!stat))
return IRQ_NONE;
/* Call subhandlers as appropriate */
/* DMA irqs */
if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
serviced |= tsi148_DMA_irqhandler(bridge, stat);
/* Location monitor irqs */
if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
serviced |= tsi148_LM_irqhandler(bridge, stat);
/* Mail box irqs */
if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
serviced |= tsi148_MB_irqhandler(tsi148_bridge, stat);
/* PCI bus error */
if (stat & TSI148_LCSR_INTS_PERRS)
serviced |= tsi148_PERR_irqhandler(tsi148_bridge);
/* VME bus error */
if (stat & TSI148_LCSR_INTS_VERRS)
serviced |= tsi148_VERR_irqhandler(tsi148_bridge);
/* IACK irq */
if (stat & TSI148_LCSR_INTS_IACKS)
serviced |= tsi148_IACK_irqhandler(bridge);
/* VME bus irqs */
if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
TSI148_LCSR_INTS_IRQ1S))
serviced |= tsi148_VIRQ_irqhandler(tsi148_bridge, stat);
/* Clear serviced interrupts */
iowrite32be(serviced, bridge->base + TSI148_LCSR_INTC);
return IRQ_HANDLED;
}
static int tsi148_irq_init(struct vme_bridge *tsi148_bridge)
{
int result;
unsigned int tmp;
struct pci_dev *pdev;
struct tsi148_driver *bridge;
pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
bridge = tsi148_bridge->driver_priv;
/* Initialise list for VME bus errors */
INIT_LIST_HEAD(&(tsi148_bridge->vme_errors));
mutex_init(&(tsi148_bridge->irq_mtx));
result = request_irq(pdev->irq,
tsi148_irqhandler,
IRQF_SHARED,
driver_name, tsi148_bridge);
if (result) {
dev_err(tsi148_bridge->parent, "Can't get assigned pci irq "
"vector %02X\n", pdev->irq);
return result;
}
/* Enable and unmask interrupts */
tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
TSI148_LCSR_INTEO_IACKEO;
/* This leaves the following interrupts masked.
* TSI148_LCSR_INTEO_VIEEO
* TSI148_LCSR_INTEO_SYSFLEO
* TSI148_LCSR_INTEO_ACFLEO
*/
/* Don't enable Location Monitor interrupts here - they will be
* enabled when the location monitors are properly configured and
* a callback has been attached.
* TSI148_LCSR_INTEO_LM0EO
* TSI148_LCSR_INTEO_LM1EO
* TSI148_LCSR_INTEO_LM2EO
* TSI148_LCSR_INTEO_LM3EO
*/
/* Don't enable VME interrupts until we add a handler, else the board
* will respond to it and we don't want that unless it knows how to
* properly deal with it.
* TSI148_LCSR_INTEO_IRQ7EO
* TSI148_LCSR_INTEO_IRQ6EO
* TSI148_LCSR_INTEO_IRQ5EO
* TSI148_LCSR_INTEO_IRQ4EO
* TSI148_LCSR_INTEO_IRQ3EO
* TSI148_LCSR_INTEO_IRQ2EO
* TSI148_LCSR_INTEO_IRQ1EO
*/
iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
return 0;
}
static void tsi148_irq_exit(struct vme_bridge *tsi148_bridge,
struct pci_dev *pdev)
{
struct tsi148_driver *bridge = tsi148_bridge->driver_priv;
/* Turn off interrupts */
iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEO);
iowrite32be(0x0, bridge->base + TSI148_LCSR_INTEN);
/* Clear all interrupts */
iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_INTC);
/* Detach interrupt handler */
free_irq(pdev->irq, tsi148_bridge);
}
/*
* Check to see if an IACK has been received, return true (1) or false (0).
*/
int tsi148_iack_received(struct tsi148_driver *bridge)
{
u32 tmp;
tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
if (tmp & TSI148_LCSR_VICR_IRQS)
return 0;
else
return 1;
}
/*
* Configure VME interrupt
*/
void tsi148_irq_set(struct vme_bridge *tsi148_bridge, int level,
int state, int sync)
{
struct pci_dev *pdev;
u32 tmp;
struct tsi148_driver *bridge;
bridge = tsi148_bridge->driver_priv;
/* We need to do the ordering differently for enabling and disabling */
if (state == 0) {
tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
if (sync != 0) {
pdev = container_of(tsi148_bridge->parent,
struct pci_dev, dev);
synchronize_irq(pdev->irq);
}
} else {
tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
}
}
/*
* Generate a VME bus interrupt at the requested level & vector. Wait for
* interrupt to be acked.
*/
int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level, int statid)
{
u32 tmp;
struct tsi148_driver *bridge;
bridge = tsi148_bridge->driver_priv;
mutex_lock(&(bridge->vme_int));
/* Read VICR register */
tmp = ioread32be(bridge->base + TSI148_LCSR_VICR);
/* Set Status/ID */
tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
(statid & TSI148_LCSR_VICR_STID_M);
iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
/* Assert VMEbus IRQ */
tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
iowrite32be(tmp, bridge->base + TSI148_LCSR_VICR);
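/*
* The IACK interrupt handler (tsi148_IACK_irqhandler) wakes iack_queue
* once the interrupt has been acknowledged; tsi148_iack_received()
* then confirms the VICR IRQ status bit has cleared.
*/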
/* XXX Consider implementing a timeout? */
wait_event_interruptible(bridge->iack_queue,
tsi148_iack_received(bridge));
mutex_unlock(&(bridge->vme_int));
return 0;
}
/*
* Find the first error in this address range
*/
static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
vme_address_t aspace, unsigned long long address, size_t count)
{
struct list_head *err_pos;
struct vme_bus_error *vme_err, *valid = NULL;
unsigned long long bound;
bound = address + count;
/*
* XXX We are currently not looking at the address space when parsing
* for errors. This is because parsing the Address Modifier Codes
* is going to be quite resource intensive to do properly. We
* should be OK just looking at the addresses and this is certainly
* much better than what we had before.
*/
err_pos = NULL;
/* Iterate through errors */
list_for_each(err_pos, &(tsi148_bridge->vme_errors)) {
vme_err = list_entry(err_pos, struct vme_bus_error, list);
if ((vme_err->address >= address) &&
(vme_err->address < bound)) {
valid = vme_err;
break;
}
}
return valid;
}
/*
* Clear errors in the provided address range.
*/
static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
vme_address_t aspace, unsigned long long address, size_t count)
{
struct list_head *err_pos, *temp;
struct vme_bus_error *vme_err;
unsigned long long bound;
bound = address + count;
/*
* XXX We are currently not looking at the address space when parsing
* for errors. This is because parsing the Address Modifier Codes
* is going to be quite resource intensive to do properly. We
* should be OK just looking at the addresses and this is certainly
* much better than what we had before.
*/
err_pos = NULL;
/* Iterate through errors */
list_for_each_safe(err_pos, temp, &(tsi148_bridge->vme_errors)) {
vme_err = list_entry(err_pos, struct vme_bus_error, list);
if ((vme_err->address >= address) &&
(vme_err->address < bound)) {
list_del(err_pos);
kfree(vme_err);
}
}
}
/*
* Initialize a slave window with the requested attributes.
*/
int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
unsigned long long vme_base, unsigned long long size,
dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
{
unsigned int i, addr = 0, granularity = 0;
unsigned int temp_ctl = 0;
unsigned int vme_base_low, vme_base_high;
unsigned int vme_bound_low, vme_bound_high;
unsigned int pci_offset_low, pci_offset_high;
unsigned long long vme_bound, pci_offset;
struct vme_bridge *tsi148_bridge;
struct tsi148_driver *bridge;
tsi148_bridge = image->parent;
bridge = tsi148_bridge->driver_priv;
i = image->number;
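/*
* The granularity is the minimum window size and alignment for the
* chosen address space; the base, bound and offset values are all
* checked against it below.
*/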
switch (aspace) {
case VME_A16:
granularity = 0x10;
addr |= TSI148_LCSR_ITAT_AS_A16;
break;
case VME_A24:
granularity = 0x1000;
addr |= TSI148_LCSR_ITAT_AS_A24;
break;
case VME_A32:
granularity = 0x10000;
addr |= TSI148_LCSR_ITAT_AS_A32;
break;
case VME_A64:
granularity = 0x10000;
addr |= TSI148_LCSR_ITAT_AS_A64;
break;
case VME_CRCSR:
case VME_USER1:
case VME_USER2:
case VME_USER3:
case VME_USER4:
default:
dev_err(tsi148_bridge->parent, "Invalid address space\n");
return -EINVAL;
}
/* Convert 64-bit variables to 2x 32-bit variables */
reg_split(vme_base, &vme_base_high, &vme_base_low);
/*
* Bound address is a valid address for the window, adjust
* accordingly
*/
vme_bound = vme_base + size - granularity;
reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
pci_offset = (unsigned long long)pci_base - vme_base;
reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
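/*
* The translation offset is added to the incoming VME address to form
* the PCI address (tsi148_slave_get() recovers pci_base the same way).
*/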
if (vme_base_low & (granularity - 1)) {
dev_err(tsi148_bridge->parent, "Invalid VME base alignment\n");
return -EINVAL;
}
if (vme_bound_low & (granularity - 1)) {
dev_err(tsi148_bridge->parent, "Invalid VME bound alignment\n");
return -EINVAL;
}
if (pci_offset_low & (granularity - 1)) {
dev_err(tsi148_bridge->parent, "Invalid PCI Offset "
"alignment\n");
return -EINVAL;
}
/* Disable while we are mucking around */
temp_ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITAT);
temp_ctl &= ~TSI148_LCSR_ITAT_EN;
iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITAT);
/* Setup mapping */
iowrite32be(vme_base_high, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITSAU);
iowrite32be(vme_base_low, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITSAL);
iowrite32be(vme_bound_high, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITEAU);
iowrite32be(vme_bound_low, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITEAL);
iowrite32be(pci_offset_high, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITOFU);
iowrite32be(pci_offset_low, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITOFL);
/* Setup 2eSST speeds */
temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
case VME_2eSST160:
temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
break;
case VME_2eSST267:
temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
break;
case VME_2eSST320:
temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
break;
}
/* Setup cycle types */
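/* The 0x1F << 7 mask below presumably covers the five cycle-type bits
* (BLT, MBLT, 2eVME, 2eSST, 2eSSTB) OR-ed in afterwards. */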
temp_ctl &= ~(0x1F << 7);
if (cycle & VME_BLT)
temp_ctl |= TSI148_LCSR_ITAT_BLT;
if (cycle & VME_MBLT)
temp_ctl |= TSI148_LCSR_ITAT_MBLT;
if (cycle & VME_2eVME)
temp_ctl |= TSI148_LCSR_ITAT_2eVME;
if (cycle & VME_2eSST)
temp_ctl |= TSI148_LCSR_ITAT_2eSST;
if (cycle & VME_2eSSTB)
temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
/* Setup address space */
temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
temp_ctl |= addr;
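/* The low nibble presumably holds the access-mode bits (SUPR, NPRIV,
* PGM, DATA) set below. */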
temp_ctl &= ~0xF;
if (cycle & VME_SUPER)
temp_ctl |= TSI148_LCSR_ITAT_SUPR;
if (cycle & VME_USER)
temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
if (cycle & VME_PROG)
temp_ctl |= TSI148_LCSR_ITAT_PGM;
if (cycle & VME_DATA)
temp_ctl |= TSI148_LCSR_ITAT_DATA;
/* Write ctl reg without enable */
iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITAT);
if (enabled)
temp_ctl |= TSI148_LCSR_ITAT_EN;
iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITAT);
return 0;
}
/*
* Get slave window configuration.
*/
int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
unsigned long long *vme_base, unsigned long long *size,
dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
{
unsigned int i, granularity = 0, ctl = 0;
unsigned int vme_base_low, vme_base_high;
unsigned int vme_bound_low, vme_bound_high;
unsigned int pci_offset_low, pci_offset_high;
unsigned long long vme_bound, pci_offset;
struct tsi148_driver *bridge;
bridge = image->parent->driver_priv;
i = image->number;
/* Read registers */
ctl = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITAT);
vme_base_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITSAU);
vme_base_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITSAL);
vme_bound_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITEAU);
vme_bound_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITEAL);
pci_offset_high = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITOFU);
pci_offset_low = ioread32be(bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITOFL);
/* Convert 64-bit variables to 2x 32-bit variables */
reg_join(vme_base_high, vme_base_low, vme_base);
reg_join(vme_bound_high, vme_bound_low, &vme_bound);
reg_join(pci_offset_high, pci_offset_low, &pci_offset);
*pci_base = (dma_addr_t)(*vme_base + pci_offset);
*enabled = 0;
*aspace = 0;
*cycle = 0;
if (ctl & TSI148_LCSR_ITAT_EN)
*enabled = 1;
if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
granularity = 0x10;
*aspace |= VME_A16;
}
if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
granularity = 0x1000;
*aspace |= VME_A24;
}
if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
granularity = 0x10000;
*aspace |= VME_A32;
}
if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
granularity = 0x10000;
*aspace |= VME_A64;
}
/* Need granularity before we set the size */
*size = (unsigned long long)((vme_bound - *vme_base) + granularity);
if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
*cycle |= VME_2eSST160;
if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
*cycle |= VME_2eSST267;
if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
*cycle |= VME_2eSST320;
if (ctl & TSI148_LCSR_ITAT_BLT)
*cycle |= VME_BLT;
if (ctl & TSI148_LCSR_ITAT_MBLT)
*cycle |= VME_MBLT;
if (ctl & TSI148_LCSR_ITAT_2eVME)
*cycle |= VME_2eVME;
if (ctl & TSI148_LCSR_ITAT_2eSST)
*cycle |= VME_2eSST;
if (ctl & TSI148_LCSR_ITAT_2eSSTB)
*cycle |= VME_2eSSTB;
if (ctl & TSI148_LCSR_ITAT_SUPR)
*cycle |= VME_SUPER;
if (ctl & TSI148_LCSR_ITAT_NPRIV)
*cycle |= VME_USER;
if (ctl & TSI148_LCSR_ITAT_PGM)
*cycle |= VME_PROG;
if (ctl & TSI148_LCSR_ITAT_DATA)
*cycle |= VME_DATA;
return 0;
}
/*
* Allocate and map PCI Resource
*/
static int tsi148_alloc_resource(struct vme_master_resource *image,
unsigned long long size)
{
unsigned long long existing_size;
int retval = 0;
struct pci_dev *pdev;
struct vme_bridge *tsi148_bridge;
tsi148_bridge = image->parent;
pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
existing_size = (unsigned long long)(image->bus_resource.end -
image->bus_resource.start);
/* If the existing size is OK, return */
if ((size != 0) && (existing_size == (size - 1)))
return 0;
if (existing_size != 0) {
iounmap(image->kern_base);
image->kern_base = NULL;
if (image->bus_resource.name != NULL)
kfree(image->bus_resource.name);
release_resource(&(image->bus_resource));
memset(&(image->bus_resource), 0, sizeof(struct resource));
}
/* Exit here if size is zero */
if (size == 0)
return 0;
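/* The +3 below presumably leaves room for the ".%d" window-number
* suffix appended by the sprintf() further down. */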
if (image->bus_resource.name == NULL) {
image->bus_resource.name = kmalloc(VMENAMSIZ+3, GFP_ATOMIC);
if (image->bus_resource.name == NULL) {
dev_err(tsi148_bridge->parent, "Unable to allocate "
"memory for resource name\n");
retval = -ENOMEM;
goto err_name;
}
}
sprintf((char *)image->bus_resource.name, "%s.%d", tsi148_bridge->name,
image->number);
image->bus_resource.start = 0;
image->bus_resource.end = (unsigned long)size;
image->bus_resource.flags = IORESOURCE_MEM;
retval = pci_bus_alloc_resource(pdev->bus,
&(image->bus_resource), size, size, PCIBIOS_MIN_MEM,
0, NULL, NULL);
if (retval) {
dev_err(tsi148_bridge->parent, "Failed to allocate mem "
"resource for window %d size 0x%lx start 0x%lx\n",
image->number, (unsigned long)size,
(unsigned long)image->bus_resource.start);
goto err_resource;
}
image->kern_base = ioremap_nocache(
image->bus_resource.start, size);
if (image->kern_base == NULL) {
dev_err(tsi148_bridge->parent, "Failed to remap resource\n");
retval = -ENOMEM;
goto err_remap;
}
return 0;
iounmap(image->kern_base);
image->kern_base = NULL;
err_remap:
release_resource(&(image->bus_resource));
err_resource:
kfree(image->bus_resource.name);
memset(&(image->bus_resource), 0, sizeof(struct resource));
err_name:
return retval;
}
/*
* Free and unmap PCI Resource
*/
static void tsi148_free_resource(struct vme_master_resource *image)
{
iounmap(image->kern_base);
image->kern_base = NULL;
release_resource(&(image->bus_resource));
kfree(image->bus_resource.name);
memset(&(image->bus_resource), 0, sizeof(struct resource));
}
/*
* Set the attributes of an outbound window.
*/
int tsi148_master_set(struct vme_master_resource *image, int enabled,
unsigned long long vme_base, unsigned long long size,
vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
int retval = 0;
unsigned int i;
unsigned int temp_ctl = 0;
unsigned int pci_base_low, pci_base_high;
unsigned int pci_bound_low, pci_bound_high;
unsigned int vme_offset_low, vme_offset_high;
unsigned long long pci_bound, vme_offset, pci_base;
struct vme_bridge *tsi148_bridge;
struct tsi148_driver *bridge;
tsi148_bridge = image->parent;
bridge = tsi148_bridge->driver_priv;
/* Verify input data */
if (vme_base & 0xFFFF) {
dev_err(tsi148_bridge->parent, "Invalid VME Window "
"alignment\n");
retval = -EINVAL;
goto err_window;
}
if ((size == 0) && (enabled != 0)) {
dev_err(tsi148_bridge->parent, "Size must be non-zero for "
"enabled windows\n");
retval = -EINVAL;
goto err_window;
}
spin_lock(&(image->lock));
/* Let's allocate the resource here rather than further up the stack as
* it avoids pushing loads of bus dependent stuff up the stack. If size
* is zero, any existing resource will be freed.
*/
retval = tsi148_alloc_resource(image, size);
if (retval) {
spin_unlock(&(image->lock));
dev_err(tsi148_bridge->parent, "Unable to allocate memory for "
"resource\n");
goto err_res;
}
if (size == 0) {
pci_base = 0;
pci_bound = 0;
vme_offset = 0;
} else {
pci_base = (unsigned long long)image->bus_resource.start;
/*
* Bound address is a valid address for the window, adjust
* according to window granularity.
*/
pci_bound = pci_base + (size - 0x10000);
vme_offset = vme_base - pci_base;
}
/* Convert 64-bit variables to 2x 32-bit variables */
reg_split(pci_base, &pci_base_high, &pci_base_low);
reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
if (pci_base_low & 0xFFFF) {
spin_unlock(&(image->lock));
dev_err(tsi148_bridge->parent, "Invalid PCI base alignment\n");
retval = -EINVAL;
goto err_gran;
}
if (pci_bound_low & 0xFFFF) {
spin_unlock(&(image->lock));
dev_err(tsi148_bridge->parent, "Invalid PCI bound alignment\n");
retval = -EINVAL;
goto err_gran;
}
if (vme_offset_low & 0xFFFF) {
spin_unlock(&(image->lock));
dev_err(tsi148_bridge->parent, "Invalid VME Offset "
"alignment\n");
retval = -EINVAL;
goto err_gran;
}
i = image->number;
/* Disable while we are mucking around */
temp_ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTAT);
temp_ctl &= ~TSI148_LCSR_OTAT_EN;
iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTAT);
/* Setup 2eSST speeds */
temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
case VME_2eSST160:
temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
break;
case VME_2eSST267:
temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
break;
case VME_2eSST320:
temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
break;
}
/* Setup cycle types */
if (cycle & VME_BLT) {
temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
}
if (cycle & VME_MBLT) {
temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
}
if (cycle & VME_2eVME) {
temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
}
if (cycle & VME_2eSST) {
temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
}
if (cycle & VME_2eSSTB) {
dev_warn(tsi148_bridge->parent, "Currently not setting "
"Broadcast Select Registers\n");
temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
}
/* Setup data width */
temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
switch (dwidth) {
case VME_D16:
temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
break;
case VME_D32:
temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
break;
default:
spin_unlock(&(image->lock));
dev_err(tsi148_bridge->parent, "Invalid data width\n");
retval = -EINVAL;
goto err_dwidth;
}
/* Setup address space */
temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
switch (aspace) {
case VME_A16:
temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
break;
case VME_A24:
temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
break;
case VME_A32:
temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
break;
case VME_A64:
temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
break;
case VME_CRCSR:
temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
break;
case VME_USER1:
temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
break;
case VME_USER2:
temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
break;
case VME_USER3:
temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
break;
case VME_USER4:
temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
break;
default:
spin_unlock(&(image->lock));
dev_err(tsi148_bridge->parent, "Invalid address space\n");
retval = -EINVAL;
goto err_aspace;
}
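/* Bits 4 and 5 presumably hold the SUP and PGM enables set below */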
temp_ctl &= ~(3<<4);
if (cycle & VME_SUPER)
temp_ctl |= TSI148_LCSR_OTAT_SUP;
if (cycle & VME_PROG)
temp_ctl |= TSI148_LCSR_OTAT_PGM;
/* Setup mapping */
iowrite32be(pci_base_high, bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTSAU);
iowrite32be(pci_base_low, bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTSAL);
iowrite32be(pci_bound_high, bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTEAU);
iowrite32be(pci_bound_low, bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTEAL);
iowrite32be(vme_offset_high, bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTOFU);
iowrite32be(vme_offset_low, bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTOFL);
/* Write ctl reg without enable */
iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTAT);
if (enabled)
temp_ctl |= TSI148_LCSR_OTAT_EN;
iowrite32be(temp_ctl, bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTAT);
spin_unlock(&(image->lock));
return 0;
err_aspace:
err_dwidth:
err_gran:
tsi148_free_resource(image);
err_res:
err_window:
return retval;
}
/*
* Set the attributes of an outbound window.
*
* XXX Not parsing prefetch information.
*/
int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
unsigned long long *vme_base, unsigned long long *size,
vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
unsigned int i, ctl;
unsigned int pci_base_low, pci_base_high;
unsigned int pci_bound_low, pci_bound_high;
unsigned int vme_offset_low, vme_offset_high;
unsigned long long pci_base, pci_bound, vme_offset;
struct tsi148_driver *bridge;
bridge = image->parent->driver_priv;
i = image->number;
ctl = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTAT);
pci_base_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTSAU);
pci_base_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTSAL);
pci_bound_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTEAU);
pci_bound_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTEAL);
vme_offset_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTOFU);
vme_offset_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTOFL);
/* Convert 64-bit variables to 2x 32-bit variables */
reg_join(pci_base_high, pci_base_low, &pci_base);
reg_join(pci_bound_high, pci_bound_low, &pci_bound);
reg_join(vme_offset_high, vme_offset_low, &vme_offset);
*vme_base = pci_base + vme_offset;
*size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
*enabled = 0;
*aspace = 0;
*cycle = 0;
*dwidth = 0;
if (ctl & TSI148_LCSR_OTAT_EN)
*enabled = 1;
/* Setup address space */
if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
*aspace |= VME_A16;
if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
*aspace |= VME_A24;
if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
*aspace |= VME_A32;
if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
*aspace |= VME_A64;
if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
*aspace |= VME_CRCSR;
if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
*aspace |= VME_USER1;
if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
*aspace |= VME_USER2;
if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
*aspace |= VME_USER3;
if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
*aspace |= VME_USER4;
/* Setup 2eSST speeds */
if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
*cycle |= VME_2eSST160;
if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
*cycle |= VME_2eSST267;
if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
*cycle |= VME_2eSST320;
/* Setup cycle types */
if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_SCT)
*cycle |= VME_SCT;
if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_BLT)
*cycle |= VME_BLT;
if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_MBLT)
*cycle |= VME_MBLT;
if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eVME)
*cycle |= VME_2eVME;
if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSST)
*cycle |= VME_2eSST;
if ((ctl & TSI148_LCSR_OTAT_TM_M) == TSI148_LCSR_OTAT_TM_2eSSTB)
*cycle |= VME_2eSSTB;
if (ctl & TSI148_LCSR_OTAT_SUP)
*cycle |= VME_SUPER;
else
*cycle |= VME_USER;
if (ctl & TSI148_LCSR_OTAT_PGM)
*cycle |= VME_PROG;
else
*cycle |= VME_DATA;
/* Setup data width */
if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
*dwidth = VME_D16;
if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
*dwidth = VME_D32;
return 0;
}
int tsi148_master_get(struct vme_master_resource *image, int *enabled,
unsigned long long *vme_base, unsigned long long *size,
vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
{
int retval;
spin_lock(&(image->lock));
retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
cycle, dwidth);
spin_unlock(&(image->lock));
return retval;
}
ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
size_t count, loff_t offset)
{
int retval, enabled;
unsigned long long vme_base, size;
vme_address_t aspace;
vme_cycle_t cycle;
vme_width_t dwidth;
struct vme_bus_error *vme_err = NULL;
struct vme_bridge *tsi148_bridge;
tsi148_bridge = image->parent;
spin_lock(&(image->lock));
memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
retval = count;
if (!err_chk)
goto skip_chk;
__tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
&dwidth);
vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
count);
if (vme_err != NULL) {
dev_err(tsi148_bridge->parent, "First VME read error detected "
"at address 0x%llx\n", vme_err->address);
retval = vme_err->address - (vme_base + offset);
/* Clear down saved errors in this address range */
tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
count);
}
skip_chk:
spin_unlock(&(image->lock));
return retval;
}
ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
size_t count, loff_t offset)
{
int retval = 0, enabled;
unsigned long long vme_base, size;
vme_address_t aspace;
vme_cycle_t cycle;
vme_width_t dwidth;
struct vme_bus_error *vme_err = NULL;
struct vme_bridge *tsi148_bridge;
struct tsi148_driver *bridge;
tsi148_bridge = image->parent;
bridge = tsi148_bridge->driver_priv;
spin_lock(&(image->lock));
memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
retval = count;
/*
* Writes are posted. We need to do a read on the VME bus to flush out
* all of the writes before we check for errors. We can't guarantee
* that reading back the data we have just written is safe. It is
* believed that there isn't any read/write re-ordering, so we can read
* any location in VME space, so let's read the Device ID from the
* tsi148's own registers as mapped into CR/CSR space.
*
* We check for saved errors in the written address range/space.
*/
if (!err_chk)
goto skip_chk;
/*
* Get window info first, to maximise the time that the buffers may
* flush on their own
*/
__tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
&dwidth);
ioread16(bridge->flush_image->kern_base + 0x7F000);
vme_err = tsi148_find_error(tsi148_bridge, aspace, vme_base + offset,
count);
if (vme_err != NULL) {
dev_warn(tsi148_bridge->parent, "First VME write error detected"
" at address 0x%llx\n", vme_err->address);
retval = vme_err->address - (vme_base + offset);
/* Clear down saved errors in this address range */
tsi148_clear_errors(tsi148_bridge, aspace, vme_base + offset,
count);
}
skip_chk:
spin_unlock(&(image->lock));
return retval;
}
/*
* Perform an RMW cycle on the VME bus.
*
* Requires a previously configured master window, returns final value.
*/
unsigned int tsi148_master_rmw(struct vme_master_resource *image,
unsigned int mask, unsigned int compare, unsigned int swap,
loff_t offset)
{
unsigned long long pci_addr;
unsigned int pci_addr_high, pci_addr_low;
u32 tmp, result;
int i;
struct tsi148_driver *bridge;
bridge = image->parent->driver_priv;
/* Find the PCI address that maps to the desired VME address */
i = image->number;
/* Locking as we can only do one of these at a time */
mutex_lock(&(bridge->vme_rmw));
/* Lock image */
spin_lock(&(image->lock));
pci_addr_high = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTSAU);
pci_addr_low = ioread32be(bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTSAL);
reg_join(pci_addr_high, pci_addr_low, &pci_addr);
reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
/* Configure registers */
iowrite32be(mask, bridge->base + TSI148_LCSR_RMWEN);
iowrite32be(compare, bridge->base + TSI148_LCSR_RMWC);
iowrite32be(swap, bridge->base + TSI148_LCSR_RMWS);
iowrite32be(pci_addr_high, bridge->base + TSI148_LCSR_RMWAU);
iowrite32be(pci_addr_low, bridge->base + TSI148_LCSR_RMWAL);
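/*
* The enable, compare and swap registers arm a compare-and-swap which
* the chip performs when the next read passes through this window.
*/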
/* Enable RMW */
tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
tmp |= TSI148_LCSR_VMCTRL_RMWEN;
iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
/* Kick process off with a read to the required address. */
result = ioread32be(image->kern_base + offset);
/* Disable RMW */
tmp = ioread32be(bridge->base + TSI148_LCSR_VMCTRL);
tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
iowrite32be(tmp, bridge->base + TSI148_LCSR_VMCTRL);
spin_unlock(&(image->lock));
mutex_unlock(&(bridge->vme_rmw));
return result;
}
static int tsi148_dma_set_vme_src_attributes(struct device *dev, u32 *attr,
vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
/* Setup 2eSST speeds */
switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
case VME_2eSST160:
*attr |= TSI148_LCSR_DSAT_2eSSTM_160;
break;
case VME_2eSST267:
*attr |= TSI148_LCSR_DSAT_2eSSTM_267;
break;
case VME_2eSST320:
*attr |= TSI148_LCSR_DSAT_2eSSTM_320;
break;
}
/* Setup cycle types */
if (cycle & VME_SCT)
*attr |= TSI148_LCSR_DSAT_TM_SCT;
if (cycle & VME_BLT)
*attr |= TSI148_LCSR_DSAT_TM_BLT;
if (cycle & VME_MBLT)
*attr |= TSI148_LCSR_DSAT_TM_MBLT;
if (cycle & VME_2eVME)
*attr |= TSI148_LCSR_DSAT_TM_2eVME;
if (cycle & VME_2eSST)
*attr |= TSI148_LCSR_DSAT_TM_2eSST;
if (cycle & VME_2eSSTB) {
dev_err(dev, "Currently not setting Broadcast Select "
"Registers\n");
*attr |= TSI148_LCSR_DSAT_TM_2eSSTB;
}
/* Setup data width */
switch (dwidth) {
case VME_D16:
*attr |= TSI148_LCSR_DSAT_DBW_16;
break;
case VME_D32:
*attr |= TSI148_LCSR_DSAT_DBW_32;
break;
default:
dev_err(dev, "Invalid data width\n");
return -EINVAL;
}
/* Setup address space */
switch (aspace) {
case VME_A16:
*attr |= TSI148_LCSR_DSAT_AMODE_A16;
break;
case VME_A24:
*attr |= TSI148_LCSR_DSAT_AMODE_A24;
break;
case VME_A32:
*attr |= TSI148_LCSR_DSAT_AMODE_A32;
break;
case VME_A64:
*attr |= TSI148_LCSR_DSAT_AMODE_A64;
break;
case VME_CRCSR:
*attr |= TSI148_LCSR_DSAT_AMODE_CRCSR;
break;
case VME_USER1:
*attr |= TSI148_LCSR_DSAT_AMODE_USER1;
break;
case VME_USER2:
*attr |= TSI148_LCSR_DSAT_AMODE_USER2;
break;
case VME_USER3:
*attr |= TSI148_LCSR_DSAT_AMODE_USER3;
break;
case VME_USER4:
*attr |= TSI148_LCSR_DSAT_AMODE_USER4;
break;
default:
dev_err(dev, "Invalid address space\n");
return -EINVAL;
}
if (cycle & VME_SUPER)
*attr |= TSI148_LCSR_DSAT_SUP;
if (cycle & VME_PROG)
*attr |= TSI148_LCSR_DSAT_PGM;
return 0;
}
static int tsi148_dma_set_vme_dest_attributes(struct device *dev, u32 *attr,
vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
{
/* Setup 2eSST speeds */
switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
case VME_2eSST160:
*attr |= TSI148_LCSR_DDAT_2eSSTM_160;
break;
case VME_2eSST267:
*attr |= TSI148_LCSR_DDAT_2eSSTM_267;
break;
case VME_2eSST320:
*attr |= TSI148_LCSR_DDAT_2eSSTM_320;
break;
}
/* Setup cycle types */
if (cycle & VME_SCT)
*attr |= TSI148_LCSR_DDAT_TM_SCT;
if (cycle & VME_BLT)
*attr |= TSI148_LCSR_DDAT_TM_BLT;
if (cycle & VME_MBLT)
*attr |= TSI148_LCSR_DDAT_TM_MBLT;
if (cycle & VME_2eVME)
*attr |= TSI148_LCSR_DDAT_TM_2eVME;
if (cycle & VME_2eSST)
*attr |= TSI148_LCSR_DDAT_TM_2eSST;
if (cycle & VME_2eSSTB) {
dev_err(dev, "Currently not setting Broadcast Select "
"Registers\n");
*attr |= TSI148_LCSR_DDAT_TM_2eSSTB;
}
/* Setup data width */
switch (dwidth) {
case VME_D16:
*attr |= TSI148_LCSR_DDAT_DBW_16;
break;
case VME_D32:
*attr |= TSI148_LCSR_DDAT_DBW_32;
break;
default:
dev_err(dev, "Invalid data width\n");
return -EINVAL;
}
/* Setup address space */
switch (aspace) {
case VME_A16:
*attr |= TSI148_LCSR_DDAT_AMODE_A16;
break;
case VME_A24:
*attr |= TSI148_LCSR_DDAT_AMODE_A24;
break;
case VME_A32:
*attr |= TSI148_LCSR_DDAT_AMODE_A32;
break;
case VME_A64:
*attr |= TSI148_LCSR_DDAT_AMODE_A64;
break;
case VME_CRCSR:
*attr |= TSI148_LCSR_DDAT_AMODE_CRCSR;
break;
case VME_USER1:
*attr |= TSI148_LCSR_DDAT_AMODE_USER1;
break;
case VME_USER2:
*attr |= TSI148_LCSR_DDAT_AMODE_USER2;
break;
case VME_USER3:
*attr |= TSI148_LCSR_DDAT_AMODE_USER3;
break;
case VME_USER4:
*attr |= TSI148_LCSR_DDAT_AMODE_USER4;
break;
default:
dev_err(dev, "Invalid address space\n");
return -EINVAL;
}
if (cycle & VME_SUPER)
*attr |= TSI148_LCSR_DDAT_SUP;
if (cycle & VME_PROG)
*attr |= TSI148_LCSR_DDAT_PGM;
return 0;
}
/*
* Add a link list descriptor to the list
*/
int tsi148_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
struct vme_dma_attr *dest, size_t count)
{
struct tsi148_dma_entry *entry, *prev;
u32 address_high, address_low;
struct vme_dma_pattern *pattern_attr;
struct vme_dma_pci *pci_attr;
struct vme_dma_vme *vme_attr;
dma_addr_t desc_ptr;
int retval = 0;
struct vme_bridge *tsi148_bridge;
tsi148_bridge = list->parent->parent;
/* Descriptor must be aligned on 64-bit boundaries */
entry = kmalloc(sizeof(struct tsi148_dma_entry), GFP_KERNEL);
if (entry == NULL) {
dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
"dma resource structure\n");
retval = -ENOMEM;
goto err_mem;
}
/* Test descriptor alignment */
if ((unsigned long)&(entry->descriptor) & 0x7) {
dev_err(tsi148_bridge->parent, "Descriptor not aligned to 8 "
"byte boundary as required: %p\n",
&(entry->descriptor));
retval = -EINVAL;
goto err_align;
}
/* Given we are going to fill out the structure, we probably don't
* need to zero it, but better safe than sorry for now.
*/
memset(&(entry->descriptor), 0, sizeof(struct tsi148_dma_descriptor));
/* Fill out source part */
switch (src->type) {
case VME_DMA_PATTERN:
pattern_attr = (struct vme_dma_pattern *)src->private;
entry->descriptor.dsal = pattern_attr->pattern;
entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PAT;
/* Default behaviour is 32 bit pattern */
if (pattern_attr->type & VME_DMA_PATTERN_BYTE)
entry->descriptor.dsat |= TSI148_LCSR_DSAT_PSZ;
/* It seems that the default behaviour is to increment */
if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0)
entry->descriptor.dsat |= TSI148_LCSR_DSAT_NIN;
break;
case VME_DMA_PCI:
pci_attr = (struct vme_dma_pci *)src->private;
reg_split((unsigned long long)pci_attr->address, &address_high,
&address_low);
entry->descriptor.dsau = address_high;
entry->descriptor.dsal = address_low;
entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PCI;
break;
case VME_DMA_VME:
vme_attr = (struct vme_dma_vme *)src->private;
reg_split((unsigned long long)vme_attr->address, &address_high,
&address_low);
entry->descriptor.dsau = address_high;
entry->descriptor.dsal = address_low;
entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_VME;
retval = tsi148_dma_set_vme_src_attributes(
tsi148_bridge->parent, &(entry->descriptor.dsat),
vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
if (retval < 0)
goto err_source;
break;
default:
dev_err(tsi148_bridge->parent, "Invalid source type\n");
retval = -EINVAL;
goto err_source;
}
/* Assume last link - this will be over-written by adding another */
entry->descriptor.dnlau = 0;
entry->descriptor.dnlal = TSI148_LCSR_DNLAL_LLA;
/* Fill out destination part */
switch (dest->type) {
case VME_DMA_PCI:
pci_attr = (struct vme_dma_pci *)dest->private;
reg_split((unsigned long long)pci_attr->address, &address_high,
&address_low);
entry->descriptor.ddau = address_high;
entry->descriptor.ddal = address_low;
entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_PCI;
break;
case VME_DMA_VME:
vme_attr = (struct vme_dma_vme *)dest->private;
reg_split((unsigned long long)vme_attr->address, &address_high,
&address_low);
entry->descriptor.ddau = address_high;
entry->descriptor.ddal = address_low;
entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_VME;
retval = tsi148_dma_set_vme_dest_attributes(
tsi148_bridge->parent, &(entry->descriptor.ddat),
vme_attr->aspace, vme_attr->cycle, vme_attr->dwidth);
if (retval < 0)
goto err_dest;
break;
default:
dev_err(tsi148_bridge->parent, "Invalid destination type\n");
retval = -EINVAL;
goto err_dest;
}
/* Fill out count */
entry->descriptor.dcnt = (u32)count;
/* Add to list */
list_add_tail(&(entry->list), &(list->entries));
/* Fill out previous descriptors "Next Address" */
if (entry->list.prev != &(list->entries)) {
prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
list);
/* We need the bus address for the pointer */
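/* XXX virt_to_bus() assumes a 1:1 bus to physical mapping; the DMA
* mapping API would be the portable way to obtain this address. */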
desc_ptr = virt_to_bus(&(entry->descriptor));
reg_split(desc_ptr, &(prev->descriptor.dnlau),
&(prev->descriptor.dnlal));
}
return 0;
err_dest:
err_source:
err_align:
kfree(entry);
err_mem:
return retval;
}
/*
* Check to see if the provided DMA channel is busy.
*/
static int tsi148_dma_busy(struct vme_bridge *tsi148_bridge, int channel)
{
u32 tmp;
struct tsi148_driver *bridge;
bridge = tsi148_bridge->driver_priv;
tmp = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
TSI148_LCSR_OFFSET_DSTA);
if (tmp & TSI148_LCSR_DSTA_BSY)
return 0;
else
return 1;
}
/*
* Execute a previously generated link list
*
* XXX Need to provide control register configuration.
*/
int tsi148_dma_list_exec(struct vme_dma_list *list)
{
struct vme_dma_resource *ctrlr;
int channel, retval = 0;
struct tsi148_dma_entry *entry;
dma_addr_t bus_addr;
u32 bus_addr_high, bus_addr_low;
u32 val, dctlreg = 0;
struct vme_bridge *tsi148_bridge;
struct tsi148_driver *bridge;
ctrlr = list->parent;
tsi148_bridge = ctrlr->parent;
bridge = tsi148_bridge->driver_priv;
mutex_lock(&(ctrlr->mtx));
channel = ctrlr->number;
if (!list_empty(&(ctrlr->running))) {
/*
* XXX We have an active DMA transfer and currently haven't
* sorted out the mechanism for "pending" DMA transfers.
* Return busy.
*/
/* Need to add to pending here */
mutex_unlock(&(ctrlr->mtx));
return -EBUSY;
} else {
list_add(&(list->list), &(ctrlr->running));
}
/* Get first bus address and write into registers */
entry = list_first_entry(&(list->entries), struct tsi148_dma_entry,
list);
bus_addr = virt_to_bus(&(entry->descriptor));
mutex_unlock(&(ctrlr->mtx));
reg_split(bus_addr, &bus_addr_high, &bus_addr_low);
iowrite32be(bus_addr_high, bridge->base +
TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
iowrite32be(bus_addr_low, bridge->base +
TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
/* Start the operation */
iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, bridge->base +
TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
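/*
* tsi148_dma_busy() returns 1 once the channel is idle, so this sleeps
* until the transfer completes (the DMA interrupt handler wakes
* dma_queue).
*/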
wait_event_interruptible(bridge->dma_queue[channel],
tsi148_dma_busy(ctrlr->parent, channel));
/*
* Read status register, this register is valid until we kick off a
* new transfer.
*/
val = ioread32be(bridge->base + TSI148_LCSR_DMA[channel] +
TSI148_LCSR_OFFSET_DSTA);
if (val & TSI148_LCSR_DSTA_VBE) {
dev_err(tsi148_bridge->parent, "DMA Error. DSTA=%08X\n", val);
retval = -EIO;
}
/* Remove list from running list */
mutex_lock(&(ctrlr->mtx));
list_del(&(list->list));
mutex_unlock(&(ctrlr->mtx));
return retval;
}
/*
* Clean up a previously generated link list
*
* This is kept as a separate function; do not assume the chain cannot be
* reused.
*/
int tsi148_dma_list_empty(struct vme_dma_list *list)
{
struct list_head *pos, *temp;
struct tsi148_dma_entry *entry;
/* detach and free each entry */
list_for_each_safe(pos, temp, &(list->entries)) {
list_del(pos);
entry = list_entry(pos, struct tsi148_dma_entry, list);
kfree(entry);
}
return 0;
}
/*
* All 4 location monitors reside at the same base - this is therefore a
* system wide configuration.
*
* This does not enable the LM monitor - that should be done when the first
* callback is attached and disabled when the last callback is removed.
*/
int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
vme_address_t aspace, vme_cycle_t cycle)
{
u32 lm_base_high, lm_base_low, lm_ctl = 0;
int i;
struct vme_bridge *tsi148_bridge;
struct tsi148_driver *bridge;
tsi148_bridge = lm->parent;
bridge = tsi148_bridge->driver_priv;
mutex_lock(&(lm->mtx));
/* If we already have a callback attached, we can't move it! */
for (i = 0; i < lm->monitors; i++) {
if (bridge->lm_callback[i] != NULL) {
mutex_unlock(&(lm->mtx));
dev_err(tsi148_bridge->parent, "Location monitor "
"callback attached, can't reset\n");
return -EBUSY;
}
}
switch (aspace) {
case VME_A16:
lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
break;
case VME_A24:
lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
break;
case VME_A32:
lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
break;
case VME_A64:
lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
break;
default:
mutex_unlock(&(lm->mtx));
dev_err(tsi148_bridge->parent, "Invalid address space\n");
return -EINVAL;
}
if (cycle & VME_SUPER)
lm_ctl |= TSI148_LCSR_LMAT_SUPR;
if (cycle & VME_USER)
lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
if (cycle & VME_PROG)
lm_ctl |= TSI148_LCSR_LMAT_PGM;
if (cycle & VME_DATA)
lm_ctl |= TSI148_LCSR_LMAT_DATA;
reg_split(lm_base, &lm_base_high, &lm_base_low);
iowrite32be(lm_base_high, bridge->base + TSI148_LCSR_LMBAU);
iowrite32be(lm_base_low, bridge->base + TSI148_LCSR_LMBAL);
iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
mutex_unlock(&(lm->mtx));
return 0;
}
/* Get configuration of the location monitor and return whether it is enabled
* or disabled.
*/
int tsi148_lm_get(struct vme_lm_resource *lm, unsigned long long *lm_base,
vme_address_t *aspace, vme_cycle_t *cycle)
{
u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
struct tsi148_driver *bridge;
bridge = lm->parent->driver_priv;
mutex_lock(&(lm->mtx));
lm_base_high = ioread32be(bridge->base + TSI148_LCSR_LMBAU);
lm_base_low = ioread32be(bridge->base + TSI148_LCSR_LMBAL);
lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
reg_join(lm_base_high, lm_base_low, lm_base);
*aspace = 0;
*cycle = 0;
if (lm_ctl & TSI148_LCSR_LMAT_EN)
enabled = 1;
if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16)
*aspace |= VME_A16;
if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24)
*aspace |= VME_A24;
if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32)
*aspace |= VME_A32;
if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64)
*aspace |= VME_A64;
if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
*cycle |= VME_SUPER;
if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
*cycle |= VME_USER;
if (lm_ctl & TSI148_LCSR_LMAT_PGM)
*cycle |= VME_PROG;
if (lm_ctl & TSI148_LCSR_LMAT_DATA)
*cycle |= VME_DATA;
mutex_unlock(&(lm->mtx));
return enabled;
}
/*
* Attach a callback to a specific location monitor.
*
* Callback will be passed the monitor triggered.
*/
int tsi148_lm_attach(struct vme_lm_resource *lm, int monitor,
void (*callback)(int))
{
u32 lm_ctl, tmp;
struct vme_bridge *tsi148_bridge;
struct tsi148_driver *bridge;
tsi148_bridge = lm->parent;
bridge = tsi148_bridge->driver_priv;
mutex_lock(&(lm->mtx));
/* Ensure that the location monitor is configured - need PGM or DATA */
lm_ctl = ioread32be(bridge->base + TSI148_LCSR_LMAT);
if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
mutex_unlock(&(lm->mtx));
dev_err(tsi148_bridge->parent, "Location monitor not properly "
"configured\n");
return -EINVAL;
}
/* Check that a callback isn't already attached */
if (bridge->lm_callback[monitor] != NULL) {
mutex_unlock(&(lm->mtx));
dev_err(tsi148_bridge->parent, "Existing callback attached\n");
return -EBUSY;
}
/* Attach callback */
bridge->lm_callback[monitor] = callback;
/* Enable Location Monitor interrupt */
tmp = ioread32be(bridge->base + TSI148_LCSR_INTEN);
tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
/* Ensure that the global Location Monitor Enable bit is set */
if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
lm_ctl |= TSI148_LCSR_LMAT_EN;
iowrite32be(lm_ctl, bridge->base + TSI148_LCSR_LMAT);
}
mutex_unlock(&(lm->mtx));
return 0;
}
/*
* Detach a callback function from a specific location monitor.
*/
int tsi148_lm_detach(struct vme_lm_resource *lm, int monitor)
{
u32 lm_en, tmp;
struct tsi148_driver *bridge;
bridge = lm->parent->driver_priv;
mutex_lock(&(lm->mtx));
/* Disable Location Monitor and ensure previous interrupts are clear */
lm_en = ioread32be(bridge->base + TSI148_LCSR_INTEN);
lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
iowrite32be(lm_en, bridge->base + TSI148_LCSR_INTEN);
tmp = ioread32be(bridge->base + TSI148_LCSR_INTEO);
tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
bridge->base + TSI148_LCSR_INTC);
/* Detach callback */
bridge->lm_callback[monitor] = NULL;
/* If all location monitors disabled, disable global Location Monitor */
if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
tmp = ioread32be(bridge->base + TSI148_LCSR_LMAT);
tmp &= ~TSI148_LCSR_LMAT_EN;
iowrite32be(tmp, bridge->base + TSI148_LCSR_LMAT);
}
mutex_unlock(&(lm->mtx));
return 0;
}
/*
* Determine Geographical Addressing
*/
int tsi148_slot_get(struct vme_bridge *tsi148_bridge)
{
u32 slot = 0;
struct tsi148_driver *bridge;
bridge = tsi148_bridge->driver_priv;
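/* geoid, presumably a module parameter, overrides the geographical
* address read from the VSTAT register when non-zero. */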
if (!geoid) {
slot = ioread32be(bridge->base + TSI148_LCSR_VSTAT);
slot = slot & TSI148_LCSR_VSTAT_GA_M;
} else {
slot = geoid;
}
return (int)slot;
}
static int __init tsi148_init(void)
{
return pci_register_driver(&tsi148_driver);
}
/*
* Configure CR/CSR space
*
* Access to the CR/CSR can be configured at power-up. The location of the
* CR/CSR registers in the CR/CSR address space is determined by the board's
* Auto-ID or geographic address. This function ensures that the window is
* enabled at an offset consistent with the board's geographic address.
*
* Each board has a 512kB window, with the highest 4kB being used for the
* board's registers; this means there is a fixed-length 508kB window which
* must be mapped onto PCI memory.
*/
static int tsi148_crcsr_init(struct vme_bridge *tsi148_bridge,
struct pci_dev *pdev)
{
u32 cbar, crat, vstat;
u32 crcsr_bus_high, crcsr_bus_low;
int retval;
struct tsi148_driver *bridge;
bridge = tsi148_bridge->driver_priv;
/* Allocate mem for CR/CSR image */
bridge->crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
&(bridge->crcsr_bus));
if (bridge->crcsr_kernel == NULL) {
dev_err(tsi148_bridge->parent, "Failed to allocate memory for "
"CR/CSR image\n");
return -ENOMEM;
}
memset(bridge->crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
reg_split(bridge->crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
iowrite32be(crcsr_bus_high, bridge->base + TSI148_LCSR_CROU);
iowrite32be(crcsr_bus_low, bridge->base + TSI148_LCSR_CROL);
/* Ensure that the CR/CSR is configured at the correct offset */
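/* CBAR appears to hold the slot number in bits 7:3 (hence the shifts);
* it must match the geographical address for the CR/CSR window to sit
* at the expected offset. */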
cbar = ioread32be(bridge->base + TSI148_CBAR);
cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;
vstat = tsi148_slot_get(tsi148_bridge);
if (cbar != vstat) {
cbar = vstat;
dev_info(tsi148_bridge->parent, "Setting CR/CSR offset\n");
iowrite32be(cbar<<3, bridge->base + TSI148_CBAR);
}
dev_info(tsi148_bridge->parent, "CR/CSR Offset: %d\n", cbar);
crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
if (crat & TSI148_LCSR_CRAT_EN) {
dev_info(tsi148_bridge->parent, "CR/CSR already enabled\n");
} else {
dev_info(tsi148_bridge->parent, "Enabling CR/CSR space\n");
iowrite32be(crat | TSI148_LCSR_CRAT_EN,
bridge->base + TSI148_LCSR_CRAT);
}
/* If we want flushed, error-checked writes, set up a window
* over the CR/CSR registers. We read from here to safely flush
* through VME writes.
*/
if (err_chk) {
retval = tsi148_master_set(bridge->flush_image, 1,
(vstat * 0x80000), 0x80000, VME_CRCSR, VME_SCT,
VME_D16);
if (retval)
dev_err(tsi148_bridge->parent, "Configuring flush image"
" failed\n");
}
return 0;
}
static void tsi148_crcsr_exit(struct vme_bridge *tsi148_bridge,
struct pci_dev *pdev)
{
u32 crat;
struct tsi148_driver *bridge;
bridge = tsi148_bridge->driver_priv;
/* Turn off CR/CSR space */
crat = ioread32be(bridge->base + TSI148_LCSR_CRAT);
iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
bridge->base + TSI148_LCSR_CRAT);
/* Free image */
iowrite32be(0, bridge->base + TSI148_LCSR_CROU);
iowrite32be(0, bridge->base + TSI148_LCSR_CROL);
pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, bridge->crcsr_kernel,
bridge->crcsr_bus);
}
static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int retval, i, master_num;
u32 data;
struct list_head *pos = NULL;
struct vme_bridge *tsi148_bridge;
struct tsi148_driver *tsi148_device;
struct vme_master_resource *master_image;
struct vme_slave_resource *slave_image;
struct vme_dma_resource *dma_ctrlr;
struct vme_lm_resource *lm;
/* If we want to support more than one of each bridge, we need to
* dynamically generate this so we get one per device
*/
tsi148_bridge = kzalloc(sizeof(struct vme_bridge), GFP_KERNEL);
if (tsi148_bridge == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for device "
"structure\n");
retval = -ENOMEM;
goto err_struct;
}
tsi148_device = kzalloc(sizeof(struct tsi148_driver), GFP_KERNEL);
if (tsi148_device == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for device "
"structure\n");
retval = -ENOMEM;
goto err_driver;
}
tsi148_bridge->driver_priv = tsi148_device;
/* Enable the device */
retval = pci_enable_device(pdev);
if (retval) {
dev_err(&pdev->dev, "Unable to enable device\n");
goto err_enable;
}
/* Map Registers */
retval = pci_request_regions(pdev, driver_name);
if (retval) {
dev_err(&pdev->dev, "Unable to reserve resources\n");
goto err_resource;
}
/* map registers in BAR 0 */
tsi148_device->base = ioremap_nocache(pci_resource_start(pdev, 0),
4096);
if (!tsi148_device->base) {
dev_err(&pdev->dev, "Unable to remap CRG region\n");
retval = -EIO;
goto err_remap;
}
/* Check to see if the mapping worked out */
data = ioread32(tsi148_device->base + TSI148_PCFS_ID) & 0x0000FFFF;
if (data != PCI_VENDOR_ID_TUNDRA) {
dev_err(&pdev->dev, "CRG region check failed\n");
retval = -EIO;
goto err_test;
}
/* Initialize wait queues & mutual exclusion flags */
init_waitqueue_head(&(tsi148_device->dma_queue[0]));
init_waitqueue_head(&(tsi148_device->dma_queue[1]));
init_waitqueue_head(&(tsi148_device->iack_queue));
mutex_init(&(tsi148_device->vme_int));
mutex_init(&(tsi148_device->vme_rmw));
tsi148_bridge->parent = &(pdev->dev);
strcpy(tsi148_bridge->name, driver_name);
/* Setup IRQ */
retval = tsi148_irq_init(tsi148_bridge);
if (retval != 0) {
dev_err(&pdev->dev, "Chip Initialization failed.\n");
goto err_irq;
}
/* If we are going to flush writes, we need to read from the VME bus.
* We need to do this safely, thus we read the device's own CR/CSR
* register. To do this we must set up a window in CR/CSR space and
* hence have one less master window resource available.
*/
master_num = TSI148_MAX_MASTER;
if (err_chk) {
master_num--;
tsi148_device->flush_image =
kmalloc(sizeof(struct vme_master_resource), GFP_KERNEL);
if (tsi148_device->flush_image == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for "
"flush resource structure\n");
retval = -ENOMEM;
goto err_master;
}
tsi148_device->flush_image->parent = tsi148_bridge;
spin_lock_init(&(tsi148_device->flush_image->lock));
tsi148_device->flush_image->locked = 1;
tsi148_device->flush_image->number = master_num;
tsi148_device->flush_image->address_attr = VME_A16 | VME_A24 |
VME_A32 | VME_A64;
tsi148_device->flush_image->cycle_attr = VME_SCT | VME_BLT |
VME_MBLT | VME_2eVME | VME_2eSST | VME_2eSSTB |
VME_2eSST160 | VME_2eSST267 | VME_2eSST320 | VME_SUPER |
VME_USER | VME_PROG | VME_DATA;
tsi148_device->flush_image->width_attr = VME_D16 | VME_D32;
memset(&(tsi148_device->flush_image->bus_resource), 0,
sizeof(struct resource));
tsi148_device->flush_image->kern_base = NULL;
}
/* Add master windows to list */
INIT_LIST_HEAD(&(tsi148_bridge->master_resources));
for (i = 0; i < master_num; i++) {
master_image = kmalloc(sizeof(struct vme_master_resource),
GFP_KERNEL);
if (master_image == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for "
"master resource structure\n");
retval = -ENOMEM;
goto err_master;
}
master_image->parent = tsi148_bridge;
spin_lock_init(&(master_image->lock));
master_image->locked = 0;
master_image->number = i;
master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
VME_A64;
master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
VME_PROG | VME_DATA;
master_image->width_attr = VME_D16 | VME_D32;
memset(&(master_image->bus_resource), 0,
sizeof(struct resource));
master_image->kern_base = NULL;
list_add_tail(&(master_image->list),
&(tsi148_bridge->master_resources));
}
/* Add slave windows to list */
INIT_LIST_HEAD(&(tsi148_bridge->slave_resources));
for (i = 0; i < TSI148_MAX_SLAVE; i++) {
slave_image = kmalloc(sizeof(struct vme_slave_resource),
GFP_KERNEL);
if (slave_image == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for "
"slave resource structure\n");
retval = -ENOMEM;
goto err_slave;
}
slave_image->parent = tsi148_bridge;
mutex_init(&(slave_image->mtx));
slave_image->locked = 0;
slave_image->number = i;
slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
VME_USER3 | VME_USER4;
slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
VME_PROG | VME_DATA;
list_add_tail(&(slave_image->list),
&(tsi148_bridge->slave_resources));
}
/* Add dma engines to list */
INIT_LIST_HEAD(&(tsi148_bridge->dma_resources));
for (i = 0; i < TSI148_MAX_DMA; i++) {
dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
GFP_KERNEL);
if (dma_ctrlr == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for "
"dma resource structure\n");
retval = -ENOMEM;
goto err_dma;
}
dma_ctrlr->parent = tsi148_bridge;
mutex_init(&(dma_ctrlr->mtx));
dma_ctrlr->locked = 0;
dma_ctrlr->number = i;
dma_ctrlr->route_attr = VME_DMA_VME_TO_MEM |
VME_DMA_MEM_TO_VME | VME_DMA_VME_TO_VME |
VME_DMA_MEM_TO_MEM | VME_DMA_PATTERN_TO_VME |
VME_DMA_PATTERN_TO_MEM;
INIT_LIST_HEAD(&(dma_ctrlr->pending));
INIT_LIST_HEAD(&(dma_ctrlr->running));
list_add_tail(&(dma_ctrlr->list),
&(tsi148_bridge->dma_resources));
}
/* Add location monitor to list */
INIT_LIST_HEAD(&(tsi148_bridge->lm_resources));
lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
if (lm == NULL) {
dev_err(&pdev->dev, "Failed to allocate memory for "
"location monitor resource structure\n");
retval = -ENOMEM;
goto err_lm;
}
lm->parent = tsi148_bridge;
mutex_init(&(lm->mtx));
lm->locked = 0;
lm->number = 1;
lm->monitors = 4;
list_add_tail(&(lm->list), &(tsi148_bridge->lm_resources));
tsi148_bridge->slave_get = tsi148_slave_get;
tsi148_bridge->slave_set = tsi148_slave_set;
tsi148_bridge->master_get = tsi148_master_get;
tsi148_bridge->master_set = tsi148_master_set;
tsi148_bridge->master_read = tsi148_master_read;
tsi148_bridge->master_write = tsi148_master_write;
tsi148_bridge->master_rmw = tsi148_master_rmw;
tsi148_bridge->dma_list_add = tsi148_dma_list_add;
tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
tsi148_bridge->irq_set = tsi148_irq_set;
tsi148_bridge->irq_generate = tsi148_irq_generate;
tsi148_bridge->lm_set = tsi148_lm_set;
tsi148_bridge->lm_get = tsi148_lm_get;
tsi148_bridge->lm_attach = tsi148_lm_attach;
tsi148_bridge->lm_detach = tsi148_lm_detach;
tsi148_bridge->slot_get = tsi148_slot_get;
data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
dev_info(&pdev->dev, "Board is%s the VME system controller\n",
(data & TSI148_LCSR_VSTAT_SCONS) ? "" : " not");
if (!geoid)
dev_info(&pdev->dev, "VME geographical address is %d\n",
data & TSI148_LCSR_VSTAT_GA_M);
else
dev_info(&pdev->dev, "VME geographical address is set to %d\n",
geoid);
dev_info(&pdev->dev, "VME Write and flush and error check is %s\n",
err_chk ? "enabled" : "disabled");
retval = tsi148_crcsr_init(tsi148_bridge, pdev);
if (retval) {
dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
goto err_crcsr;
}
retval = vme_register_bridge(tsi148_bridge);
if (retval != 0) {
dev_err(&pdev->dev, "Chip Registration failed.\n");
goto err_reg;
}
pci_set_drvdata(pdev, tsi148_bridge);
/* Clear VME bus "board fail", and "power-up reset" lines */
data = ioread32be(tsi148_device->base + TSI148_LCSR_VSTAT);
data &= ~TSI148_LCSR_VSTAT_BRDFL;
data |= TSI148_LCSR_VSTAT_CPURST;
iowrite32be(data, tsi148_device->base + TSI148_LCSR_VSTAT);
return 0;
vme_unregister_bridge(tsi148_bridge);
err_reg:
tsi148_crcsr_exit(tsi148_bridge, pdev);
err_crcsr:
err_lm:
/* resources are stored in a linked list */
while (!list_empty(&(tsi148_bridge->lm_resources))) {
pos = tsi148_bridge->lm_resources.next;
lm = list_entry(pos, struct vme_lm_resource, list);
list_del(pos);
kfree(lm);
}
err_dma:
/* resources are stored in a linked list */
while (!list_empty(&(tsi148_bridge->dma_resources))) {
pos = tsi148_bridge->dma_resources.next;
dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
list_del(pos);
kfree(dma_ctrlr);
}
err_slave:
/* resources are stored in a linked list */
while (!list_empty(&(tsi148_bridge->slave_resources))) {
pos = tsi148_bridge->slave_resources.next;
slave_image = list_entry(pos, struct vme_slave_resource, list);
list_del(pos);
kfree(slave_image);
}
err_master:
/* resources are stored in a linked list */
while (!list_empty(&(tsi148_bridge->master_resources))) {
pos = tsi148_bridge->master_resources.next;
master_image = list_entry(pos, struct vme_master_resource,
list);
list_del(pos);
kfree(master_image);
}
tsi148_irq_exit(tsi148_device, pdev);
err_irq:
err_test:
iounmap(tsi148_device->base);
err_remap:
pci_release_regions(pdev);
err_resource:
pci_disable_device(pdev);
err_enable:
kfree(tsi148_device);
err_driver:
kfree(tsi148_bridge);
err_struct:
return retval;
}
static void tsi148_remove(struct pci_dev *pdev)
{
struct list_head *pos = NULL;
struct vme_master_resource *master_image;
struct vme_slave_resource *slave_image;
struct vme_dma_resource *dma_ctrlr;
int i;
struct tsi148_driver *bridge;
struct vme_bridge *tsi148_bridge = pci_get_drvdata(pdev);
bridge = tsi148_bridge->driver_priv;
dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
/*
* Shutdown all inbound and outbound windows.
*/
for (i = 0; i < 8; i++) {
iowrite32be(0, bridge->base + TSI148_LCSR_IT[i] +
TSI148_LCSR_OFFSET_ITAT);
iowrite32be(0, bridge->base + TSI148_LCSR_OT[i] +
TSI148_LCSR_OFFSET_OTAT);
}
/*
* Shutdown Location monitor.
*/
iowrite32be(0, bridge->base + TSI148_LCSR_LMAT);
/*
* Shutdown CRG map.
*/
iowrite32be(0, bridge->base + TSI148_LCSR_CSRAT);
/*
* Clear error status.
*/
iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_EDPAT);
iowrite32be(0xFFFFFFFF, bridge->base + TSI148_LCSR_VEAT);
iowrite32be(0x07000700, bridge->base + TSI148_LCSR_PSTAT);
/*
* Remove VIRQ interrupt (if any)
*/
if (ioread32be(bridge->base + TSI148_LCSR_VICR) & 0x800)
iowrite32be(0x8000, bridge->base + TSI148_LCSR_VICR);
/*
* Map all Interrupts to PCI INTA
*/
iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM1);
iowrite32be(0x0, bridge->base + TSI148_LCSR_INTM2);
tsi148_irq_exit(bridge, pdev);
vme_unregister_bridge(tsi148_bridge);
tsi148_crcsr_exit(tsi148_bridge, pdev);
/* resources are stored in a linked list */
while (!list_empty(&(tsi148_bridge->dma_resources))) {
pos = tsi148_bridge->dma_resources.next;
dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
list_del(pos);
kfree(dma_ctrlr);
}
/* resources are stored in a linked list */
while (!list_empty(&(tsi148_bridge->slave_resources))) {
pos = tsi148_bridge->slave_resources.next;
slave_image = list_entry(pos, struct vme_slave_resource, list);
list_del(pos);
kfree(slave_image);
}
/* resources are stored in a linked list */
while (!list_empty(&(tsi148_bridge->master_resources))) {
pos = tsi148_bridge->master_resources.next;
master_image = list_entry(pos, struct vme_master_resource,
list);
list_del(pos);
kfree(master_image);
}
iounmap(bridge->base);
pci_release_regions(pdev);
pci_disable_device(pdev);
kfree(tsi148_bridge->driver_priv);
kfree(tsi148_bridge);
}
static void __exit tsi148_exit(void)
{
pci_unregister_driver(&tsi148_driver);
}
MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
module_param(err_chk, bool, 0);
MODULE_PARM_DESC(geoid, "Override geographical addressing");
module_param(geoid, int, 0);
MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
MODULE_LICENSE("GPL");
module_init(tsi148_init);
module_exit(tsi148_exit);
|
gpl-2.0
|
playfulgod/Kernel_AS85-LG-Ignite
|
fs/xfs/xfs_buf_item.c
|
761
|
31868
|
/*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"
kmem_zone_t *xfs_buf_item_zone;
#ifdef XFS_TRANS_DEBUG
/*
* This function uses an alternate strategy for tracking the bytes
* that the user requests to be logged. This can then be used
* in conjunction with the bli_orig array in the buf log item to
* catch bugs in our callers' code.
*
* We also double check the bits set in xfs_buf_item_log using a
* simple algorithm to check that every byte is accounted for.
*/
STATIC void
xfs_buf_item_log_debug(
xfs_buf_log_item_t *bip,
uint first,
uint last)
{
uint x;
uint byte;
uint nbytes;
uint chunk_num;
uint word_num;
uint bit_num;
uint bit_set;
uint *wordp;
ASSERT(bip->bli_logged != NULL);
byte = first;
nbytes = last - first + 1;
bfset(bip->bli_logged, first, nbytes);
for (x = 0; x < nbytes; x++) {
chunk_num = byte >> XFS_BLF_SHIFT;
word_num = chunk_num >> BIT_TO_WORD_SHIFT;
bit_num = chunk_num & (NBWORD - 1);
wordp = &(bip->bli_format.blf_data_map[word_num]);
bit_set = *wordp & (1 << bit_num);
ASSERT(bit_set);
byte++;
}
}
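/*
 * Editor's note: a minimal, self-contained sketch (not driver code) of the
 * byte -> chunk -> bitmap-word mapping checked above. It assumes the usual
 * constants: 128-byte chunks (XFS_BLF_SHIFT == 7) and 32-bit bitmap words
 * (NBWORD == 32, BIT_TO_WORD_SHIFT == 5); all names here are illustrative.
 */
#if 0
#include <stdio.h>

int main(void)
{
unsigned int byte = 300; /* arbitrary byte offset into the buffer */
unsigned int chunk_num = byte >> 7; /* 300 / 128 -> chunk 2 */
unsigned int word_num = chunk_num >> 5; /* chunk 2 -> bitmap word 0 */
unsigned int bit_num = chunk_num & 31; /* -> bit 2 of that word */

printf("byte %u -> chunk %u, word %u, bit %u\n",
byte, chunk_num, word_num, bit_num);
return 0;
}
#endif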
/*
* This function is called when we flush something into a buffer without
* logging it. This happens for things like inodes which are logged
* separately from the buffer.
*/
void
xfs_buf_item_flush_log_debug(
xfs_buf_t *bp,
uint first,
uint last)
{
xfs_buf_log_item_t *bip;
uint nbytes;
bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
if ((bip == NULL) || (bip->bli_item.li_type != XFS_LI_BUF)) {
return;
}
ASSERT(bip->bli_logged != NULL);
nbytes = last - first + 1;
bfset(bip->bli_logged, first, nbytes);
}
/*
* This function is called to verify that our callers have logged
* all the bytes that they changed.
*
* It does this by comparing the original copy of the buffer stored in
* the buf log item's bli_orig array to the current copy of the buffer
* and ensuring that all bytes which mismatch are set in the bli_logged
* array of the buf log item.
*/
STATIC void
xfs_buf_item_log_check(
xfs_buf_log_item_t *bip)
{
char *orig;
char *buffer;
int x;
xfs_buf_t *bp;
ASSERT(bip->bli_orig != NULL);
ASSERT(bip->bli_logged != NULL);
bp = bip->bli_buf;
ASSERT(XFS_BUF_COUNT(bp) > 0);
ASSERT(XFS_BUF_PTR(bp) != NULL);
orig = bip->bli_orig;
buffer = XFS_BUF_PTR(bp);
for (x = 0; x < XFS_BUF_COUNT(bp); x++) {
if (orig[x] != buffer[x] && !btst(bip->bli_logged, x))
cmn_err(CE_PANIC,
"xfs_buf_item_log_check bip %x buffer %x orig %x index %d",
bip, bp, orig, x);
}
}
#else
#define xfs_buf_item_log_debug(x,y,z)
#define xfs_buf_item_log_check(x)
#endif
STATIC void xfs_buf_error_relse(xfs_buf_t *bp);
STATIC void xfs_buf_do_callbacks(xfs_buf_t *bp, xfs_log_item_t *lip);
/*
* This returns the number of log iovecs needed to log the
* given buf log item.
*
* It calculates this as 1 iovec for the buf log format structure
* and 1 for each stretch of non-contiguous chunks to be logged.
* Contiguous chunks are logged in a single iovec.
*
* If the XFS_BLI_STALE flag has been set, then log nothing.
*/
STATIC uint
xfs_buf_item_size(
xfs_buf_log_item_t *bip)
{
uint nvecs;
int next_bit;
int last_bit;
xfs_buf_t *bp;
ASSERT(atomic_read(&bip->bli_refcount) > 0);
if (bip->bli_flags & XFS_BLI_STALE) {
/*
* The buffer is stale, so all we need to log
* is the buf log format structure with the
* cancel flag in it.
*/
trace_xfs_buf_item_size_stale(bip);
ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
return 1;
}
bp = bip->bli_buf;
ASSERT(bip->bli_flags & XFS_BLI_LOGGED);
nvecs = 1;
last_bit = xfs_next_bit(bip->bli_format.blf_data_map,
bip->bli_format.blf_map_size, 0);
ASSERT(last_bit != -1);
nvecs++;
while (last_bit != -1) {
/*
* This takes the bit number to start looking from and
* returns the next set bit from there. It returns -1
* if there are no more bits set or the start bit is
* beyond the end of the bitmap.
*/
next_bit = xfs_next_bit(bip->bli_format.blf_data_map,
bip->bli_format.blf_map_size,
last_bit + 1);
/*
* If we run out of bits, leave the loop,
* else if we find a new set of bits bump the number of vecs,
* else keep scanning the current set of bits.
*/
if (next_bit == -1) {
last_bit = -1;
} else if (next_bit != last_bit + 1) {
last_bit = next_bit;
nvecs++;
} else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
(xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
XFS_BLF_CHUNK)) {
last_bit = next_bit;
nvecs++;
} else {
last_bit++;
}
}
trace_xfs_buf_item_size(bip);
return nvecs;
}
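/*
 * Editor's note: a minimal, self-contained sketch (not driver code) of the
 * run counting above: one iovec for the log format structure plus one per
 * contiguous run of set bits in the dirty-chunk bitmap. It ignores the
 * extra split the real code makes when the underlying buffer memory is
 * discontiguous; the bitmap value is made up for illustration.
 */
#if 0
#include <stdio.h>

int main(void)
{
unsigned int map = 0x0000c107; /* set bits 0-2, 8, 14-15: three runs */
int nvecs = 1; /* the buf log format structure */
int prev = 0;
int bit;

for (bit = 0; bit < 32; bit++) {
int set = (map >> bit) & 1;
if (set && !prev)
nvecs++; /* a new contiguous run starts */
prev = set;
}
printf("nvecs = %d\n", nvecs); /* prints 4 */
return 0;
}
#endif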
/*
* This is called to fill in the vector of log iovecs for the
* given log buf item. It fills the first entry with a buf log
* format structure, and the rest point to contiguous chunks
* within the buffer.
*/
STATIC void
xfs_buf_item_format(
xfs_buf_log_item_t *bip,
xfs_log_iovec_t *log_vector)
{
uint base_size;
uint nvecs;
xfs_log_iovec_t *vecp;
xfs_buf_t *bp;
int first_bit;
int last_bit;
int next_bit;
uint nbits;
uint buffer_offset;
ASSERT(atomic_read(&bip->bli_refcount) > 0);
ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
(bip->bli_flags & XFS_BLI_STALE));
bp = bip->bli_buf;
vecp = log_vector;
/*
* The size of the base structure is the size of the
* declared structure plus the space for the extra words
* of the bitmap. We subtract one from the map size, because
* the first element of the bitmap is accounted for in the
* size of the base structure.
*/
base_size =
(uint)(sizeof(xfs_buf_log_format_t) +
((bip->bli_format.blf_map_size - 1) * sizeof(uint)));
vecp->i_addr = (xfs_caddr_t)&bip->bli_format;
vecp->i_len = base_size;
vecp->i_type = XLOG_REG_TYPE_BFORMAT;
vecp++;
nvecs = 1;
/*
* If it is an inode buffer, transfer the in-memory state to the
* format flags and clear the in-memory state. We do not transfer
* this state if the inode buffer allocation has not yet been committed
* to the log as setting the XFS_BLI_INODE_BUF flag will prevent
* correct replay of the inode allocation.
*/
if (bip->bli_flags & XFS_BLI_INODE_BUF) {
if (!((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
xfs_log_item_in_current_chkpt(&bip->bli_item)))
bip->bli_format.blf_flags |= XFS_BLF_INODE_BUF;
bip->bli_flags &= ~XFS_BLI_INODE_BUF;
}
if (bip->bli_flags & XFS_BLI_STALE) {
/*
* The buffer is stale, so all we need to log
* is the buf log format structure with the
* cancel flag in it.
*/
trace_xfs_buf_item_format_stale(bip);
ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
bip->bli_format.blf_size = nvecs;
return;
}
/*
* Fill in an iovec for each set of contiguous chunks.
*/
first_bit = xfs_next_bit(bip->bli_format.blf_data_map,
bip->bli_format.blf_map_size, 0);
ASSERT(first_bit != -1);
last_bit = first_bit;
nbits = 1;
for (;;) {
/*
* This takes the bit number to start looking from and
* returns the next set bit from there. It returns -1
* if there are no more bits set or the start bit is
* beyond the end of the bitmap.
*/
next_bit = xfs_next_bit(bip->bli_format.blf_data_map,
bip->bli_format.blf_map_size,
(uint)last_bit + 1);
/*
* If we run out of bits fill in the last iovec and get
* out of the loop.
* Else if we start a new set of bits then fill in the
* iovec for the series we were looking at and start
* counting the bits in the new one.
* Else we're still in the same set of bits so just
* keep counting and scanning.
*/
if (next_bit == -1) {
buffer_offset = first_bit * XFS_BLF_CHUNK;
vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
vecp->i_len = nbits * XFS_BLF_CHUNK;
vecp->i_type = XLOG_REG_TYPE_BCHUNK;
nvecs++;
break;
} else if (next_bit != last_bit + 1) {
buffer_offset = first_bit * XFS_BLF_CHUNK;
vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
vecp->i_len = nbits * XFS_BLF_CHUNK;
vecp->i_type = XLOG_REG_TYPE_BCHUNK;
nvecs++;
vecp++;
first_bit = next_bit;
last_bit = next_bit;
nbits = 1;
} else if (xfs_buf_offset(bp, next_bit << XFS_BLF_SHIFT) !=
(xfs_buf_offset(bp, last_bit << XFS_BLF_SHIFT) +
XFS_BLF_CHUNK)) {
buffer_offset = first_bit * XFS_BLF_CHUNK;
vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
vecp->i_len = nbits * XFS_BLF_CHUNK;
vecp->i_type = XLOG_REG_TYPE_BCHUNK;
/*
 * You would think we need to bump the nvecs here too, but we do not;
 * this number is used by recovery, and it gets confused by the
 * boundary split here:
 * nvecs++;
 */
vecp++;
first_bit = next_bit;
last_bit = next_bit;
nbits = 1;
} else {
last_bit++;
nbits++;
}
}
bip->bli_format.blf_size = nvecs;
/*
* Check to make sure everything is consistent.
*/
trace_xfs_buf_item_format(bip);
xfs_buf_item_log_check(bip);
}
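/*
 * Editor's note: a worked example (not driver code) of the base_size
 * arithmetic above. Assuming a 4-byte uint and blf_map_size == 4, the
 * declared structure already contains the first bitmap word, so only the
 * remaining three words are added:
 *
 * base_size = sizeof(xfs_buf_log_format_t) + (4 - 1) * sizeof(uint)
 * = sizeof(xfs_buf_log_format_t) + 12 bytes
 */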
/*
* This is called to pin the buffer associated with the buf log item in memory
* so it cannot be written out. Simply call bpin() on the buffer to do this.
*
* We also always take a reference to the buffer log item here so that the bli
* is held while the item is pinned in memory. This means that we can
* unconditionally drop the reference count a transaction holds when the
* transaction is completed.
*/
STATIC void
xfs_buf_item_pin(
xfs_buf_log_item_t *bip)
{
xfs_buf_t *bp;
bp = bip->bli_buf;
ASSERT(XFS_BUF_ISBUSY(bp));
ASSERT(atomic_read(&bip->bli_refcount) > 0);
ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
(bip->bli_flags & XFS_BLI_STALE));
atomic_inc(&bip->bli_refcount);
trace_xfs_buf_item_pin(bip);
xfs_bpin(bp);
}
/*
* This is called to unpin the buffer associated with the buf log
* item which was previously pinned with a call to xfs_buf_item_pin().
* Just call bunpin() on the buffer to do this.
*
* Also drop the reference to the buf item for the current transaction.
* If the XFS_BLI_STALE flag is set and we are the last reference,
* then free up the buf log item and unlock the buffer.
*/
STATIC void
xfs_buf_item_unpin(
xfs_buf_log_item_t *bip)
{
struct xfs_ail *ailp;
xfs_buf_t *bp;
int freed;
int stale = bip->bli_flags & XFS_BLI_STALE;
bp = bip->bli_buf;
ASSERT(bp != NULL);
ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
trace_xfs_buf_item_unpin(bip);
freed = atomic_dec_and_test(&bip->bli_refcount);
ailp = bip->bli_item.li_ailp;
xfs_bunpin(bp);
if (freed && stale) {
ASSERT(bip->bli_flags & XFS_BLI_STALE);
ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
ASSERT(XFS_BUF_ISSTALE(bp));
ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
trace_xfs_buf_item_unpin_stale(bip);
/*
* If we get called here because of an IO error, we may
* or may not have the item on the AIL. xfs_trans_ail_delete()
* will take care of that situation.
* xfs_trans_ail_delete() drops the AIL lock.
*/
if (bip->bli_flags & XFS_BLI_STALE_INODE) {
xfs_buf_do_callbacks(bp, (xfs_log_item_t *)bip);
XFS_BUF_SET_FSPRIVATE(bp, NULL);
XFS_BUF_CLR_IODONE_FUNC(bp);
} else {
spin_lock(&ailp->xa_lock);
xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip);
xfs_buf_item_relse(bp);
ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL);
}
xfs_buf_relse(bp);
}
}
/*
* This is called from uncommit in the forced-shutdown path.
* We need to check to see if the reference count on the log item
* is going to drop to zero. If so, unpin will free the log item
* so we need to free the item's descriptor (that points to the item)
* in the transaction.
*/
STATIC void
xfs_buf_item_unpin_remove(
xfs_buf_log_item_t *bip,
xfs_trans_t *tp)
{
/* will xfs_buf_item_unpin() call xfs_buf_item_relse()? */
if ((atomic_read(&bip->bli_refcount) == 1) &&
(bip->bli_flags & XFS_BLI_STALE)) {
/*
* yes -- We can safely do some work here and then call
* buf_item_unpin to do the rest because we are
* holding the buffer locked so no one else will be
* able to bump up the refcount. We have to remove the
* log item from the transaction as we are about to release
* our reference to the buffer. If we don't, the unlock that
* occurs later in the xfs_trans_uncommit() will try to
* reference the buffer which we no longer have a hold on.
*/
struct xfs_log_item_desc *lidp;
ASSERT(XFS_BUF_VALUSEMA(bip->bli_buf) <= 0);
trace_xfs_buf_item_unpin_stale(bip);
lidp = xfs_trans_find_item(tp, (xfs_log_item_t *)bip);
xfs_trans_free_item(tp, lidp);
/*
* Since the transaction no longer refers to the buffer, the
* buffer should no longer refer to the transaction.
*/
XFS_BUF_SET_FSPRIVATE2(bip->bli_buf, NULL);
}
xfs_buf_item_unpin(bip);
}
/*
* This is called to attempt to lock the buffer associated with this
* buf log item. Don't sleep on the buffer lock. If we can't get
* the lock right away, return 0. If we can get the lock, take a
* reference to the buffer. If this is a delayed write buffer that
* needs AIL help to be written back, invoke the pushbuf routine
* rather than the normal success path.
*/
STATIC uint
xfs_buf_item_trylock(
xfs_buf_log_item_t *bip)
{
xfs_buf_t *bp;
bp = bip->bli_buf;
if (XFS_BUF_ISPINNED(bp))
return XFS_ITEM_PINNED;
if (!XFS_BUF_CPSEMA(bp))
return XFS_ITEM_LOCKED;
/* take a reference to the buffer. */
XFS_BUF_HOLD(bp);
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
trace_xfs_buf_item_trylock(bip);
if (XFS_BUF_ISDELAYWRITE(bp))
return XFS_ITEM_PUSHBUF;
return XFS_ITEM_SUCCESS;
}
/*
* Release the buffer associated with the buf log item. If there is no dirty
* logged data associated with the buffer recorded in the buf log item, then
* free the buf log item and remove the reference to it in the buffer.
*
* This call ignores the recursion count. It is only called when the buffer
* should REALLY be unlocked, regardless of the recursion count.
*
* We unconditionally drop the transaction's reference to the log item. If the
* item was logged, then another reference was taken when it was pinned, so we
* can safely drop the transaction reference now. This also allows us to avoid
* potential races with the unpin code freeing the bli by not referencing the
* bli after we've dropped the reference count.
*
* If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
* if necessary but do not unlock the buffer. This is for support of
* xfs_trans_bhold(). Make sure the XFS_BLI_HOLD field is cleared if we don't
* free the item.
*/
STATIC void
xfs_buf_item_unlock(
xfs_buf_log_item_t *bip)
{
int aborted;
xfs_buf_t *bp;
uint hold;
bp = bip->bli_buf;
/* Clear the buffer's association with this transaction. */
XFS_BUF_SET_FSPRIVATE2(bp, NULL);
/*
* If this is a transaction abort, don't return early. Instead, allow
* the brelse to happen. Normally it would be done for stale
* (cancelled) buffers at unpin time, but we'll never go through the
* pin/unpin cycle if we abort inside commit.
*/
aborted = (bip->bli_item.li_flags & XFS_LI_ABORTED) != 0;
/*
* Before possibly freeing the buf item, determine if we should
* release the buffer at the end of this routine.
*/
hold = bip->bli_flags & XFS_BLI_HOLD;
/* Clear the per transaction state. */
bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD);
/*
* If the buf item is marked stale, then don't do anything. We'll
* unlock the buffer and free the buf item when the buffer is unpinned
* for the last time.
*/
if (bip->bli_flags & XFS_BLI_STALE) {
trace_xfs_buf_item_unlock_stale(bip);
ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
if (!aborted) {
atomic_dec(&bip->bli_refcount);
return;
}
}
trace_xfs_buf_item_unlock(bip);
/*
* If the buf item isn't tracking any data, free it, otherwise drop the
* reference we hold to it.
*/
if (xfs_bitmap_empty(bip->bli_format.blf_data_map,
bip->bli_format.blf_map_size))
xfs_buf_item_relse(bp);
else
atomic_dec(&bip->bli_refcount);
if (!hold)
xfs_buf_relse(bp);
}
/*
* This is called to find out where the oldest active copy of the
* buf log item in the on disk log resides now that the last log
* write of it completed at the given lsn.
* We always re-log all the dirty data in a buffer, so usually the
* latest copy in the on disk log is the only one that matters. For
* those cases we simply return the given lsn.
*
* The one exception to this is for buffers full of newly allocated
* inodes. These buffers are only relogged with the XFS_BLI_INODE_BUF
* flag set, indicating that only the di_next_unlinked fields from the
* inodes in the buffers will be replayed during recovery. If the
* original newly allocated inode images have not yet been flushed
* when the buffer is so relogged, then we need to make sure that we
* keep the old images in the 'active' portion of the log. We do this
* by returning the original lsn of that transaction here rather than
* the current one.
*/
STATIC xfs_lsn_t
xfs_buf_item_committed(
xfs_buf_log_item_t *bip,
xfs_lsn_t lsn)
{
trace_xfs_buf_item_committed(bip);
if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
(bip->bli_item.li_lsn != 0)) {
return bip->bli_item.li_lsn;
}
return (lsn);
}
/*
* The buffer is locked, but is not a delayed write buffer. This happens
* if we race with IO completion and hence we don't want to try to write it
* again. Just release the buffer.
*/
STATIC void
xfs_buf_item_push(
xfs_buf_log_item_t *bip)
{
xfs_buf_t *bp;
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
trace_xfs_buf_item_push(bip);
bp = bip->bli_buf;
ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
xfs_buf_relse(bp);
}
/*
* The buffer is locked and is a delayed write buffer. Promote the buffer
* in the delayed write queue as the caller knows that they must invoke
* the xfsbufd to get this buffer written. We have to unlock the buffer
* to allow the xfsbufd to write it, too.
*/
STATIC void
xfs_buf_item_pushbuf(
xfs_buf_log_item_t *bip)
{
xfs_buf_t *bp;
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
trace_xfs_buf_item_pushbuf(bip);
bp = bip->bli_buf;
ASSERT(XFS_BUF_ISDELAYWRITE(bp));
xfs_buf_delwri_promote(bp);
xfs_buf_relse(bp);
}
/* ARGSUSED */
STATIC void
xfs_buf_item_committing(xfs_buf_log_item_t *bip, xfs_lsn_t commit_lsn)
{
}
/*
* This is the ops vector shared by all buf log items.
*/
static struct xfs_item_ops xfs_buf_item_ops = {
.iop_size = (uint(*)(xfs_log_item_t*))xfs_buf_item_size,
.iop_format = (void(*)(xfs_log_item_t*, xfs_log_iovec_t*))
xfs_buf_item_format,
.iop_pin = (void(*)(xfs_log_item_t*))xfs_buf_item_pin,
.iop_unpin = (void(*)(xfs_log_item_t*))xfs_buf_item_unpin,
.iop_unpin_remove = (void(*)(xfs_log_item_t*, xfs_trans_t *))
xfs_buf_item_unpin_remove,
.iop_trylock = (uint(*)(xfs_log_item_t*))xfs_buf_item_trylock,
.iop_unlock = (void(*)(xfs_log_item_t*))xfs_buf_item_unlock,
.iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
xfs_buf_item_committed,
.iop_push = (void(*)(xfs_log_item_t*))xfs_buf_item_push,
.iop_pushbuf = (void(*)(xfs_log_item_t*))xfs_buf_item_pushbuf,
.iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
xfs_buf_item_committing
};
/*
* Allocate a new buf log item to go with the given buffer.
* Set the buffer's b_fsprivate field to point to the new
* buf log item. If there are other item's attached to the
* buffer (see xfs_buf_attach_iodone() below), then put the
* buf log item at the front.
*/
void
xfs_buf_item_init(
xfs_buf_t *bp,
xfs_mount_t *mp)
{
xfs_log_item_t *lip;
xfs_buf_log_item_t *bip;
int chunks;
int map_size;
/*
* Check to see if there is already a buf log item for
* this buffer. If there is, it is guaranteed to be
* the first. If we do already have one, there is
* nothing to do here so return.
*/
if (bp->b_mount != mp)
bp->b_mount = mp;
XFS_BUF_SET_BDSTRAT_FUNC(bp, xfs_bdstrat_cb);
if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
if (lip->li_type == XFS_LI_BUF) {
return;
}
}
/*
* chunks is the number of XFS_BLF_CHUNK size pieces
* the buffer can be divided into. Make sure not to
* truncate any pieces. map_size is the size of the
* bitmap needed to describe the chunks of the buffer.
*/
chunks = (int)((XFS_BUF_COUNT(bp) + (XFS_BLF_CHUNK - 1)) >> XFS_BLF_SHIFT);
map_size = (int)((chunks + NBWORD) >> BIT_TO_WORD_SHIFT);
bip = (xfs_buf_log_item_t*)kmem_zone_zalloc(xfs_buf_item_zone,
KM_SLEEP);
xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
bip->bli_buf = bp;
xfs_buf_hold(bp);
bip->bli_format.blf_type = XFS_LI_BUF;
bip->bli_format.blf_blkno = (__int64_t)XFS_BUF_ADDR(bp);
bip->bli_format.blf_len = (ushort)BTOBB(XFS_BUF_COUNT(bp));
bip->bli_format.blf_map_size = map_size;
#ifdef XFS_TRANS_DEBUG
/*
* Allocate the arrays for tracking what needs to be logged
* and what our callers request to be logged. bli_orig
* holds a copy of the original, clean buffer for comparison
* against, and bli_logged keeps a 1 bit flag per byte in
* the buffer to indicate which bytes the callers have asked
* to have logged.
*/
bip->bli_orig = (char *)kmem_alloc(XFS_BUF_COUNT(bp), KM_SLEEP);
memcpy(bip->bli_orig, XFS_BUF_PTR(bp), XFS_BUF_COUNT(bp));
bip->bli_logged = (char *)kmem_zalloc(XFS_BUF_COUNT(bp) / NBBY, KM_SLEEP);
#endif
/*
* Put the buf item into the list of items attached to the
* buffer at the front.
*/
if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
bip->bli_item.li_bio_list =
XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
}
XFS_BUF_SET_FSPRIVATE(bp, bip);
}
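/*
 * Editor's note: a worked example (not driver code) of the chunk and map
 * size arithmetic above, assuming XFS_BLF_CHUNK == 128 (XFS_BLF_SHIFT == 7),
 * NBWORD == 32 and BIT_TO_WORD_SHIFT == 5. For a 4096-byte buffer:
 *
 * chunks = (4096 + 127) >> 7 = 32 (one bitmap bit per 128-byte chunk)
 * map_size = (32 + 32) >> 5 = 2 (bitmap words; adding NBWORD rounds
 * up and leaves a spare word at exact multiples)
 */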
/*
* Mark bytes first through last inclusive as dirty in the buf
* item's bitmap.
*/
void
xfs_buf_item_log(
xfs_buf_log_item_t *bip,
uint first,
uint last)
{
uint first_bit;
uint last_bit;
uint bits_to_set;
uint bits_set;
uint word_num;
uint *wordp;
uint bit;
uint end_bit;
uint mask;
/*
* Mark the item as having some dirty data for
* quick reference in xfs_buf_item_dirty.
*/
bip->bli_flags |= XFS_BLI_DIRTY;
/*
* Convert byte offsets to bit numbers.
*/
first_bit = first >> XFS_BLF_SHIFT;
last_bit = last >> XFS_BLF_SHIFT;
/*
* Calculate the total number of bits to be set.
*/
bits_to_set = last_bit - first_bit + 1;
/*
* Get a pointer to the first word in the bitmap
* to set a bit in.
*/
word_num = first_bit >> BIT_TO_WORD_SHIFT;
wordp = &(bip->bli_format.blf_data_map[word_num]);
/*
* Calculate the starting bit in the first word.
*/
bit = first_bit & (uint)(NBWORD - 1);
/*
* First set any bits in the first word of our range.
* If it starts at bit 0 of the word, it will be
* set below rather than here. That is what the variable
* bit tells us. The variable bits_set tracks the number
* of bits that have been set so far. End_bit is the number
* of the last bit to be set in this word plus one.
*/
if (bit) {
end_bit = MIN(bit + bits_to_set, (uint)NBWORD);
mask = ((1 << (end_bit - bit)) - 1) << bit;
*wordp |= mask;
wordp++;
bits_set = end_bit - bit;
} else {
bits_set = 0;
}
/*
* Now set bits a whole word at a time that are between
* first_bit and last_bit.
*/
while ((bits_to_set - bits_set) >= NBWORD) {
*wordp |= 0xffffffff;
bits_set += NBWORD;
wordp++;
}
/*
* Finally, set any bits left to be set in one last partial word.
*/
end_bit = bits_to_set - bits_set;
if (end_bit) {
mask = (1 << end_bit) - 1;
*wordp |= mask;
}
xfs_buf_item_log_debug(bip, first, last);
}
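/*
 * Editor's note: a minimal, self-contained sketch (not driver code) of the
 * three-phase range set above: a partial leading word, whole words, then a
 * partial trailing word. The word size and sample range are illustrative.
 */
#if 0
#include <stdio.h>

static void set_bit_range(unsigned int *map, unsigned int first_bit,
unsigned int last_bit)
{
unsigned int bits_to_set = last_bit - first_bit + 1;
unsigned int *wordp = &map[first_bit / 32];
unsigned int bit = first_bit % 32;
unsigned int bits_set = 0, end_bit, mask;

if (bit) { /* leading partial word */
end_bit = bit + bits_to_set;
if (end_bit > 32)
end_bit = 32;
mask = ((1u << (end_bit - bit)) - 1) << bit;
*wordp++ |= mask;
bits_set = end_bit - bit;
}
while (bits_to_set - bits_set >= 32) { /* whole words */
*wordp++ |= 0xffffffffu;
bits_set += 32;
}
end_bit = bits_to_set - bits_set; /* trailing partial word */
if (end_bit)
*wordp |= (1u << end_bit) - 1;
}

int main(void)
{
unsigned int map[3] = { 0, 0, 0 };

set_bit_range(map, 30, 70); /* exercises all three phases */
printf("%08x %08x %08x\n", map[0], map[1], map[2]);
/* prints c0000000 ffffffff 0000007f */
return 0;
}
#endif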
/*
* Return 1 if the buffer has some data that has been logged (at any
* point, not just the current transaction) and 0 if not.
*/
uint
xfs_buf_item_dirty(
xfs_buf_log_item_t *bip)
{
return (bip->bli_flags & XFS_BLI_DIRTY);
}
STATIC void
xfs_buf_item_free(
xfs_buf_log_item_t *bip)
{
#ifdef XFS_TRANS_DEBUG
kmem_free(bip->bli_orig);
kmem_free(bip->bli_logged);
#endif /* XFS_TRANS_DEBUG */
kmem_zone_free(xfs_buf_item_zone, bip);
}
/*
* This is called when the buf log item is no longer needed. It should
* free the buf log item associated with the given buffer and clear
* the buffer's pointer to the buf log item. If there are no more
* items in the list, clear the b_iodone field of the buffer (see
* xfs_buf_attach_iodone() below).
*/
void
xfs_buf_item_relse(
xfs_buf_t *bp)
{
xfs_buf_log_item_t *bip;
trace_xfs_buf_item_relse(bp, _RET_IP_);
bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
XFS_BUF_SET_FSPRIVATE(bp, bip->bli_item.li_bio_list);
if ((XFS_BUF_FSPRIVATE(bp, void *) == NULL) &&
(XFS_BUF_IODONE_FUNC(bp) != NULL)) {
XFS_BUF_CLR_IODONE_FUNC(bp);
}
xfs_buf_rele(bp);
xfs_buf_item_free(bip);
}
/*
* Add the given log item with its callback to the list of callbacks
* to be called when the buffer's I/O completes. If it is not set
* already, set the buffer's b_iodone() routine to be
* xfs_buf_iodone_callbacks() and link the log item into the list of
* items rooted at b_fsprivate. Items are always added as the second
* entry in the list if there is a first, because the buf item code
* assumes that the buf log item is first.
*/
void
xfs_buf_attach_iodone(
xfs_buf_t *bp,
void (*cb)(xfs_buf_t *, xfs_log_item_t *),
xfs_log_item_t *lip)
{
xfs_log_item_t *head_lip;
ASSERT(XFS_BUF_ISBUSY(bp));
ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
lip->li_cb = cb;
if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
head_lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
lip->li_bio_list = head_lip->li_bio_list;
head_lip->li_bio_list = lip;
} else {
XFS_BUF_SET_FSPRIVATE(bp, lip);
}
ASSERT((XFS_BUF_IODONE_FUNC(bp) == xfs_buf_iodone_callbacks) ||
(XFS_BUF_IODONE_FUNC(bp) == NULL));
XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
}
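/*
 * Editor's note: a minimal sketch (not driver code) of the insert-after-head
 * idiom used above, which keeps the buf log item first on the singly linked
 * callback list; "new_item" is a hypothetical name.
 */
#if 0
new_item->li_bio_list = head_lip->li_bio_list; /* becomes second entry */
head_lip->li_bio_list = new_item; /* buf log item stays first */
#endif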
STATIC void
xfs_buf_do_callbacks(
xfs_buf_t *bp,
xfs_log_item_t *lip)
{
xfs_log_item_t *nlip;
while (lip != NULL) {
nlip = lip->li_bio_list;
ASSERT(lip->li_cb != NULL);
/*
* Clear the next pointer so we don't have any
* confusion if the item is added to another buf.
* Don't touch the log item after calling its
* callback, because it could have freed itself.
*/
lip->li_bio_list = NULL;
lip->li_cb(bp, lip);
lip = nlip;
}
}
/*
* This is the iodone() function for buffers which have had callbacks
* attached to them by xfs_buf_attach_iodone(). It should remove each
* log item from the buffer's list and call the callback of each in turn.
* When done, the buffer's fsprivate field is set to NULL and the buffer
* is unlocked with a call to iodone().
*/
void
xfs_buf_iodone_callbacks(
xfs_buf_t *bp)
{
xfs_log_item_t *lip;
static ulong lasttime;
static xfs_buftarg_t *lasttarg;
xfs_mount_t *mp;
ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
if (XFS_BUF_GETERROR(bp) != 0) {
/*
* If we've already decided to shutdown the filesystem
* because of IO errors, there's no point in giving this
* a retry.
*/
mp = lip->li_mountp;
if (XFS_FORCED_SHUTDOWN(mp)) {
ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp);
XFS_BUF_SUPER_STALE(bp);
trace_xfs_buf_item_iodone(bp, _RET_IP_);
xfs_buf_do_callbacks(bp, lip);
XFS_BUF_SET_FSPRIVATE(bp, NULL);
XFS_BUF_CLR_IODONE_FUNC(bp);
xfs_biodone(bp);
return;
}
if ((XFS_BUF_TARGET(bp) != lasttarg) ||
(time_after(jiffies, (lasttime + 5*HZ)))) {
lasttime = jiffies;
cmn_err(CE_ALERT, "Device %s, XFS metadata write error"
" block 0x%llx in %s",
XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)),
(__uint64_t)XFS_BUF_ADDR(bp), mp->m_fsname);
}
lasttarg = XFS_BUF_TARGET(bp);
if (XFS_BUF_ISASYNC(bp)) {
/*
* If the write was asynchronous then no one will be
* looking for the error. Clear the error state
* and write the buffer out again delayed write.
*
* XXXsup This is OK, so long as we catch these
* before we start the umount; we don't want these
* DELWRI metadata bufs to be hanging around.
*/
XFS_BUF_ERROR(bp,0); /* errno of 0 unsets the flag */
if (!(XFS_BUF_ISSTALE(bp))) {
XFS_BUF_DELAYWRITE(bp);
XFS_BUF_DONE(bp);
XFS_BUF_SET_START(bp);
}
ASSERT(XFS_BUF_IODONE_FUNC(bp));
trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
xfs_buf_relse(bp);
} else {
/*
* If the write of the buffer was not asynchronous,
* then we want to make sure to return the error
* to the caller of bwrite(). Because of this we
* cannot clear the B_ERROR state at this point.
* Instead we install a callback function that
* will be called when the buffer is released, and
* that routine will clear the error state and
* set the buffer to be written out again after
* some delay.
*/
/* We actually overwrite the existing b_relse
function at times, but we're going to be shutting
down anyway. */
XFS_BUF_SET_BRELSE_FUNC(bp,xfs_buf_error_relse);
XFS_BUF_DONE(bp);
XFS_BUF_FINISH_IOWAIT(bp);
}
return;
}
xfs_buf_do_callbacks(bp, lip);
XFS_BUF_SET_FSPRIVATE(bp, NULL);
XFS_BUF_CLR_IODONE_FUNC(bp);
xfs_biodone(bp);
}
/*
* This is a callback routine attached to a buffer which gets an error
* when being written out synchronously.
*/
STATIC void
xfs_buf_error_relse(
xfs_buf_t *bp)
{
xfs_log_item_t *lip;
xfs_mount_t *mp;
lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
mp = (xfs_mount_t *)lip->li_mountp;
ASSERT(XFS_BUF_TARGET(bp) == mp->m_ddev_targp);
XFS_BUF_STALE(bp);
XFS_BUF_DONE(bp);
XFS_BUF_UNDELAYWRITE(bp);
XFS_BUF_ERROR(bp,0);
trace_xfs_buf_error_relse(bp, _RET_IP_);
if (! XFS_FORCED_SHUTDOWN(mp))
xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
/*
* We have to unpin the pinned buffers so do the
* callbacks.
*/
xfs_buf_do_callbacks(bp, lip);
XFS_BUF_SET_FSPRIVATE(bp, NULL);
XFS_BUF_CLR_IODONE_FUNC(bp);
XFS_BUF_SET_BRELSE_FUNC(bp,NULL);
xfs_buf_relse(bp);
}
/*
* This is the iodone() function for buffers which have been
* logged. It is called when they are eventually flushed out.
* It should remove the buf item from the AIL, and free the buf item.
* It is called by xfs_buf_iodone_callbacks() above which will take
* care of cleaning up the buffer itself.
*/
/* ARGSUSED */
void
xfs_buf_iodone(
xfs_buf_t *bp,
xfs_buf_log_item_t *bip)
{
struct xfs_ail *ailp = bip->bli_item.li_ailp;
ASSERT(bip->bli_buf == bp);
xfs_buf_rele(bp);
/*
* If we are forcibly shutting down, this may well be
* off the AIL already. That's because we simulate the
* log-committed callbacks to unpin these buffers. Or we may never
* have put this item on the AIL because the transaction was
* aborted forcibly. xfs_trans_ail_delete() takes care of these.
*
* Either way, AIL is useless if we're forcing a shutdown.
*/
spin_lock(&ailp->xa_lock);
xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip);
xfs_buf_item_free(bip);
}
|
gpl-2.0
|
downthemachine/VM696-Kernel
|
drivers/char/xilinx_hwicap/xilinx_hwicap.c
|
761
|
22910
|
/*****************************************************************************
*
* Author: Xilinx, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* XILINX IS PROVIDING THIS DESIGN, CODE, OR INFORMATION "AS IS"
* AS A COURTESY TO YOU, SOLELY FOR USE IN DEVELOPING PROGRAMS AND
* SOLUTIONS FOR XILINX DEVICES. BY PROVIDING THIS DESIGN, CODE,
* OR INFORMATION AS ONE POSSIBLE IMPLEMENTATION OF THIS FEATURE,
* APPLICATION OR STANDARD, XILINX IS MAKING NO REPRESENTATION
* THAT THIS IMPLEMENTATION IS FREE FROM ANY CLAIMS OF INFRINGEMENT,
* AND YOU ARE RESPONSIBLE FOR OBTAINING ANY RIGHTS YOU MAY REQUIRE
* FOR YOUR IMPLEMENTATION. XILINX EXPRESSLY DISCLAIMS ANY
* WARRANTY WHATSOEVER WITH RESPECT TO THE ADEQUACY OF THE
* IMPLEMENTATION, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OR
* REPRESENTATIONS THAT THIS IMPLEMENTATION IS FREE FROM CLAIMS OF
* INFRINGEMENT, IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE.
*
* (c) Copyright 2002 Xilinx Inc., Systems Engineering Group
* (c) Copyright 2004 Xilinx Inc., Systems Engineering Group
* (c) Copyright 2007-2008 Xilinx Inc.
* All rights reserved.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
*****************************************************************************/
/*
* This is the code behind /dev/icap* -- it allows a user-space
* application to use the Xilinx ICAP subsystem.
*
* The following operations are possible:
*
* open open the port and initialize for access.
* release release port
* write Write a bitstream to the configuration processor.
* read Read a data stream from the configuration processor.
*
* After being opened, the port is initialized and accessed to avoid a
* corrupted first read which may occur with some hardware. The port
* is left in a desynched state, requiring that a synch sequence be
* transmitted before any valid configuration data. A user will have
* exclusive access to the device while it remains open, and the state
* of the ICAP cannot be guaranteed after the device is closed. Note
* that a complete reset of the core and the state of the ICAP cannot
* be performed on many versions of the cores, hence users of this
* device should avoid making inconsistent accesses to the device. In
* particular, accessing the read interface, without first generating
* a write containing a readback packet can leave the ICAP in an
* inaccessible state.
*
* Note that in order to use the read interface, it is first necessary
* to write a request packet to the write interface. i.e., it is not
* possible to simply readback the bitstream (or any configuration
* bits) from a device without specifically requesting them first.
* The code to craft such packets is intended to be part of the
* user-space application code that uses this device. The simplest
* way to use this interface is simply:
*
* cp foo.bit /dev/icap0
*
* Note that unless foo.bit is an appropriately constructed partial
* bitstream, this has a high likelihood of overwriting the design
* currently programmed in the FPGA.
*/
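/*
 * Editor's note: a minimal user-space sketch (not part of this driver) of
 * the "cp foo.bit /dev/icap0" usage described above, with explicit error
 * handling. The file and device names are illustrative.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
char buf[4096];
ssize_t n;
int in = open("foo.bit", O_RDONLY); /* a partial bitstream */
int out = open("/dev/icap0", O_WRONLY); /* the ICAP device node */

if (in < 0 || out < 0) {
perror("open");
return 1;
}
while ((n = read(in, buf, sizeof(buf))) > 0) {
if (write(out, buf, n) != n) { /* treat short writes as fatal */
perror("write");
return 1;
}
}
close(out); /* the driver flushes and desyncs on release */
close(in);
return 0;
}
#endif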
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/smp_lock.h>
#include <linux/sysctl.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#ifdef CONFIG_OF
/* For open firmware. */
#include <linux/of_device.h>
#include <linux/of_platform.h>
#endif
#include "xilinx_hwicap.h"
#include "buffer_icap.h"
#include "fifo_icap.h"
#define DRIVER_NAME "icap"
#define HWICAP_REGS (0x10000)
#define XHWICAP_MAJOR 259
#define XHWICAP_MINOR 0
#define HWICAP_DEVICES 1
/* An array, which is set to true when the device is registered. */
static bool probed_devices[HWICAP_DEVICES];
static struct mutex icap_sem;
static struct class *icap_class;
#define UNIMPLEMENTED 0xFFFF
static const struct config_registers v2_config_registers = {
.CRC = 0,
.FAR = 1,
.FDRI = 2,
.FDRO = 3,
.CMD = 4,
.CTL = 5,
.MASK = 6,
.STAT = 7,
.LOUT = 8,
.COR = 9,
.MFWR = 10,
.FLR = 11,
.KEY = 12,
.CBC = 13,
.IDCODE = 14,
.AXSS = UNIMPLEMENTED,
.C0R_1 = UNIMPLEMENTED,
.CSOB = UNIMPLEMENTED,
.WBSTAR = UNIMPLEMENTED,
.TIMER = UNIMPLEMENTED,
.BOOTSTS = UNIMPLEMENTED,
.CTL_1 = UNIMPLEMENTED,
};
static const struct config_registers v4_config_registers = {
.CRC = 0,
.FAR = 1,
.FDRI = 2,
.FDRO = 3,
.CMD = 4,
.CTL = 5,
.MASK = 6,
.STAT = 7,
.LOUT = 8,
.COR = 9,
.MFWR = 10,
.FLR = UNIMPLEMENTED,
.KEY = UNIMPLEMENTED,
.CBC = 11,
.IDCODE = 12,
.AXSS = 13,
.C0R_1 = UNIMPLEMENTED,
.CSOB = UNIMPLEMENTED,
.WBSTAR = UNIMPLEMENTED,
.TIMER = UNIMPLEMENTED,
.BOOTSTS = UNIMPLEMENTED,
.CTL_1 = UNIMPLEMENTED,
};
static const struct config_registers v5_config_registers = {
.CRC = 0,
.FAR = 1,
.FDRI = 2,
.FDRO = 3,
.CMD = 4,
.CTL = 5,
.MASK = 6,
.STAT = 7,
.LOUT = 8,
.COR = 9,
.MFWR = 10,
.FLR = UNIMPLEMENTED,
.KEY = UNIMPLEMENTED,
.CBC = 11,
.IDCODE = 12,
.AXSS = 13,
.C0R_1 = 14,
.CSOB = 15,
.WBSTAR = 16,
.TIMER = 17,
.BOOTSTS = 18,
.CTL_1 = 19,
};
/**
* hwicap_command_desync - Send a DESYNC command to the ICAP port.
* @drvdata: a pointer to the drvdata.
*
* This command desynchronizes the ICAP. After this command, a
* bitstream containing a NULL packet, followed by a SYNCH packet is
* required before the ICAP will recognize commands.
*/
static int hwicap_command_desync(struct hwicap_drvdata *drvdata)
{
u32 buffer[4];
u32 index = 0;
/*
* Create the data to be written to the ICAP.
*/
buffer[index++] = hwicap_type_1_write(drvdata->config_regs->CMD) | 1;
buffer[index++] = XHI_CMD_DESYNCH;
buffer[index++] = XHI_NOOP_PACKET;
buffer[index++] = XHI_NOOP_PACKET;
/*
* Write the data to the FIFO and initiate the transfer of data present
* in the FIFO to the ICAP device.
*/
return drvdata->config->set_configuration(drvdata,
&buffer[0], index);
}
/**
* hwicap_get_configuration_register - Query a configuration register.
* @drvdata: a pointer to the drvdata.
* @reg: a constant which represents the configuration
* register value to be returned.
* Examples: XHI_IDCODE, XHI_FLR.
* @reg_data: returns the value of the register.
*
* Sends a query packet to the ICAP and then receives the response.
* The icap is left in Synched state.
*/
static int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata,
u32 reg, u32 *reg_data)
{
int status;
u32 buffer[6];
u32 index = 0;
/*
* Create the data to be written to the ICAP.
*/
buffer[index++] = XHI_DUMMY_PACKET;
buffer[index++] = XHI_NOOP_PACKET;
buffer[index++] = XHI_SYNC_PACKET;
buffer[index++] = XHI_NOOP_PACKET;
buffer[index++] = XHI_NOOP_PACKET;
/*
* Write the data to the FIFO and initiate the transfer of data present
* in the FIFO to the ICAP device.
*/
status = drvdata->config->set_configuration(drvdata,
&buffer[0], index);
if (status)
return status;
/* If the syncword was not found, then we need to start over. */
status = drvdata->config->get_status(drvdata);
if ((status & XHI_SR_DALIGN_MASK) != XHI_SR_DALIGN_MASK)
return -EIO;
index = 0;
buffer[index++] = hwicap_type_1_read(reg) | 1;
buffer[index++] = XHI_NOOP_PACKET;
buffer[index++] = XHI_NOOP_PACKET;
/*
* Write the data to the FIFO and initiate the transfer of data present
* in the FIFO to the ICAP device.
*/
status = drvdata->config->set_configuration(drvdata,
&buffer[0], index);
if (status)
return status;
/*
* Read the configuration register
*/
status = drvdata->config->get_configuration(drvdata, reg_data, 1);
if (status)
return status;
return 0;
}
static int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata)
{
int status;
u32 idcode;
dev_dbg(drvdata->dev, "initializing\n");
/* Abort any current transaction, to make sure we have the
* ICAP in a good state. */
dev_dbg(drvdata->dev, "Reset...\n");
drvdata->config->reset(drvdata);
dev_dbg(drvdata->dev, "Desync...\n");
status = hwicap_command_desync(drvdata);
if (status)
return status;
/* Attempt to read the IDCODE from ICAP. This
* may not be returned correctly, due to the design of the
* hardware.
*/
dev_dbg(drvdata->dev, "Reading IDCODE...\n");
status = hwicap_get_configuration_register(
drvdata, drvdata->config_regs->IDCODE, &idcode);
dev_dbg(drvdata->dev, "IDCODE = %x\n", idcode);
if (status)
return status;
dev_dbg(drvdata->dev, "Desync...\n");
status = hwicap_command_desync(drvdata);
if (status)
return status;
return 0;
}
static ssize_t
hwicap_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
struct hwicap_drvdata *drvdata = file->private_data;
ssize_t bytes_to_read = 0;
u32 *kbuf;
u32 words;
u32 bytes_remaining;
int status;
status = mutex_lock_interruptible(&drvdata->sem);
if (status)
return status;
if (drvdata->read_buffer_in_use) {
/* If there are leftover bytes in the buffer, just */
/* return them and don't try to read more from the */
/* ICAP device. */
bytes_to_read =
(count < drvdata->read_buffer_in_use) ? count :
drvdata->read_buffer_in_use;
/* Return the data currently in the read buffer. */
if (copy_to_user(buf, drvdata->read_buffer, bytes_to_read)) {
status = -EFAULT;
goto error;
}
drvdata->read_buffer_in_use -= bytes_to_read;
memmove(drvdata->read_buffer,
drvdata->read_buffer + bytes_to_read,
4 - bytes_to_read);
} else {
/* Get new data from the ICAP, and return what was requested. */
kbuf = (u32 *) get_zeroed_page(GFP_KERNEL);
if (!kbuf) {
status = -ENOMEM;
goto error;
}
/* The ICAP device is only able to read complete */
/* words. If a number of bytes that do not correspond */
/* to complete words is requested, then we read enough */
/* words to get the required number of bytes, and then */
/* save the remaining bytes for the next read. */
/* Determine the number of words to read, rounding up */
/* if necessary. */
words = ((count + 3) >> 2);
bytes_to_read = words << 2;
if (bytes_to_read > PAGE_SIZE)
bytes_to_read = PAGE_SIZE;
/* Ensure we only read a complete number of words. */
bytes_remaining = bytes_to_read & 3;
bytes_to_read &= ~3;
words = bytes_to_read >> 2;
status = drvdata->config->get_configuration(drvdata,
kbuf, words);
/* If we didn't read correctly, then bail out. */
if (status) {
free_page((unsigned long)kbuf);
goto error;
}
/* If we fail to return the data to the user, then bail out. */
if (copy_to_user(buf, kbuf, bytes_to_read)) {
free_page((unsigned long)kbuf);
status = -EFAULT;
goto error;
}
memcpy(drvdata->read_buffer,
kbuf,
bytes_remaining);
drvdata->read_buffer_in_use = bytes_remaining;
free_page((unsigned long)kbuf);
}
status = bytes_to_read;
error:
mutex_unlock(&drvdata->sem);
return status;
}
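/*
 * Editor's note: a worked example (not driver code) of the leftover-byte
 * bookkeeping above. Suppose a previous read left read_buffer_in_use == 3
 * and the caller now asks for count == 2: bytes_to_read becomes
 * min(2, 3) == 2, those bytes are copied to the user, read_buffer_in_use
 * drops to 1, and the memmove slides the surviving byte to the front of
 * read_buffer ready for the next call.
 */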
static ssize_t
hwicap_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct hwicap_drvdata *drvdata = file->private_data;
ssize_t written = 0;
ssize_t left = count;
u32 *kbuf;
ssize_t len;
ssize_t status;
status = mutex_lock_interruptible(&drvdata->sem);
if (status)
return status;
left += drvdata->write_buffer_in_use;
/* Only write multiples of 4 bytes. */
if (left < 4) {
status = 0;
goto error;
}
kbuf = (u32 *) __get_free_page(GFP_KERNEL);
if (!kbuf) {
status = -ENOMEM;
goto error;
}
while (left > 3) {
/* only write multiples of 4 bytes, so there might */
/* be as many as 3 bytes left (at the end). */
len = left;
if (len > PAGE_SIZE)
len = PAGE_SIZE;
len &= ~3;
if (drvdata->write_buffer_in_use) {
memcpy(kbuf, drvdata->write_buffer,
drvdata->write_buffer_in_use);
if (copy_from_user(
(((char *)kbuf) + drvdata->write_buffer_in_use),
buf + written,
len - (drvdata->write_buffer_in_use))) {
free_page((unsigned long)kbuf);
status = -EFAULT;
goto error;
}
} else {
if (copy_from_user(kbuf, buf + written, len)) {
free_page((unsigned long)kbuf);
status = -EFAULT;
goto error;
}
}
status = drvdata->config->set_configuration(drvdata,
kbuf, len >> 2);
if (status) {
free_page((unsigned long)kbuf);
status = -EFAULT;
goto error;
}
if (drvdata->write_buffer_in_use) {
len -= drvdata->write_buffer_in_use;
left -= drvdata->write_buffer_in_use;
drvdata->write_buffer_in_use = 0;
}
written += len;
left -= len;
}
if ((left > 0) && (left < 4)) {
if (!copy_from_user(drvdata->write_buffer,
buf + written, left)) {
drvdata->write_buffer_in_use = left;
written += left;
left = 0;
}
}
free_page((unsigned long)kbuf);
status = written;
error:
mutex_unlock(&drvdata->sem);
return status;
}
static int hwicap_open(struct inode *inode, struct file *file)
{
struct hwicap_drvdata *drvdata;
int status;
lock_kernel();
drvdata = container_of(inode->i_cdev, struct hwicap_drvdata, cdev);
status = mutex_lock_interruptible(&drvdata->sem);
if (status)
goto out;
if (drvdata->is_open) {
status = -EBUSY;
goto error;
}
status = hwicap_initialize_hwicap(drvdata);
if (status) {
dev_err(drvdata->dev, "Failed to open file");
goto error;
}
file->private_data = drvdata;
drvdata->write_buffer_in_use = 0;
drvdata->read_buffer_in_use = 0;
drvdata->is_open = 1;
error:
mutex_unlock(&drvdata->sem);
out:
unlock_kernel();
return status;
}
static int hwicap_release(struct inode *inode, struct file *file)
{
struct hwicap_drvdata *drvdata = file->private_data;
int i;
int status = 0;
mutex_lock(&drvdata->sem);
if (drvdata->write_buffer_in_use) {
/* Flush write buffer. */
for (i = drvdata->write_buffer_in_use; i < 4; i++)
drvdata->write_buffer[i] = 0;
status = drvdata->config->set_configuration(drvdata,
(u32 *) drvdata->write_buffer, 1);
if (status)
goto error;
}
status = hwicap_command_desync(drvdata);
if (status)
goto error;
error:
drvdata->is_open = 0;
mutex_unlock(&drvdata->sem);
return status;
}
static const struct file_operations hwicap_fops = {
.owner = THIS_MODULE,
.write = hwicap_write,
.read = hwicap_read,
.open = hwicap_open,
.release = hwicap_release,
};
static int __devinit hwicap_setup(struct device *dev, int id,
const struct resource *regs_res,
const struct hwicap_driver_config *config,
const struct config_registers *config_regs)
{
dev_t devt;
struct hwicap_drvdata *drvdata = NULL;
int retval = 0;
dev_info(dev, "Xilinx icap port driver\n");
mutex_lock(&icap_sem);
if (id < 0) {
for (id = 0; id < HWICAP_DEVICES; id++)
if (!probed_devices[id])
break;
}
if (id < 0 || id >= HWICAP_DEVICES) {
mutex_unlock(&icap_sem);
dev_err(dev, "%s%i too large\n", DRIVER_NAME, id);
return -EINVAL;
}
if (probed_devices[id]) {
mutex_unlock(&icap_sem);
dev_err(dev, "cannot assign to %s%i; it is already in use\n",
DRIVER_NAME, id);
return -EBUSY;
}
probed_devices[id] = 1;
mutex_unlock(&icap_sem);
devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR + id);
drvdata = kzalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL);
if (!drvdata) {
dev_err(dev, "Couldn't allocate device private record\n");
retval = -ENOMEM;
goto failed0;
}
dev_set_drvdata(dev, (void *)drvdata);
if (!regs_res) {
dev_err(dev, "Couldn't get registers resource\n");
retval = -EFAULT;
goto failed1;
}
drvdata->mem_start = regs_res->start;
drvdata->mem_end = regs_res->end;
drvdata->mem_size = regs_res->end - regs_res->start + 1;
if (!request_mem_region(drvdata->mem_start,
drvdata->mem_size, DRIVER_NAME)) {
dev_err(dev, "Couldn't lock memory region at %Lx\n",
(unsigned long long) regs_res->start);
retval = -EBUSY;
goto failed1;
}
drvdata->devt = devt;
drvdata->dev = dev;
drvdata->base_address = ioremap(drvdata->mem_start, drvdata->mem_size);
if (!drvdata->base_address) {
dev_err(dev, "ioremap() failed\n");
goto failed2;
}
drvdata->config = config;
drvdata->config_regs = config_regs;
mutex_init(&drvdata->sem);
drvdata->is_open = 0;
dev_info(dev, "ioremap %llx to %p with size %llx\n",
(unsigned long long) drvdata->mem_start,
drvdata->base_address,
(unsigned long long) drvdata->mem_size);
cdev_init(&drvdata->cdev, &hwicap_fops);
drvdata->cdev.owner = THIS_MODULE;
retval = cdev_add(&drvdata->cdev, devt, 1);
if (retval) {
dev_err(dev, "cdev_add() failed\n");
goto failed3;
}
device_create(icap_class, dev, devt, NULL, "%s%d", DRIVER_NAME, id);
return 0; /* success */
failed3:
iounmap(drvdata->base_address);
failed2:
release_mem_region(regs_res->start, drvdata->mem_size);
failed1:
kfree(drvdata);
failed0:
mutex_lock(&icap_sem);
probed_devices[id] = 0;
mutex_unlock(&icap_sem);
return retval;
}
static struct hwicap_driver_config buffer_icap_config = {
.get_configuration = buffer_icap_get_configuration,
.set_configuration = buffer_icap_set_configuration,
.get_status = buffer_icap_get_status,
.reset = buffer_icap_reset,
};
static struct hwicap_driver_config fifo_icap_config = {
.get_configuration = fifo_icap_get_configuration,
.set_configuration = fifo_icap_set_configuration,
.get_status = fifo_icap_get_status,
.reset = fifo_icap_reset,
};
static int __devexit hwicap_remove(struct device *dev)
{
struct hwicap_drvdata *drvdata;
drvdata = (struct hwicap_drvdata *)dev_get_drvdata(dev);
if (!drvdata)
return 0;
device_destroy(icap_class, drvdata->devt);
cdev_del(&drvdata->cdev);
iounmap(drvdata->base_address);
release_mem_region(drvdata->mem_start, drvdata->mem_size);
kfree(drvdata);
dev_set_drvdata(dev, NULL);
mutex_lock(&icap_sem);
probed_devices[MINOR(dev->devt)-XHWICAP_MINOR] = 0;
mutex_unlock(&icap_sem);
return 0; /* success */
}
static int __devinit hwicap_drv_probe(struct platform_device *pdev)
{
struct resource *res;
const struct config_registers *regs;
const char *family;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
/* It's most likely that we're using V4, if the family is not
specified */
regs = &v4_config_registers;
family = pdev->dev.platform_data;
if (family) {
if (!strcmp(family, "virtex2p")) {
regs = &v2_config_registers;
} else if (!strcmp(family, "virtex4")) {
regs = &v4_config_registers;
} else if (!strcmp(family, "virtex5")) {
regs = &v5_config_registers;
}
}
return hwicap_setup(&pdev->dev, pdev->id, res,
&buffer_icap_config, regs);
}
static int __devexit hwicap_drv_remove(struct platform_device *pdev)
{
return hwicap_remove(&pdev->dev);
}
static struct platform_driver hwicap_platform_driver = {
.probe = hwicap_drv_probe,
.remove = hwicap_drv_remove,
.driver = {
.owner = THIS_MODULE,
.name = DRIVER_NAME,
},
};
/* ---------------------------------------------------------------------
* OF bus binding
*/
#if defined(CONFIG_OF)
static int __devinit
hwicap_of_probe(struct of_device *op, const struct of_device_id *match)
{
struct resource res;
const unsigned int *id;
const char *family;
int rc;
const struct hwicap_driver_config *config = match->data;
const struct config_registers *regs;
dev_dbg(&op->dev, "hwicap_of_probe(%p, %p)\n", op, match);
rc = of_address_to_resource(op->dev.of_node, 0, &res);
if (rc) {
dev_err(&op->dev, "invalid address\n");
return rc;
}
id = of_get_property(op->dev.of_node, "port-number", NULL);
/* It's most likely that we're using V4, if the family is not
specified */
regs = &v4_config_registers;
family = of_get_property(op->dev.of_node, "xlnx,family", NULL);
if (family) {
if (!strcmp(family, "virtex2p")) {
regs = &v2_config_registers;
} else if (!strcmp(family, "virtex4")) {
regs = &v4_config_registers;
} else if (!strcmp(family, "virtex5")) {
regs = &v5_config_registers;
}
}
return hwicap_setup(&op->dev, id ? *id : -1, &res, config,
regs);
}
static int __devexit hwicap_of_remove(struct of_device *op)
{
return hwicap_remove(&op->dev);
}
/* Match table for of_platform binding */
static const struct of_device_id __devinitconst hwicap_of_match[] = {
{ .compatible = "xlnx,opb-hwicap-1.00.b", .data = &buffer_icap_config},
{ .compatible = "xlnx,xps-hwicap-1.00.a", .data = &fifo_icap_config},
{},
};
MODULE_DEVICE_TABLE(of, hwicap_of_match);
static struct of_platform_driver hwicap_of_driver = {
.probe = hwicap_of_probe,
.remove = __devexit_p(hwicap_of_remove),
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
.of_match_table = hwicap_of_match,
},
};
/* Registration helpers to keep the number of #ifdefs to a minimum */
static inline int __init hwicap_of_register(void)
{
pr_debug("hwicap: calling of_register_platform_driver()\n");
return of_register_platform_driver(&hwicap_of_driver);
}
static inline void __exit hwicap_of_unregister(void)
{
of_unregister_platform_driver(&hwicap_of_driver);
}
#else /* CONFIG_OF */
/* CONFIG_OF not enabled; do nothing helpers */
static inline int __init hwicap_of_register(void) { return 0; }
static inline void __exit hwicap_of_unregister(void) { }
#endif /* CONFIG_OF */
static int __init hwicap_module_init(void)
{
dev_t devt;
int retval;
icap_class = class_create(THIS_MODULE, "xilinx_config");
mutex_init(&icap_sem);
devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR);
retval = register_chrdev_region(devt,
HWICAP_DEVICES,
DRIVER_NAME);
if (retval < 0)
return retval;
retval = platform_driver_register(&hwicap_platform_driver);
if (retval)
goto failed1;
retval = hwicap_of_register();
if (retval)
goto failed2;
return retval;
failed2:
platform_driver_unregister(&hwicap_platform_driver);
failed1:
unregister_chrdev_region(devt, HWICAP_DEVICES);
return retval;
}
static void __exit hwicap_module_cleanup(void)
{
dev_t devt = MKDEV(XHWICAP_MAJOR, XHWICAP_MINOR);
class_destroy(icap_class);
platform_driver_unregister(&hwicap_platform_driver);
hwicap_of_unregister();
unregister_chrdev_region(devt, HWICAP_DEVICES);
}
module_init(hwicap_module_init);
module_exit(hwicap_module_cleanup);
MODULE_AUTHOR("Xilinx, Inc; Xilinx Research Labs Group");
MODULE_DESCRIPTION("Xilinx ICAP Port Driver");
MODULE_LICENSE("GPL");
|
gpl-2.0
|
Jocala/kernel.ancora_tmo.ics
|
fs/xfs/xfs_error.c
|
761
|
4710
|
/*
* Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_utils.h"
#include "xfs_error.h"
#ifdef DEBUG
int xfs_etrap[XFS_ERROR_NTRAP] = {
0,
};
int
xfs_error_trap(int e)
{
int i;
if (!e)
return 0;
for (i = 0; i < XFS_ERROR_NTRAP; i++) {
if (xfs_etrap[i] == 0)
break;
if (e != xfs_etrap[i])
continue;
cmn_err(CE_NOTE, "xfs_error_trap: error %d", e);
BUG();
break;
}
return e;
}
int xfs_etest[XFS_NUM_INJECT_ERROR];
int64_t xfs_etest_fsid[XFS_NUM_INJECT_ERROR];
char * xfs_etest_fsname[XFS_NUM_INJECT_ERROR];
int
xfs_error_test(int error_tag, int *fsidp, char *expression,
int line, char *file, unsigned long randfactor)
{
int i;
int64_t fsid;
if (random32() % randfactor)
return 0;
memcpy(&fsid, fsidp, sizeof(xfs_fsid_t));
for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) {
if (xfs_etest[i] == error_tag && xfs_etest_fsid[i] == fsid) {
cmn_err(CE_WARN,
"Injecting error (%s) at file %s, line %d, on filesystem \"%s\"",
expression, file, line, xfs_etest_fsname[i]);
return 1;
}
}
return 0;
}
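/*
 * Usage sketch (an assumption -- the authoritative wrapper lives in
 * xfs_error.h): callers typically go through a macro so the expression
 * text, file and line are captured automatically, roughly:
 *
 *   #define XFS_TEST_ERROR(expr, mp, tag, rf) \
 *           ((expr) || xfs_error_test((tag), (mp)->m_fixedfsid, "expr", \
 *                                     __LINE__, __FILE__, (rf)))
 *
 * With randfactor rf, the injection point fires on roughly 1 in rf
 * calls, since random32() % rf is zero with probability 1/rf.
 */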
int
xfs_errortag_add(int error_tag, xfs_mount_t *mp)
{
int i;
int len;
int64_t fsid;
memcpy(&fsid, mp->m_fixedfsid, sizeof(xfs_fsid_t));
for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) {
if (xfs_etest_fsid[i] == fsid && xfs_etest[i] == error_tag) {
cmn_err(CE_WARN, "XFS error tag #%d on", error_tag);
return 0;
}
}
for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) {
if (xfs_etest[i] == 0) {
cmn_err(CE_WARN, "Turned on XFS error tag #%d",
error_tag);
xfs_etest[i] = error_tag;
xfs_etest_fsid[i] = fsid;
len = strlen(mp->m_fsname);
xfs_etest_fsname[i] = kmem_alloc(len + 1, KM_SLEEP);
strcpy(xfs_etest_fsname[i], mp->m_fsname);
return 0;
}
}
cmn_err(CE_WARN, "error tag overflow, too many turned on");
return 1;
}
int
xfs_errortag_clearall(xfs_mount_t *mp, int loud)
{
int64_t fsid;
int cleared = 0;
int i;
memcpy(&fsid, mp->m_fixedfsid, sizeof(xfs_fsid_t));
for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) {
if ((fsid == 0LL || xfs_etest_fsid[i] == fsid) &&
xfs_etest[i] != 0) {
cleared = 1;
cmn_err(CE_WARN, "Clearing XFS error tag #%d",
xfs_etest[i]);
xfs_etest[i] = 0;
xfs_etest_fsid[i] = 0LL;
kmem_free(xfs_etest_fsname[i]);
xfs_etest_fsname[i] = NULL;
}
}
if (loud || cleared)
cmn_err(CE_WARN,
"Cleared all XFS error tags for filesystem \"%s\"",
mp->m_fsname);
return 0;
}
#endif /* DEBUG */
void
xfs_fs_cmn_err(int level, xfs_mount_t *mp, char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
xfs_fs_vcmn_err(level, mp, fmt, ap);
va_end(ap);
}
void
xfs_cmn_err(int panic_tag, int level, xfs_mount_t *mp, char *fmt, ...)
{
va_list ap;
#ifdef DEBUG
xfs_panic_mask |= (XFS_PTAG_SHUTDOWN_CORRUPT | XFS_PTAG_LOGRES);
#endif
if (xfs_panic_mask && (xfs_panic_mask & panic_tag)
&& (level & CE_ALERT)) {
level &= ~CE_ALERT;
level |= CE_PANIC;
cmn_err(CE_ALERT, "XFS: Transforming an alert into a BUG.");
}
va_start(ap, fmt);
xfs_fs_vcmn_err(level, mp, fmt, ap);
va_end(ap);
}
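/*
 * Example (illustrative): with XFS_PTAG_LOGRES in xfs_panic_mask -- which
 * the DEBUG block above forces -- a call such as
 * xfs_cmn_err(XFS_PTAG_LOGRES, CE_ALERT, mp, ...) is upgraded from an
 * alert to a panic, turning a log-reservation warning into a hard stop
 * for debugging.
 */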
void
xfs_error_report(
const char *tag,
int level,
struct xfs_mount *mp,
const char *filename,
int linenum,
inst_t *ra)
{
if (level <= xfs_error_level) {
xfs_cmn_err(XFS_PTAG_ERROR_REPORT,
CE_ALERT, mp,
"XFS internal error %s at line %d of file %s. Caller 0x%p\n",
tag, linenum, filename, ra);
xfs_stack_trace();
}
}
void
xfs_corruption_error(
const char *tag,
int level,
struct xfs_mount *mp,
void *p,
const char *filename,
int linenum,
inst_t *ra)
{
if (level <= xfs_error_level)
xfs_hex_dump(p, 16);
xfs_error_report(tag, level, mp, filename, linenum, ra);
}
|
gpl-2.0
|
Icenowy/linux-kernel-u8800-aosc-oses
|
drivers/infiniband/hw/qib/qib_sdma.c
|
761
|
26331
|
/*
* Copyright (c) 2007, 2008, 2009, 2010 QLogic Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/spinlock.h>
#include <linux/netdevice.h>
#include "qib.h"
#include "qib_common.h"
/* default pio off, sdma on */
static ushort sdma_descq_cnt = 256;
module_param_named(sdma_descq_cnt, sdma_descq_cnt, ushort, S_IRUGO);
MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
/*
* Bits defined in the send DMA descriptor.
*/
#define SDMA_DESC_LAST (1ULL << 11)
#define SDMA_DESC_FIRST (1ULL << 12)
#define SDMA_DESC_DMA_HEAD (1ULL << 13)
#define SDMA_DESC_USE_LARGE_BUF (1ULL << 14)
#define SDMA_DESC_INTR (1ULL << 15)
#define SDMA_DESC_COUNT_LSB 16
#define SDMA_DESC_GEN_LSB 30
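/*
 * Layout sketch (an assumption, reconstructed from the shifts and masks
 * used in make_sdma_desc()/unmap_desc() below; the hardware documentation
 * is authoritative):
 *
 *   qw[0] bits 63:32  SDmaPhyAddr[31:0]  (word aligned, bits 1:0 zero)
 *   qw[0] bits 31:30  SDmaGeneration[1:0]
 *   qw[0] bits 26:16  SDmaDwordCount[10:0]
 *   qw[0] bits 15:11  INTR, USE_LARGE_BUF, DMA_HEAD, FIRST, LAST flags
 *   qw[0] bits 10:0   SDmaBufOffset[12:2]
 *   qw[1] bits 15:0   SDmaPhyAddr[47:32]
 */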
char *qib_sdma_state_names[] = {
[qib_sdma_state_s00_hw_down] = "s00_HwDown",
[qib_sdma_state_s10_hw_start_up_wait] = "s10_HwStartUpWait",
[qib_sdma_state_s20_idle] = "s20_Idle",
[qib_sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
[qib_sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
[qib_sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait",
[qib_sdma_state_s99_running] = "s99_Running",
};
char *qib_sdma_event_names[] = {
[qib_sdma_event_e00_go_hw_down] = "e00_GoHwDown",
[qib_sdma_event_e10_go_hw_start] = "e10_GoHwStart",
[qib_sdma_event_e20_hw_started] = "e20_HwStarted",
[qib_sdma_event_e30_go_running] = "e30_GoRunning",
[qib_sdma_event_e40_sw_cleaned] = "e40_SwCleaned",
[qib_sdma_event_e50_hw_cleaned] = "e50_HwCleaned",
[qib_sdma_event_e60_hw_halted] = "e60_HwHalted",
[qib_sdma_event_e70_go_idle] = "e70_GoIdle",
[qib_sdma_event_e7220_err_halted] = "e7220_ErrHalted",
[qib_sdma_event_e7322_err_halted] = "e7322_ErrHalted",
[qib_sdma_event_e90_timer_tick] = "e90_TimerTick",
};
/* declare all statics here rather than keep sorting */
static int alloc_sdma(struct qib_pportdata *);
static void sdma_complete(struct kref *);
static void sdma_finalput(struct qib_sdma_state *);
static void sdma_get(struct qib_sdma_state *);
static void sdma_put(struct qib_sdma_state *);
static void sdma_set_state(struct qib_pportdata *, enum qib_sdma_states);
static void sdma_start_sw_clean_up(struct qib_pportdata *);
static void sdma_sw_clean_up_task(unsigned long);
static void unmap_desc(struct qib_pportdata *, unsigned);
static void sdma_get(struct qib_sdma_state *ss)
{
kref_get(&ss->kref);
}
static void sdma_complete(struct kref *kref)
{
struct qib_sdma_state *ss =
container_of(kref, struct qib_sdma_state, kref);
complete(&ss->comp);
}
static void sdma_put(struct qib_sdma_state *ss)
{
kref_put(&ss->kref, sdma_complete);
}
static void sdma_finalput(struct qib_sdma_state *ss)
{
sdma_put(ss);
wait_for_completion(&ss->comp);
}
/*
* Complete all the sdma requests on the active list, in the correct
* order, and with appropriate processing. Called when cleaning up
* after sdma shutdown, and when new sdma requests are submitted for
* a link that is down. This matches what is done for requests
* that complete normally, it's just the full list.
*
* Must be called with sdma_lock held
*/
static void clear_sdma_activelist(struct qib_pportdata *ppd)
{
struct qib_sdma_txreq *txp, *txp_next;
list_for_each_entry_safe(txp, txp_next, &ppd->sdma_activelist, list) {
list_del_init(&txp->list);
if (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) {
unsigned idx;
idx = txp->start_idx;
while (idx != txp->next_descq_idx) {
unmap_desc(ppd, idx);
if (++idx == ppd->sdma_descq_cnt)
idx = 0;
}
}
if (txp->callback)
(*txp->callback)(txp, QIB_SDMA_TXREQ_S_ABORTED);
}
}
static void sdma_sw_clean_up_task(unsigned long opaque)
{
struct qib_pportdata *ppd = (struct qib_pportdata *) opaque;
unsigned long flags;
spin_lock_irqsave(&ppd->sdma_lock, flags);
/*
* At this point, the following should always be true:
* - We are halted, so no more descriptors are getting retired.
* - We are not running, so no one is submitting new work.
* - Only we can send the e40_sw_cleaned, so we can't start
* running again until we say so. So, the active list and
* descq are ours to play with.
*/
/* Process all retired requests. */
qib_sdma_make_progress(ppd);
clear_sdma_activelist(ppd);
/*
* Resync count of added and removed. It is VERY important that
* sdma_descq_removed NEVER decrement - user_sdma depends on it.
*/
ppd->sdma_descq_removed = ppd->sdma_descq_added;
/*
* Reset our notion of head and tail.
* Note that the HW registers will be reset when switching states
* due to calling __qib_sdma_process_event() below.
*/
ppd->sdma_descq_tail = 0;
ppd->sdma_descq_head = 0;
ppd->sdma_head_dma[0] = 0;
ppd->sdma_generation = 0;
__qib_sdma_process_event(ppd, qib_sdma_event_e40_sw_cleaned);
spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}
/*
* This is called when changing to state qib_sdma_state_s10_hw_start_up_wait
* as a result of send buffer errors or send DMA descriptor errors.
* We want to disarm the buffers in these cases.
*/
static void sdma_hw_start_up(struct qib_pportdata *ppd)
{
struct qib_sdma_state *ss = &ppd->sdma_state;
unsigned bufno;
for (bufno = ss->first_sendbuf; bufno < ss->last_sendbuf; ++bufno)
ppd->dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_BUF(bufno));
ppd->dd->f_sdma_hw_start_up(ppd);
}
static void sdma_sw_tear_down(struct qib_pportdata *ppd)
{
struct qib_sdma_state *ss = &ppd->sdma_state;
/* Releasing this reference means the state machine has stopped. */
sdma_put(ss);
}
static void sdma_start_sw_clean_up(struct qib_pportdata *ppd)
{
tasklet_hi_schedule(&ppd->sdma_sw_clean_up_task);
}
static void sdma_set_state(struct qib_pportdata *ppd,
enum qib_sdma_states next_state)
{
struct qib_sdma_state *ss = &ppd->sdma_state;
struct sdma_set_state_action *action = ss->set_state_action;
unsigned op = 0;
/* debugging bookkeeping */
ss->previous_state = ss->current_state;
ss->previous_op = ss->current_op;
ss->current_state = next_state;
if (action[next_state].op_enable)
op |= QIB_SDMA_SENDCTRL_OP_ENABLE;
if (action[next_state].op_intenable)
op |= QIB_SDMA_SENDCTRL_OP_INTENABLE;
if (action[next_state].op_halt)
op |= QIB_SDMA_SENDCTRL_OP_HALT;
if (action[next_state].op_drain)
op |= QIB_SDMA_SENDCTRL_OP_DRAIN;
if (action[next_state].go_s99_running_tofalse)
ss->go_s99_running = 0;
if (action[next_state].go_s99_running_totrue)
ss->go_s99_running = 1;
ss->current_op = op;
ppd->dd->f_sdma_sendctrl(ppd, ss->current_op);
}
static void unmap_desc(struct qib_pportdata *ppd, unsigned head)
{
__le64 *descqp = &ppd->sdma_descq[head].qw[0];
u64 desc[2];
dma_addr_t addr;
size_t len;
desc[0] = le64_to_cpu(descqp[0]);
desc[1] = le64_to_cpu(descqp[1]);
addr = (desc[1] << 32) | (desc[0] >> 32);
len = (desc[0] >> 14) & (0x7ffULL << 2);
dma_unmap_single(&ppd->dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
}
static int alloc_sdma(struct qib_pportdata *ppd)
{
ppd->sdma_descq_cnt = sdma_descq_cnt;
if (!ppd->sdma_descq_cnt)
ppd->sdma_descq_cnt = 256;
/* Allocate memory for SendDMA descriptor FIFO */
ppd->sdma_descq = dma_alloc_coherent(&ppd->dd->pcidev->dev,
ppd->sdma_descq_cnt * sizeof(u64[2]), &ppd->sdma_descq_phys,
GFP_KERNEL);
if (!ppd->sdma_descq) {
qib_dev_err(ppd->dd, "failed to allocate SendDMA descriptor "
"FIFO memory\n");
goto bail;
}
/* Allocate memory for DMA of head register to memory */
ppd->sdma_head_dma = dma_alloc_coherent(&ppd->dd->pcidev->dev,
PAGE_SIZE, &ppd->sdma_head_phys, GFP_KERNEL);
if (!ppd->sdma_head_dma) {
qib_dev_err(ppd->dd, "failed to allocate SendDMA "
"head memory\n");
goto cleanup_descq;
}
ppd->sdma_head_dma[0] = 0;
return 0;
cleanup_descq:
dma_free_coherent(&ppd->dd->pcidev->dev,
ppd->sdma_descq_cnt * sizeof(u64[2]), (void *)ppd->sdma_descq,
ppd->sdma_descq_phys);
ppd->sdma_descq = NULL;
ppd->sdma_descq_phys = 0;
bail:
ppd->sdma_descq_cnt = 0;
return -ENOMEM;
}
static void free_sdma(struct qib_pportdata *ppd)
{
struct qib_devdata *dd = ppd->dd;
if (ppd->sdma_head_dma) {
dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
(void *)ppd->sdma_head_dma,
ppd->sdma_head_phys);
ppd->sdma_head_dma = NULL;
ppd->sdma_head_phys = 0;
}
if (ppd->sdma_descq) {
dma_free_coherent(&dd->pcidev->dev,
ppd->sdma_descq_cnt * sizeof(u64[2]),
ppd->sdma_descq, ppd->sdma_descq_phys);
ppd->sdma_descq = NULL;
ppd->sdma_descq_phys = 0;
}
}
static inline void make_sdma_desc(struct qib_pportdata *ppd,
u64 *sdmadesc, u64 addr, u64 dwlen,
u64 dwoffset)
{
WARN_ON(addr & 3);
/* SDmaPhyAddr[47:32] */
sdmadesc[1] = addr >> 32;
/* SDmaPhyAddr[31:0] */
sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
/* SDmaGeneration[1:0] */
sdmadesc[0] |= (ppd->sdma_generation & 3ULL) <<
SDMA_DESC_GEN_LSB;
/* SDmaDwordCount[10:0] */
sdmadesc[0] |= (dwlen & 0x7ffULL) << SDMA_DESC_COUNT_LSB;
/* SDmaBufOffset[12:2] */
sdmadesc[0] |= dwoffset & 0x7ffULL;
}
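/*
 * Round-trip sketch (illustrative only): packing with make_sdma_desc()
 * and recovering the fields the way unmap_desc() does should agree, e.g.
 *
 *   u64 d[2];
 *   make_sdma_desc(ppd, d, addr, dwlen, dwoffset);
 *   addr == ((d[1] << 32) | (d[0] >> 32));          // low 2 addr bits zero
 *   dwlen << 2 == ((d[0] >> 14) & (0x7ffULL << 2)); // dword count in bytes
 */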
/* sdma_lock must be held */
int qib_sdma_make_progress(struct qib_pportdata *ppd)
{
struct list_head *lp = NULL;
struct qib_sdma_txreq *txp = NULL;
struct qib_devdata *dd = ppd->dd;
int progress = 0;
u16 hwhead;
u16 idx = 0;
hwhead = dd->f_sdma_gethead(ppd);
/* The reason for some of the complexity of this code is that
* not all descriptors have corresponding txps. So, we have to
* be able to skip over descs until we wander into the range of
* the next txp on the list.
*/
if (!list_empty(&ppd->sdma_activelist)) {
lp = ppd->sdma_activelist.next;
txp = list_entry(lp, struct qib_sdma_txreq, list);
idx = txp->start_idx;
}
while (ppd->sdma_descq_head != hwhead) {
/* if desc is part of this txp, unmap if needed */
if (txp && (txp->flags & QIB_SDMA_TXREQ_F_FREEDESC) &&
(idx == ppd->sdma_descq_head)) {
unmap_desc(ppd, ppd->sdma_descq_head);
if (++idx == ppd->sdma_descq_cnt)
idx = 0;
}
/* increment dequeued desc count */
ppd->sdma_descq_removed++;
/* advance head, wrap if needed */
if (++ppd->sdma_descq_head == ppd->sdma_descq_cnt)
ppd->sdma_descq_head = 0;
/* if now past this txp's descs, do the callback */
if (txp && txp->next_descq_idx == ppd->sdma_descq_head) {
/* remove from active list */
list_del_init(&txp->list);
if (txp->callback)
(*txp->callback)(txp, QIB_SDMA_TXREQ_S_OK);
/* see if there is another txp */
if (list_empty(&ppd->sdma_activelist))
txp = NULL;
else {
lp = ppd->sdma_activelist.next;
txp = list_entry(lp, struct qib_sdma_txreq,
list);
idx = txp->start_idx;
}
}
progress = 1;
}
if (progress)
qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
return progress;
}
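/*
 * Worked example (illustrative): with sdma_descq_cnt == 256, head == 250
 * and hwhead == 4, the loop above retires descriptors 250..255 and 0..3
 * (ten in all), wrapping head from 255 to 0 at the count boundary and
 * stopping when head == hwhead. sdma_descq_removed grows by ten while
 * sdma_descq_added is untouched, so the free count reported to the verbs
 * layer grows accordingly.
 */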
/*
* This is called from interrupt context.
*/
void qib_sdma_intr(struct qib_pportdata *ppd)
{
unsigned long flags;
spin_lock_irqsave(&ppd->sdma_lock, flags);
__qib_sdma_intr(ppd);
spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}
void __qib_sdma_intr(struct qib_pportdata *ppd)
{
if (__qib_sdma_running(ppd))
qib_sdma_make_progress(ppd);
}
int qib_setup_sdma(struct qib_pportdata *ppd)
{
struct qib_devdata *dd = ppd->dd;
unsigned long flags;
int ret = 0;
ret = alloc_sdma(ppd);
if (ret)
goto bail;
/* set consistent sdma state */
ppd->dd->f_sdma_init_early(ppd);
spin_lock_irqsave(&ppd->sdma_lock, flags);
sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
spin_unlock_irqrestore(&ppd->sdma_lock, flags);
/* set up reference counting */
kref_init(&ppd->sdma_state.kref);
init_completion(&ppd->sdma_state.comp);
ppd->sdma_generation = 0;
ppd->sdma_descq_head = 0;
ppd->sdma_descq_removed = 0;
ppd->sdma_descq_added = 0;
INIT_LIST_HEAD(&ppd->sdma_activelist);
tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
(unsigned long)ppd);
ret = dd->f_init_sdma_regs(ppd);
if (ret)
goto bail_alloc;
qib_sdma_process_event(ppd, qib_sdma_event_e10_go_hw_start);
return 0;
bail_alloc:
qib_teardown_sdma(ppd);
bail:
return ret;
}
void qib_teardown_sdma(struct qib_pportdata *ppd)
{
qib_sdma_process_event(ppd, qib_sdma_event_e00_go_hw_down);
/*
* This waits for the state machine to exit so it is not
* necessary to kill the sdma_sw_clean_up_task to make sure
* it is not running.
*/
sdma_finalput(&ppd->sdma_state);
free_sdma(ppd);
}
int qib_sdma_running(struct qib_pportdata *ppd)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&ppd->sdma_lock, flags);
ret = __qib_sdma_running(ppd);
spin_unlock_irqrestore(&ppd->sdma_lock, flags);
return ret;
}
/*
* Complete a request when sdma is not running; it is likely the only
* request, but to simplify the code, always queue it, then process the
* full activelist. We process the entire list to ensure that this
* particular request does get its callback, in the correct order.
* Must be called with sdma_lock held.
*/
static void complete_sdma_err_req(struct qib_pportdata *ppd,
struct qib_verbs_txreq *tx)
{
atomic_inc(&tx->qp->s_dma_busy);
/* no sdma descriptors, so no unmap_desc */
tx->txreq.start_idx = 0;
tx->txreq.next_descq_idx = 0;
list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
clear_sdma_activelist(ppd);
}
/*
* This function queues one IB packet onto the send DMA queue per call.
* The caller is responsible for checking:
* 1) The number of send DMA descriptor entries is less than the size of
* the descriptor queue.
* 2) The IB SGE addresses and lengths are 32-bit aligned
* (except possibly the last SGE's length)
* 3) The SGE addresses are suitable for passing to dma_map_single().
*/
int qib_sdma_verbs_send(struct qib_pportdata *ppd,
struct qib_sge_state *ss, u32 dwords,
struct qib_verbs_txreq *tx)
{
unsigned long flags;
struct qib_sge *sge;
struct qib_qp *qp;
int ret = 0;
u16 tail;
__le64 *descqp;
u64 sdmadesc[2];
u32 dwoffset;
dma_addr_t addr;
spin_lock_irqsave(&ppd->sdma_lock, flags);
retry:
if (unlikely(!__qib_sdma_running(ppd))) {
complete_sdma_err_req(ppd, tx);
goto unlock;
}
if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {
if (qib_sdma_make_progress(ppd))
goto retry;
if (ppd->dd->flags & QIB_HAS_SDMA_TIMEOUT)
ppd->dd->f_sdma_set_desc_cnt(ppd,
ppd->sdma_descq_cnt / 2);
goto busy;
}
dwoffset = tx->hdr_dwords;
make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);
sdmadesc[0] |= SDMA_DESC_FIRST;
if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
/* write to the descq */
tail = ppd->sdma_descq_tail;
descqp = &ppd->sdma_descq[tail].qw[0];
*descqp++ = cpu_to_le64(sdmadesc[0]);
*descqp++ = cpu_to_le64(sdmadesc[1]);
/* increment the tail */
if (++tail == ppd->sdma_descq_cnt) {
tail = 0;
descqp = &ppd->sdma_descq[0].qw[0];
++ppd->sdma_generation;
}
tx->txreq.start_idx = tail;
sge = &ss->sge;
while (dwords) {
u32 dw;
u32 len;
len = dwords << 2;
if (len > sge->length)
len = sge->length;
if (len > sge->sge_length)
len = sge->sge_length;
BUG_ON(len == 0);
dw = (len + 3) >> 2;
addr = dma_map_single(&ppd->dd->pcidev->dev, sge->vaddr,
dw << 2, DMA_TO_DEVICE);
if (dma_mapping_error(&ppd->dd->pcidev->dev, addr))
goto unmap;
sdmadesc[0] = 0;
make_sdma_desc(ppd, sdmadesc, (u64) addr, dw, dwoffset);
/* SDmaUseLargeBuf has to be set in every descriptor */
if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)
sdmadesc[0] |= SDMA_DESC_USE_LARGE_BUF;
/* write to the descq */
*descqp++ = cpu_to_le64(sdmadesc[0]);
*descqp++ = cpu_to_le64(sdmadesc[1]);
/* increment the tail */
if (++tail == ppd->sdma_descq_cnt) {
tail = 0;
descqp = &ppd->sdma_descq[0].qw[0];
++ppd->sdma_generation;
}
sge->vaddr += len;
sge->length -= len;
sge->sge_length -= len;
if (sge->sge_length == 0) {
if (--ss->num_sge)
*sge = *ss->sg_list++;
} else if (sge->length == 0 && sge->mr->lkey) {
if (++sge->n >= QIB_SEGSZ) {
if (++sge->m >= sge->mr->mapsz)
break;
sge->n = 0;
}
sge->vaddr =
sge->mr->map[sge->m]->segs[sge->n].vaddr;
sge->length =
sge->mr->map[sge->m]->segs[sge->n].length;
}
dwoffset += dw;
dwords -= dw;
}
if (!tail)
descqp = &ppd->sdma_descq[ppd->sdma_descq_cnt].qw[0];
descqp -= 2;
descqp[0] |= cpu_to_le64(SDMA_DESC_LAST);
if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)
descqp[0] |= cpu_to_le64(SDMA_DESC_DMA_HEAD);
if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)
descqp[0] |= cpu_to_le64(SDMA_DESC_INTR);
atomic_inc(&tx->qp->s_dma_busy);
tx->txreq.next_descq_idx = tail;
ppd->dd->f_sdma_update_tail(ppd, tail);
ppd->sdma_descq_added += tx->txreq.sg_count;
list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
goto unlock;
unmap:
for (;;) {
if (!tail)
tail = ppd->sdma_descq_cnt - 1;
else
tail--;
if (tail == ppd->sdma_descq_tail)
break;
unmap_desc(ppd, tail);
}
qp = tx->qp;
qib_put_txreq(tx);
spin_lock(&qp->s_lock);
if (qp->ibqp.qp_type == IB_QPT_RC) {
/* XXX what about error sending RDMA read responses? */
if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)
qib_error_qp(qp, IB_WC_GENERAL_ERR);
} else if (qp->s_wqe)
qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
spin_unlock(&qp->s_lock);
/* return zero to process the next send work request */
goto unlock;
busy:
qp = tx->qp;
spin_lock(&qp->s_lock);
if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
struct qib_ibdev *dev;
/*
* If we couldn't queue the DMA request, save the info
* and try again later rather than destroying the
* buffer and undoing the side effects of the copy.
*/
tx->ss = ss;
tx->dwords = dwords;
qp->s_tx = tx;
dev = &ppd->dd->verbs_dev;
spin_lock(&dev->pending_lock);
if (list_empty(&qp->iowait)) {
struct qib_ibport *ibp;
ibp = &ppd->ibport_data;
ibp->n_dmawait++;
qp->s_flags |= QIB_S_WAIT_DMA_DESC;
list_add_tail(&qp->iowait, &dev->dmawait);
}
spin_unlock(&dev->pending_lock);
qp->s_flags &= ~QIB_S_BUSY;
spin_unlock(&qp->s_lock);
ret = -EBUSY;
} else {
spin_unlock(&qp->s_lock);
qib_put_txreq(tx);
}
unlock:
spin_unlock_irqrestore(&ppd->sdma_lock, flags);
return ret;
}
void qib_sdma_process_event(struct qib_pportdata *ppd,
enum qib_sdma_events event)
{
unsigned long flags;
spin_lock_irqsave(&ppd->sdma_lock, flags);
__qib_sdma_process_event(ppd, event);
if (ppd->sdma_state.current_state == qib_sdma_state_s99_running)
qib_verbs_sdma_desc_avail(ppd, qib_sdma_descq_freecnt(ppd));
spin_unlock_irqrestore(&ppd->sdma_lock, flags);
}
void __qib_sdma_process_event(struct qib_pportdata *ppd,
enum qib_sdma_events event)
{
struct qib_sdma_state *ss = &ppd->sdma_state;
switch (ss->current_state) {
case qib_sdma_state_s00_hw_down:
switch (event) {
case qib_sdma_event_e00_go_hw_down:
break;
case qib_sdma_event_e30_go_running:
/*
* If down, but running is requested (usually the result of a
* link coming up), then we need to start up. This can happen
* when hw down is requested while bringing the link up with
* traffic active on the 7220, for example. */
ss->go_s99_running = 1;
/* fall through and start dma engine */
case qib_sdma_event_e10_go_hw_start:
/* This reference means the state machine is started */
sdma_get(&ppd->sdma_state);
sdma_set_state(ppd,
qib_sdma_state_s10_hw_start_up_wait);
break;
case qib_sdma_event_e20_hw_started:
break;
case qib_sdma_event_e40_sw_cleaned:
sdma_sw_tear_down(ppd);
break;
case qib_sdma_event_e50_hw_cleaned:
break;
case qib_sdma_event_e60_hw_halted:
break;
case qib_sdma_event_e70_go_idle:
break;
case qib_sdma_event_e7220_err_halted:
break;
case qib_sdma_event_e7322_err_halted:
break;
case qib_sdma_event_e90_timer_tick:
break;
}
break;
case qib_sdma_state_s10_hw_start_up_wait:
switch (event) {
case qib_sdma_event_e00_go_hw_down:
sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
sdma_sw_tear_down(ppd);
break;
case qib_sdma_event_e10_go_hw_start:
break;
case qib_sdma_event_e20_hw_started:
sdma_set_state(ppd, ss->go_s99_running ?
qib_sdma_state_s99_running :
qib_sdma_state_s20_idle);
break;
case qib_sdma_event_e30_go_running:
ss->go_s99_running = 1;
break;
case qib_sdma_event_e40_sw_cleaned:
break;
case qib_sdma_event_e50_hw_cleaned:
break;
case qib_sdma_event_e60_hw_halted:
break;
case qib_sdma_event_e70_go_idle:
ss->go_s99_running = 0;
break;
case qib_sdma_event_e7220_err_halted:
break;
case qib_sdma_event_e7322_err_halted:
break;
case qib_sdma_event_e90_timer_tick:
break;
}
break;
case qib_sdma_state_s20_idle:
switch (event) {
case qib_sdma_event_e00_go_hw_down:
sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
sdma_sw_tear_down(ppd);
break;
case qib_sdma_event_e10_go_hw_start:
break;
case qib_sdma_event_e20_hw_started:
break;
case qib_sdma_event_e30_go_running:
sdma_set_state(ppd, qib_sdma_state_s99_running);
ss->go_s99_running = 1;
break;
case qib_sdma_event_e40_sw_cleaned:
break;
case qib_sdma_event_e50_hw_cleaned:
break;
case qib_sdma_event_e60_hw_halted:
break;
case qib_sdma_event_e70_go_idle:
break;
case qib_sdma_event_e7220_err_halted:
break;
case qib_sdma_event_e7322_err_halted:
break;
case qib_sdma_event_e90_timer_tick:
break;
}
break;
case qib_sdma_state_s30_sw_clean_up_wait:
switch (event) {
case qib_sdma_event_e00_go_hw_down:
sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
break;
case qib_sdma_event_e10_go_hw_start:
break;
case qib_sdma_event_e20_hw_started:
break;
case qib_sdma_event_e30_go_running:
ss->go_s99_running = 1;
break;
case qib_sdma_event_e40_sw_cleaned:
sdma_set_state(ppd,
qib_sdma_state_s10_hw_start_up_wait);
sdma_hw_start_up(ppd);
break;
case qib_sdma_event_e50_hw_cleaned:
break;
case qib_sdma_event_e60_hw_halted:
break;
case qib_sdma_event_e70_go_idle:
ss->go_s99_running = 0;
break;
case qib_sdma_event_e7220_err_halted:
break;
case qib_sdma_event_e7322_err_halted:
break;
case qib_sdma_event_e90_timer_tick:
break;
}
break;
case qib_sdma_state_s40_hw_clean_up_wait:
switch (event) {
case qib_sdma_event_e00_go_hw_down:
sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
sdma_start_sw_clean_up(ppd);
break;
case qib_sdma_event_e10_go_hw_start:
break;
case qib_sdma_event_e20_hw_started:
break;
case qib_sdma_event_e30_go_running:
ss->go_s99_running = 1;
break;
case qib_sdma_event_e40_sw_cleaned:
break;
case qib_sdma_event_e50_hw_cleaned:
sdma_set_state(ppd,
qib_sdma_state_s30_sw_clean_up_wait);
sdma_start_sw_clean_up(ppd);
break;
case qib_sdma_event_e60_hw_halted:
break;
case qib_sdma_event_e70_go_idle:
ss->go_s99_running = 0;
break;
case qib_sdma_event_e7220_err_halted:
break;
case qib_sdma_event_e7322_err_halted:
break;
case qib_sdma_event_e90_timer_tick:
break;
}
break;
case qib_sdma_state_s50_hw_halt_wait:
switch (event) {
case qib_sdma_event_e00_go_hw_down:
sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
sdma_start_sw_clean_up(ppd);
break;
case qib_sdma_event_e10_go_hw_start:
break;
case qib_sdma_event_e20_hw_started:
break;
case qib_sdma_event_e30_go_running:
ss->go_s99_running = 1;
break;
case qib_sdma_event_e40_sw_cleaned:
break;
case qib_sdma_event_e50_hw_cleaned:
break;
case qib_sdma_event_e60_hw_halted:
sdma_set_state(ppd,
qib_sdma_state_s40_hw_clean_up_wait);
ppd->dd->f_sdma_hw_clean_up(ppd);
break;
case qib_sdma_event_e70_go_idle:
ss->go_s99_running = 0;
break;
case qib_sdma_event_e7220_err_halted:
break;
case qib_sdma_event_e7322_err_halted:
break;
case qib_sdma_event_e90_timer_tick:
break;
}
break;
case qib_sdma_state_s99_running:
switch (event) {
case qib_sdma_event_e00_go_hw_down:
sdma_set_state(ppd, qib_sdma_state_s00_hw_down);
sdma_start_sw_clean_up(ppd);
break;
case qib_sdma_event_e10_go_hw_start:
break;
case qib_sdma_event_e20_hw_started:
break;
case qib_sdma_event_e30_go_running:
break;
case qib_sdma_event_e40_sw_cleaned:
break;
case qib_sdma_event_e50_hw_cleaned:
break;
case qib_sdma_event_e60_hw_halted:
sdma_set_state(ppd,
qib_sdma_state_s30_sw_clean_up_wait);
sdma_start_sw_clean_up(ppd);
break;
case qib_sdma_event_e70_go_idle:
sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
ss->go_s99_running = 0;
break;
case qib_sdma_event_e7220_err_halted:
sdma_set_state(ppd,
qib_sdma_state_s30_sw_clean_up_wait);
sdma_start_sw_clean_up(ppd);
break;
case qib_sdma_event_e7322_err_halted:
sdma_set_state(ppd, qib_sdma_state_s50_hw_halt_wait);
break;
case qib_sdma_event_e90_timer_tick:
break;
}
break;
}
ss->last_event = event;
}
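/*
 * Condensed transition summary (reconstructed from the switch above for
 * orientation; the code is authoritative):
 *
 *   s00_hw_down          --e10/e30---> s10_hw_start_up_wait
 *   s10_hw_start_up_wait --e20-------> s99_running or s20_idle
 *   s20_idle             --e30-------> s99_running
 *   s99_running          --e60/e7220-> s30_sw_clean_up_wait
 *   s99_running          --e70/e7322-> s50_hw_halt_wait
 *   s50_hw_halt_wait     --e60-------> s40_hw_clean_up_wait
 *   s40_hw_clean_up_wait --e50-------> s30_sw_clean_up_wait
 *   s30_sw_clean_up_wait --e40-------> s10_hw_start_up_wait
 *   any state            --e00-------> s00_hw_down
 */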
|
gpl-2.0
|
KOala888/GB_kernel
|
fs/ocfs2/dlmglue.c
|
761
|
117380
|
/* -*- mode: c; c-basic-offset: 8; -*-
* vim: noexpandtab sw=8 ts=8 sts=0:
*
* dlmglue.c
*
* Code which implements an OCFS2 specific interface to our DLM.
*
* Copyright (C) 2003, 2004 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/time.h>
#include <linux/quotaops.h>
#define MLOG_MASK_PREFIX ML_DLM_GLUE
#include <cluster/masklog.h>
#include "ocfs2.h"
#include "ocfs2_lockingver.h"
#include "alloc.h"
#include "dcache.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "heartbeat.h"
#include "inode.h"
#include "journal.h"
#include "stackglue.h"
#include "slot_map.h"
#include "super.h"
#include "uptodate.h"
#include "quota.h"
#include "refcounttree.h"
#include "buffer_head_io.h"
struct ocfs2_mask_waiter {
struct list_head mw_item;
int mw_status;
struct completion mw_complete;
unsigned long mw_mask;
unsigned long mw_goal;
#ifdef CONFIG_OCFS2_FS_STATS
unsigned long long mw_lock_start;
#endif
};
static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres);
static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres);
/*
* Return value from ->downconvert_worker functions.
*
* These control the precise actions of ocfs2_unblock_lock()
* and ocfs2_process_blocked_lock()
*
*/
enum ocfs2_unblock_action {
UNBLOCK_CONTINUE = 0, /* Continue downconvert */
UNBLOCK_CONTINUE_POST = 1, /* Continue downconvert, fire
* ->post_unlock callback */
UNBLOCK_STOP_POST = 2, /* Do not downconvert, fire
* ->post_unlock() callback. */
};
struct ocfs2_unblock_ctl {
int requeue;
enum ocfs2_unblock_action unblock_action;
};
/* Lockdep class keys */
struct lock_class_key lockdep_keys[OCFS2_NUM_LOCK_TYPES];
static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
int new_level);
static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres);
static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
int blocking);
static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
int blocking);
static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres);
static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres);
static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
int new_level);
static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
int blocking);
#define mlog_meta_lvb(__level, __lockres) ocfs2_dump_meta_lvb_info(__level, __PRETTY_FUNCTION__, __LINE__, __lockres)
/* This aids in debugging situations where a bad LVB might be involved. */
static void ocfs2_dump_meta_lvb_info(u64 level,
const char *function,
unsigned int line,
struct ocfs2_lock_res *lockres)
{
struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
mlog(level, "LVB information for %s (called from %s:%u):\n",
lockres->l_name, function, line);
mlog(level, "version: %u, clusters: %u, generation: 0x%x\n",
lvb->lvb_version, be32_to_cpu(lvb->lvb_iclusters),
be32_to_cpu(lvb->lvb_igeneration));
mlog(level, "size: %llu, uid %u, gid %u, mode 0x%x\n",
(unsigned long long)be64_to_cpu(lvb->lvb_isize),
be32_to_cpu(lvb->lvb_iuid), be32_to_cpu(lvb->lvb_igid),
be16_to_cpu(lvb->lvb_imode));
mlog(level, "nlink %u, atime_packed 0x%llx, ctime_packed 0x%llx, "
"mtime_packed 0x%llx iattr 0x%x\n", be16_to_cpu(lvb->lvb_inlink),
(long long)be64_to_cpu(lvb->lvb_iatime_packed),
(long long)be64_to_cpu(lvb->lvb_ictime_packed),
(long long)be64_to_cpu(lvb->lvb_imtime_packed),
be32_to_cpu(lvb->lvb_iattr));
}
/*
* OCFS2 Lock Resource Operations
*
* These fine tune the behavior of the generic dlmglue locking infrastructure.
*
* The most basic of lock types can point ->l_priv to their respective
* struct ocfs2_super and allow the default actions to manage things.
*
* Right now, each lock type also needs to implement an init function,
* and trivial lock/unlock wrappers. ocfs2_simple_drop_lockres()
* should be called when the lock is no longer needed (i.e., object
* destruction time).
*/
struct ocfs2_lock_res_ops {
/*
* Translate an ocfs2_lock_res * into an ocfs2_super *. Define
* this callback if ->l_priv is not an ocfs2_super pointer
*/
struct ocfs2_super * (*get_osb)(struct ocfs2_lock_res *);
/*
* Optionally called in the downconvert thread after a
* successful downconvert. The lockres will not be referenced
* after this callback is called, so it is safe to free
* memory, etc.
*
* The exact semantics of when this is called are controlled
* by ->downconvert_worker()
*/
void (*post_unlock)(struct ocfs2_super *, struct ocfs2_lock_res *);
/*
* Allow a lock type to add checks to determine whether it is
* safe to downconvert a lock. Return 0 to re-queue the
* downconvert at a later time, nonzero to continue.
*
* For most locks, the default checks that there are no
* incompatible holders are sufficient.
*
* Called with the lockres spinlock held.
*/
int (*check_downconvert)(struct ocfs2_lock_res *, int);
/*
* Allows a lock type to populate the lock value block. This
* is called on downconvert, and when we drop a lock.
*
* Locks that want to use this should set LOCK_TYPE_USES_LVB
* in the flags field.
*
* Called with the lockres spinlock held.
*/
void (*set_lvb)(struct ocfs2_lock_res *);
/*
* Called from the downconvert thread when it is determined
* that a lock will be downconverted. This is called without
* any locks held so the function can do work that might
* schedule (syncing out data, etc).
*
* This should return any one of the ocfs2_unblock_action
* values, depending on what it wants the thread to do.
*/
int (*downconvert_worker)(struct ocfs2_lock_res *, int);
/*
* LOCK_TYPE_* flags which describe the specific requirements
* of a lock type. Descriptions of each individual flag follow.
*/
int flags;
};
/*
* Some locks want to "refresh" potentially stale data when a
* meaningful (PRMODE or EXMODE) lock level is first obtained. If this
* flag is set, the OCFS2_LOCK_NEEDS_REFRESH flag will be set on the
* individual lockres l_flags member from the ast function. It is
* expected that the locking wrapper will clear the
* OCFS2_LOCK_NEEDS_REFRESH flag when done.
*/
#define LOCK_TYPE_REQUIRES_REFRESH 0x1
/*
* Indicate that a lock type makes use of the lock value block. The
* ->set_lvb lock type callback must be defined.
*/
#define LOCK_TYPE_USES_LVB 0x2
static struct ocfs2_lock_res_ops ocfs2_inode_rw_lops = {
.get_osb = ocfs2_get_inode_osb,
.flags = 0,
};
static struct ocfs2_lock_res_ops ocfs2_inode_inode_lops = {
.get_osb = ocfs2_get_inode_osb,
.check_downconvert = ocfs2_check_meta_downconvert,
.set_lvb = ocfs2_set_meta_lvb,
.downconvert_worker = ocfs2_data_convert_worker,
.flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
};
static struct ocfs2_lock_res_ops ocfs2_super_lops = {
.flags = LOCK_TYPE_REQUIRES_REFRESH,
};
static struct ocfs2_lock_res_ops ocfs2_rename_lops = {
.flags = 0,
};
static struct ocfs2_lock_res_ops ocfs2_nfs_sync_lops = {
.flags = 0,
};
static struct ocfs2_lock_res_ops ocfs2_orphan_scan_lops = {
.flags = LOCK_TYPE_REQUIRES_REFRESH|LOCK_TYPE_USES_LVB,
};
static struct ocfs2_lock_res_ops ocfs2_dentry_lops = {
.get_osb = ocfs2_get_dentry_osb,
.post_unlock = ocfs2_dentry_post_unlock,
.downconvert_worker = ocfs2_dentry_convert_worker,
.flags = 0,
};
static struct ocfs2_lock_res_ops ocfs2_inode_open_lops = {
.get_osb = ocfs2_get_inode_osb,
.flags = 0,
};
static struct ocfs2_lock_res_ops ocfs2_flock_lops = {
.get_osb = ocfs2_get_file_osb,
.flags = 0,
};
static struct ocfs2_lock_res_ops ocfs2_qinfo_lops = {
.set_lvb = ocfs2_set_qinfo_lvb,
.get_osb = ocfs2_get_qinfo_osb,
.flags = LOCK_TYPE_REQUIRES_REFRESH | LOCK_TYPE_USES_LVB,
};
static struct ocfs2_lock_res_ops ocfs2_refcount_block_lops = {
.check_downconvert = ocfs2_check_refcount_downconvert,
.downconvert_worker = ocfs2_refcount_convert_worker,
.flags = 0,
};
static inline int ocfs2_is_inode_lock(struct ocfs2_lock_res *lockres)
{
return lockres->l_type == OCFS2_LOCK_TYPE_META ||
lockres->l_type == OCFS2_LOCK_TYPE_RW ||
lockres->l_type == OCFS2_LOCK_TYPE_OPEN;
}
static inline struct ocfs2_lock_res *ocfs2_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
{
return container_of(lksb, struct ocfs2_lock_res, l_lksb);
}
static inline struct inode *ocfs2_lock_res_inode(struct ocfs2_lock_res *lockres)
{
BUG_ON(!ocfs2_is_inode_lock(lockres));
return (struct inode *) lockres->l_priv;
}
static inline struct ocfs2_dentry_lock *ocfs2_lock_res_dl(struct ocfs2_lock_res *lockres)
{
BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_DENTRY);
return (struct ocfs2_dentry_lock *)lockres->l_priv;
}
static inline struct ocfs2_mem_dqinfo *ocfs2_lock_res_qinfo(struct ocfs2_lock_res *lockres)
{
BUG_ON(lockres->l_type != OCFS2_LOCK_TYPE_QINFO);
return (struct ocfs2_mem_dqinfo *)lockres->l_priv;
}
static inline struct ocfs2_refcount_tree *
ocfs2_lock_res_refcount_tree(struct ocfs2_lock_res *res)
{
return container_of(res, struct ocfs2_refcount_tree, rf_lockres);
}
static inline struct ocfs2_super *ocfs2_get_lockres_osb(struct ocfs2_lock_res *lockres)
{
if (lockres->l_ops->get_osb)
return lockres->l_ops->get_osb(lockres);
return (struct ocfs2_super *)lockres->l_priv;
}
static int ocfs2_lock_create(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres,
int level,
u32 dlm_flags);
static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
int wanted);
static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres,
int level, unsigned long caller_ip);
static inline void ocfs2_cluster_unlock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres,
int level)
{
__ocfs2_cluster_unlock(osb, lockres, level, _RET_IP_);
}
static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres);
static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres);
static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres);
static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres, int level);
static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres);
static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
int convert);
#define ocfs2_log_dlm_error(_func, _err, _lockres) do { \
if ((_lockres)->l_type != OCFS2_LOCK_TYPE_DENTRY) \
mlog(ML_ERROR, "DLM error %d while calling %s on resource %s\n", \
_err, _func, _lockres->l_name); \
else \
mlog(ML_ERROR, "DLM error %d while calling %s on resource %.*s%08x\n", \
_err, _func, OCFS2_DENTRY_LOCK_INO_START - 1, (_lockres)->l_name, \
(unsigned int)ocfs2_get_dentry_lock_ino(_lockres)); \
} while (0)
static int ocfs2_downconvert_thread(void *arg);
static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres);
static int ocfs2_inode_lock_update(struct inode *inode,
struct buffer_head **bh);
static void ocfs2_drop_osb_locks(struct ocfs2_super *osb);
static inline int ocfs2_highest_compat_lock_level(int level);
static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
int new_level);
static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres,
int new_level,
int lvb,
unsigned int generation);
static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres);
static int ocfs2_cancel_convert(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres);
static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
u64 blkno,
u32 generation,
char *name)
{
int len;
mlog_entry_void();
BUG_ON(type >= OCFS2_NUM_LOCK_TYPES);
len = snprintf(name, OCFS2_LOCK_ID_MAX_LEN, "%c%s%016llx%08x",
ocfs2_lock_type_char(type), OCFS2_LOCK_ID_PAD,
(long long)blkno, generation);
BUG_ON(len != (OCFS2_LOCK_ID_MAX_LEN - 1));
mlog(0, "built lock resource with name: %s\n", name);
mlog_exit_void();
}
static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);
static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
struct ocfs2_dlm_debug *dlm_debug)
{
mlog(0, "Add tracking for lockres %s\n", res->l_name);
spin_lock(&ocfs2_dlm_tracking_lock);
list_add(&res->l_debug_list, &dlm_debug->d_lockres_tracking);
spin_unlock(&ocfs2_dlm_tracking_lock);
}
static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
{
spin_lock(&ocfs2_dlm_tracking_lock);
if (!list_empty(&res->l_debug_list))
list_del_init(&res->l_debug_list);
spin_unlock(&ocfs2_dlm_tracking_lock);
}
#ifdef CONFIG_OCFS2_FS_STATS
static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
{
res->l_lock_num_prmode = 0;
res->l_lock_num_prmode_failed = 0;
res->l_lock_total_prmode = 0;
res->l_lock_max_prmode = 0;
res->l_lock_num_exmode = 0;
res->l_lock_num_exmode_failed = 0;
res->l_lock_total_exmode = 0;
res->l_lock_max_exmode = 0;
res->l_lock_refresh = 0;
}
static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
struct ocfs2_mask_waiter *mw, int ret)
{
unsigned long long *num, *sum;
unsigned int *max, *failed;
struct timespec ts = current_kernel_time();
unsigned long long time = timespec_to_ns(&ts) - mw->mw_lock_start;
if (level == LKM_PRMODE) {
num = &res->l_lock_num_prmode;
sum = &res->l_lock_total_prmode;
max = &res->l_lock_max_prmode;
failed = &res->l_lock_num_prmode_failed;
} else if (level == LKM_EXMODE) {
num = &res->l_lock_num_exmode;
sum = &res->l_lock_total_exmode;
max = &res->l_lock_max_exmode;
failed = &res->l_lock_num_exmode_failed;
} else
return;
(*num)++;
(*sum) += time;
if (time > *max)
*max = time;
if (ret)
(*failed)++;
}
static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
{
lockres->l_lock_refresh++;
}
static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
{
struct timespec ts = current_kernel_time();
mw->mw_lock_start = timespec_to_ns(&ts);
}
#else
static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
{
}
static inline void ocfs2_update_lock_stats(struct ocfs2_lock_res *res,
int level, struct ocfs2_mask_waiter *mw, int ret)
{
}
static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
{
}
static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
{
}
#endif
static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
struct ocfs2_lock_res *res,
enum ocfs2_lock_type type,
struct ocfs2_lock_res_ops *ops,
void *priv)
{
res->l_type = type;
res->l_ops = ops;
res->l_priv = priv;
res->l_level = DLM_LOCK_IV;
res->l_requested = DLM_LOCK_IV;
res->l_blocking = DLM_LOCK_IV;
res->l_action = OCFS2_AST_INVALID;
res->l_unlock_action = OCFS2_UNLOCK_INVALID;
res->l_flags = OCFS2_LOCK_INITIALIZED;
ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);
ocfs2_init_lock_stats(res);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
if (type != OCFS2_LOCK_TYPE_OPEN)
lockdep_init_map(&res->l_lockdep_map, ocfs2_lock_type_strings[type],
&lockdep_keys[type], 0);
else
res->l_lockdep_map.key = NULL;
#endif
}
void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
{
/* This also clears out the lock status block */
memset(res, 0, sizeof(struct ocfs2_lock_res));
spin_lock_init(&res->l_lock);
init_waitqueue_head(&res->l_event);
INIT_LIST_HEAD(&res->l_blocked_list);
INIT_LIST_HEAD(&res->l_mask_waiters);
}
void ocfs2_inode_lock_res_init(struct ocfs2_lock_res *res,
enum ocfs2_lock_type type,
unsigned int generation,
struct inode *inode)
{
struct ocfs2_lock_res_ops *ops;
switch(type) {
case OCFS2_LOCK_TYPE_RW:
ops = &ocfs2_inode_rw_lops;
break;
case OCFS2_LOCK_TYPE_META:
ops = &ocfs2_inode_inode_lops;
break;
case OCFS2_LOCK_TYPE_OPEN:
ops = &ocfs2_inode_open_lops;
break;
default:
mlog_bug_on_msg(1, "type: %d\n", type);
ops = NULL; /* thanks, gcc */
break;
}
ocfs2_build_lock_name(type, OCFS2_I(inode)->ip_blkno,
generation, res->l_name);
ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), res, type, ops, inode);
}
static struct ocfs2_super *ocfs2_get_inode_osb(struct ocfs2_lock_res *lockres)
{
struct inode *inode = ocfs2_lock_res_inode(lockres);
return OCFS2_SB(inode->i_sb);
}
static struct ocfs2_super *ocfs2_get_qinfo_osb(struct ocfs2_lock_res *lockres)
{
struct ocfs2_mem_dqinfo *info = lockres->l_priv;
return OCFS2_SB(info->dqi_gi.dqi_sb);
}
static struct ocfs2_super *ocfs2_get_file_osb(struct ocfs2_lock_res *lockres)
{
struct ocfs2_file_private *fp = lockres->l_priv;
return OCFS2_SB(fp->fp_file->f_mapping->host->i_sb);
}
static __u64 ocfs2_get_dentry_lock_ino(struct ocfs2_lock_res *lockres)
{
__be64 inode_blkno_be;
memcpy(&inode_blkno_be, &lockres->l_name[OCFS2_DENTRY_LOCK_INO_START],
sizeof(__be64));
return be64_to_cpu(inode_blkno_be);
}
static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres)
{
struct ocfs2_dentry_lock *dl = lockres->l_priv;
return OCFS2_SB(dl->dl_inode->i_sb);
}
void ocfs2_dentry_lock_res_init(struct ocfs2_dentry_lock *dl,
u64 parent, struct inode *inode)
{
int len;
u64 inode_blkno = OCFS2_I(inode)->ip_blkno;
__be64 inode_blkno_be = cpu_to_be64(inode_blkno);
struct ocfs2_lock_res *lockres = &dl->dl_lockres;
ocfs2_lock_res_init_once(lockres);
/*
* Unfortunately, the standard lock naming scheme won't work
* here because we have two 16 byte values to use. Instead,
* we'll stuff the inode number as a binary value. We still
* want error prints to show something without garbling the
* display, so drop a null byte in there before the inode
* number. A future version of OCFS2 will likely use all
* binary lock names. The stringified names have been a
* tremendous aid in debugging, but now that the debugfs
* interface exists, we can mangle things there if need be.
*
* NOTE: We also drop the standard "pad" value (the total lock
* name size stays the same though - the last part is all
* zeros due to the memset in ocfs2_lock_res_init_once()
*/
len = snprintf(lockres->l_name, OCFS2_DENTRY_LOCK_INO_START,
"%c%016llx",
ocfs2_lock_type_char(OCFS2_LOCK_TYPE_DENTRY),
(long long)parent);
BUG_ON(len != (OCFS2_DENTRY_LOCK_INO_START - 1));
memcpy(&lockres->l_name[OCFS2_DENTRY_LOCK_INO_START], &inode_blkno_be,
sizeof(__be64));
ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
OCFS2_LOCK_TYPE_DENTRY, &ocfs2_dentry_lops,
dl);
}
static void ocfs2_super_lock_res_init(struct ocfs2_lock_res *res,
struct ocfs2_super *osb)
{
/* Superblock lockres doesn't come from a slab so we call init
* once on it manually. */
ocfs2_lock_res_init_once(res);
ocfs2_build_lock_name(OCFS2_LOCK_TYPE_SUPER, OCFS2_SUPER_BLOCK_BLKNO,
0, res->l_name);
ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_SUPER,
&ocfs2_super_lops, osb);
}
static void ocfs2_rename_lock_res_init(struct ocfs2_lock_res *res,
struct ocfs2_super *osb)
{
/* Rename lockres doesn't come from a slab so we call init
* once on it manually. */
ocfs2_lock_res_init_once(res);
ocfs2_build_lock_name(OCFS2_LOCK_TYPE_RENAME, 0, 0, res->l_name);
ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_RENAME,
&ocfs2_rename_lops, osb);
}
static void ocfs2_nfs_sync_lock_res_init(struct ocfs2_lock_res *res,
struct ocfs2_super *osb)
{
/* nfs_sync lockres doesn't come from a slab so we call init
* once on it manually. */
ocfs2_lock_res_init_once(res);
ocfs2_build_lock_name(OCFS2_LOCK_TYPE_NFS_SYNC, 0, 0, res->l_name);
ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_NFS_SYNC,
&ocfs2_nfs_sync_lops, osb);
}
static void ocfs2_orphan_scan_lock_res_init(struct ocfs2_lock_res *res,
struct ocfs2_super *osb)
{
ocfs2_lock_res_init_once(res);
ocfs2_build_lock_name(OCFS2_LOCK_TYPE_ORPHAN_SCAN, 0, 0, res->l_name);
ocfs2_lock_res_init_common(osb, res, OCFS2_LOCK_TYPE_ORPHAN_SCAN,
&ocfs2_orphan_scan_lops, osb);
}
void ocfs2_file_lock_res_init(struct ocfs2_lock_res *lockres,
struct ocfs2_file_private *fp)
{
struct inode *inode = fp->fp_file->f_mapping->host;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
ocfs2_lock_res_init_once(lockres);
ocfs2_build_lock_name(OCFS2_LOCK_TYPE_FLOCK, oi->ip_blkno,
inode->i_generation, lockres->l_name);
ocfs2_lock_res_init_common(OCFS2_SB(inode->i_sb), lockres,
OCFS2_LOCK_TYPE_FLOCK, &ocfs2_flock_lops,
fp);
lockres->l_flags |= OCFS2_LOCK_NOCACHE;
}
void ocfs2_qinfo_lock_res_init(struct ocfs2_lock_res *lockres,
struct ocfs2_mem_dqinfo *info)
{
ocfs2_lock_res_init_once(lockres);
ocfs2_build_lock_name(OCFS2_LOCK_TYPE_QINFO, info->dqi_gi.dqi_type,
0, lockres->l_name);
ocfs2_lock_res_init_common(OCFS2_SB(info->dqi_gi.dqi_sb), lockres,
OCFS2_LOCK_TYPE_QINFO, &ocfs2_qinfo_lops,
info);
}
void ocfs2_refcount_lock_res_init(struct ocfs2_lock_res *lockres,
struct ocfs2_super *osb, u64 ref_blkno,
unsigned int generation)
{
ocfs2_lock_res_init_once(lockres);
ocfs2_build_lock_name(OCFS2_LOCK_TYPE_REFCOUNT, ref_blkno,
generation, lockres->l_name);
ocfs2_lock_res_init_common(osb, lockres, OCFS2_LOCK_TYPE_REFCOUNT,
&ocfs2_refcount_block_lops, osb);
}
void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
{
mlog_entry_void();
if (!(res->l_flags & OCFS2_LOCK_INITIALIZED))
return;
ocfs2_remove_lockres_tracking(res);
mlog_bug_on_msg(!list_empty(&res->l_blocked_list),
"Lockres %s is on the blocked list\n",
res->l_name);
mlog_bug_on_msg(!list_empty(&res->l_mask_waiters),
"Lockres %s has mask waiters pending\n",
res->l_name);
mlog_bug_on_msg(spin_is_locked(&res->l_lock),
"Lockres %s is locked\n",
res->l_name);
mlog_bug_on_msg(res->l_ro_holders,
"Lockres %s has %u ro holders\n",
res->l_name, res->l_ro_holders);
mlog_bug_on_msg(res->l_ex_holders,
"Lockres %s has %u ex holders\n",
res->l_name, res->l_ex_holders);
/* Need to clear out the lock status block for the dlm */
memset(&res->l_lksb, 0, sizeof(res->l_lksb));
res->l_flags = 0UL;
mlog_exit_void();
}
static inline void ocfs2_inc_holders(struct ocfs2_lock_res *lockres,
int level)
{
mlog_entry_void();
BUG_ON(!lockres);
switch(level) {
case DLM_LOCK_EX:
lockres->l_ex_holders++;
break;
case DLM_LOCK_PR:
lockres->l_ro_holders++;
break;
default:
BUG();
}
mlog_exit_void();
}
static inline void ocfs2_dec_holders(struct ocfs2_lock_res *lockres,
int level)
{
mlog_entry_void();
BUG_ON(!lockres);
switch(level) {
case DLM_LOCK_EX:
BUG_ON(!lockres->l_ex_holders);
lockres->l_ex_holders--;
break;
case DLM_LOCK_PR:
BUG_ON(!lockres->l_ro_holders);
lockres->l_ro_holders--;
break;
default:
BUG();
}
mlog_exit_void();
}
/* WARNING: This function lives in a world where the only three lock
* levels are EX, PR, and NL. It *will* have to be adjusted when more
* lock types are added. */
static inline int ocfs2_highest_compat_lock_level(int level)
{
int new_level = DLM_LOCK_EX;
if (level == DLM_LOCK_EX)
new_level = DLM_LOCK_NL;
else if (level == DLM_LOCK_PR)
new_level = DLM_LOCK_PR;
return new_level;
}
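/*
 * Example (illustrative): if a remote node blocks on our EX lock we may
 * keep at most NL, since nothing is compatible with EX; if it blocks on
 * PR we may keep PR, since PR is compatible with PR; a blocker at NL
 * constrains nothing, so EX may be kept.
 */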
static void lockres_set_flags(struct ocfs2_lock_res *lockres,
unsigned long newflags)
{
struct ocfs2_mask_waiter *mw, *tmp;
assert_spin_locked(&lockres->l_lock);
lockres->l_flags = newflags;
list_for_each_entry_safe(mw, tmp, &lockres->l_mask_waiters, mw_item) {
if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
continue;
list_del_init(&mw->mw_item);
mw->mw_status = 0;
complete(&mw->mw_complete);
}
}
static void lockres_or_flags(struct ocfs2_lock_res *lockres, unsigned long or)
{
lockres_set_flags(lockres, lockres->l_flags | or);
}
static void lockres_clear_flags(struct ocfs2_lock_res *lockres,
unsigned long clear)
{
lockres_set_flags(lockres, lockres->l_flags & ~clear);
}
static inline void ocfs2_generic_handle_downconvert_action(struct ocfs2_lock_res *lockres)
{
mlog_entry_void();
BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
lockres->l_level = lockres->l_requested;
if (lockres->l_level <=
ocfs2_highest_compat_lock_level(lockres->l_blocking)) {
lockres->l_blocking = DLM_LOCK_NL;
lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
}
lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
mlog_exit_void();
}
static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lockres)
{
mlog_entry_void();
BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
BUG_ON(!(lockres->l_flags & OCFS2_LOCK_ATTACHED));
/* Converting from RO to EX doesn't really need anything as our
* information is already up to date. Converting from NL to
* *anything*, however, should mark ourselves as needing an
* update */
if (lockres->l_level == DLM_LOCK_NL &&
lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
lockres->l_level = lockres->l_requested;
/*
* We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing
* the OCFS2_LOCK_BUSY flag to prevent the dc thread from
* downconverting the lock before the upconvert has fully completed.
*/
lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
mlog_exit_void();
}
static inline void ocfs2_generic_handle_attach_action(struct ocfs2_lock_res *lockres)
{
mlog_entry_void();
BUG_ON((!(lockres->l_flags & OCFS2_LOCK_BUSY)));
BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
if (lockres->l_requested > DLM_LOCK_NL &&
!(lockres->l_flags & OCFS2_LOCK_LOCAL) &&
lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
lockres->l_level = lockres->l_requested;
lockres_or_flags(lockres, OCFS2_LOCK_ATTACHED);
lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
mlog_exit_void();
}
static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
int level)
{
int needs_downconvert = 0;
mlog_entry_void();
assert_spin_locked(&lockres->l_lock);
if (level > lockres->l_blocking) {
/* Only schedule a downconvert if we haven't already scheduled
* one that goes low enough to satisfy the level we're
* blocking. This also catches the case where we get
* duplicate BASTs */
if (ocfs2_highest_compat_lock_level(level) <
ocfs2_highest_compat_lock_level(lockres->l_blocking))
needs_downconvert = 1;
lockres->l_blocking = level;
}
mlog(ML_BASTS, "lockres %s, block %d, level %d, l_block %d, dwn %d\n",
lockres->l_name, level, lockres->l_level, lockres->l_blocking,
needs_downconvert);
if (needs_downconvert)
lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
mlog_exit(needs_downconvert);
return needs_downconvert;
}
/*
* OCFS2_LOCK_PENDING and l_pending_gen.
*
* Why does OCFS2_LOCK_PENDING exist? To close a race between setting
* OCFS2_LOCK_BUSY and calling ocfs2_dlm_lock(). See ocfs2_unblock_lock()
* for more details on the race.
*
* OCFS2_LOCK_PENDING closes the race quite nicely. However, it introduces
* a race of its own. In o2dlm, we can get the ast before ocfs2_dlm_lock()
* returns. The ast clears OCFS2_LOCK_BUSY, and must therefore clear
* OCFS2_LOCK_PENDING at the same time. When ocfs2_dlm_lock() returns,
* the caller is going to try to clear PENDING again. If nothing else is
* happening, __lockres_clear_pending() sees PENDING is unset and does
* nothing.
*
* But what if another path (e.g. the downconvert thread) has just started a
* new locking action? The other path has re-set PENDING. Our path
* cannot clear PENDING, because that will re-open the original race
* window.
*
* [Example]
*
* ocfs2_meta_lock()
* ocfs2_cluster_lock()
* set BUSY
* set PENDING
* drop l_lock
* ocfs2_dlm_lock()
* ocfs2_locking_ast() ocfs2_downconvert_thread()
* clear PENDING ocfs2_unblock_lock()
* take_l_lock
* !BUSY
* ocfs2_prepare_downconvert()
* set BUSY
* set PENDING
* drop l_lock
* take l_lock
* clear PENDING
* drop l_lock
* <window>
* ocfs2_dlm_lock()
*
* So as you can see, we now have a window where l_lock is not held,
* PENDING is not set, and ocfs2_dlm_lock() has not been called.
*
* The core problem is that ocfs2_cluster_lock() has cleared the PENDING
* set by ocfs2_prepare_downconvert(). That wasn't nice.
*
* To solve this we introduce l_pending_gen. A call to
* lockres_clear_pending() will only do so when it is passed a generation
* number that matches the lockres. lockres_set_pending() will return the
* current generation number. When ocfs2_cluster_lock() goes to clear
* PENDING, it passes the generation it got from set_pending(). In our
* example above, the generation numbers will *not* match. Thus,
* ocfs2_cluster_lock() will not clear the PENDING set by
* ocfs2_prepare_downconvert().
*/
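/*
* A minimal sketch of the generation handshake described above. The
* helper names are the real ones defined below; the flow itself is
* illustrative (see ocfs2_lock_create() for a full caller):
*
*     spin_lock_irqsave(&lockres->l_lock, flags);
*     lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
*     gen = lockres_set_pending(lockres);
*     spin_unlock_irqrestore(&lockres->l_lock, flags);
*
*     ocfs2_dlm_lock(...);
*
*     lockres_clear_pending(lockres, gen, osb);
*
* The final clear_pending() is a no-op if the ast already cleared
* PENDING (and bumped the generation), or if another path has since
* re-set PENDING under a newer generation.
*/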
/* Unlocked version for ocfs2_locking_ast() */
static void __lockres_clear_pending(struct ocfs2_lock_res *lockres,
unsigned int generation,
struct ocfs2_super *osb)
{
assert_spin_locked(&lockres->l_lock);
/*
* The ast and locking functions can race us here. The winner
* will clear pending, the loser will not.
*/
if (!(lockres->l_flags & OCFS2_LOCK_PENDING) ||
(lockres->l_pending_gen != generation))
return;
lockres_clear_flags(lockres, OCFS2_LOCK_PENDING);
lockres->l_pending_gen++;
/*
* The downconvert thread may have skipped us because we
* were PENDING. Wake it up.
*/
if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
ocfs2_wake_downconvert_thread(osb);
}
/* Locked version for callers of ocfs2_dlm_lock() */
static void lockres_clear_pending(struct ocfs2_lock_res *lockres,
unsigned int generation,
struct ocfs2_super *osb)
{
unsigned long flags;
spin_lock_irqsave(&lockres->l_lock, flags);
__lockres_clear_pending(lockres, generation, osb);
spin_unlock_irqrestore(&lockres->l_lock, flags);
}
static unsigned int lockres_set_pending(struct ocfs2_lock_res *lockres)
{
assert_spin_locked(&lockres->l_lock);
BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BUSY));
lockres_or_flags(lockres, OCFS2_LOCK_PENDING);
return lockres->l_pending_gen;
}
static void ocfs2_blocking_ast(struct ocfs2_dlm_lksb *lksb, int level)
{
struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
int needs_downconvert;
unsigned long flags;
BUG_ON(level <= DLM_LOCK_NL);
mlog(ML_BASTS, "BAST fired for lockres %s, blocking %d, level %d, "
"type %s\n", lockres->l_name, level, lockres->l_level,
ocfs2_lock_type_string(lockres->l_type));
/*
* We can skip the bast for locks which don't enable caching -
* they'll be dropped at the earliest possible time anyway.
*/
if (lockres->l_flags & OCFS2_LOCK_NOCACHE)
return;
spin_lock_irqsave(&lockres->l_lock, flags);
needs_downconvert = ocfs2_generic_handle_bast(lockres, level);
if (needs_downconvert)
ocfs2_schedule_blocked_lock(osb, lockres);
spin_unlock_irqrestore(&lockres->l_lock, flags);
wake_up(&lockres->l_event);
ocfs2_wake_downconvert_thread(osb);
}
static void ocfs2_locking_ast(struct ocfs2_dlm_lksb *lksb)
{
struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
unsigned long flags;
int status;
spin_lock_irqsave(&lockres->l_lock, flags);
status = ocfs2_dlm_lock_status(&lockres->l_lksb);
if (status == -EAGAIN) {
lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
goto out;
}
if (status) {
mlog(ML_ERROR, "lockres %s: lksb status value of %d!\n",
lockres->l_name, status);
spin_unlock_irqrestore(&lockres->l_lock, flags);
return;
}
mlog(ML_BASTS, "AST fired for lockres %s, action %d, unlock %d, "
"level %d => %d\n", lockres->l_name, lockres->l_action,
lockres->l_unlock_action, lockres->l_level, lockres->l_requested);
switch (lockres->l_action) {
case OCFS2_AST_ATTACH:
ocfs2_generic_handle_attach_action(lockres);
lockres_clear_flags(lockres, OCFS2_LOCK_LOCAL);
break;
case OCFS2_AST_CONVERT:
ocfs2_generic_handle_convert_action(lockres);
break;
case OCFS2_AST_DOWNCONVERT:
ocfs2_generic_handle_downconvert_action(lockres);
break;
default:
mlog(ML_ERROR, "lockres %s: AST fired with invalid action: %u, "
"flags 0x%lx, unlock: %u\n",
lockres->l_name, lockres->l_action, lockres->l_flags,
lockres->l_unlock_action);
BUG();
}
out:
/* set it to something invalid so if we get called again we
* can catch it. */
lockres->l_action = OCFS2_AST_INVALID;
/* Did we try to cancel this lock? Clear that state */
if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT)
lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
/*
* We may have beaten the locking functions here. We certainly
* know that dlm_lock() has been called :-)
* Because we can't have two lock calls in flight at once, we
* can use lockres->l_pending_gen.
*/
__lockres_clear_pending(lockres, lockres->l_pending_gen, osb);
wake_up(&lockres->l_event);
spin_unlock_irqrestore(&lockres->l_lock, flags);
}
static void ocfs2_unlock_ast(struct ocfs2_dlm_lksb *lksb, int error)
{
struct ocfs2_lock_res *lockres = ocfs2_lksb_to_lock_res(lksb);
unsigned long flags;
mlog_entry_void();
mlog(ML_BASTS, "UNLOCK AST fired for lockres %s, action = %d\n",
lockres->l_name, lockres->l_unlock_action);
spin_lock_irqsave(&lockres->l_lock, flags);
if (error) {
mlog(ML_ERROR, "Dlm passes error %d for lock %s, "
"unlock_action %d\n", error, lockres->l_name,
lockres->l_unlock_action);
spin_unlock_irqrestore(&lockres->l_lock, flags);
mlog_exit_void();
return;
}
switch (lockres->l_unlock_action) {
case OCFS2_UNLOCK_CANCEL_CONVERT:
mlog(0, "Cancel convert success for %s\n", lockres->l_name);
lockres->l_action = OCFS2_AST_INVALID;
/* The downconvert thread may have requeued this lock, so we
* need to wake it. */
if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres));
break;
case OCFS2_UNLOCK_DROP_LOCK:
lockres->l_level = DLM_LOCK_IV;
break;
default:
BUG();
}
lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
wake_up(&lockres->l_event);
spin_unlock_irqrestore(&lockres->l_lock, flags);
mlog_exit_void();
}
/*
* This is the filesystem locking protocol. It provides the lock handling
* hooks for the underlying DLM. It has a maximum version number.
* The version number allows interoperability with systems running at
* the same major number and an equal or smaller minor number.
*
* Whenever the filesystem does new things with locks (adds or removes a
* lock, orders them differently, does different things underneath a lock),
* the version must be changed. The protocol is negotiated when joining
* the dlm domain. A node may join the domain if its major version is
* identical to all other nodes and its minor version is greater than
* or equal to all other nodes. When its minor version is greater than
* the other nodes, it will run at the minor version specified by the
* other nodes.
*
* If a locking change is made that will not be compatible with older
* versions, the major number must be increased and the minor version set
* to zero. If a change merely adds a behavior that can be disabled when
* speaking to older versions, the minor version must be increased. If a
* change adds a fully backwards compatible change (eg, LVB changes that
* are just ignored by older versions), the version does not need to be
* updated.
*/
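/*
* A worked example of the rule above (illustrative): a node speaking
* 1.4 may join a domain whose nodes speak 1.2, and it will run at
* minor version 2. A node speaking 1.1 may not join that domain, and
* a node speaking 2.0 can never join a 1.x domain, since the major
* versions differ.
*/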
static struct ocfs2_locking_protocol lproto = {
.lp_max_version = {
.pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
.pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
},
.lp_lock_ast = ocfs2_locking_ast,
.lp_blocking_ast = ocfs2_blocking_ast,
.lp_unlock_ast = ocfs2_unlock_ast,
};
void ocfs2_set_locking_protocol(void)
{
ocfs2_stack_glue_set_max_proto_version(&lproto.lp_max_version);
}
static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
int convert)
{
unsigned long flags;
mlog_entry_void();
spin_lock_irqsave(&lockres->l_lock, flags);
lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
if (convert)
lockres->l_action = OCFS2_AST_INVALID;
else
lockres->l_unlock_action = OCFS2_UNLOCK_INVALID;
spin_unlock_irqrestore(&lockres->l_lock, flags);
wake_up(&lockres->l_event);
mlog_exit_void();
}
/* Note: If we detect another process working on the lock (i.e.,
* OCFS2_LOCK_BUSY), we'll bail out returning 0. It's up to the caller
* to do the right thing in that case.
*/
static int ocfs2_lock_create(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres,
int level,
u32 dlm_flags)
{
int ret = 0;
unsigned long flags;
unsigned int gen;
mlog_entry_void();
mlog(0, "lock %s, level = %d, flags = %u\n", lockres->l_name, level,
dlm_flags);
spin_lock_irqsave(&lockres->l_lock, flags);
if ((lockres->l_flags & OCFS2_LOCK_ATTACHED) ||
(lockres->l_flags & OCFS2_LOCK_BUSY)) {
spin_unlock_irqrestore(&lockres->l_lock, flags);
goto bail;
}
lockres->l_action = OCFS2_AST_ATTACH;
lockres->l_requested = level;
lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
gen = lockres_set_pending(lockres);
spin_unlock_irqrestore(&lockres->l_lock, flags);
ret = ocfs2_dlm_lock(osb->cconn,
level,
&lockres->l_lksb,
dlm_flags,
lockres->l_name,
OCFS2_LOCK_ID_MAX_LEN - 1);
lockres_clear_pending(lockres, gen, osb);
if (ret) {
ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
ocfs2_recover_from_dlm_error(lockres, 1);
}
mlog(0, "lock %s, return from ocfs2_dlm_lock\n", lockres->l_name);
bail:
mlog_exit(ret);
return ret;
}
static inline int ocfs2_check_wait_flag(struct ocfs2_lock_res *lockres,
int flag)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&lockres->l_lock, flags);
ret = lockres->l_flags & flag;
spin_unlock_irqrestore(&lockres->l_lock, flags);
return ret;
}
static inline void ocfs2_wait_on_busy_lock(struct ocfs2_lock_res *lockres)
{
wait_event(lockres->l_event,
!ocfs2_check_wait_flag(lockres, OCFS2_LOCK_BUSY));
}
static inline void ocfs2_wait_on_refreshing_lock(struct ocfs2_lock_res *lockres)
{
wait_event(lockres->l_event,
!ocfs2_check_wait_flag(lockres, OCFS2_LOCK_REFRESHING));
}
/* predict what lock level we'll be dropping down to on behalf
* of another node, and return true if the currently wanted
* level will be compatible with it. */
static inline int ocfs2_may_continue_on_blocked_lock(struct ocfs2_lock_res *lockres,
int wanted)
{
BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
return wanted <= ocfs2_highest_compat_lock_level(lockres->l_blocking);
}
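/*
* For example (a sketch, assuming the compat mapping noted earlier):
* if another node is blocking us at DLM_LOCK_EX, the highest
* compatible level is NL, so only a wanted level of DLM_LOCK_NL may
* continue; if it is blocking at DLM_LOCK_PR, a wanted level of PR or
* NL may continue while EX must wait.
*/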
static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
{
INIT_LIST_HEAD(&mw->mw_item);
init_completion(&mw->mw_complete);
ocfs2_init_start_time(mw);
}
static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
{
wait_for_completion(&mw->mw_complete);
/* Re-arm the completion in case we want to wait on it again */
INIT_COMPLETION(mw->mw_complete);
return mw->mw_status;
}
static void lockres_add_mask_waiter(struct ocfs2_lock_res *lockres,
struct ocfs2_mask_waiter *mw,
unsigned long mask,
unsigned long goal)
{
BUG_ON(!list_empty(&mw->mw_item));
assert_spin_locked(&lockres->l_lock);
list_add_tail(&mw->mw_item, &lockres->l_mask_waiters);
mw->mw_mask = mask;
mw->mw_goal = goal;
}
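/*
* Typical usage (illustrative): to wait for a busy lock, queue a
* waiter with mask = OCFS2_LOCK_BUSY and goal = 0; the waiter is
* completed once (l_flags & mask) == goal, i.e. once BUSY clears.
* __ocfs2_cluster_lock() below uses exactly this pattern for both
* OCFS2_LOCK_BUSY and OCFS2_LOCK_BLOCKED.
*/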
/* returns 0 if the mw that was removed was already satisfied, -EBUSY
* if the mask still hadn't reached its goal */
static int lockres_remove_mask_waiter(struct ocfs2_lock_res *lockres,
struct ocfs2_mask_waiter *mw)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&lockres->l_lock, flags);
if (!list_empty(&mw->mw_item)) {
if ((lockres->l_flags & mw->mw_mask) != mw->mw_goal)
ret = -EBUSY;
list_del_init(&mw->mw_item);
init_completion(&mw->mw_complete);
}
spin_unlock_irqrestore(&lockres->l_lock, flags);
return ret;
}
static int ocfs2_wait_for_mask_interruptible(struct ocfs2_mask_waiter *mw,
struct ocfs2_lock_res *lockres)
{
int ret;
ret = wait_for_completion_interruptible(&mw->mw_complete);
if (ret)
lockres_remove_mask_waiter(lockres, mw);
else
ret = mw->mw_status;
/* Re-arm the completion in case we want to wait on it again */
INIT_COMPLETION(mw->mw_complete);
return ret;
}
static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres,
int level,
u32 lkm_flags,
int arg_flags,
int l_subclass,
unsigned long caller_ip)
{
struct ocfs2_mask_waiter mw;
int wait, catch_signals = !(osb->s_mount_opt & OCFS2_MOUNT_NOINTR);
int ret = 0; /* gcc doesn't realize wait = 1 guarantees ret is set */
unsigned long flags;
unsigned int gen;
int noqueue_attempted = 0;
mlog_entry_void();
ocfs2_init_mask_waiter(&mw);
if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
lkm_flags |= DLM_LKF_VALBLK;
again:
wait = 0;
spin_lock_irqsave(&lockres->l_lock, flags);
if (catch_signals && signal_pending(current)) {
ret = -ERESTARTSYS;
goto unlock;
}
mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
"Cluster lock called on freeing lockres %s! flags "
"0x%lx\n", lockres->l_name, lockres->l_flags);
/* We only compare against the currently granted level
* here. If the lock is blocked waiting on a downconvert,
* we'll get caught below. */
if (lockres->l_flags & OCFS2_LOCK_BUSY &&
level > lockres->l_level) {
/* is someone sitting in dlm_lock? If so, wait on
* them. */
lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
wait = 1;
goto unlock;
}
if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) {
/*
* We've upconverted. If the lock now has a level we can
* work with, we take it. If, however, the lock is not at the
* required level, we go thru the full cycle. One way this could
* happen is if a process requesting an upconvert to PR is
* closely followed by another requesting upconvert to an EX.
* If the process requesting EX lands here, we want it to
* continue attempting to upconvert and let the process
* requesting PR take the lock.
* If multiple processes request upconvert to PR, the first one
* here will take the lock. The others will have to go thru the
* OCFS2_LOCK_BLOCKED check to ensure that there is no pending
* downconvert request.
*/
if (level <= lockres->l_level)
goto update_holders;
}
if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
!ocfs2_may_continue_on_blocked_lock(lockres, level)) {
/* the lock is currently blocked on behalf of
* another node */
lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BLOCKED, 0);
wait = 1;
goto unlock;
}
if (level > lockres->l_level) {
if (noqueue_attempted > 0) {
ret = -EAGAIN;
goto unlock;
}
if (lkm_flags & DLM_LKF_NOQUEUE)
noqueue_attempted = 1;
if (lockres->l_action != OCFS2_AST_INVALID)
mlog(ML_ERROR, "lockres %s has action %u pending\n",
lockres->l_name, lockres->l_action);
if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
lockres->l_action = OCFS2_AST_ATTACH;
lkm_flags &= ~DLM_LKF_CONVERT;
} else {
lockres->l_action = OCFS2_AST_CONVERT;
lkm_flags |= DLM_LKF_CONVERT;
}
lockres->l_requested = level;
lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
gen = lockres_set_pending(lockres);
spin_unlock_irqrestore(&lockres->l_lock, flags);
BUG_ON(level == DLM_LOCK_IV);
BUG_ON(level == DLM_LOCK_NL);
mlog(ML_BASTS, "lockres %s, convert from %d to %d\n",
lockres->l_name, lockres->l_level, level);
/* call dlm_lock to upgrade lock now */
ret = ocfs2_dlm_lock(osb->cconn,
level,
&lockres->l_lksb,
lkm_flags,
lockres->l_name,
OCFS2_LOCK_ID_MAX_LEN - 1);
lockres_clear_pending(lockres, gen, osb);
if (ret) {
if (!(lkm_flags & DLM_LKF_NOQUEUE) ||
(ret != -EAGAIN)) {
ocfs2_log_dlm_error("ocfs2_dlm_lock",
ret, lockres);
}
ocfs2_recover_from_dlm_error(lockres, 1);
goto out;
}
mlog(0, "lock %s, successful return from ocfs2_dlm_lock\n",
lockres->l_name);
/* At this point we've gone inside the dlm and need to
* complete our work regardless. */
catch_signals = 0;
/* wait for busy to clear and carry on */
goto again;
}
update_holders:
/* Ok, if we get here then we're good to go. */
ocfs2_inc_holders(lockres, level);
ret = 0;
unlock:
lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
spin_unlock_irqrestore(&lockres->l_lock, flags);
out:
/*
* This helps work around a lock inversion between the page lock
* and dlm locks. One path holds the page lock while calling aops
* which block acquiring dlm locks. The voting thread holds dlm
* locks while acquiring page locks while downconverting data locks.
* This block helps an aop path notice the inversion and back
* off to unlock its page lock before trying the dlm lock again.
*/
if (wait && arg_flags & OCFS2_LOCK_NONBLOCK &&
mw.mw_mask & (OCFS2_LOCK_BUSY|OCFS2_LOCK_BLOCKED)) {
wait = 0;
if (lockres_remove_mask_waiter(lockres, &mw))
ret = -EAGAIN;
else
goto again;
}
if (wait) {
ret = ocfs2_wait_for_mask(&mw);
if (ret == 0)
goto again;
mlog_errno(ret);
}
ocfs2_update_lock_stats(lockres, level, &mw, ret);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
if (!ret && lockres->l_lockdep_map.key != NULL) {
if (level == DLM_LOCK_PR)
rwsem_acquire_read(&lockres->l_lockdep_map, l_subclass,
!!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
caller_ip);
else
rwsem_acquire(&lockres->l_lockdep_map, l_subclass,
!!(arg_flags & OCFS2_META_LOCK_NOQUEUE),
caller_ip);
}
#endif
mlog_exit(ret);
return ret;
}
static inline int ocfs2_cluster_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres,
int level,
u32 lkm_flags,
int arg_flags)
{
return __ocfs2_cluster_lock(osb, lockres, level, lkm_flags, arg_flags,
0, _RET_IP_);
}
static void __ocfs2_cluster_unlock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres,
int level,
unsigned long caller_ip)
{
unsigned long flags;
mlog_entry_void();
spin_lock_irqsave(&lockres->l_lock, flags);
ocfs2_dec_holders(lockres, level);
ocfs2_downconvert_on_unlock(osb, lockres);
spin_unlock_irqrestore(&lockres->l_lock, flags);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
if (lockres->l_lockdep_map.key != NULL)
rwsem_release(&lockres->l_lockdep_map, 1, caller_ip);
#endif
mlog_exit_void();
}
static int ocfs2_create_new_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres,
int ex,
int local)
{
int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
unsigned long flags;
u32 lkm_flags = local ? DLM_LKF_LOCAL : 0;
spin_lock_irqsave(&lockres->l_lock, flags);
BUG_ON(lockres->l_flags & OCFS2_LOCK_ATTACHED);
lockres_or_flags(lockres, OCFS2_LOCK_LOCAL);
spin_unlock_irqrestore(&lockres->l_lock, flags);
return ocfs2_lock_create(osb, lockres, level, lkm_flags);
}
/* Grants us an EX lock on the data and metadata resources, skipping
* the normal cluster directory lookup. Use this ONLY on newly created
* inodes which other nodes can't possibly see, and which haven't been
* hashed in the inode hash yet. This can give us a good performance
* increase as it'll skip the network broadcast normally associated
* with creating a new lock resource. */
int ocfs2_create_new_inode_locks(struct inode *inode)
{
int ret;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
BUG_ON(!inode);
BUG_ON(!ocfs2_inode_is_new(inode));
mlog_entry_void();
mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);
/* NOTE: We don't increment any of the holder counts, nor
* do we add anything to a journal handle. Since this is
* supposed to be a new inode which the cluster doesn't know
* about yet, there is no need to. As far as the LVB handling
* is concerned, this is basically like acquiring an EX lock
* on a resource which has an invalid one -- we'll set it
* valid when we release the EX. */
ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_rw_lockres, 1, 1);
if (ret) {
mlog_errno(ret);
goto bail;
}
/*
* We don't want to use DLM_LKF_LOCAL on a metadata lock as those
* locks don't use a generation in their lock names.
*/
ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_inode_lockres, 1, 0);
if (ret) {
mlog_errno(ret);
goto bail;
}
ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);
if (ret) {
mlog_errno(ret);
goto bail;
}
bail:
mlog_exit(ret);
return ret;
}
int ocfs2_rw_lock(struct inode *inode, int write)
{
int status, level;
struct ocfs2_lock_res *lockres;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
BUG_ON(!inode);
mlog_entry_void();
mlog(0, "inode %llu take %s RW lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
write ? "EXMODE" : "PRMODE");
if (ocfs2_mount_local(osb)) {
mlog_exit(0);
return 0;
}
lockres = &OCFS2_I(inode)->ip_rw_lockres;
level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres, level, 0,
0);
if (status < 0)
mlog_errno(status);
mlog_exit(status);
return status;
}
void ocfs2_rw_unlock(struct inode *inode, int write)
{
int level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_rw_lockres;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
mlog_entry_void();
mlog(0, "inode %llu drop %s RW lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
write ? "EXMODE" : "PRMODE");
if (!ocfs2_mount_local(osb))
ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
mlog_exit_void();
}
/*
* ocfs2_open_lock always takes a PR mode lock.
*/
int ocfs2_open_lock(struct inode *inode)
{
int status = 0;
struct ocfs2_lock_res *lockres;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
BUG_ON(!inode);
mlog_entry_void();
mlog(0, "inode %llu take PRMODE open lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno);
if (ocfs2_mount_local(osb))
goto out;
lockres = &OCFS2_I(inode)->ip_open_lockres;
status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
DLM_LOCK_PR, 0, 0);
if (status < 0)
mlog_errno(status);
out:
mlog_exit(status);
return status;
}
int ocfs2_try_open_lock(struct inode *inode, int write)
{
int status = 0, level;
struct ocfs2_lock_res *lockres;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
BUG_ON(!inode);
mlog_entry_void();
mlog(0, "inode %llu try to take %s open lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
write ? "EXMODE" : "PRMODE");
if (ocfs2_mount_local(osb))
goto out;
lockres = &OCFS2_I(inode)->ip_open_lockres;
level = write ? DLM_LOCK_EX : DLM_LOCK_PR;
/*
* The file system may already be holding a PRMODE/EXMODE open lock.
* Since we pass DLM_LKF_NOQUEUE, the request won't block waiting on
* other nodes and the -EAGAIN will indicate to the caller that
* this inode is still in use.
*/
status = ocfs2_cluster_lock(OCFS2_SB(inode->i_sb), lockres,
level, DLM_LKF_NOQUEUE, 0);
out:
mlog_exit(status);
return status;
}
/*
* ocfs2_open_unlock unlocks PR and EX mode open locks.
*/
void ocfs2_open_unlock(struct inode *inode)
{
struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_open_lockres;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
mlog_entry_void();
mlog(0, "inode %llu drop open lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno);
if (ocfs2_mount_local(osb))
goto out;
if (lockres->l_ro_holders)
ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
DLM_LOCK_PR);
if (lockres->l_ex_holders)
ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres,
DLM_LOCK_EX);
out:
mlog_exit_void();
}
static int ocfs2_flock_handle_signal(struct ocfs2_lock_res *lockres,
int level)
{
int ret;
struct ocfs2_super *osb = ocfs2_get_lockres_osb(lockres);
unsigned long flags;
struct ocfs2_mask_waiter mw;
ocfs2_init_mask_waiter(&mw);
retry_cancel:
spin_lock_irqsave(&lockres->l_lock, flags);
if (lockres->l_flags & OCFS2_LOCK_BUSY) {
ret = ocfs2_prepare_cancel_convert(osb, lockres);
if (ret) {
spin_unlock_irqrestore(&lockres->l_lock, flags);
ret = ocfs2_cancel_convert(osb, lockres);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
goto retry_cancel;
}
lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
spin_unlock_irqrestore(&lockres->l_lock, flags);
ocfs2_wait_for_mask(&mw);
goto retry_cancel;
}
ret = -ERESTARTSYS;
/*
* We may still have gotten the lock, in which case there's no
* point in restarting the syscall.
*/
if (lockres->l_level == level)
ret = 0;
mlog(0, "Cancel returning %d. flags: 0x%lx, level: %d, act: %d\n", ret,
lockres->l_flags, lockres->l_level, lockres->l_action);
spin_unlock_irqrestore(&lockres->l_lock, flags);
out:
return ret;
}
/*
* ocfs2_file_lock() and ocfs2_file_unlock() map to a single pair of
* flock() calls. The locking approach this requires is sufficiently
* different from all other cluster lock types that we implement a
* separate path to the "low-level" dlm calls. In particular:
*
* - No optimization of lock levels is done - we take exactly
* what's been requested.
*
* - No lock caching is employed. We immediately downconvert to
* no-lock at unlock time. This also means flock locks never go on
* the blocking list.
*
* - Since userspace can trivially deadlock itself with flock, we make
* sure to allow cancellation of a misbehaving application's flock()
* request.
*
* - Access to any flock lockres doesn't require concurrency, so we
* can simplify the code by requiring the caller to guarantee
* serialization of dlmglue flock calls.
*/
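/*
* A rough sketch of how these map from the VFS (illustrative; the
* actual glue lives in the ->flock() file operation, which is not
* shown here):
*
*     flock(fd, LOCK_SH)            ->  ocfs2_file_lock(file, 0, 0);
*     flock(fd, LOCK_EX | LOCK_NB)  ->  ocfs2_file_lock(file, 1, 1);
*     flock(fd, LOCK_UN)            ->  ocfs2_file_unlock(file);
*/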
int ocfs2_file_lock(struct file *file, int ex, int trylock)
{
int ret, level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
unsigned int lkm_flags = trylock ? DLM_LKF_NOQUEUE : 0;
unsigned long flags;
struct ocfs2_file_private *fp = file->private_data;
struct ocfs2_lock_res *lockres = &fp->fp_flock;
struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
struct ocfs2_mask_waiter mw;
ocfs2_init_mask_waiter(&mw);
if ((lockres->l_flags & OCFS2_LOCK_BUSY) ||
(lockres->l_level > DLM_LOCK_NL)) {
mlog(ML_ERROR,
"File lock \"%s\" has busy or locked state: flags: 0x%lx, "
"level: %u\n", lockres->l_name, lockres->l_flags,
lockres->l_level);
return -EINVAL;
}
spin_lock_irqsave(&lockres->l_lock, flags);
if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
spin_unlock_irqrestore(&lockres->l_lock, flags);
/*
* Get the lock at NLMODE to start - that way we
* can cancel the upconvert request if need be.
*/
ret = ocfs2_lock_create(osb, lockres, DLM_LOCK_NL, 0);
if (ret < 0) {
mlog_errno(ret);
goto out;
}
ret = ocfs2_wait_for_mask(&mw);
if (ret) {
mlog_errno(ret);
goto out;
}
spin_lock_irqsave(&lockres->l_lock, flags);
}
lockres->l_action = OCFS2_AST_CONVERT;
lkm_flags |= DLM_LKF_CONVERT;
lockres->l_requested = level;
lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
spin_unlock_irqrestore(&lockres->l_lock, flags);
ret = ocfs2_dlm_lock(osb->cconn, level, &lockres->l_lksb, lkm_flags,
lockres->l_name, OCFS2_LOCK_ID_MAX_LEN - 1);
if (ret) {
if (!trylock || (ret != -EAGAIN)) {
ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
ret = -EINVAL;
}
ocfs2_recover_from_dlm_error(lockres, 1);
lockres_remove_mask_waiter(lockres, &mw);
goto out;
}
ret = ocfs2_wait_for_mask_interruptible(&mw, lockres);
if (ret == -ERESTARTSYS) {
/*
* Userspace can deadlock itself with
* flock(). Current behavior locally is to allow the
* deadlock, but abort the system call if a signal is
* received. We follow this example, otherwise a
* poorly written program could sit in the kernel until
* reboot.
*
* Handling this is a bit more complicated for Ocfs2
* though. We can't exit this function with an
* outstanding lock request, so a cancel convert is
* required. We intentionally overwrite 'ret' - if the
* cancel fails and the lock was granted, it's easier
* to just bubble success back up to the user.
*/
ret = ocfs2_flock_handle_signal(lockres, level);
} else if (!ret && (level > lockres->l_level)) {
/* Trylock failed asynchronously */
BUG_ON(!trylock);
ret = -EAGAIN;
}
out:
mlog(0, "Lock: \"%s\" ex: %d, trylock: %d, returns: %d\n",
lockres->l_name, ex, trylock, ret);
return ret;
}
void ocfs2_file_unlock(struct file *file)
{
int ret;
unsigned int gen;
unsigned long flags;
struct ocfs2_file_private *fp = file->private_data;
struct ocfs2_lock_res *lockres = &fp->fp_flock;
struct ocfs2_super *osb = OCFS2_SB(file->f_mapping->host->i_sb);
struct ocfs2_mask_waiter mw;
ocfs2_init_mask_waiter(&mw);
if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED))
return;
if (lockres->l_level == DLM_LOCK_NL)
return;
mlog(0, "Unlock: \"%s\" flags: 0x%lx, level: %d, act: %d\n",
lockres->l_name, lockres->l_flags, lockres->l_level,
lockres->l_action);
spin_lock_irqsave(&lockres->l_lock, flags);
/*
* Fake a blocking ast for the downconvert code.
*/
lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
lockres->l_blocking = DLM_LOCK_EX;
gen = ocfs2_prepare_downconvert(lockres, DLM_LOCK_NL);
lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_BUSY, 0);
spin_unlock_irqrestore(&lockres->l_lock, flags);
ret = ocfs2_downconvert_lock(osb, lockres, DLM_LOCK_NL, 0, gen);
if (ret) {
mlog_errno(ret);
return;
}
ret = ocfs2_wait_for_mask(&mw);
if (ret)
mlog_errno(ret);
}
static void ocfs2_downconvert_on_unlock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres)
{
int kick = 0;
mlog_entry_void();
/* If we know that another node is waiting on our lock, kick
* the downconvert thread pre-emptively when we reach a release
* condition. */
if (lockres->l_flags & OCFS2_LOCK_BLOCKED) {
switch (lockres->l_blocking) {
case DLM_LOCK_EX:
if (!lockres->l_ex_holders && !lockres->l_ro_holders)
kick = 1;
break;
case DLM_LOCK_PR:
if (!lockres->l_ex_holders)
kick = 1;
break;
default:
BUG();
}
}
if (kick)
ocfs2_wake_downconvert_thread(osb);
mlog_exit_void();
}
#define OCFS2_SEC_BITS 34
#define OCFS2_SEC_SHIFT (64 - 34)
#define OCFS2_NSEC_MASK ((1ULL << OCFS2_SEC_SHIFT) - 1)
/* LVB only has room for 64 bits of time here so we pack it for
* now. */
static u64 ocfs2_pack_timespec(struct timespec *spec)
{
u64 res;
u64 sec = spec->tv_sec;
u32 nsec = spec->tv_nsec;
res = (sec << OCFS2_SEC_SHIFT) | (nsec & OCFS2_NSEC_MASK);
return res;
}
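/*
* A worked example of the layout (derived from the constants above):
* OCFS2_SEC_SHIFT is 30, so seconds occupy the top 34 bits and
* nanoseconds the bottom 30. nsec is always < 10^9 < 2^30, so it fits
* the mask exactly. E.g. {tv_sec = 1, tv_nsec = 5} packs to
* (1 << 30) | 5 = 0x40000005.
*/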
/* Call this with the lockres locked. I am reasonably sure we don't
* need ip_lock in this function as anyone who would be changing those
* values is supposed to be blocked in ocfs2_inode_lock right now. */
static void __ocfs2_stuff_meta_lvb(struct inode *inode)
{
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
struct ocfs2_meta_lvb *lvb;
mlog_entry_void();
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
/*
* Invalidate the LVB of a deleted inode - this way other
* nodes are forced to go to disk and discover the new inode
* status.
*/
if (oi->ip_flags & OCFS2_INODE_DELETED) {
lvb->lvb_version = 0;
goto out;
}
lvb->lvb_version = OCFS2_LVB_VERSION;
lvb->lvb_isize = cpu_to_be64(i_size_read(inode));
lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
lvb->lvb_iuid = cpu_to_be32(inode->i_uid);
lvb->lvb_igid = cpu_to_be32(inode->i_gid);
lvb->lvb_imode = cpu_to_be16(inode->i_mode);
lvb->lvb_inlink = cpu_to_be16(inode->i_nlink);
lvb->lvb_iatime_packed =
cpu_to_be64(ocfs2_pack_timespec(&inode->i_atime));
lvb->lvb_ictime_packed =
cpu_to_be64(ocfs2_pack_timespec(&inode->i_ctime));
lvb->lvb_imtime_packed =
cpu_to_be64(ocfs2_pack_timespec(&inode->i_mtime));
lvb->lvb_iattr = cpu_to_be32(oi->ip_attr);
lvb->lvb_idynfeatures = cpu_to_be16(oi->ip_dyn_features);
lvb->lvb_igeneration = cpu_to_be32(inode->i_generation);
out:
mlog_meta_lvb(0, lockres);
mlog_exit_void();
}
static void ocfs2_unpack_timespec(struct timespec *spec,
u64 packed_time)
{
spec->tv_sec = packed_time >> OCFS2_SEC_SHIFT;
spec->tv_nsec = packed_time & OCFS2_NSEC_MASK;
}
static void ocfs2_refresh_inode_from_lvb(struct inode *inode)
{
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
struct ocfs2_meta_lvb *lvb;
mlog_entry_void();
mlog_meta_lvb(0, lockres);
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
/* We're safe here without the lockres lock... */
spin_lock(&oi->ip_lock);
oi->ip_clusters = be32_to_cpu(lvb->lvb_iclusters);
i_size_write(inode, be64_to_cpu(lvb->lvb_isize));
oi->ip_attr = be32_to_cpu(lvb->lvb_iattr);
oi->ip_dyn_features = be16_to_cpu(lvb->lvb_idynfeatures);
ocfs2_set_inode_flags(inode);
/* fast-symlinks are a special case */
if (S_ISLNK(inode->i_mode) && !oi->ip_clusters)
inode->i_blocks = 0;
else
inode->i_blocks = ocfs2_inode_sector_count(inode);
inode->i_uid = be32_to_cpu(lvb->lvb_iuid);
inode->i_gid = be32_to_cpu(lvb->lvb_igid);
inode->i_mode = be16_to_cpu(lvb->lvb_imode);
inode->i_nlink = be16_to_cpu(lvb->lvb_inlink);
ocfs2_unpack_timespec(&inode->i_atime,
be64_to_cpu(lvb->lvb_iatime_packed));
ocfs2_unpack_timespec(&inode->i_mtime,
be64_to_cpu(lvb->lvb_imtime_packed));
ocfs2_unpack_timespec(&inode->i_ctime,
be64_to_cpu(lvb->lvb_ictime_packed));
spin_unlock(&oi->ip_lock);
mlog_exit_void();
}
static inline int ocfs2_meta_lvb_is_trustable(struct inode *inode,
struct ocfs2_lock_res *lockres)
{
struct ocfs2_meta_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)
&& lvb->lvb_version == OCFS2_LVB_VERSION
&& be32_to_cpu(lvb->lvb_igeneration) == inode->i_generation)
return 1;
return 0;
}
/* Determine whether a lock resource needs to be refreshed, and
* arbitrate who gets to refresh it.
*
* 0 means no refresh needed.
*
* > 0 means you need to refresh this and you MUST call
* ocfs2_complete_lock_res_refresh afterwards. */
static int ocfs2_should_refresh_lock_res(struct ocfs2_lock_res *lockres)
{
unsigned long flags;
int status = 0;
mlog_entry_void();
refresh_check:
spin_lock_irqsave(&lockres->l_lock, flags);
if (!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH)) {
spin_unlock_irqrestore(&lockres->l_lock, flags);
goto bail;
}
if (lockres->l_flags & OCFS2_LOCK_REFRESHING) {
spin_unlock_irqrestore(&lockres->l_lock, flags);
ocfs2_wait_on_refreshing_lock(lockres);
goto refresh_check;
}
/* Ok, I'll be the one to refresh this lock. */
lockres_or_flags(lockres, OCFS2_LOCK_REFRESHING);
spin_unlock_irqrestore(&lockres->l_lock, flags);
status = 1;
bail:
mlog_exit(status);
return status;
}
/* If status is nonzero, I'll mark it as not being in refresh
* anymore, but I won't clear the needs refresh flag. */
static inline void ocfs2_complete_lock_res_refresh(struct ocfs2_lock_res *lockres,
int status)
{
unsigned long flags;
mlog_entry_void();
spin_lock_irqsave(&lockres->l_lock, flags);
lockres_clear_flags(lockres, OCFS2_LOCK_REFRESHING);
if (!status)
lockres_clear_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
spin_unlock_irqrestore(&lockres->l_lock, flags);
wake_up(&lockres->l_event);
mlog_exit_void();
}
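/*
* A minimal sketch of the refresh handshake, mirroring the callers
* below (illustrative; do_the_refresh is a stand-in for the
* caller-specific work):
*
*     status = ocfs2_should_refresh_lock_res(lockres);
*     if (status < 0)
*         goto out;
*     if (status) {
*         err = do_the_refresh(...);
*         ocfs2_complete_lock_res_refresh(lockres, err);
*     }
*
* Exactly one waiter wins the REFRESHING flag; the others block in
* ocfs2_wait_on_refreshing_lock() and then re-check NEEDS_REFRESH.
*/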
/* may or may not return a bh if it went to disk. */
static int ocfs2_inode_lock_update(struct inode *inode,
struct buffer_head **bh)
{
int status = 0;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
struct ocfs2_lock_res *lockres = &oi->ip_inode_lockres;
struct ocfs2_dinode *fe;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
mlog_entry_void();
if (ocfs2_mount_local(osb))
goto bail;
spin_lock(&oi->ip_lock);
if (oi->ip_flags & OCFS2_INODE_DELETED) {
mlog(0, "Orphaned inode %llu was deleted while we "
"were waiting on a lock. ip_flags = 0x%x\n",
(unsigned long long)oi->ip_blkno, oi->ip_flags);
spin_unlock(&oi->ip_lock);
status = -ENOENT;
goto bail;
}
spin_unlock(&oi->ip_lock);
if (!ocfs2_should_refresh_lock_res(lockres))
goto bail;
/* This will discard any caching information we might have had
* for the inode metadata. */
ocfs2_metadata_cache_purge(INODE_CACHE(inode));
ocfs2_extent_map_trunc(inode, 0);
if (ocfs2_meta_lvb_is_trustable(inode, lockres)) {
mlog(0, "Trusting LVB on inode %llu\n",
(unsigned long long)oi->ip_blkno);
ocfs2_refresh_inode_from_lvb(inode);
} else {
/* Boo, we have to go to disk. */
/* read bh, cast, ocfs2_refresh_inode */
status = ocfs2_read_inode_block(inode, bh);
if (status < 0) {
mlog_errno(status);
goto bail_refresh;
}
fe = (struct ocfs2_dinode *) (*bh)->b_data;
/* This is a good chance to make sure we're not
* locking an invalid object. ocfs2_read_inode_block()
* already checked that the inode block is sane.
*
* We bug on a stale inode here because we checked
* above whether it was wiped from disk. The wiping
* node provides a guarantee that we receive that
* message and can mark the inode before dropping any
* locks associated with it. */
mlog_bug_on_msg(inode->i_generation !=
le32_to_cpu(fe->i_generation),
"Invalid dinode %llu disk generation: %u "
"inode->i_generation: %u\n",
(unsigned long long)oi->ip_blkno,
le32_to_cpu(fe->i_generation),
inode->i_generation);
mlog_bug_on_msg(le64_to_cpu(fe->i_dtime) ||
!(fe->i_flags & cpu_to_le32(OCFS2_VALID_FL)),
"Stale dinode %llu dtime: %llu flags: 0x%x\n",
(unsigned long long)oi->ip_blkno,
(unsigned long long)le64_to_cpu(fe->i_dtime),
le32_to_cpu(fe->i_flags));
ocfs2_refresh_inode(inode, fe);
ocfs2_track_lock_refresh(lockres);
}
status = 0;
bail_refresh:
ocfs2_complete_lock_res_refresh(lockres, status);
bail:
mlog_exit(status);
return status;
}
static int ocfs2_assign_bh(struct inode *inode,
struct buffer_head **ret_bh,
struct buffer_head *passed_bh)
{
int status;
if (passed_bh) {
/* Ok, the update went to disk for us, use the
* returned bh. */
*ret_bh = passed_bh;
get_bh(*ret_bh);
return 0;
}
status = ocfs2_read_inode_block(inode, ret_bh);
if (status < 0)
mlog_errno(status);
return status;
}
/*
* returns < 0 error if the callback will never be called, otherwise
* the result of the lock will be communicated via the callback.
*/
int ocfs2_inode_lock_full_nested(struct inode *inode,
struct buffer_head **ret_bh,
int ex,
int arg_flags,
int subclass)
{
int status, level, acquired;
u32 dlm_flags;
struct ocfs2_lock_res *lockres = NULL;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct buffer_head *local_bh = NULL;
BUG_ON(!inode);
mlog_entry_void();
mlog(0, "inode %llu, take %s META lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
ex ? "EXMODE" : "PRMODE");
status = 0;
acquired = 0;
/* We'll allow faking a readonly metadata lock for
* rodevices. */
if (ocfs2_is_hard_readonly(osb)) {
if (ex)
status = -EROFS;
goto bail;
}
if (ocfs2_mount_local(osb))
goto local;
if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
ocfs2_wait_for_recovery(osb);
lockres = &OCFS2_I(inode)->ip_inode_lockres;
level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
dlm_flags = 0;
if (arg_flags & OCFS2_META_LOCK_NOQUEUE)
dlm_flags |= DLM_LKF_NOQUEUE;
status = __ocfs2_cluster_lock(osb, lockres, level, dlm_flags,
arg_flags, subclass, _RET_IP_);
if (status < 0) {
if (status != -EAGAIN && status != -EIOCBRETRY)
mlog_errno(status);
goto bail;
}
/* Notify the error cleanup path to drop the cluster lock. */
acquired = 1;
/* We wait twice because a node may have died while we were in
* the lower dlm layers. The second time though, we've
* committed to owning this lock so we don't allow signals to
* abort the operation. */
if (!(arg_flags & OCFS2_META_LOCK_RECOVERY))
ocfs2_wait_for_recovery(osb);
local:
/*
* We only see this flag if we're being called from
* ocfs2_read_locked_inode(). It means we're locking an inode
* which hasn't been populated yet, so clear the refresh flag
* and let the caller handle it.
*/
if (inode->i_state & I_NEW) {
status = 0;
if (lockres)
ocfs2_complete_lock_res_refresh(lockres, 0);
goto bail;
}
/* This is fun. The caller may want a bh back, or it may
* not. ocfs2_inode_lock_update definitely wants one in, but
* may or may not read one, depending on what's in the
* LVB. The result of all of this is that we've *only* gone to
* disk if we have to, so the complexity is worthwhile. */
status = ocfs2_inode_lock_update(inode, &local_bh);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
goto bail;
}
if (ret_bh) {
status = ocfs2_assign_bh(inode, ret_bh, local_bh);
if (status < 0) {
mlog_errno(status);
goto bail;
}
}
bail:
if (status < 0) {
if (ret_bh && (*ret_bh)) {
brelse(*ret_bh);
*ret_bh = NULL;
}
if (acquired)
ocfs2_inode_unlock(inode, ex);
}
if (local_bh)
brelse(local_bh);
mlog_exit(status);
return status;
}
/*
* This is working around a lock inversion between tasks acquiring DLM
* locks while holding a page lock and the downconvert thread which
* blocks dlm lock acquiry while acquiring page locks.
*
* ** These _with_page variants are only intended to be called from aop
* methods that hold page locks and return a very specific *positive* error
* code that aop methods pass up to the VFS -- test for errors with != 0. **
*
* The DLM is called such that it returns -EAGAIN if it would have
* blocked waiting for the downconvert thread. In that case we unlock
* our page so the downconvert thread can make progress. Once we've
* done this we have to return AOP_TRUNCATED_PAGE so the aop method
* that called us can bubble that back up into the VFS who will then
* immediately retry the aop call.
*
* We do a blocking lock and immediate unlock before returning, though, so that
* the lock has a great chance of being cached on this node by the time the VFS
* calls back to retry the aop. This has the potential to livelock as
* nodes ping locks back and forth, but that's a risk we're willing to
* take in order to keep the lock-inversion workaround simple.
*/
int ocfs2_inode_lock_with_page(struct inode *inode,
struct buffer_head **ret_bh,
int ex,
struct page *page)
{
int ret;
ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
if (ret == -EAGAIN) {
unlock_page(page);
if (ocfs2_inode_lock(inode, ret_bh, ex) == 0)
ocfs2_inode_unlock(inode, ex);
ret = AOP_TRUNCATED_PAGE;
}
return ret;
}
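/*
* A sketch of the intended aop-side usage (illustrative):
*
*     ret = ocfs2_inode_lock_with_page(inode, &di_bh, 0, page);
*     if (ret != 0)
*         return ret;
*
* Note the "!= 0" test: AOP_TRUNCATED_PAGE is positive, so a plain
* "ret < 0" check would miss it and the VFS would never retry.
*/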
int ocfs2_inode_lock_atime(struct inode *inode,
struct vfsmount *vfsmnt,
int *level)
{
int ret;
mlog_entry_void();
ret = ocfs2_inode_lock(inode, NULL, 0);
if (ret < 0) {
mlog_errno(ret);
return ret;
}
/*
* If we should update atime, we will get an EX lock,
* otherwise we just get a PR lock.
*/
if (ocfs2_should_update_atime(inode, vfsmnt)) {
struct buffer_head *bh = NULL;
ocfs2_inode_unlock(inode, 0);
ret = ocfs2_inode_lock(inode, &bh, 1);
if (ret < 0) {
mlog_errno(ret);
return ret;
}
*level = 1;
if (ocfs2_should_update_atime(inode, vfsmnt))
ocfs2_update_inode_atime(inode, bh);
if (bh)
brelse(bh);
} else
*level = 0;
mlog_exit(ret);
return ret;
}
void ocfs2_inode_unlock(struct inode *inode,
int ex)
{
int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
struct ocfs2_lock_res *lockres = &OCFS2_I(inode)->ip_inode_lockres;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
mlog_entry_void();
mlog(0, "inode %llu drop %s META lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
ex ? "EXMODE" : "PRMODE");
if (!ocfs2_is_hard_readonly(OCFS2_SB(inode->i_sb)) &&
!ocfs2_mount_local(osb))
ocfs2_cluster_unlock(OCFS2_SB(inode->i_sb), lockres, level);
mlog_exit_void();
}
int ocfs2_orphan_scan_lock(struct ocfs2_super *osb, u32 *seqno)
{
struct ocfs2_lock_res *lockres;
struct ocfs2_orphan_scan_lvb *lvb;
int status = 0;
if (ocfs2_is_hard_readonly(osb))
return -EROFS;
if (ocfs2_mount_local(osb))
return 0;
lockres = &osb->osb_orphan_scan.os_lockres;
status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
if (status < 0)
return status;
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
lvb->lvb_version == OCFS2_ORPHAN_LVB_VERSION)
*seqno = be32_to_cpu(lvb->lvb_os_seqno);
else
*seqno = osb->osb_orphan_scan.os_seqno + 1;
return status;
}
void ocfs2_orphan_scan_unlock(struct ocfs2_super *osb, u32 seqno)
{
struct ocfs2_lock_res *lockres;
struct ocfs2_orphan_scan_lvb *lvb;
if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb)) {
lockres = &osb->osb_orphan_scan.os_lockres;
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
lvb->lvb_version = OCFS2_ORPHAN_LVB_VERSION;
lvb->lvb_os_seqno = cpu_to_be32(seqno);
ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
}
}
int ocfs2_super_lock(struct ocfs2_super *osb,
int ex)
{
int status = 0;
int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
mlog_entry_void();
if (ocfs2_is_hard_readonly(osb))
return -EROFS;
if (ocfs2_mount_local(osb))
goto bail;
status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
if (status < 0) {
mlog_errno(status);
goto bail;
}
/* The super block lock path is really in the best position to
* know when resources covered by the lock need to be
* refreshed, so we do it here. Of course, making sense of
* everything is up to the caller :) */
status = ocfs2_should_refresh_lock_res(lockres);
if (status < 0) {
mlog_errno(status);
goto bail;
}
if (status) {
status = ocfs2_refresh_slot_info(osb);
ocfs2_complete_lock_res_refresh(lockres, status);
if (status < 0)
mlog_errno(status);
ocfs2_track_lock_refresh(lockres);
}
bail:
mlog_exit(status);
return status;
}
void ocfs2_super_unlock(struct ocfs2_super *osb,
int ex)
{
int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
struct ocfs2_lock_res *lockres = &osb->osb_super_lockres;
if (!ocfs2_mount_local(osb))
ocfs2_cluster_unlock(osb, lockres, level);
}
int ocfs2_rename_lock(struct ocfs2_super *osb)
{
int status;
struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
if (ocfs2_is_hard_readonly(osb))
return -EROFS;
if (ocfs2_mount_local(osb))
return 0;
status = ocfs2_cluster_lock(osb, lockres, DLM_LOCK_EX, 0, 0);
if (status < 0)
mlog_errno(status);
return status;
}
void ocfs2_rename_unlock(struct ocfs2_super *osb)
{
struct ocfs2_lock_res *lockres = &osb->osb_rename_lockres;
if (!ocfs2_mount_local(osb))
ocfs2_cluster_unlock(osb, lockres, DLM_LOCK_EX);
}
int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex)
{
int status;
struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
if (ocfs2_is_hard_readonly(osb))
return -EROFS;
if (ocfs2_mount_local(osb))
return 0;
status = ocfs2_cluster_lock(osb, lockres, ex ? LKM_EXMODE : LKM_PRMODE,
0, 0);
if (status < 0)
mlog(ML_ERROR, "lock on nfs sync lock failed %d\n", status);
return status;
}
void ocfs2_nfs_sync_unlock(struct ocfs2_super *osb, int ex)
{
struct ocfs2_lock_res *lockres = &osb->osb_nfs_sync_lockres;
if (!ocfs2_mount_local(osb))
ocfs2_cluster_unlock(osb, lockres,
ex ? LKM_EXMODE : LKM_PRMODE);
}
int ocfs2_dentry_lock(struct dentry *dentry, int ex)
{
int ret;
int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
BUG_ON(!dl);
if (ocfs2_is_hard_readonly(osb))
return -EROFS;
if (ocfs2_mount_local(osb))
return 0;
ret = ocfs2_cluster_lock(osb, &dl->dl_lockres, level, 0, 0);
if (ret < 0)
mlog_errno(ret);
return ret;
}
void ocfs2_dentry_unlock(struct dentry *dentry, int ex)
{
int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);
if (!ocfs2_mount_local(osb))
ocfs2_cluster_unlock(osb, &dl->dl_lockres, level);
}
/* Reference counting of the dlm debug structure. We want this because
* open references on the debug inodes can live on after an unmount, so
* we can't rely on the ocfs2_super to always exist. */
static void ocfs2_dlm_debug_free(struct kref *kref)
{
struct ocfs2_dlm_debug *dlm_debug;
dlm_debug = container_of(kref, struct ocfs2_dlm_debug, d_refcnt);
kfree(dlm_debug);
}
void ocfs2_put_dlm_debug(struct ocfs2_dlm_debug *dlm_debug)
{
if (dlm_debug)
kref_put(&dlm_debug->d_refcnt, ocfs2_dlm_debug_free);
}
static void ocfs2_get_dlm_debug(struct ocfs2_dlm_debug *debug)
{
kref_get(&debug->d_refcnt);
}
struct ocfs2_dlm_debug *ocfs2_new_dlm_debug(void)
{
struct ocfs2_dlm_debug *dlm_debug;
dlm_debug = kmalloc(sizeof(struct ocfs2_dlm_debug), GFP_KERNEL);
if (!dlm_debug) {
mlog_errno(-ENOMEM);
goto out;
}
kref_init(&dlm_debug->d_refcnt);
INIT_LIST_HEAD(&dlm_debug->d_lockres_tracking);
dlm_debug->d_locking_state = NULL;
out:
return dlm_debug;
}
/* Access to this is arbitrated for us via seq_file->sem. */
struct ocfs2_dlm_seq_priv {
struct ocfs2_dlm_debug *p_dlm_debug;
struct ocfs2_lock_res p_iter_res;
struct ocfs2_lock_res p_tmp_res;
};
static struct ocfs2_lock_res *ocfs2_dlm_next_res(struct ocfs2_lock_res *start,
struct ocfs2_dlm_seq_priv *priv)
{
struct ocfs2_lock_res *iter, *ret = NULL;
struct ocfs2_dlm_debug *dlm_debug = priv->p_dlm_debug;
assert_spin_locked(&ocfs2_dlm_tracking_lock);
list_for_each_entry(iter, &start->l_debug_list, l_debug_list) {
/* discover the head of the list */
if (&iter->l_debug_list == &dlm_debug->d_lockres_tracking) {
mlog(0, "End of list found, %p\n", ret);
break;
}
/* We track our "dummy" iteration lockres' by a NULL
* l_ops field. */
if (iter->l_ops != NULL) {
ret = iter;
break;
}
}
return ret;
}
static void *ocfs2_dlm_seq_start(struct seq_file *m, loff_t *pos)
{
struct ocfs2_dlm_seq_priv *priv = m->private;
struct ocfs2_lock_res *iter;
spin_lock(&ocfs2_dlm_tracking_lock);
iter = ocfs2_dlm_next_res(&priv->p_iter_res, priv);
if (iter) {
/* Since lockres' have the lifetime of their container
* (which can be inodes, ocfs2_supers, etc) we want to
* copy this out to a temporary lockres while still
* under the spinlock. Obviously after this we can't
* trust any pointers on the copy returned, but that's
* ok as the information we want isn't typically held
* in them. */
priv->p_tmp_res = *iter;
iter = &priv->p_tmp_res;
}
spin_unlock(&ocfs2_dlm_tracking_lock);
return iter;
}
static void ocfs2_dlm_seq_stop(struct seq_file *m, void *v)
{
}
static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
struct ocfs2_dlm_seq_priv *priv = m->private;
struct ocfs2_lock_res *iter = v;
struct ocfs2_lock_res *dummy = &priv->p_iter_res;
spin_lock(&ocfs2_dlm_tracking_lock);
iter = ocfs2_dlm_next_res(iter, priv);
list_del_init(&dummy->l_debug_list);
if (iter) {
list_add(&dummy->l_debug_list, &iter->l_debug_list);
priv->p_tmp_res = *iter;
iter = &priv->p_tmp_res;
}
spin_unlock(&ocfs2_dlm_tracking_lock);
return iter;
}
/* So that debugfs.ocfs2 can determine which format is being used */
#define OCFS2_DLM_DEBUG_STR_VERSION 2
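/*
* For quick reference, the version 2 line format produced below is a
* single tab-separated record per lockres:
*
*     version, name, level, flags, action, unlock_action, ro_holders,
*     ex_holders, requested, blocking, the raw LVB bytes, then the
*     lock statistics (zeroes unless CONFIG_OCFS2_FS_STATS is set).
*/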
static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
{
int i;
char *lvb;
struct ocfs2_lock_res *lockres = v;
if (!lockres)
return -EINVAL;
seq_printf(m, "0x%x\t", OCFS2_DLM_DEBUG_STR_VERSION);
if (lockres->l_type == OCFS2_LOCK_TYPE_DENTRY)
seq_printf(m, "%.*s%08x\t", OCFS2_DENTRY_LOCK_INO_START - 1,
lockres->l_name,
(unsigned int)ocfs2_get_dentry_lock_ino(lockres));
else
seq_printf(m, "%.*s\t", OCFS2_LOCK_ID_MAX_LEN, lockres->l_name);
seq_printf(m, "%d\t"
"0x%lx\t"
"0x%x\t"
"0x%x\t"
"%u\t"
"%u\t"
"%d\t"
"%d\t",
lockres->l_level,
lockres->l_flags,
lockres->l_action,
lockres->l_unlock_action,
lockres->l_ro_holders,
lockres->l_ex_holders,
lockres->l_requested,
lockres->l_blocking);
/* Dump the raw LVB */
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
for (i = 0; i < DLM_LVB_LEN; i++)
seq_printf(m, "0x%x\t", lvb[i]);
#ifdef CONFIG_OCFS2_FS_STATS
# define lock_num_prmode(_l) (_l)->l_lock_num_prmode
# define lock_num_exmode(_l) (_l)->l_lock_num_exmode
# define lock_num_prmode_failed(_l) (_l)->l_lock_num_prmode_failed
# define lock_num_exmode_failed(_l) (_l)->l_lock_num_exmode_failed
# define lock_total_prmode(_l) (_l)->l_lock_total_prmode
# define lock_total_exmode(_l) (_l)->l_lock_total_exmode
# define lock_max_prmode(_l) (_l)->l_lock_max_prmode
# define lock_max_exmode(_l) (_l)->l_lock_max_exmode
# define lock_refresh(_l) (_l)->l_lock_refresh
#else
# define lock_num_prmode(_l) (0ULL)
# define lock_num_exmode(_l) (0ULL)
# define lock_num_prmode_failed(_l) (0)
# define lock_num_exmode_failed(_l) (0)
# define lock_total_prmode(_l) (0ULL)
# define lock_total_exmode(_l) (0ULL)
# define lock_max_prmode(_l) (0)
# define lock_max_exmode(_l) (0)
# define lock_refresh(_l) (0)
#endif
/* The following seq_print was added in version 2 of this output */
seq_printf(m, "%llu\t"
"%llu\t"
"%u\t"
"%u\t"
"%llu\t"
"%llu\t"
"%u\t"
"%u\t"
"%u\t",
lock_num_prmode(lockres),
lock_num_exmode(lockres),
lock_num_prmode_failed(lockres),
lock_num_exmode_failed(lockres),
lock_total_prmode(lockres),
lock_total_exmode(lockres),
lock_max_prmode(lockres),
lock_max_exmode(lockres),
lock_refresh(lockres));
/* End the line */
seq_printf(m, "\n");
return 0;
}
static const struct seq_operations ocfs2_dlm_seq_ops = {
.start = ocfs2_dlm_seq_start,
.stop = ocfs2_dlm_seq_stop,
.next = ocfs2_dlm_seq_next,
.show = ocfs2_dlm_seq_show,
};
static int ocfs2_dlm_debug_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = (struct seq_file *) file->private_data;
struct ocfs2_dlm_seq_priv *priv = seq->private;
struct ocfs2_lock_res *res = &priv->p_iter_res;
ocfs2_remove_lockres_tracking(res);
ocfs2_put_dlm_debug(priv->p_dlm_debug);
return seq_release_private(inode, file);
}
static int ocfs2_dlm_debug_open(struct inode *inode, struct file *file)
{
int ret;
struct ocfs2_dlm_seq_priv *priv;
struct seq_file *seq;
struct ocfs2_super *osb;
priv = kzalloc(sizeof(struct ocfs2_dlm_seq_priv), GFP_KERNEL);
if (!priv) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
}
osb = inode->i_private;
ocfs2_get_dlm_debug(osb->osb_dlm_debug);
priv->p_dlm_debug = osb->osb_dlm_debug;
INIT_LIST_HEAD(&priv->p_iter_res.l_debug_list);
ret = seq_open(file, &ocfs2_dlm_seq_ops);
if (ret) {
kfree(priv);
mlog_errno(ret);
goto out;
}
seq = (struct seq_file *) file->private_data;
seq->private = priv;
ocfs2_add_lockres_tracking(&priv->p_iter_res,
priv->p_dlm_debug);
out:
return ret;
}
static const struct file_operations ocfs2_dlm_debug_fops = {
.open = ocfs2_dlm_debug_open,
.release = ocfs2_dlm_debug_release,
.read = seq_read,
.llseek = seq_lseek,
};
static int ocfs2_dlm_init_debug(struct ocfs2_super *osb)
{
int ret = 0;
struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
dlm_debug->d_locking_state = debugfs_create_file("locking_state",
S_IFREG|S_IRUSR,
osb->osb_debug_root,
osb,
&ocfs2_dlm_debug_fops);
if (!dlm_debug->d_locking_state) {
ret = -EINVAL;
mlog(ML_ERROR,
"Unable to create locking state debugfs file.\n");
goto out;
}
ocfs2_get_dlm_debug(dlm_debug);
out:
return ret;
}
static void ocfs2_dlm_shutdown_debug(struct ocfs2_super *osb)
{
struct ocfs2_dlm_debug *dlm_debug = osb->osb_dlm_debug;
if (dlm_debug) {
debugfs_remove(dlm_debug->d_locking_state);
ocfs2_put_dlm_debug(dlm_debug);
}
}
int ocfs2_dlm_init(struct ocfs2_super *osb)
{
int status = 0;
struct ocfs2_cluster_connection *conn = NULL;
mlog_entry_void();
if (ocfs2_mount_local(osb)) {
osb->node_num = 0;
goto local;
}
status = ocfs2_dlm_init_debug(osb);
if (status < 0) {
mlog_errno(status);
goto bail;
}
/* launch downconvert thread */
osb->dc_task = kthread_run(ocfs2_downconvert_thread, osb, "ocfs2dc");
if (IS_ERR(osb->dc_task)) {
status = PTR_ERR(osb->dc_task);
osb->dc_task = NULL;
mlog_errno(status);
goto bail;
}
/* for now, uuid == domain */
status = ocfs2_cluster_connect(osb->osb_cluster_stack,
osb->uuid_str,
strlen(osb->uuid_str),
&lproto, ocfs2_do_node_down, osb,
&conn);
if (status) {
mlog_errno(status);
goto bail;
}
status = ocfs2_cluster_this_node(&osb->node_num);
if (status < 0) {
mlog_errno(status);
mlog(ML_ERROR,
"could not find this host's node number\n");
ocfs2_cluster_disconnect(conn, 0);
goto bail;
}
local:
ocfs2_super_lock_res_init(&osb->osb_super_lockres, osb);
ocfs2_rename_lock_res_init(&osb->osb_rename_lockres, osb);
ocfs2_nfs_sync_lock_res_init(&osb->osb_nfs_sync_lockres, osb);
ocfs2_orphan_scan_lock_res_init(&osb->osb_orphan_scan.os_lockres, osb);
osb->cconn = conn;
status = 0;
bail:
if (status < 0) {
ocfs2_dlm_shutdown_debug(osb);
if (osb->dc_task)
kthread_stop(osb->dc_task);
}
mlog_exit(status);
return status;
}
void ocfs2_dlm_shutdown(struct ocfs2_super *osb,
int hangup_pending)
{
mlog_entry_void();
ocfs2_drop_osb_locks(osb);
/*
* Now that we have dropped all locks and ocfs2_dismount_volume()
* has disabled recovery, the DLM won't be talking to us. It's
* safe to tear things down before disconnecting the cluster.
*/
if (osb->dc_task) {
kthread_stop(osb->dc_task);
osb->dc_task = NULL;
}
ocfs2_lock_res_free(&osb->osb_super_lockres);
ocfs2_lock_res_free(&osb->osb_rename_lockres);
ocfs2_lock_res_free(&osb->osb_nfs_sync_lockres);
ocfs2_lock_res_free(&osb->osb_orphan_scan.os_lockres);
ocfs2_cluster_disconnect(osb->cconn, hangup_pending);
osb->cconn = NULL;
ocfs2_dlm_shutdown_debug(osb);
mlog_exit_void();
}
static int ocfs2_drop_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres)
{
int ret;
unsigned long flags;
u32 lkm_flags = 0;
/* We didn't get anywhere near actually using this lockres. */
if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED))
goto out;
if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
lkm_flags |= DLM_LKF_VALBLK;
spin_lock_irqsave(&lockres->l_lock, flags);
mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_FREEING),
"lockres %s, flags 0x%lx\n",
lockres->l_name, lockres->l_flags);
while (lockres->l_flags & OCFS2_LOCK_BUSY) {
mlog(0, "waiting on busy lock \"%s\": flags = %lx, action = "
"%u, unlock_action = %u\n",
lockres->l_name, lockres->l_flags, lockres->l_action,
lockres->l_unlock_action);
spin_unlock_irqrestore(&lockres->l_lock, flags);
/* XXX: Today we just wait on any busy
* locks... Perhaps we need to cancel converts in the
* future? */
ocfs2_wait_on_busy_lock(lockres);
spin_lock_irqsave(&lockres->l_lock, flags);
}
if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
if (lockres->l_flags & OCFS2_LOCK_ATTACHED &&
lockres->l_level == DLM_LOCK_EX &&
!(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
lockres->l_ops->set_lvb(lockres);
}
if (lockres->l_flags & OCFS2_LOCK_BUSY)
mlog(ML_ERROR, "destroying busy lock: \"%s\"\n",
lockres->l_name);
if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
mlog(0, "destroying blocked lock: \"%s\"\n", lockres->l_name);
if (!(lockres->l_flags & OCFS2_LOCK_ATTACHED)) {
spin_unlock_irqrestore(&lockres->l_lock, flags);
goto out;
}
lockres_clear_flags(lockres, OCFS2_LOCK_ATTACHED);
/* make sure we never get here while waiting for an ast to
* fire. */
BUG_ON(lockres->l_action != OCFS2_AST_INVALID);
/* is this necessary? */
lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
lockres->l_unlock_action = OCFS2_UNLOCK_DROP_LOCK;
spin_unlock_irqrestore(&lockres->l_lock, flags);
mlog(0, "lock %s\n", lockres->l_name);
ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb, lkm_flags);
if (ret) {
ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
mlog(ML_ERROR, "lockres flags: %lu\n", lockres->l_flags);
ocfs2_dlm_dump_lksb(&lockres->l_lksb);
BUG();
}
mlog(0, "lock %s, successful return from ocfs2_dlm_unlock\n",
lockres->l_name);
ocfs2_wait_on_busy_lock(lockres);
out:
mlog_exit(0);
return 0;
}
/* Mark the lockres as being dropped. It will no longer be
* queued if blocking, but we still may have to wait on it
* being dequeued from the downconvert thread before we can consider
* it safe to drop.
*
* You can *not* attempt to call cluster_lock on this lockres anymore. */
void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)
{
int status;
struct ocfs2_mask_waiter mw;
unsigned long flags;
ocfs2_init_mask_waiter(&mw);
spin_lock_irqsave(&lockres->l_lock, flags);
lockres->l_flags |= OCFS2_LOCK_FREEING;
while (lockres->l_flags & OCFS2_LOCK_QUEUED) {
lockres_add_mask_waiter(lockres, &mw, OCFS2_LOCK_QUEUED, 0);
spin_unlock_irqrestore(&lockres->l_lock, flags);
mlog(0, "Waiting on lockres %s\n", lockres->l_name);
status = ocfs2_wait_for_mask(&mw);
if (status)
mlog_errno(status);
spin_lock_irqsave(&lockres->l_lock, flags);
}
spin_unlock_irqrestore(&lockres->l_lock, flags);
}
void ocfs2_simple_drop_lockres(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres)
{
int ret;
ocfs2_mark_lockres_freeing(lockres);
ret = ocfs2_drop_lock(osb, lockres);
if (ret)
mlog_errno(ret);
}
static void ocfs2_drop_osb_locks(struct ocfs2_super *osb)
{
ocfs2_simple_drop_lockres(osb, &osb->osb_super_lockres);
ocfs2_simple_drop_lockres(osb, &osb->osb_rename_lockres);
ocfs2_simple_drop_lockres(osb, &osb->osb_nfs_sync_lockres);
ocfs2_simple_drop_lockres(osb, &osb->osb_orphan_scan.os_lockres);
}
int ocfs2_drop_inode_locks(struct inode *inode)
{
int status, err;
mlog_entry_void();
/* No need to call ocfs2_mark_lockres_freeing here -
* ocfs2_clear_inode has done it for us. */
err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
&OCFS2_I(inode)->ip_open_lockres);
if (err < 0)
mlog_errno(err);
status = err;
err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
&OCFS2_I(inode)->ip_inode_lockres);
if (err < 0)
mlog_errno(err);
if (err < 0 && !status)
status = err;
err = ocfs2_drop_lock(OCFS2_SB(inode->i_sb),
&OCFS2_I(inode)->ip_rw_lockres);
if (err < 0)
mlog_errno(err);
if (err < 0 && !status)
status = err;
mlog_exit(status);
return status;
}
static unsigned int ocfs2_prepare_downconvert(struct ocfs2_lock_res *lockres,
int new_level)
{
assert_spin_locked(&lockres->l_lock);
BUG_ON(lockres->l_blocking <= DLM_LOCK_NL);
if (lockres->l_level <= new_level) {
mlog(ML_ERROR, "lockres %s, lvl %d <= %d, blcklst %d, mask %d, "
"type %d, flags 0x%lx, hold %d %d, act %d %d, req %d, "
"block %d, pgen %d\n", lockres->l_name, lockres->l_level,
new_level, list_empty(&lockres->l_blocked_list),
list_empty(&lockres->l_mask_waiters), lockres->l_type,
lockres->l_flags, lockres->l_ro_holders,
lockres->l_ex_holders, lockres->l_action,
lockres->l_unlock_action, lockres->l_requested,
lockres->l_blocking, lockres->l_pending_gen);
BUG();
}
mlog(ML_BASTS, "lockres %s, level %d => %d, blocking %d\n",
lockres->l_name, lockres->l_level, new_level, lockres->l_blocking);
lockres->l_action = OCFS2_AST_DOWNCONVERT;
lockres->l_requested = new_level;
lockres_or_flags(lockres, OCFS2_LOCK_BUSY);
return lockres_set_pending(lockres);
}
static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres,
int new_level,
int lvb,
unsigned int generation)
{
int ret;
u32 dlm_flags = DLM_LKF_CONVERT;
mlog_entry_void();
mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
lockres->l_level, new_level);
if (lvb)
dlm_flags |= DLM_LKF_VALBLK;
ret = ocfs2_dlm_lock(osb->cconn,
new_level,
&lockres->l_lksb,
dlm_flags,
lockres->l_name,
OCFS2_LOCK_ID_MAX_LEN - 1);
lockres_clear_pending(lockres, generation, osb);
if (ret) {
ocfs2_log_dlm_error("ocfs2_dlm_lock", ret, lockres);
ocfs2_recover_from_dlm_error(lockres, 1);
goto bail;
}
ret = 0;
bail:
mlog_exit(ret);
return ret;
}
/* returns 1 when the caller should unlock and call ocfs2_dlm_unlock */
static int ocfs2_prepare_cancel_convert(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres)
{
assert_spin_locked(&lockres->l_lock);
mlog_entry_void();
if (lockres->l_unlock_action == OCFS2_UNLOCK_CANCEL_CONVERT) {
/* If we're already trying to cancel a lock conversion
* then just drop the spinlock and allow the caller to
* requeue this lock. */
mlog(ML_BASTS, "lockres %s, skip convert\n", lockres->l_name);
return 0;
}
/* were we in a convert when we got the bast fire? */
BUG_ON(lockres->l_action != OCFS2_AST_CONVERT &&
lockres->l_action != OCFS2_AST_DOWNCONVERT);
/* set things up for the unlockast to know to just
* clear out the ast_action and unset busy, etc. */
lockres->l_unlock_action = OCFS2_UNLOCK_CANCEL_CONVERT;
mlog_bug_on_msg(!(lockres->l_flags & OCFS2_LOCK_BUSY),
"lock %s, invalid flags: 0x%lx\n",
lockres->l_name, lockres->l_flags);
mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
return 1;
}
static int ocfs2_cancel_convert(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres)
{
int ret;
mlog_entry_void();
ret = ocfs2_dlm_unlock(osb->cconn, &lockres->l_lksb,
DLM_LKF_CANCEL);
if (ret) {
ocfs2_log_dlm_error("ocfs2_dlm_unlock", ret, lockres);
ocfs2_recover_from_dlm_error(lockres, 0);
}
mlog(ML_BASTS, "lockres %s\n", lockres->l_name);
mlog_exit(ret);
return ret;
}
static int ocfs2_unblock_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres,
struct ocfs2_unblock_ctl *ctl)
{
unsigned long flags;
int blocking;
int new_level;
int level;
int ret = 0;
int set_lvb = 0;
unsigned int gen;
mlog_entry_void();
spin_lock_irqsave(&lockres->l_lock, flags);
recheck:
/*
* Is it still blocking? If not, we have no more work to do.
*/
if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) {
BUG_ON(lockres->l_blocking != DLM_LOCK_NL);
spin_unlock_irqrestore(&lockres->l_lock, flags);
ret = 0;
goto leave;
}
if (lockres->l_flags & OCFS2_LOCK_BUSY) {
/* XXX
* This is a *big* race. The OCFS2_LOCK_PENDING flag
* exists entirely for one reason - another thread has set
* OCFS2_LOCK_BUSY, but has *NOT* yet called dlm_lock().
*
* If we do ocfs2_cancel_convert() before the other thread
* calls dlm_lock(), our cancel will do nothing. We will
* get no ast, and we will have no way of knowing the
* cancel failed. Meanwhile, the other thread will call
* into dlm_lock() and wait...forever.
*
* Why forever? Because another node has asked for the
* lock first; that's why we're here in unblock_lock().
*
* The solution is OCFS2_LOCK_PENDING. When PENDING is
* set, we just requeue the unblock. Only when the other
* thread has called dlm_lock() and cleared PENDING will
* we then cancel their request.
*
* All callers of dlm_lock() must set OCFS2_LOCK_PENDING
* at the same time they set OCFS2_LOCK_BUSY. They must
* clear OCFS2_LOCK_PENDING after dlm_lock() returns.
*/
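/*
 * Illustrative interleaving (a sketch of the race described above,
 * not part of the original file):
 *
 *   lock requestor                downconvert thread
 *   --------------                ------------------
 *   set BUSY | PENDING
 *                                 sees PENDING -> requeue unblock
 *   dlm_lock()
 *   clear PENDING
 *                                 sees BUSY, !PENDING
 *                                 -> cancel convert is now safe
 */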
if (lockres->l_flags & OCFS2_LOCK_PENDING) {
mlog(ML_BASTS, "lockres %s, ReQ: Pending\n",
lockres->l_name);
goto leave_requeue;
}
ctl->requeue = 1;
ret = ocfs2_prepare_cancel_convert(osb, lockres);
spin_unlock_irqrestore(&lockres->l_lock, flags);
if (ret) {
ret = ocfs2_cancel_convert(osb, lockres);
if (ret < 0)
mlog_errno(ret);
}
goto leave;
}
/*
* This prevents livelocks. OCFS2_LOCK_UPCONVERT_FINISHING flag is
* set when the ast is received for an upconvert just before the
* OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast
* on the heels of the ast, we want to delay the downconvert just
* enough to allow the up requestor to do its task. Because this
* lock is in the blocked queue, the lock will be downconverted
* as soon as the requestor is done with the lock.
*/
if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING)
goto leave_requeue;
/*
* How can we block and yet be at NL? We were trying to upconvert
* from NL and got canceled. The code comes back here, and now
* we notice and clear BLOCKING.
*/
if (lockres->l_level == DLM_LOCK_NL) {
BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders);
mlog(ML_BASTS, "lockres %s, Aborting dc\n", lockres->l_name);
lockres->l_blocking = DLM_LOCK_NL;
lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
spin_unlock_irqrestore(&lockres->l_lock, flags);
goto leave;
}
/* if we're blocking an exclusive and we have *any* holders,
* then requeue. */
if ((lockres->l_blocking == DLM_LOCK_EX)
&& (lockres->l_ex_holders || lockres->l_ro_holders)) {
mlog(ML_BASTS, "lockres %s, ReQ: EX/PR Holders %u,%u\n",
lockres->l_name, lockres->l_ex_holders,
lockres->l_ro_holders);
goto leave_requeue;
}
/* If it's a PR we're blocking, then only
* requeue if we've got any EX holders */
if (lockres->l_blocking == DLM_LOCK_PR &&
lockres->l_ex_holders) {
mlog(ML_BASTS, "lockres %s, ReQ: EX Holders %u\n",
lockres->l_name, lockres->l_ex_holders);
goto leave_requeue;
}
/*
* Can we get a lock in this state if the holder counts are
* zero? The metadata unblock code used to check this.
*/
if ((lockres->l_ops->flags & LOCK_TYPE_REQUIRES_REFRESH)
&& (lockres->l_flags & OCFS2_LOCK_REFRESHING)) {
mlog(ML_BASTS, "lockres %s, ReQ: Lock Refreshing\n",
lockres->l_name);
goto leave_requeue;
}
new_level = ocfs2_highest_compat_lock_level(lockres->l_blocking);
if (lockres->l_ops->check_downconvert
&& !lockres->l_ops->check_downconvert(lockres, new_level)) {
mlog(ML_BASTS, "lockres %s, ReQ: Checkpointing\n",
lockres->l_name);
goto leave_requeue;
}
/* If we get here, then we know that there are no more
* incompatible holders (and anyone asking for an incompatible
* lock is blocked). We can now downconvert the lock */
if (!lockres->l_ops->downconvert_worker)
goto downconvert;
/* Some lockres types want to do a bit of work before
* downconverting a lock. Allow that here. The worker function
* may sleep, so we save off a copy of what we're blocking as
* it may change while we're not holding the spin lock. */
blocking = lockres->l_blocking;
level = lockres->l_level;
spin_unlock_irqrestore(&lockres->l_lock, flags);
ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
if (ctl->unblock_action == UNBLOCK_STOP_POST) {
mlog(ML_BASTS, "lockres %s, UNBLOCK_STOP_POST\n",
lockres->l_name);
goto leave;
}
spin_lock_irqsave(&lockres->l_lock, flags);
if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) {
/* If this changed underneath us, then we can't drop
* it just yet. */
mlog(ML_BASTS, "lockres %s, block=%d:%d, level=%d:%d, "
"Recheck\n", lockres->l_name, blocking,
lockres->l_blocking, level, lockres->l_level);
goto recheck;
}
downconvert:
ctl->requeue = 0;
if (lockres->l_ops->flags & LOCK_TYPE_USES_LVB) {
if (lockres->l_level == DLM_LOCK_EX)
set_lvb = 1;
/*
* We only set the lvb if the lock has been fully
* refreshed - otherwise we risk setting stale
* data. There's no need to actually clear out the
* lvb here as its value is still valid.
*/
if (set_lvb && !(lockres->l_flags & OCFS2_LOCK_NEEDS_REFRESH))
lockres->l_ops->set_lvb(lockres);
}
gen = ocfs2_prepare_downconvert(lockres, new_level);
spin_unlock_irqrestore(&lockres->l_lock, flags);
ret = ocfs2_downconvert_lock(osb, lockres, new_level, set_lvb,
gen);
leave:
mlog_exit(ret);
return ret;
leave_requeue:
spin_unlock_irqrestore(&lockres->l_lock, flags);
ctl->requeue = 1;
mlog_exit(0);
return 0;
}
static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
int blocking)
{
struct inode *inode;
struct address_space *mapping;
inode = ocfs2_lock_res_inode(lockres);
mapping = inode->i_mapping;
if (!S_ISREG(inode->i_mode))
goto out;
/*
* We need this before the filemap_fdatawrite() so that it can
* transfer the dirty bit from the PTE to the
* page. Unfortunately this means that even for EX->PR
* downconverts, we'll lose our mappings and have to build
* them up again.
*/
unmap_mapping_range(mapping, 0, 0, 0);
if (filemap_fdatawrite(mapping)) {
mlog(ML_ERROR, "Could not sync inode %llu for downconvert!",
(unsigned long long)OCFS2_I(inode)->ip_blkno);
}
sync_mapping_buffers(mapping);
if (blocking == DLM_LOCK_EX) {
truncate_inode_pages(mapping, 0);
} else {
/* We only need to wait on the I/O if we're not also
* truncating pages because truncate_inode_pages waits
* for us above. We don't truncate pages if we're
* blocking anything < EXMODE because we want to keep
* them around in that case. */
filemap_fdatawait(mapping);
}
out:
return UNBLOCK_CONTINUE;
}
static int ocfs2_ci_checkpointed(struct ocfs2_caching_info *ci,
struct ocfs2_lock_res *lockres,
int new_level)
{
int checkpointed = ocfs2_ci_fully_checkpointed(ci);
BUG_ON(new_level != DLM_LOCK_NL && new_level != DLM_LOCK_PR);
BUG_ON(lockres->l_level != DLM_LOCK_EX && !checkpointed);
if (checkpointed)
return 1;
ocfs2_start_checkpoint(OCFS2_SB(ocfs2_metadata_cache_get_super(ci)));
return 0;
}
static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres,
int new_level)
{
struct inode *inode = ocfs2_lock_res_inode(lockres);
return ocfs2_ci_checkpointed(INODE_CACHE(inode), lockres, new_level);
}
static void ocfs2_set_meta_lvb(struct ocfs2_lock_res *lockres)
{
struct inode *inode = ocfs2_lock_res_inode(lockres);
__ocfs2_stuff_meta_lvb(inode);
}
/*
* Does the final reference drop on our dentry lock. Right now this
* happens in the downconvert thread, but we could choose to simplify the
* dlmglue API and push these off to the ocfs2_wq in the future.
*/
static void ocfs2_dentry_post_unlock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres)
{
struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
ocfs2_dentry_lock_put(osb, dl);
}
/*
* d_delete() matching dentries before the lock downconvert.
*
* At this point, any process waiting to destroy the
* dentry_lock due to last ref count is stopped by the
* OCFS2_LOCK_QUEUED flag.
*
* We have two potential problems
*
* 1) If we do the last reference drop on our dentry_lock (via dput)
* we'll wind up in ocfs2_release_dentry_lock(), waiting on
* the downconvert to finish. Instead we take an elevated
* reference and push the drop until after we've completed our
* unblock processing.
*
* 2) There might be another process with a final reference,
* waiting on us to finish processing. If this is the case, we
* detect it and exit out - there are no more dentries anyway.
*/
static int ocfs2_dentry_convert_worker(struct ocfs2_lock_res *lockres,
int blocking)
{
struct ocfs2_dentry_lock *dl = ocfs2_lock_res_dl(lockres);
struct ocfs2_inode_info *oi = OCFS2_I(dl->dl_inode);
struct dentry *dentry;
unsigned long flags;
int extra_ref = 0;
/*
* This node is blocking another node from getting a read
* lock. This happens when we've renamed within a
* directory. We've forced the other nodes to d_delete(), but
* we never actually dropped our lock because it's still
* valid. The downconvert code will retain a PR for this node,
* so there's no further work to do.
*/
if (blocking == DLM_LOCK_PR)
return UNBLOCK_CONTINUE;
/*
* Mark this inode as potentially orphaned. The code in
* ocfs2_delete_inode() will figure out whether it actually
* needs to be freed or not.
*/
spin_lock(&oi->ip_lock);
oi->ip_flags |= OCFS2_INODE_MAYBE_ORPHANED;
spin_unlock(&oi->ip_lock);
/*
* Yuck. We need to make sure, however, that the check of
* OCFS2_LOCK_FREEING and the taking of the extra reference
* are atomic with respect to a reference decrement or the
* setting of that flag.
*/
spin_lock_irqsave(&lockres->l_lock, flags);
spin_lock(&dentry_attach_lock);
if (!(lockres->l_flags & OCFS2_LOCK_FREEING)
&& dl->dl_count) {
dl->dl_count++;
extra_ref = 1;
}
spin_unlock(&dentry_attach_lock);
spin_unlock_irqrestore(&lockres->l_lock, flags);
mlog(0, "extra_ref = %d\n", extra_ref);
/*
* We have a process waiting on us in ocfs2_dentry_iput(),
* which means we can't have any more outstanding
* aliases. There's no need to do any more work.
*/
if (!extra_ref)
return UNBLOCK_CONTINUE;
spin_lock(&dentry_attach_lock);
while (1) {
dentry = ocfs2_find_local_alias(dl->dl_inode,
dl->dl_parent_blkno, 1);
if (!dentry)
break;
spin_unlock(&dentry_attach_lock);
mlog(0, "d_delete(%.*s);\n", dentry->d_name.len,
dentry->d_name.name);
/*
* The following dcache calls may do an
* iput(). Normally we don't want that from the
* downconverting thread, but in this case it's ok
* because the requesting node already has an
* exclusive lock on the inode, so it can't be queued
* for a downconvert.
*/
d_delete(dentry);
dput(dentry);
spin_lock(&dentry_attach_lock);
}
spin_unlock(&dentry_attach_lock);
/*
* If we are the last holder of this dentry lock, there is no
* reason to downconvert so skip straight to the unlock.
*/
if (dl->dl_count == 1)
return UNBLOCK_STOP_POST;
return UNBLOCK_CONTINUE_POST;
}
static int ocfs2_check_refcount_downconvert(struct ocfs2_lock_res *lockres,
int new_level)
{
struct ocfs2_refcount_tree *tree =
ocfs2_lock_res_refcount_tree(lockres);
return ocfs2_ci_checkpointed(&tree->rf_ci, lockres, new_level);
}
static int ocfs2_refcount_convert_worker(struct ocfs2_lock_res *lockres,
int blocking)
{
struct ocfs2_refcount_tree *tree =
ocfs2_lock_res_refcount_tree(lockres);
ocfs2_metadata_cache_purge(&tree->rf_ci);
return UNBLOCK_CONTINUE;
}
static void ocfs2_set_qinfo_lvb(struct ocfs2_lock_res *lockres)
{
struct ocfs2_qinfo_lvb *lvb;
struct ocfs2_mem_dqinfo *oinfo = ocfs2_lock_res_qinfo(lockres);
struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
oinfo->dqi_gi.dqi_type);
mlog_entry_void();
lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
lvb->lvb_version = OCFS2_QINFO_LVB_VERSION;
lvb->lvb_bgrace = cpu_to_be32(info->dqi_bgrace);
lvb->lvb_igrace = cpu_to_be32(info->dqi_igrace);
lvb->lvb_syncms = cpu_to_be32(oinfo->dqi_syncms);
lvb->lvb_blocks = cpu_to_be32(oinfo->dqi_gi.dqi_blocks);
lvb->lvb_free_blk = cpu_to_be32(oinfo->dqi_gi.dqi_free_blk);
lvb->lvb_free_entry = cpu_to_be32(oinfo->dqi_gi.dqi_free_entry);
mlog_exit_void();
}
void ocfs2_qinfo_unlock(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
mlog_entry_void();
if (!ocfs2_is_hard_readonly(osb) && !ocfs2_mount_local(osb))
ocfs2_cluster_unlock(osb, lockres, level);
mlog_exit_void();
}
static int ocfs2_refresh_qinfo(struct ocfs2_mem_dqinfo *oinfo)
{
struct mem_dqinfo *info = sb_dqinfo(oinfo->dqi_gi.dqi_sb,
oinfo->dqi_gi.dqi_type);
struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
struct ocfs2_qinfo_lvb *lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
struct buffer_head *bh = NULL;
struct ocfs2_global_disk_dqinfo *gdinfo;
int status = 0;
if (ocfs2_dlm_lvb_valid(&lockres->l_lksb) &&
lvb->lvb_version == OCFS2_QINFO_LVB_VERSION) {
info->dqi_bgrace = be32_to_cpu(lvb->lvb_bgrace);
info->dqi_igrace = be32_to_cpu(lvb->lvb_igrace);
oinfo->dqi_syncms = be32_to_cpu(lvb->lvb_syncms);
oinfo->dqi_gi.dqi_blocks = be32_to_cpu(lvb->lvb_blocks);
oinfo->dqi_gi.dqi_free_blk = be32_to_cpu(lvb->lvb_free_blk);
oinfo->dqi_gi.dqi_free_entry =
be32_to_cpu(lvb->lvb_free_entry);
} else {
status = ocfs2_read_quota_phys_block(oinfo->dqi_gqinode,
oinfo->dqi_giblk, &bh);
if (status) {
mlog_errno(status);
goto bail;
}
gdinfo = (struct ocfs2_global_disk_dqinfo *)
(bh->b_data + OCFS2_GLOBAL_INFO_OFF);
info->dqi_bgrace = le32_to_cpu(gdinfo->dqi_bgrace);
info->dqi_igrace = le32_to_cpu(gdinfo->dqi_igrace);
oinfo->dqi_syncms = le32_to_cpu(gdinfo->dqi_syncms);
oinfo->dqi_gi.dqi_blocks = le32_to_cpu(gdinfo->dqi_blocks);
oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(gdinfo->dqi_free_blk);
oinfo->dqi_gi.dqi_free_entry =
le32_to_cpu(gdinfo->dqi_free_entry);
brelse(bh);
ocfs2_track_lock_refresh(lockres);
}
bail:
return status;
}
/* Lock quota info; this function expects at least a shared lock on the quota
 * file so that we can safely refresh the quota info from disk. */
int ocfs2_qinfo_lock(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
struct ocfs2_lock_res *lockres = &oinfo->dqi_gqlock;
struct ocfs2_super *osb = OCFS2_SB(oinfo->dqi_gi.dqi_sb);
int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
int status = 0;
mlog_entry_void();
/* On RO devices, locking really isn't needed... */
if (ocfs2_is_hard_readonly(osb)) {
if (ex)
status = -EROFS;
goto bail;
}
if (ocfs2_mount_local(osb))
goto bail;
status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
if (status < 0) {
mlog_errno(status);
goto bail;
}
if (!ocfs2_should_refresh_lock_res(lockres))
goto bail;
/* OK, we have the lock but we need to refresh the quota info */
status = ocfs2_refresh_qinfo(oinfo);
if (status)
ocfs2_qinfo_unlock(oinfo, ex);
ocfs2_complete_lock_res_refresh(lockres, status);
bail:
mlog_exit(status);
return status;
}
int ocfs2_refcount_lock(struct ocfs2_refcount_tree *ref_tree, int ex)
{
int status;
int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
struct ocfs2_super *osb = lockres->l_priv;
if (ocfs2_is_hard_readonly(osb))
return -EROFS;
if (ocfs2_mount_local(osb))
return 0;
status = ocfs2_cluster_lock(osb, lockres, level, 0, 0);
if (status < 0)
mlog_errno(status);
return status;
}
void ocfs2_refcount_unlock(struct ocfs2_refcount_tree *ref_tree, int ex)
{
int level = ex ? DLM_LOCK_EX : DLM_LOCK_PR;
struct ocfs2_lock_res *lockres = &ref_tree->rf_lockres;
struct ocfs2_super *osb = lockres->l_priv;
if (!ocfs2_mount_local(osb))
ocfs2_cluster_unlock(osb, lockres, level);
}
static void ocfs2_process_blocked_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres)
{
int status;
struct ocfs2_unblock_ctl ctl = {0, 0,};
unsigned long flags;
/* Our reference to the lockres in this function can be
* considered valid until we remove the OCFS2_LOCK_QUEUED
* flag. */
mlog_entry_void();
BUG_ON(!lockres);
BUG_ON(!lockres->l_ops);
mlog(ML_BASTS, "lockres %s blocked\n", lockres->l_name);
/* Detect whether a lock has been marked as going away while
* the downconvert thread was processing other things. A lock can
* still be marked with OCFS2_LOCK_FREEING after this check,
* but short circuiting here will still save us some
* performance. */
spin_lock_irqsave(&lockres->l_lock, flags);
if (lockres->l_flags & OCFS2_LOCK_FREEING)
goto unqueue;
spin_unlock_irqrestore(&lockres->l_lock, flags);
status = ocfs2_unblock_lock(osb, lockres, &ctl);
if (status < 0)
mlog_errno(status);
spin_lock_irqsave(&lockres->l_lock, flags);
unqueue:
if (lockres->l_flags & OCFS2_LOCK_FREEING || !ctl.requeue) {
lockres_clear_flags(lockres, OCFS2_LOCK_QUEUED);
} else
ocfs2_schedule_blocked_lock(osb, lockres);
mlog(ML_BASTS, "lockres %s, requeue = %s.\n", lockres->l_name,
ctl.requeue ? "yes" : "no");
spin_unlock_irqrestore(&lockres->l_lock, flags);
if (ctl.unblock_action != UNBLOCK_CONTINUE
&& lockres->l_ops->post_unlock)
lockres->l_ops->post_unlock(osb, lockres);
mlog_exit_void();
}
static void ocfs2_schedule_blocked_lock(struct ocfs2_super *osb,
struct ocfs2_lock_res *lockres)
{
mlog_entry_void();
assert_spin_locked(&lockres->l_lock);
if (lockres->l_flags & OCFS2_LOCK_FREEING) {
/* Do not schedule a lock for downconvert when it's on
* the way to destruction - any nodes wanting access
* to the resource will get it soon. */
mlog(ML_BASTS, "lockres %s won't be scheduled: flags 0x%lx\n",
lockres->l_name, lockres->l_flags);
return;
}
lockres_or_flags(lockres, OCFS2_LOCK_QUEUED);
spin_lock(&osb->dc_task_lock);
if (list_empty(&lockres->l_blocked_list)) {
list_add_tail(&lockres->l_blocked_list,
&osb->blocked_lock_list);
osb->blocked_lock_count++;
}
spin_unlock(&osb->dc_task_lock);
mlog_exit_void();
}
static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
{
unsigned long processed;
struct ocfs2_lock_res *lockres;
mlog_entry_void();
spin_lock(&osb->dc_task_lock);
/* grab this early so we know to try again if a state change and
* wake happens part-way through our work */
osb->dc_work_sequence = osb->dc_wake_sequence;
processed = osb->blocked_lock_count;
while (processed) {
BUG_ON(list_empty(&osb->blocked_lock_list));
lockres = list_entry(osb->blocked_lock_list.next,
struct ocfs2_lock_res, l_blocked_list);
list_del_init(&lockres->l_blocked_list);
osb->blocked_lock_count--;
spin_unlock(&osb->dc_task_lock);
BUG_ON(!processed);
processed--;
ocfs2_process_blocked_lock(osb, lockres);
spin_lock(&osb->dc_task_lock);
}
spin_unlock(&osb->dc_task_lock);
mlog_exit_void();
}
static int ocfs2_downconvert_thread_lists_empty(struct ocfs2_super *osb)
{
int empty = 0;
spin_lock(&osb->dc_task_lock);
if (list_empty(&osb->blocked_lock_list))
empty = 1;
spin_unlock(&osb->dc_task_lock);
return empty;
}
static int ocfs2_downconvert_thread_should_wake(struct ocfs2_super *osb)
{
int should_wake = 0;
spin_lock(&osb->dc_task_lock);
if (osb->dc_work_sequence != osb->dc_wake_sequence)
should_wake = 1;
spin_unlock(&osb->dc_task_lock);
return should_wake;
}
static int ocfs2_downconvert_thread(void *arg)
{
int status = 0;
struct ocfs2_super *osb = arg;
/* only quit once we've been asked to stop and there is no more
* work available */
while (!(kthread_should_stop() &&
ocfs2_downconvert_thread_lists_empty(osb))) {
wait_event_interruptible(osb->dc_event,
ocfs2_downconvert_thread_should_wake(osb) ||
kthread_should_stop());
mlog(0, "downconvert_thread: awoken\n");
ocfs2_downconvert_thread_do_work(osb);
}
osb->dc_task = NULL;
return status;
}
void ocfs2_wake_downconvert_thread(struct ocfs2_super *osb)
{
spin_lock(&osb->dc_task_lock);
/* make sure the downconvert thread gets a swipe at whatever changes
 * the caller may have made to the lock state */
osb->dc_wake_sequence++;
spin_unlock(&osb->dc_task_lock);
wake_up(&osb->dc_event);
}
|
gpl-2.0
|
bheu/odroid_linux
|
kernel/smp.c
|
761
|
19294
|
/*
* Generic helpers for smp ipi calls
*
* (C) Jens Axboe <jens.axboe@oracle.com> 2008
*/
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static struct {
struct list_head queue;
raw_spinlock_t lock;
} call_function __cacheline_aligned_in_smp =
{
.queue = LIST_HEAD_INIT(call_function.queue),
.lock = __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
};
enum {
CSD_FLAG_LOCK = 0x01,
};
struct call_function_data {
struct call_single_data csd;
atomic_t refs;
cpumask_var_t cpumask;
cpumask_var_t cpumask_ipi;
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
struct call_single_queue {
struct list_head list;
raw_spinlock_t lock;
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);
static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
cpu_to_node(cpu)))
return notifier_from_errno(-ENOMEM);
if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
cpu_to_node(cpu)))
return notifier_from_errno(-ENOMEM);
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
free_cpumask_var(cfd->cpumask);
free_cpumask_var(cfd->cpumask_ipi);
break;
#endif
}
return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
.notifier_call = hotplug_cfd,
};
void __init call_function_init(void)
{
void *cpu = (void *)(long)smp_processor_id();
int i;
for_each_possible_cpu(i) {
struct call_single_queue *q = &per_cpu(call_single_queue, i);
raw_spin_lock_init(&q->lock);
INIT_LIST_HEAD(&q->list);
}
hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
register_cpu_notifier(&hotplug_cfd_notifier);
}
/*
* csd_lock/csd_unlock used to serialize access to per-cpu csd resources
*
* For non-synchronous ipi calls the csd can still be in use by the
* previous function call. For multi-cpu calls it's even more interesting
* as we'll have to ensure no other cpu is observing our csd.
*/
static void csd_lock_wait(struct call_single_data *data)
{
while (data->flags & CSD_FLAG_LOCK)
cpu_relax();
}
static void csd_lock(struct call_single_data *data)
{
csd_lock_wait(data);
data->flags = CSD_FLAG_LOCK;
/*
* prevent CPU from reordering the above assignment
* to ->flags with any subsequent assignments to other
* fields of the specified call_single_data structure:
*/
smp_mb();
}
static void csd_unlock(struct call_single_data *data)
{
WARN_ON(!(data->flags & CSD_FLAG_LOCK));
/*
* ensure we're all done before releasing data:
*/
smp_mb();
data->flags &= ~CSD_FLAG_LOCK;
}
/*
* Insert a previously allocated call_single_data element
* for execution on the given CPU. data must already have
* ->func, ->info, and ->flags set.
*/
static
void generic_exec_single(int cpu, struct call_single_data *data, int wait)
{
struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
unsigned long flags;
int ipi;
raw_spin_lock_irqsave(&dst->lock, flags);
ipi = list_empty(&dst->list);
list_add_tail(&data->list, &dst->list);
raw_spin_unlock_irqrestore(&dst->lock, flags);
/*
* The list addition should be visible before we send the IPI: the
* handler locks the list to pull the entry off it, and the normal
* cache coherency rules implied by spinlocks guarantee the ordering.
*
* If IPIs can go out of order to the cache coherency protocol
* in an architecture, sufficient synchronisation should be added
* to arch code to make it appear to obey cache coherency WRT
* locking and barrier primitives. Generic code isn't really
* equipped to do the right thing...
*/
if (ipi)
arch_send_call_function_single_ipi(cpu);
if (wait)
csd_lock_wait(data);
}
/*
* Invoked by arch to handle an IPI for call function. Must be called with
* interrupts disabled.
*/
void generic_smp_call_function_interrupt(void)
{
struct call_function_data *data;
int cpu = smp_processor_id();
/*
* Shouldn't receive this interrupt on a cpu that is not yet online.
*/
WARN_ON_ONCE(!cpu_online(cpu));
/*
* Ensure entry is visible on call_function_queue after we have
* entered the IPI. See comment in smp_call_function_many.
* If we don't have this, then we may miss an entry on the list
* and never get another IPI to process it.
*/
smp_mb();
/*
* It's ok to use list_for_each_rcu() here even though we may
* delete 'pos', since list_del_rcu() doesn't clear ->next
*/
list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
int refs;
smp_call_func_t func;
/*
* Since we walk the list without any locks, we might
* see an entry that was completed, removed from the
* list and is in the process of being reused.
*
* We must check that the cpu is in the cpumask before
* checking the refs, and both must be set before
* executing the callback on this cpu.
*/
if (!cpumask_test_cpu(cpu, data->cpumask))
continue;
smp_rmb();
if (atomic_read(&data->refs) == 0)
continue;
func = data->csd.func; /* save for later warn */
func(data->csd.info);
/*
* If the cpu mask is not still set then func enabled
* interrupts (BUG), and this cpu took another smp call
* function interrupt and executed func(info) twice
* on this cpu. That nested execution decremented refs.
*/
if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) {
WARN(1, "%pf enabled interrupts and double executed\n", func);
continue;
}
refs = atomic_dec_return(&data->refs);
WARN_ON(refs < 0);
if (refs)
continue;
WARN_ON(!cpumask_empty(data->cpumask));
raw_spin_lock(&call_function.lock);
list_del_rcu(&data->csd.list);
raw_spin_unlock(&call_function.lock);
csd_unlock(&data->csd);
}
}
/*
* Invoked by arch to handle an IPI for call function single. Must be
* called from the arch with interrupts disabled.
*/
void generic_smp_call_function_single_interrupt(void)
{
struct call_single_queue *q = &__get_cpu_var(call_single_queue);
unsigned int data_flags;
LIST_HEAD(list);
/*
* Shouldn't receive this interrupt on a cpu that is not yet online.
*/
WARN_ON_ONCE(!cpu_online(smp_processor_id()));
raw_spin_lock(&q->lock);
list_replace_init(&q->list, &list);
raw_spin_unlock(&q->lock);
while (!list_empty(&list)) {
struct call_single_data *data;
data = list_entry(list.next, struct call_single_data, list);
list_del(&data->list);
/*
* 'data' can be invalid after this call if flags == 0
* (when called through generic_exec_single()),
* so save them away before making the call:
*/
data_flags = data->flags;
data->func(data->info);
/*
* Unlocked CSDs are valid through generic_exec_single():
*/
if (data_flags & CSD_FLAG_LOCK)
csd_unlock(data);
}
}
static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
/*
* smp_call_function_single - Run a function on a specific CPU
* @cpu: The CPU to run on.
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
* @wait: If true, wait until function has completed on the target CPU.
*
* Returns 0 on success, else a negative status code.
*/
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
int wait)
{
struct call_single_data d = {
.flags = 0,
};
unsigned long flags;
int this_cpu;
int err = 0;
/*
* prevent preemption and reschedule on another processor,
* as well as CPU removal
*/
this_cpu = get_cpu();
/*
* Can deadlock when called with interrupts disabled.
* We allow cpus that are not yet online though, as no one else can
* send smp call function interrupt to this cpu and as such deadlocks
* can't happen.
*/
WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
&& !oops_in_progress);
if (cpu == this_cpu) {
local_irq_save(flags);
func(info);
local_irq_restore(flags);
} else {
if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
struct call_single_data *data = &d;
if (!wait)
data = &__get_cpu_var(csd_data);
csd_lock(data);
data->func = func;
data->info = info;
generic_exec_single(cpu, data, wait);
} else {
err = -ENXIO; /* CPU not online */
}
}
put_cpu();
return err;
}
EXPORT_SYMBOL(smp_call_function_single);
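/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): example_remote_func and example_poke_cpu only show the
 * intended calling convention. The callback runs with interrupts
 * disabled on the target cpu, so it must be fast and non-blocking.
 */
#if 0
static void example_remote_func(void *info)
{
	pr_info("running on cpu %d, info=%p\n", smp_processor_id(), info);
}

static void example_poke_cpu(int cpu)
{
	/* wait == 1: block until example_remote_func has run on @cpu */
	int err = smp_call_function_single(cpu, example_remote_func, NULL, 1);

	if (err)
		pr_err("cpu %d not online (err %d)\n", cpu, err);
}
#endif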
/*
* smp_call_function_any - Run a function on any of the given cpus
* @mask: The mask of cpus it can run on.
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
* @wait: If true, wait until function has completed.
*
* Returns 0 on success, else a negative status code (if no cpus were online).
* Note that @wait will be implicitly turned on in case of allocation failures,
* since we fall back to on-stack allocation.
*
* Selection preference:
* 1) current cpu if in @mask
* 2) any cpu of current node if in @mask
* 3) any other online cpu in @mask
*/
int smp_call_function_any(const struct cpumask *mask,
smp_call_func_t func, void *info, int wait)
{
unsigned int cpu;
const struct cpumask *nodemask;
int ret;
/* Try for same CPU (cheapest) */
cpu = get_cpu();
if (cpumask_test_cpu(cpu, mask))
goto call;
/* Try for same node. */
nodemask = cpumask_of_node(cpu_to_node(cpu));
for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
cpu = cpumask_next_and(cpu, nodemask, mask)) {
if (cpu_online(cpu))
goto call;
}
/* Any online will do: smp_call_function_single handles nr_cpu_ids. */
cpu = cpumask_any_and(mask, cpu_online_mask);
call:
ret = smp_call_function_single(cpu, func, info, wait);
put_cpu();
return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
/**
* __smp_call_function_single(): Run a function on a specific CPU
* @cpu: The CPU to run on.
* @data: Pre-allocated and setup data structure
* @wait: If true, wait until function has completed on specified CPU.
*
* Like smp_call_function_single(), but allow caller to pass in a
* pre-allocated data structure. Useful for embedding @data inside
* other structures, for instance.
*/
void __smp_call_function_single(int cpu, struct call_single_data *data,
int wait)
{
unsigned int this_cpu;
unsigned long flags;
this_cpu = get_cpu();
/*
* Can deadlock when called with interrupts disabled.
* We allow cpus that are not yet online though, as no one else can
* send smp call function interrupt to this cpu and as such deadlocks
* can't happen.
*/
WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
&& !oops_in_progress);
if (cpu == this_cpu) {
local_irq_save(flags);
data->func(data->info);
local_irq_restore(flags);
} else {
csd_lock(data);
generic_exec_single(cpu, data, wait);
}
put_cpu();
}
/**
* smp_call_function_many(): Run a function on a set of other CPUs.
* @mask: The set of cpus to run on (only runs on online subset).
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
* @wait: If true, wait (atomically) until function has completed
* on other CPUs.
*
* If @wait is true, then returns once @func has returned.
*
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler. Preemption
* must be disabled when calling this function.
*/
void smp_call_function_many(const struct cpumask *mask,
smp_call_func_t func, void *info, bool wait)
{
struct call_function_data *data;
unsigned long flags;
int refs, cpu, next_cpu, this_cpu = smp_processor_id();
/*
* Can deadlock when called with interrupts disabled.
* We allow cpus that are not yet online though, as no one else can
* send smp call function interrupt to this cpu and as such deadlocks
* can't happen.
*/
WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
&& !oops_in_progress && !early_boot_irqs_disabled);
/* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
cpu = cpumask_first_and(mask, cpu_online_mask);
if (cpu == this_cpu)
cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
/* No online cpus? We're done. */
if (cpu >= nr_cpu_ids)
return;
/* Do we have another CPU which isn't us? */
next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
if (next_cpu == this_cpu)
next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
/* Fastpath: do that cpu by itself. */
if (next_cpu >= nr_cpu_ids) {
smp_call_function_single(cpu, func, info, wait);
return;
}
data = &__get_cpu_var(cfd_data);
csd_lock(&data->csd);
/* This BUG_ON verifies our reuse assertions and can be removed */
BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
/*
* The global call function queue list add and delete are protected
* by a lock, but the list is traversed without any lock, relying
* on the rcu list add and delete to allow safe concurrent traversal.
* We reuse the call function data without waiting for any grace
* period after some other cpu removes it from the global queue.
* This means a cpu might find our data block as it is being
* filled out.
*
* We hold off the interrupt handler on the other cpu by
* ordering our writes to the cpu mask vs our setting of the
* refs counter. We assert only the cpu owning the data block
* will set a bit in cpumask, and each bit will only be cleared
* by the subject cpu. Each cpu must first find its bit is
* set and then check that refs is set indicating the element is
* ready to be processed, otherwise it must skip the entry.
*
* On the previous iteration refs was set to 0 by another cpu.
* To avoid the use of transitivity, set the counter to 0 here
* so the wmb will pair with the rmb in the interrupt handler.
*/
atomic_set(&data->refs, 0); /* convert 3rd to 1st party write */
data->csd.func = func;
data->csd.info = info;
/* Ensure 0 refs is visible before mask. Also orders func and info */
smp_wmb();
/* We rely on the "and" being processed before the store */
cpumask_and(data->cpumask, mask, cpu_online_mask);
cpumask_clear_cpu(this_cpu, data->cpumask);
refs = cpumask_weight(data->cpumask);
/* Some callers race with other cpus changing the passed mask */
if (unlikely(!refs)) {
csd_unlock(&data->csd);
return;
}
/*
* After we put an entry into the list, data->cpumask
* may be cleared again when another CPU sends another IPI for
* a SMP function call, so save a stable copy in
* data->cpumask_ipi for sending the IPI below.
*/
cpumask_copy(data->cpumask_ipi, data->cpumask);
raw_spin_lock_irqsave(&call_function.lock, flags);
/*
* Place entry at the _HEAD_ of the list, so that any cpu still
* observing the entry in generic_smp_call_function_interrupt()
* will not miss any other list entries:
*/
list_add_rcu(&data->csd.list, &call_function.queue);
/*
* We rely on the wmb() in list_add_rcu to complete our writes
* to the cpumask before this write to refs, which indicates
* data is on the list and is ready to be processed.
*/
atomic_set(&data->refs, refs);
raw_spin_unlock_irqrestore(&call_function.lock, flags);
/*
* Make the list addition visible before sending the ipi.
* (IPIs must obey or appear to obey normal Linux cache
* coherency rules -- see comment in generic_exec_single).
*/
smp_mb();
/* Send a message to all CPUs in the map */
arch_send_call_function_ipi_mask(data->cpumask_ipi);
/* Optionally wait for the CPUs to complete */
if (wait)
csd_lock_wait(&data->csd);
}
EXPORT_SYMBOL(smp_call_function_many);
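/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): running a function on every other online cpu in a mask, with
 * preemption disabled as required by the comment above.
 */
#if 0
static void example_flush(void *info)
{
	/* runs from the IPI handler with interrupts disabled */
}

static void example_flush_others(void)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, example_flush, NULL, true);
	preempt_enable();
}
#endif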
/**
* smp_call_function(): Run a function on all other CPUs.
* @func: The function to run. This must be fast and non-blocking.
* @info: An arbitrary pointer to pass to the function.
* @wait: If true, wait (atomically) until function has completed
* on other CPUs.
*
* Returns 0.
*
* If @wait is true, then returns once @func has returned; otherwise
* it returns just before the target cpu calls @func.
*
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler or from a bottom half handler.
*/
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
preempt_disable();
smp_call_function_many(cpu_online_mask, func, info, wait);
preempt_enable();
return 0;
}
EXPORT_SYMBOL(smp_call_function);
void ipi_call_lock(void)
{
raw_spin_lock(&call_function.lock);
}
void ipi_call_unlock(void)
{
raw_spin_unlock(&call_function.lock);
}
void ipi_call_lock_irq(void)
{
raw_spin_lock_irq(&call_function.lock);
}
void ipi_call_unlock_irq(void)
{
raw_spin_unlock_irq(&call_function.lock);
}
#endif /* CONFIG_USE_GENERIC_SMP_HELPERS */
/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);
/*
* Setup routine for controlling SMP activation
*
* Command-line option of "nosmp" or "maxcpus=0" will disable SMP
* activation entirely (the MPS table probe still happens, though).
*
* Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
* greater than 0, limits the maximum number of CPUs activated in
* SMP mode to <NUM>.
*/
void __weak arch_disable_smp_support(void) { }
static int __init nosmp(char *str)
{
setup_max_cpus = 0;
arch_disable_smp_support();
return 0;
}
early_param("nosmp", nosmp);
/* this is the hard limit */
static int __init nrcpus(char *str)
{
int nr_cpus;
get_option(&str, &nr_cpus);
if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
nr_cpu_ids = nr_cpus;
return 0;
}
early_param("nr_cpus", nrcpus);
static int __init maxcpus(char *str)
{
get_option(&str, &setup_max_cpus);
if (setup_max_cpus == 0)
arch_disable_smp_support();
return 0;
}
early_param("maxcpus", maxcpus);
/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);
/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}
/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
unsigned int cpu;
/* FIXME: This should be done in userspace --RR */
for_each_present_cpu(cpu) {
if (num_online_cpus() >= setup_max_cpus)
break;
if (!cpu_online(cpu))
cpu_up(cpu);
}
/* Any cleanup work */
printk(KERN_INFO "Brought up %ld CPUs\n", (long)num_online_cpus());
smp_cpus_done(setup_max_cpus);
}
/*
* Call a function on all processors. May be used during early boot while
* early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
* of local_irq_disable/enable().
*/
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
unsigned long flags;
int ret = 0;
preempt_disable();
ret = smp_call_function(func, info, wait);
local_irq_save(flags);
func(info);
local_irq_restore(flags);
preempt_enable();
return ret;
}
EXPORT_SYMBOL(on_each_cpu);
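/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): on_each_cpu() runs the callback on every online cpu,
 * including the calling one, so a per-cpu counter is a natural fit.
 */
#if 0
static DEFINE_PER_CPU(unsigned long, example_hits);

static void example_bump(void *unused)
{
	__get_cpu_var(example_hits)++;
}

static void example_bump_all(void)
{
	/* wait == 1: return only after every cpu has bumped its counter */
	on_each_cpu(example_bump, NULL, 1);
}
#endif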
|
gpl-2.0
|
humberos/android_kernel_sony_msm8994
|
drivers/gpu/drm/radeon/radeon_irq_kms.c
|
1529
|
13048
|
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
* Copyright 2009 Jerome Glisse.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
*/
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
#define RADEON_WAIT_IDLE_TIMEOUT 200
/**
* radeon_driver_irq_handler_kms - irq handler for KMS
*
* @DRM_IRQ_ARGS: args
*
* This is the irq handler for the radeon KMS driver (all asics).
* radeon_irq_process is a macro that points to the per-asic
* irq handler callback.
*/
irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
struct radeon_device *rdev = dev->dev_private;
return radeon_irq_process(rdev);
}
/*
* Handle hotplug events outside the interrupt handler proper.
*/
/**
* radeon_hotplug_work_func - display hotplug work handler
*
* @work: work struct
*
* This is the hot plug event work handler (all asics).
* The work gets scheduled from the irq handler if there
* was a hot plug interrupt. It walks the connector table
* and calls the hotplug handler for each one, then sends
* a drm hotplug event to alert userspace.
*/
static void radeon_hotplug_work_func(struct work_struct *work)
{
struct radeon_device *rdev = container_of(work, struct radeon_device,
hotplug_work);
struct drm_device *dev = rdev->ddev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
if (mode_config->num_connector) {
list_for_each_entry(connector, &mode_config->connector_list, head)
radeon_connector_hotplug(connector);
}
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
}
/**
* radeon_driver_irq_preinstall_kms - drm irq preinstall callback
*
* @dev: drm dev pointer
*
* Gets the hw ready to enable irqs (all asics).
* This function disables all interrupt sources on the GPU.
*/
void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
unsigned long irqflags;
unsigned i;
spin_lock_irqsave(&rdev->irq.lock, irqflags);
/* Disable *all* interrupts */
for (i = 0; i < RADEON_NUM_RINGS; i++)
atomic_set(&rdev->irq.ring_int[i], 0);
for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
rdev->irq.hpd[i] = false;
for (i = 0; i < RADEON_MAX_CRTCS; i++) {
rdev->irq.crtc_vblank_int[i] = false;
atomic_set(&rdev->irq.pflip[i], 0);
rdev->irq.afmt[i] = false;
}
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
/* Clear bits */
radeon_irq_process(rdev);
}
/**
* radeon_driver_irq_postinstall_kms - drm irq postinstall callback
*
* @dev: drm dev pointer
*
* Handles work that must be done after enabling irqs (all asics).
* Returns 0 on success.
*/
int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
{
dev->max_vblank_count = 0x001fffff;
return 0;
}
/**
* radeon_driver_irq_uninstall_kms - drm irq uninstall callback
*
* @dev: drm dev pointer
*
* This function disables all interrupt sources on the GPU (all asics).
*/
void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
unsigned long irqflags;
unsigned i;
if (rdev == NULL) {
return;
}
spin_lock_irqsave(&rdev->irq.lock, irqflags);
/* Disable *all* interrupts */
for (i = 0; i < RADEON_NUM_RINGS; i++)
atomic_set(&rdev->irq.ring_int[i], 0);
for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
rdev->irq.hpd[i] = false;
for (i = 0; i < RADEON_MAX_CRTCS; i++) {
rdev->irq.crtc_vblank_int[i] = false;
atomic_set(&rdev->irq.pflip[i], 0);
rdev->irq.afmt[i] = false;
}
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
/**
* radeon_msi_ok - asic specific msi checks
*
* @rdev: radeon device pointer
*
* Handles asic specific MSI checks to determine if
* MSIs should be enabled on a particular chip (all asics).
* Returns true if MSIs should be enabled, false if MSIs
* should not be enabled.
*/
static bool radeon_msi_ok(struct radeon_device *rdev)
{
/* RV370/RV380 was first asic with MSI support */
if (rdev->family < CHIP_RV380)
return false;
/* MSIs don't work on AGP */
if (rdev->flags & RADEON_IS_AGP)
return false;
/* force MSI on */
if (radeon_msi == 1)
return true;
else if (radeon_msi == 0)
return false;
/* Quirks */
/* HP RS690 only seems to work with MSIs. */
if ((rdev->pdev->device == 0x791f) &&
(rdev->pdev->subsystem_vendor == 0x103c) &&
(rdev->pdev->subsystem_device == 0x30c2))
return true;
/* Dell RS690 only seems to work with MSIs. */
if ((rdev->pdev->device == 0x791f) &&
(rdev->pdev->subsystem_vendor == 0x1028) &&
(rdev->pdev->subsystem_device == 0x01fc))
return true;
/* Dell RS690 only seems to work with MSIs. */
if ((rdev->pdev->device == 0x791f) &&
(rdev->pdev->subsystem_vendor == 0x1028) &&
(rdev->pdev->subsystem_device == 0x01fd))
return true;
/* Gateway RS690 only seems to work with MSIs. */
if ((rdev->pdev->device == 0x791f) &&
(rdev->pdev->subsystem_vendor == 0x107b) &&
(rdev->pdev->subsystem_device == 0x0185))
return true;
/* try and enable MSIs by default on all RS690s */
if (rdev->family == CHIP_RS690)
return true;
/* RV515 seems to have MSI issues where it loses
* MSI rearms occasionally. This leads to lockups and freezes.
* Disable it by default.
*/
if (rdev->family == CHIP_RV515)
return false;
if (rdev->flags & RADEON_IS_IGP) {
/* APUs work fine with MSIs */
if (rdev->family >= CHIP_PALM)
return true;
/* lots of IGPs have problems with MSIs */
return false;
}
return true;
}
/**
* radeon_irq_kms_init - init driver interrupt info
*
* @rdev: radeon device pointer
*
* Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
* Returns 0 for success, error for failure.
*/
int radeon_irq_kms_init(struct radeon_device *rdev)
{
int r = 0;
spin_lock_init(&rdev->irq.lock);
r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
if (r) {
return r;
}
/* enable msi */
rdev->msi_enabled = 0;
if (radeon_msi_ok(rdev)) {
int ret = pci_enable_msi(rdev->pdev);
if (!ret) {
rdev->msi_enabled = 1;
dev_info(rdev->dev, "radeon: using MSI.\n");
}
}
rdev->irq.installed = true;
r = drm_irq_install(rdev->ddev);
if (r) {
rdev->irq.installed = false;
return r;
}
INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
DRM_INFO("radeon: irq initialized.\n");
return 0;
}
/**
* radeon_irq_kms_fini - tear down driver interrupt info
*
* @rdev: radeon device pointer
*
* Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
*/
void radeon_irq_kms_fini(struct radeon_device *rdev)
{
drm_vblank_cleanup(rdev->ddev);
if (rdev->irq.installed) {
drm_irq_uninstall(rdev->ddev);
rdev->irq.installed = false;
if (rdev->msi_enabled)
pci_disable_msi(rdev->pdev);
flush_work(&rdev->hotplug_work);
}
}
/**
* radeon_irq_kms_sw_irq_get - enable software interrupt
*
* @rdev: radeon device pointer
* @ring: ring whose interrupt you want to enable
*
* Enables the software interrupt for a specific ring (all asics).
* The software interrupt is generally used to signal a fence on
* a particular ring.
*/
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
{
unsigned long irqflags;
if (!rdev->ddev->irq_enabled)
return;
if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) {
spin_lock_irqsave(&rdev->irq.lock, irqflags);
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
}
/**
* radeon_irq_kms_sw_irq_put - disable software interrupt
*
* @rdev: radeon device pointer
* @ring: ring whose interrupt you want to disable
*
* Disables the software interrupt for a specific ring (all asics).
* The software interrupt is generally used to signal a fence on
* a particular ring.
*/
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
{
unsigned long irqflags;
if (!rdev->ddev->irq_enabled)
return;
if (atomic_dec_and_test(&rdev->irq.ring_int[ring])) {
spin_lock_irqsave(&rdev->irq.lock, irqflags);
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
}
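/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): the sw_irq get/put pair is refcounted per ring, so nested
 * users only touch the hardware on the first get and the last put.
 */
#if 0
static void example_wait_on_ring(struct radeon_device *rdev, int ring)
{
	radeon_irq_kms_sw_irq_get(rdev, ring);
	/* ... sleep until the fence interrupt for this ring signals ... */
	radeon_irq_kms_sw_irq_put(rdev, ring);
}
#endif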
/**
* radeon_irq_kms_pflip_irq_get - enable pageflip interrupt
*
* @rdev: radeon device pointer
* @crtc: crtc whose interrupt you want to enable
*
* Enables the pageflip interrupt for a specific crtc (all asics).
* For pageflips we use the vblank interrupt source.
*/
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc)
{
unsigned long irqflags;
if (crtc < 0 || crtc >= rdev->num_crtc)
return;
if (!rdev->ddev->irq_enabled)
return;
if (atomic_inc_return(&rdev->irq.pflip[crtc]) == 1) {
spin_lock_irqsave(&rdev->irq.lock, irqflags);
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
}
/**
* radeon_irq_kms_pflip_irq_put - disable pageflip interrupt
*
* @rdev: radeon device pointer
* @crtc: crtc whose interrupt you want to disable
*
* Disables the pageflip interrupt for a specific crtc (all asics).
* For pageflips we use the vblank interrupt source.
*/
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc)
{
unsigned long irqflags;
if (crtc < 0 || crtc >= rdev->num_crtc)
return;
if (!rdev->ddev->irq_enabled)
return;
if (atomic_dec_and_test(&rdev->irq.pflip[crtc])) {
spin_lock_irqsave(&rdev->irq.lock, irqflags);
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
}
/**
* radeon_irq_kms_enable_afmt - enable audio format change interrupt
*
* @rdev: radeon device pointer
* @block: afmt block whose interrupt you want to enable
*
* Enables the afmt change interrupt for a specific afmt block (all asics).
*/
void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block)
{
unsigned long irqflags;
if (!rdev->ddev->irq_enabled)
return;
spin_lock_irqsave(&rdev->irq.lock, irqflags);
rdev->irq.afmt[block] = true;
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
/**
* radeon_irq_kms_disable_afmt - disable audio format change interrupt
*
* @rdev: radeon device pointer
* @block: afmt block whose interrupt you want to disable
*
* Disables the afmt change interrupt for a specific afmt block (all asics).
*/
void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block)
{
unsigned long irqflags;
if (!rdev->ddev->irq_enabled)
return;
spin_lock_irqsave(&rdev->irq.lock, irqflags);
rdev->irq.afmt[block] = false;
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
/**
* radeon_irq_kms_enable_hpd - enable hotplug detect interrupt
*
* @rdev: radeon device pointer
* @hpd_mask: mask of hpd pins you want to enable.
*
* Enables the hotplug detect interrupt for a specific hpd pin (all asics).
*/
void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
{
unsigned long irqflags;
int i;
if (!rdev->ddev->irq_enabled)
return;
spin_lock_irqsave(&rdev->irq.lock, irqflags);
for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
rdev->irq.hpd[i] |= !!(hpd_mask & (1 << i));
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
/**
* radeon_irq_kms_disable_hpd - disable hotplug detect interrupt
*
* @rdev: radeon device pointer
* @hpd_mask: mask of hpd pins you want to disable.
*
* Disables the hotplug detect interrupt for a specific hpd pin (all asics).
*/
void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask)
{
unsigned long irqflags;
int i;
if (!rdev->ddev->irq_enabled)
return;
spin_lock_irqsave(&rdev->irq.lock, irqflags);
for (i = 0; i < RADEON_MAX_HPD_PINS; ++i)
rdev->irq.hpd[i] &= !(hpd_mask & (1 << i));
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
|
gpl-2.0
|
ptmr3/GalaxyS2-GalaxyNote_Kernel
|
arch/arm/mach-at91/leds.c
|
4601
|
3690
|
/*
* LED driver for Atmel AT91-based boards.
*
* Copyright (C) SAN People (Pty) Ltd
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <mach/board.h>
#include <mach/gpio.h>
/* ------------------------------------------------------------------------- */
#if defined(CONFIG_NEW_LEDS)
/*
* New cross-platform LED support.
*/
static struct gpio_led_platform_data led_data;
static struct platform_device at91_gpio_leds_device = {
.name = "leds-gpio",
.id = -1,
.dev.platform_data = &led_data,
};
void __init at91_gpio_leds(struct gpio_led *leds, int nr)
{
int i;
if (!nr)
return;
for (i = 0; i < nr; i++)
at91_set_gpio_output(leds[i].gpio, leds[i].active_low);
led_data.leds = leds;
led_data.num_leds = nr;
platform_device_register(&at91_gpio_leds_device);
}
#else
void __init at91_gpio_leds(struct gpio_led *leds, int nr) {}
#endif
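/*
 * Illustrative sketch (not part of the original file): board code
 * hands a gpio_led table to at91_gpio_leds() from its init hook. The
 * led name, trigger and AT91_PIN_PA13 pin below are hypothetical
 * board-specific values.
 */
static struct gpio_led example_board_leds[] __maybe_unused = {
{
.name = "led0",
.gpio = AT91_PIN_PA13,
.active_low = 1,
.default_trigger = "heartbeat",
},
};
/* a board init function would then call:
* at91_gpio_leds(example_board_leds, ARRAY_SIZE(example_board_leds)); */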
/* ------------------------------------------------------------------------- */
#if defined (CONFIG_LEDS_ATMEL_PWM)
/*
* PWM Leds
*/
static struct gpio_led_platform_data pwm_led_data;
static struct platform_device at91_pwm_leds_device = {
.name = "leds-atmel-pwm",
.id = -1,
.dev.platform_data = &pwm_led_data,
};
void __init at91_pwm_leds(struct gpio_led *leds, int nr)
{
int i;
u32 pwm_mask = 0;
if (!nr)
return;
for (i = 0; i < nr; i++)
pwm_mask |= (1 << leds[i].gpio);
pwm_led_data.leds = leds;
pwm_led_data.num_leds = nr;
at91_add_device_pwm(pwm_mask);
platform_device_register(&at91_pwm_leds_device);
}
#else
void __init at91_pwm_leds(struct gpio_led *leds, int nr) {}
#endif
/* ------------------------------------------------------------------------- */
#if defined(CONFIG_LEDS)
#include <asm/leds.h>
/*
* Old ARM-specific LED framework; not fully functional when generic time is
* in use.
*/
static u8 at91_leds_cpu;
static u8 at91_leds_timer;
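/* The LEDs are wired active-low: driving the GPIO low turns them on */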
static inline void at91_led_on(unsigned int led)
{
at91_set_gpio_value(led, 0);
}
static inline void at91_led_off(unsigned int led)
{
at91_set_gpio_value(led, 1);
}
static inline void at91_led_toggle(unsigned int led)
{
unsigned long is_off = at91_get_gpio_value(led);
if (is_off)
at91_led_on(led);
else
at91_led_off(led);
}
/*
* Handle LED events.
*/
static void at91_leds_event(led_event_t evt)
{
unsigned long flags;
local_irq_save(flags);
switch (evt) {
case led_start: /* System startup */
at91_led_on(at91_leds_cpu);
break;
case led_stop: /* System stop / suspend */
at91_led_off(at91_leds_cpu);
break;
#ifdef CONFIG_LEDS_TIMER
case led_timer: /* Every 50 timer ticks */
at91_led_toggle(at91_leds_timer);
break;
#endif
#ifdef CONFIG_LEDS_CPU
case led_idle_start: /* Entering idle state */
at91_led_off(at91_leds_cpu);
break;
case led_idle_end: /* Exit idle state */
at91_led_on(at91_leds_cpu);
break;
#endif
default:
break;
}
local_irq_restore(flags);
}
static int __init leds_init(void)
{
if (!at91_leds_timer || !at91_leds_cpu)
return -ENODEV;
leds_event = at91_leds_event;
leds_event(led_start);
return 0;
}
__initcall(leds_init);
void __init at91_init_leds(u8 cpu_led, u8 timer_led)
{
/* Enable GPIO to access the LEDs */
at91_set_gpio_output(cpu_led, 1);
at91_set_gpio_output(timer_led, 1);
at91_leds_cpu = cpu_led;
at91_leds_timer = timer_led;
}
#else
void __init at91_init_leds(u8 cpu_led, u8 timer_led) {}
#endif
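/*
 * Illustrative sketch (not part of the original file): with the old
 * framework the board picks the two pins once at setup time; the
 * AT91_PIN_PB0/PB1 choices below are hypothetical.
 */
static void __init __maybe_unused example_old_led_setup(void)
{
/* cpu activity LED on PB0, timer tick LED on PB1 */
at91_init_leds(AT91_PIN_PB0, AT91_PIN_PB1);
}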
|
gpl-2.0
|
widz4rd/WIDzard-A850L
|
drivers/hwmon/atxp1.c
|
4857
|
9400
|
/*
* atxp1.c - kernel module for setting CPU VID and general purpose
* I/Os using the Attansic ATXP1 chip.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-vid.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("System voltages control via Attansic ATXP1");
MODULE_VERSION("0.6.3");
MODULE_AUTHOR("Sebastian Witt <se.witt@gmx.net>");
#define ATXP1_VID 0x00
#define ATXP1_CVID 0x01
#define ATXP1_GPIO1 0x06
#define ATXP1_GPIO2 0x0a
#define ATXP1_VIDENA 0x20
#define ATXP1_VIDMASK 0x1f
#define ATXP1_GPIO1MASK 0x0f
static const unsigned short normal_i2c[] = { 0x37, 0x4e, I2C_CLIENT_END };
static int atxp1_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int atxp1_remove(struct i2c_client *client);
static struct atxp1_data *atxp1_update_device(struct device *dev);
static int atxp1_detect(struct i2c_client *client, struct i2c_board_info *info);
static const struct i2c_device_id atxp1_id[] = {
{ "atxp1", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, atxp1_id);
static struct i2c_driver atxp1_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "atxp1",
},
.probe = atxp1_probe,
.remove = atxp1_remove,
.id_table = atxp1_id,
.detect = atxp1_detect,
.address_list = normal_i2c,
};
struct atxp1_data {
struct device *hwmon_dev;
struct mutex update_lock;
unsigned long last_updated;
u8 valid;
struct {
u8 vid; /* VID output register */
u8 cpu_vid; /* VID input from CPU */
u8 gpio1; /* General purpose I/O register 1 */
u8 gpio2; /* General purpose I/O register 2 */
} reg;
u8 vrm; /* Detected CPU VRM */
};
static struct atxp1_data *atxp1_update_device(struct device *dev)
{
struct i2c_client *client;
struct atxp1_data *data;
client = to_i2c_client(dev);
data = i2c_get_clientdata(client);
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
/* Update local register data */
data->reg.vid = i2c_smbus_read_byte_data(client, ATXP1_VID);
data->reg.cpu_vid = i2c_smbus_read_byte_data(client,
ATXP1_CVID);
data->reg.gpio1 = i2c_smbus_read_byte_data(client, ATXP1_GPIO1);
data->reg.gpio2 = i2c_smbus_read_byte_data(client, ATXP1_GPIO2);
data->valid = 1;
}
mutex_unlock(&data->update_lock);
return data;
}
/* sys file functions for cpu0_vid */
static ssize_t atxp1_showvcore(struct device *dev,
struct device_attribute *attr, char *buf)
{
int size;
struct atxp1_data *data;
data = atxp1_update_device(dev);
size = sprintf(buf, "%d\n", vid_from_reg(data->reg.vid & ATXP1_VIDMASK,
data->vrm));
return size;
}
static ssize_t atxp1_storevcore(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct atxp1_data *data;
struct i2c_client *client;
int vid, cvid;
unsigned long vcore;
int err;
client = to_i2c_client(dev);
data = atxp1_update_device(dev);
err = kstrtoul(buf, 10, &vcore);
if (err)
return err;
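/* round the requested voltage down to the chip's 25 mV step */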
vcore /= 25;
vcore *= 25;
/* Calculate VID */
vid = vid_to_reg(vcore, data->vrm);
if (vid < 0) {
dev_err(dev, "VID calculation failed.\n");
return -EINVAL;
}
/*
* If output enabled, use control register value.
* Otherwise original CPU VID
*/
if (data->reg.vid & ATXP1_VIDENA)
cvid = data->reg.vid & ATXP1_VIDMASK;
else
cvid = data->reg.cpu_vid;
/* Nothing changed, aborting */
if (vid == cvid)
return count;
dev_dbg(dev, "Setting VCore to %d mV (0x%02x)\n", (int)vcore, vid);
/* Write every 25 mV step to increase stability */
if (cvid > vid) {
for (; cvid >= vid; cvid--)
i2c_smbus_write_byte_data(client,
ATXP1_VID, cvid | ATXP1_VIDENA);
} else {
for (; cvid <= vid; cvid++)
i2c_smbus_write_byte_data(client,
ATXP1_VID, cvid | ATXP1_VIDENA);
}
data->valid = 0;
return count;
}
/*
* CPU core reference voltage
* unit: millivolt
*/
static DEVICE_ATTR(cpu0_vid, S_IRUGO | S_IWUSR, atxp1_showvcore,
atxp1_storevcore);
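/*
 * Worked example (not part of the original file): writing "1463" to
 * cpu0_vid is first rounded down to the 25 mV grid (1463 -> 1450),
 * converted to a VID code for the detected VRM, and then written out
 * one 25 mV step at a time towards the target.
 */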
/* sys file functions for GPIO1 */
static ssize_t atxp1_showgpio1(struct device *dev,
struct device_attribute *attr, char *buf)
{
int size;
struct atxp1_data *data;
data = atxp1_update_device(dev);
size = sprintf(buf, "0x%02x\n", data->reg.gpio1 & ATXP1_GPIO1MASK);
return size;
}
static ssize_t atxp1_storegpio1(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
struct atxp1_data *data;
struct i2c_client *client;
unsigned long value;
int err;
client = to_i2c_client(dev);
data = atxp1_update_device(dev);
err = kstrtoul(buf, 16, &value);
if (err)
return err;
value &= ATXP1_GPIO1MASK;
if (value != (data->reg.gpio1 & ATXP1_GPIO1MASK)) {
dev_info(dev, "Writing 0x%x to GPIO1.\n", (unsigned int)value);
i2c_smbus_write_byte_data(client, ATXP1_GPIO1, value);
data->valid = 0;
}
return count;
}
/*
* GPIO1 data register
* unit: four bits as hex (e.g. 0x0f)
*/
static DEVICE_ATTR(gpio1, S_IRUGO | S_IWUSR, atxp1_showgpio1, atxp1_storegpio1);
/* sys file functions for GPIO2 */
static ssize_t atxp1_showgpio2(struct device *dev,
struct device_attribute *attr, char *buf)
{
int size;
struct atxp1_data *data;
data = atxp1_update_device(dev);
size = sprintf(buf, "0x%02x\n", data->reg.gpio2);
return size;
}
static ssize_t atxp1_storegpio2(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct atxp1_data *data = atxp1_update_device(dev);
struct i2c_client *client = to_i2c_client(dev);
unsigned long value;
int err;
err = kstrtoul(buf, 16, &value);
if (err)
return err;
value &= 0xff;
if (value != data->reg.gpio2) {
dev_info(dev, "Writing 0x%x to GPIO2.\n", (unsigned int)value);
i2c_smbus_write_byte_data(client, ATXP1_GPIO2, value);
data->valid = 0;
}
return count;
}
/*
* GPIO2 data register
* unit: eight bits as hex (e.g. 0xff)
*/
static DEVICE_ATTR(gpio2, S_IRUGO | S_IWUSR, atxp1_showgpio2, atxp1_storegpio2);
static struct attribute *atxp1_attributes[] = {
&dev_attr_gpio1.attr,
&dev_attr_gpio2.attr,
&dev_attr_cpu0_vid.attr,
NULL
};
static const struct attribute_group atxp1_group = {
.attrs = atxp1_attributes,
};
/* Return 0 if detection is successful, -ENODEV otherwise */
static int atxp1_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
u8 temp;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
/* Detect ATXP1, checking if vendor ID registers are all zero */
if (!((i2c_smbus_read_byte_data(new_client, 0x3e) == 0) &&
(i2c_smbus_read_byte_data(new_client, 0x3f) == 0) &&
(i2c_smbus_read_byte_data(new_client, 0xfe) == 0) &&
(i2c_smbus_read_byte_data(new_client, 0xff) == 0)))
return -ENODEV;
/*
* No vendor ID; now check whether the non-existent registers 0x10 and
* 0x11 mirror register 0x00
*/
temp = i2c_smbus_read_byte_data(new_client, 0x00);
if (!((i2c_smbus_read_byte_data(new_client, 0x10) == temp) &&
(i2c_smbus_read_byte_data(new_client, 0x11) == temp)))
return -ENODEV;
/* Get VRM */
temp = vid_which_vrm();
if ((temp != 90) && (temp != 91)) {
dev_err(&adapter->dev, "atxp1: Not supporting VRM %d.%d\n",
temp / 10, temp % 10);
return -ENODEV;
}
strlcpy(info->type, "atxp1", I2C_NAME_SIZE);
return 0;
}
static int atxp1_probe(struct i2c_client *new_client,
const struct i2c_device_id *id)
{
struct atxp1_data *data;
int err;
data = kzalloc(sizeof(struct atxp1_data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto exit;
}
/* Get VRM */
data->vrm = vid_which_vrm();
i2c_set_clientdata(new_client, data);
data->valid = 0;
mutex_init(&data->update_lock);
/* Register sysfs hooks */
err = sysfs_create_group(&new_client->dev.kobj, &atxp1_group);
if (err)
goto exit_free;
data->hwmon_dev = hwmon_device_register(&new_client->dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
goto exit_remove_files;
}
dev_info(&new_client->dev, "Using VRM: %d.%d\n",
data->vrm / 10, data->vrm % 10);
return 0;
exit_remove_files:
sysfs_remove_group(&new_client->dev.kobj, &atxp1_group);
exit_free:
kfree(data);
exit:
return err;
}
static int atxp1_remove(struct i2c_client *client)
{
struct atxp1_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &atxp1_group);
kfree(data);
return 0;
}
module_i2c_driver(atxp1_driver);
|
gpl-2.0
|
ptmr3/S4_jflte-xx-_Kernel
|
arch/arm/mach-davinci/devices.c
|
4857
|
8504
|
/*
* mach-davinci/devices.c
*
* DaVinci platform device setup/initialization
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <mach/i2c.h>
#include <mach/irqs.h>
#include <mach/cputype.h>
#include <mach/mux.h>
#include <mach/edma.h>
#include <mach/mmc.h>
#include <mach/time.h>
#include "davinci.h"
#include "clock.h"
#define DAVINCI_I2C_BASE 0x01C21000
#define DAVINCI_ATA_BASE 0x01C66000
#define DAVINCI_MMCSD0_BASE 0x01E10000
#define DM355_MMCSD0_BASE 0x01E11000
#define DM355_MMCSD1_BASE 0x01E00000
#define DM365_MMCSD0_BASE 0x01D11000
#define DM365_MMCSD1_BASE 0x01D00000
void __iomem *davinci_sysmod_base;
void davinci_map_sysmod(void)
{
davinci_sysmod_base = ioremap_nocache(DAVINCI_SYSTEM_MODULE_BASE,
0x800);
/*
* Trigger a BUG since a lot of board initialization code depends
* on system module availability. ioremap() failing this early
* needs careful looking into anyway.
*/
BUG_ON(!davinci_sysmod_base);
}
static struct resource i2c_resources[] = {
{
.start = DAVINCI_I2C_BASE,
.end = DAVINCI_I2C_BASE + 0x40,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_I2C,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device davinci_i2c_device = {
.name = "i2c_davinci",
.id = 1,
.num_resources = ARRAY_SIZE(i2c_resources),
.resource = i2c_resources,
};
void __init davinci_init_i2c(struct davinci_i2c_platform_data *pdata)
{
if (cpu_is_davinci_dm644x())
davinci_cfg_reg(DM644X_I2C);
davinci_i2c_device.dev.platform_data = pdata;
(void) platform_device_register(&davinci_i2c_device);
}
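/*
 * Illustrative sketch (not part of the original file): board code
 * supplies the bus parameters through davinci_i2c_platform_data; the
 * 100 kHz / zero-delay values below are hypothetical, not defaults
 * taken from any particular board.
 */
static struct davinci_i2c_platform_data example_i2c_pdata __maybe_unused = {
.bus_freq = 100, /* kHz */
.bus_delay = 0, /* usec */
};
/* a board init function would then call: davinci_init_i2c(&example_i2c_pdata); */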
static struct resource ide_resources[] = {
{
.start = DAVINCI_ATA_BASE,
.end = DAVINCI_ATA_BASE + 0x7ff,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_IDE,
.end = IRQ_IDE,
.flags = IORESOURCE_IRQ,
},
};
static u64 ide_dma_mask = DMA_BIT_MASK(32);
static struct platform_device ide_device = {
.name = "palm_bk3710",
.id = -1,
.resource = ide_resources,
.num_resources = ARRAY_SIZE(ide_resources),
.dev = {
.dma_mask = &ide_dma_mask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
void __init davinci_init_ide(void)
{
if (cpu_is_davinci_dm644x()) {
davinci_cfg_reg(DM644X_HPIEN_DISABLE);
davinci_cfg_reg(DM644X_ATAEN);
davinci_cfg_reg(DM644X_HDIREN);
} else if (cpu_is_davinci_dm646x()) {
/* IRQ_DM646X_IDE is the same as IRQ_IDE */
davinci_cfg_reg(DM646X_ATAEN);
} else {
WARN_ON(1);
return;
}
platform_device_register(&ide_device);
}
#if defined(CONFIG_MMC_DAVINCI) || defined(CONFIG_MMC_DAVINCI_MODULE)
static u64 mmcsd0_dma_mask = DMA_BIT_MASK(32);
static struct resource mmcsd0_resources[] = {
{
/* different on dm355 */
.start = DAVINCI_MMCSD0_BASE,
.end = DAVINCI_MMCSD0_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
/* IRQs: MMC/SD, then SDIO */
{
.start = IRQ_MMCINT,
.flags = IORESOURCE_IRQ,
}, {
/* different on dm355 */
.start = IRQ_SDIOINT,
.flags = IORESOURCE_IRQ,
},
/* DMA channels: RX, then TX */
{
.start = EDMA_CTLR_CHAN(0, DAVINCI_DMA_MMCRXEVT),
.flags = IORESOURCE_DMA,
}, {
.start = EDMA_CTLR_CHAN(0, DAVINCI_DMA_MMCTXEVT),
.flags = IORESOURCE_DMA,
},
};
static struct platform_device davinci_mmcsd0_device = {
.name = "davinci_mmc",
.id = 0,
.dev = {
.dma_mask = &mmcsd0_dma_mask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
.num_resources = ARRAY_SIZE(mmcsd0_resources),
.resource = mmcsd0_resources,
};
static u64 mmcsd1_dma_mask = DMA_BIT_MASK(32);
static struct resource mmcsd1_resources[] = {
{
.start = DM355_MMCSD1_BASE,
.end = DM355_MMCSD1_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
/* IRQs: MMC/SD, then SDIO */
{
.start = IRQ_DM355_MMCINT1,
.flags = IORESOURCE_IRQ,
}, {
.start = IRQ_DM355_SDIOINT1,
.flags = IORESOURCE_IRQ,
},
/* DMA channels: RX, then TX */
{
.start = EDMA_CTLR_CHAN(0, 30), /* rx */
.flags = IORESOURCE_DMA,
}, {
.start = EDMA_CTLR_CHAN(0, 31), /* tx */
.flags = IORESOURCE_DMA,
},
};
static struct platform_device davinci_mmcsd1_device = {
.name = "davinci_mmc",
.id = 1,
.dev = {
.dma_mask = &mmcsd1_dma_mask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
.num_resources = ARRAY_SIZE(mmcsd1_resources),
.resource = mmcsd1_resources,
};
void __init davinci_setup_mmc(int module, struct davinci_mmc_config *config)
{
struct platform_device *pdev = NULL;
if (WARN_ON(cpu_is_davinci_dm646x()))
return;
/* REVISIT: update PINMUX, ARM_IRQMUX, and EDMA_EVTMUX here too;
* for example if MMCSD1 is used for SDIO, maybe DAT2 is unused.
*
* FIXME dm6441 (no MMC/SD), dm357 (one), and dm335 (two) are
* not handled right here ...
*/
switch (module) {
case 1:
if (cpu_is_davinci_dm355()) {
/* REVISIT we may not need all these pins if e.g. this
* is a hard-wired SDIO device...
*/
davinci_cfg_reg(DM355_SD1_CMD);
davinci_cfg_reg(DM355_SD1_CLK);
davinci_cfg_reg(DM355_SD1_DATA0);
davinci_cfg_reg(DM355_SD1_DATA1);
davinci_cfg_reg(DM355_SD1_DATA2);
davinci_cfg_reg(DM355_SD1_DATA3);
} else if (cpu_is_davinci_dm365()) {
/* Configure pull down control */
unsigned v;
v = __raw_readl(DAVINCI_SYSMOD_VIRT(SYSMOD_PUPDCTL1));
__raw_writel(v & ~0xfc0,
DAVINCI_SYSMOD_VIRT(SYSMOD_PUPDCTL1));
mmcsd1_resources[0].start = DM365_MMCSD1_BASE;
mmcsd1_resources[0].end = DM365_MMCSD1_BASE +
SZ_4K - 1;
mmcsd1_resources[2].start = IRQ_DM365_SDIOINT1;
} else
break;
pdev = &davinci_mmcsd1_device;
break;
case 0:
if (cpu_is_davinci_dm355()) {
mmcsd0_resources[0].start = DM355_MMCSD0_BASE;
mmcsd0_resources[0].end = DM355_MMCSD0_BASE + SZ_4K - 1;
mmcsd0_resources[2].start = IRQ_DM355_SDIOINT0;
/* expose all 6 MMC0 signals: CLK, CMD, DATA[0..3] */
davinci_cfg_reg(DM355_MMCSD0);
/* enable RX EDMA */
davinci_cfg_reg(DM355_EVT26_MMC0_RX);
} else if (cpu_is_davinci_dm365()) {
mmcsd0_resources[0].start = DM365_MMCSD0_BASE;
mmcsd0_resources[0].end = DM365_MMCSD0_BASE +
SZ_4K - 1;
mmcsd0_resources[2].start = IRQ_DM365_SDIOINT0;
} else if (cpu_is_davinci_dm644x()) {
/* REVISIT: should this be in board-init code? */
/* Power-on 3.3V IO cells */
__raw_writel(0,
DAVINCI_SYSMOD_VIRT(SYSMOD_VDD3P3VPWDN));
/* Set up the pull register for MMC */
davinci_cfg_reg(DM644X_MSTK);
}
pdev = &davinci_mmcsd0_device;
break;
}
if (WARN_ON(!pdev))
return;
pdev->dev.platform_data = config;
platform_device_register(pdev);
}
#else
void __init davinci_setup_mmc(int module, struct davinci_mmc_config *config)
{
}
#endif
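/*
 * Illustrative sketch (not part of the original file): a board
 * registers one controller by filling in davinci_mmc_config and
 * calling davinci_setup_mmc(); the 4-wire bus and 50 MHz cap below
 * are hypothetical board values.
 */
static struct davinci_mmc_config example_mmc_config __maybe_unused = {
.wires = 4, /* 4-bit data bus */
.max_freq = 50000000, /* Hz */
};
/* a board init function would then call: davinci_setup_mmc(0, &example_mmc_config); */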
/*-------------------------------------------------------------------------*/
static struct resource wdt_resources[] = {
{
.start = DAVINCI_WDOG_BASE,
.end = DAVINCI_WDOG_BASE + SZ_1K - 1,
.flags = IORESOURCE_MEM,
},
};
struct platform_device davinci_wdt_device = {
.name = "watchdog",
.id = -1,
.num_resources = ARRAY_SIZE(wdt_resources),
.resource = wdt_resources,
};
void davinci_restart(char mode, const char *cmd)
{
davinci_watchdog_reset(&davinci_wdt_device);
}
static void davinci_init_wdt(void)
{
platform_device_register(&davinci_wdt_device);
}
/*-------------------------------------------------------------------------*/
static struct platform_device davinci_pcm_device = {
.name = "davinci-pcm-audio",
.id = -1,
};
static void davinci_init_pcm(void)
{
platform_device_register(&davinci_pcm_device);
}
/*-------------------------------------------------------------------------*/
struct davinci_timer_instance davinci_timer_instance[2] = {
{
.base = DAVINCI_TIMER0_BASE,
.bottom_irq = IRQ_TINT0_TINT12,
.top_irq = IRQ_TINT0_TINT34,
},
{
.base = DAVINCI_TIMER1_BASE,
.bottom_irq = IRQ_TINT1_TINT12,
.top_irq = IRQ_TINT1_TINT34,
},
};
/*-------------------------------------------------------------------------*/
static int __init davinci_init_devices(void)
{
/* please keep these calls, and their implementations above,
* in alphabetical order so they're easier to sort through.
*/
davinci_init_pcm();
davinci_init_wdt();
return 0;
}
arch_initcall(davinci_init_devices);
|
gpl-2.0
|
BoostPop/kernel_lge_hammerhead
|
drivers/hwmon/lm80.c
|
4857
|
22124
|
/*
* lm80.c - From lm_sensors, Linux kernel modules for hardware
* monitoring
* Copyright (C) 1998, 1999 Frodo Looijaard <frodol@dds.nl>
* and Philip Edelbrock <phil@netroedge.com>
*
* Ported to Linux 2.6 by Tiago Sousa <mirage@kaotik.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d,
0x2e, 0x2f, I2C_CLIENT_END };
/* Many LM80 constants specified below */
/* The LM80 registers */
#define LM80_REG_IN_MAX(nr) (0x2a + (nr) * 2)
#define LM80_REG_IN_MIN(nr) (0x2b + (nr) * 2)
#define LM80_REG_IN(nr) (0x20 + (nr))
#define LM80_REG_FAN1 0x28
#define LM80_REG_FAN2 0x29
#define LM80_REG_FAN_MIN(nr) (0x3b + (nr))
#define LM80_REG_TEMP 0x27
#define LM80_REG_TEMP_HOT_MAX 0x38
#define LM80_REG_TEMP_HOT_HYST 0x39
#define LM80_REG_TEMP_OS_MAX 0x3a
#define LM80_REG_TEMP_OS_HYST 0x3b
#define LM80_REG_CONFIG 0x00
#define LM80_REG_ALARM1 0x01
#define LM80_REG_ALARM2 0x02
#define LM80_REG_MASK1 0x03
#define LM80_REG_MASK2 0x04
#define LM80_REG_FANDIV 0x05
#define LM80_REG_RES 0x06
#define LM96080_REG_CONV_RATE 0x07
#define LM96080_REG_MAN_ID 0x3e
#define LM96080_REG_DEV_ID 0x3f
/*
* Conversions. Rounding and limit checking is only done on the TO_REG
* variants. Note that you should be a bit careful about which arguments
* these macros are called with: arguments may be evaluated more than once.
* Fixing this is just not worth it.
*/
#define IN_TO_REG(val) (SENSORS_LIMIT(((val) + 5) / 10, 0, 255))
#define IN_FROM_REG(val) ((val) * 10)
static inline unsigned char FAN_TO_REG(unsigned rpm, unsigned div)
{
if (rpm == 0)
return 255;
rpm = SENSORS_LIMIT(rpm, 1, 1000000);
return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \
(val) == 255 ? 0 : 1350000/((div) * (val)))
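/*
 * Worked example (not part of the original file): with div = 2, a fan
 * at 3000 RPM is stored as (1350000 + 3000) / (3000 * 2) = 225, and
 * FAN_FROM_REG(225, 2) recovers 1350000 / (2 * 225) = 3000 RPM. A
 * register value of 255 reads back as 0 RPM (stopped) and 0 reads
 * back as -1 (invalid).
 */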
static inline long TEMP_FROM_REG(u16 temp)
{
long res;
temp >>= 4;
if (temp < 0x0800)
res = 625 * (long) temp;
else
res = ((long) temp - 0x01000) * 625;
return res / 10;
}
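/*
 * Worked example (not part of the original file): a 16-bit reading of
 * 0x1900 shifts down to the 12-bit value 0x190 = 400; as that is
 * below 0x800 it is positive, giving 400 * 625 / 10 = 25000
 * millidegrees, i.e. 25.0 degrees C at 0.0625 degree resolution.
 */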
#define TEMP_LIMIT_FROM_REG(val) (((val) > 0x80 ? \
(val) - 0x100 : (val)) * 1000)
#define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val) < 0 ? \
((val) - 500) / 1000 : ((val) + 500) / 1000, 0, 255)
#define DIV_FROM_REG(val) (1 << (val))
/*
* Client data (each client gets its own)
*/
struct lm80_data {
struct device *hwmon_dev;
struct mutex update_lock;
char error; /* !=0 if error occurred during last update */
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
u8 in[7]; /* Register value */
u8 in_max[7]; /* Register value */
u8 in_min[7]; /* Register value */
u8 fan[2]; /* Register value */
u8 fan_min[2]; /* Register value */
u8 fan_div[2]; /* Register encoding, shifted right */
u16 temp; /* Register values, shifted right */
u8 temp_hot_max; /* Register value */
u8 temp_hot_hyst; /* Register value */
u8 temp_os_max; /* Register value */
u8 temp_os_hyst; /* Register value */
u16 alarms; /* Register encoding, combined */
};
/*
* Functions declaration
*/
static int lm80_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int lm80_detect(struct i2c_client *client, struct i2c_board_info *info);
static void lm80_init_client(struct i2c_client *client);
static int lm80_remove(struct i2c_client *client);
static struct lm80_data *lm80_update_device(struct device *dev);
static int lm80_read_value(struct i2c_client *client, u8 reg);
static int lm80_write_value(struct i2c_client *client, u8 reg, u8 value);
/*
* Driver data (common to all clients)
*/
static const struct i2c_device_id lm80_id[] = {
{ "lm80", 0 },
{ "lm96080", 1 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm80_id);
static struct i2c_driver lm80_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "lm80",
},
.probe = lm80_probe,
.remove = lm80_remove,
.id_table = lm80_id,
.detect = lm80_detect,
.address_list = normal_i2c,
};
/*
* Sysfs stuff
*/
#define show_in(suffix, value) \
static ssize_t show_in_##suffix(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
int nr = to_sensor_dev_attr(attr)->index; \
struct lm80_data *data = lm80_update_device(dev); \
if (IS_ERR(data)) \
return PTR_ERR(data); \
return sprintf(buf, "%d\n", IN_FROM_REG(data->value[nr])); \
}
show_in(min, in_min)
show_in(max, in_max)
show_in(input, in)
#define set_in(suffix, value, reg) \
static ssize_t set_in_##suffix(struct device *dev, \
struct device_attribute *attr, const char *buf, size_t count) \
{ \
int nr = to_sensor_dev_attr(attr)->index; \
struct i2c_client *client = to_i2c_client(dev); \
struct lm80_data *data = i2c_get_clientdata(client); \
long val; \
int err = kstrtol(buf, 10, &val); \
if (err < 0) \
return err; \
\
mutex_lock(&data->update_lock);\
data->value[nr] = IN_TO_REG(val); \
lm80_write_value(client, reg(nr), data->value[nr]); \
mutex_unlock(&data->update_lock);\
return count; \
}
set_in(min, in_min, LM80_REG_IN_MIN)
set_in(max, in_max, LM80_REG_IN_MAX)
#define show_fan(suffix, value) \
static ssize_t show_fan_##suffix(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
int nr = to_sensor_dev_attr(attr)->index; \
struct lm80_data *data = lm80_update_device(dev); \
if (IS_ERR(data)) \
return PTR_ERR(data); \
return sprintf(buf, "%d\n", FAN_FROM_REG(data->value[nr], \
DIV_FROM_REG(data->fan_div[nr]))); \
}
show_fan(min, fan_min)
show_fan(input, fan)
static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr,
char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct lm80_data *data = lm80_update_device(dev);
if (IS_ERR(data))
return PTR_ERR(data);
return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[nr]));
}
static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct lm80_data *data = i2c_get_clientdata(client);
unsigned long val;
int err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
mutex_lock(&data->update_lock);
data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
lm80_write_value(client, LM80_REG_FAN_MIN(nr + 1), data->fan_min[nr]);
mutex_unlock(&data->update_lock);
return count;
}
/*
* Note: we save and restore the fan minimum here, because its value is
* determined in part by the fan divisor. This follows the principle of
* least surprise; the user doesn't expect the fan minimum to change just
* because the divisor changed.
*/
static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct lm80_data *data = i2c_get_clientdata(client);
unsigned long min, val;
u8 reg;
int err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
/* Save fan_min */
mutex_lock(&data->update_lock);
min = FAN_FROM_REG(data->fan_min[nr],
DIV_FROM_REG(data->fan_div[nr]));
switch (val) {
case 1:
data->fan_div[nr] = 0;
break;
case 2:
data->fan_div[nr] = 1;
break;
case 4:
data->fan_div[nr] = 2;
break;
case 8:
data->fan_div[nr] = 3;
break;
default:
dev_err(&client->dev, "fan_div value %ld not supported. Choose one of 1, 2, 4 or 8!\n", val);
mutex_unlock(&data->update_lock);
return -EINVAL;
}
reg = (lm80_read_value(client, LM80_REG_FANDIV) & ~(3 << (2 * (nr + 1))))
| (data->fan_div[nr] << (2 * (nr + 1)));
lm80_write_value(client, LM80_REG_FANDIV, reg);
/* Restore fan_min */
data->fan_min[nr] = FAN_TO_REG(min, DIV_FROM_REG(data->fan_div[nr]));
lm80_write_value(client, LM80_REG_FAN_MIN(nr + 1), data->fan_min[nr]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_temp_input1(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lm80_data *data = lm80_update_device(dev);
if (IS_ERR(data))
return PTR_ERR(data);
return sprintf(buf, "%ld\n", TEMP_FROM_REG(data->temp));
}
#define show_temp(suffix, value) \
static ssize_t show_temp_##suffix(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct lm80_data *data = lm80_update_device(dev); \
if (IS_ERR(data)) \
return PTR_ERR(data); \
return sprintf(buf, "%d\n", TEMP_LIMIT_FROM_REG(data->value)); \
}
show_temp(hot_max, temp_hot_max);
show_temp(hot_hyst, temp_hot_hyst);
show_temp(os_max, temp_os_max);
show_temp(os_hyst, temp_os_hyst);
#define set_temp(suffix, value, reg) \
static ssize_t set_temp_##suffix(struct device *dev, \
struct device_attribute *attr, const char *buf, size_t count) \
{ \
struct i2c_client *client = to_i2c_client(dev); \
struct lm80_data *data = i2c_get_clientdata(client); \
long val; \
int err = kstrtol(buf, 10, &val); \
if (err < 0) \
return err; \
\
mutex_lock(&data->update_lock); \
data->value = TEMP_LIMIT_TO_REG(val); \
lm80_write_value(client, reg, data->value); \
mutex_unlock(&data->update_lock); \
return count; \
}
set_temp(hot_max, temp_hot_max, LM80_REG_TEMP_HOT_MAX);
set_temp(hot_hyst, temp_hot_hyst, LM80_REG_TEMP_HOT_HYST);
set_temp(os_max, temp_os_max, LM80_REG_TEMP_OS_MAX);
set_temp(os_hyst, temp_os_hyst, LM80_REG_TEMP_OS_HYST);
static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct lm80_data *data = lm80_update_device(dev);
if (IS_ERR(data))
return PTR_ERR(data);
return sprintf(buf, "%u\n", data->alarms);
}
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
char *buf)
{
int bitnr = to_sensor_dev_attr(attr)->index;
struct lm80_data *data = lm80_update_device(dev);
if (IS_ERR(data))
return PTR_ERR(data);
return sprintf(buf, "%u\n", (data->alarms >> bitnr) & 1);
}
static SENSOR_DEVICE_ATTR(in0_min, S_IWUSR | S_IRUGO,
show_in_min, set_in_min, 0);
static SENSOR_DEVICE_ATTR(in1_min, S_IWUSR | S_IRUGO,
show_in_min, set_in_min, 1);
static SENSOR_DEVICE_ATTR(in2_min, S_IWUSR | S_IRUGO,
show_in_min, set_in_min, 2);
static SENSOR_DEVICE_ATTR(in3_min, S_IWUSR | S_IRUGO,
show_in_min, set_in_min, 3);
static SENSOR_DEVICE_ATTR(in4_min, S_IWUSR | S_IRUGO,
show_in_min, set_in_min, 4);
static SENSOR_DEVICE_ATTR(in5_min, S_IWUSR | S_IRUGO,
show_in_min, set_in_min, 5);
static SENSOR_DEVICE_ATTR(in6_min, S_IWUSR | S_IRUGO,
show_in_min, set_in_min, 6);
static SENSOR_DEVICE_ATTR(in0_max, S_IWUSR | S_IRUGO,
show_in_max, set_in_max, 0);
static SENSOR_DEVICE_ATTR(in1_max, S_IWUSR | S_IRUGO,
show_in_max, set_in_max, 1);
static SENSOR_DEVICE_ATTR(in2_max, S_IWUSR | S_IRUGO,
show_in_max, set_in_max, 2);
static SENSOR_DEVICE_ATTR(in3_max, S_IWUSR | S_IRUGO,
show_in_max, set_in_max, 3);
static SENSOR_DEVICE_ATTR(in4_max, S_IWUSR | S_IRUGO,
show_in_max, set_in_max, 4);
static SENSOR_DEVICE_ATTR(in5_max, S_IWUSR | S_IRUGO,
show_in_max, set_in_max, 5);
static SENSOR_DEVICE_ATTR(in6_max, S_IWUSR | S_IRUGO,
show_in_max, set_in_max, 6);
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_in_input, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_in_input, NULL, 1);
static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_in_input, NULL, 2);
static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, show_in_input, NULL, 3);
static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, show_in_input, NULL, 4);
static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_in_input, NULL, 5);
static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_in_input, NULL, 6);
static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO,
show_fan_min, set_fan_min, 0);
static SENSOR_DEVICE_ATTR(fan2_min, S_IWUSR | S_IRUGO,
show_fan_min, set_fan_min, 1);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan_input, NULL, 0);
static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan_input, NULL, 1);
static SENSOR_DEVICE_ATTR(fan1_div, S_IWUSR | S_IRUGO,
show_fan_div, set_fan_div, 0);
static SENSOR_DEVICE_ATTR(fan2_div, S_IWUSR | S_IRUGO,
show_fan_div, set_fan_div, 1);
static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input1, NULL);
static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_hot_max,
set_temp_hot_max);
static DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO, show_temp_hot_hyst,
set_temp_hot_hyst);
static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp_os_max,
set_temp_os_max);
static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temp_os_hyst,
set_temp_os_hyst);
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 4);
static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 5);
static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 6);
static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 10);
static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 11);
static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 8);
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 13);
/*
* Real code
*/
static struct attribute *lm80_attributes[] = {
&sensor_dev_attr_in0_min.dev_attr.attr,
&sensor_dev_attr_in1_min.dev_attr.attr,
&sensor_dev_attr_in2_min.dev_attr.attr,
&sensor_dev_attr_in3_min.dev_attr.attr,
&sensor_dev_attr_in4_min.dev_attr.attr,
&sensor_dev_attr_in5_min.dev_attr.attr,
&sensor_dev_attr_in6_min.dev_attr.attr,
&sensor_dev_attr_in0_max.dev_attr.attr,
&sensor_dev_attr_in1_max.dev_attr.attr,
&sensor_dev_attr_in2_max.dev_attr.attr,
&sensor_dev_attr_in3_max.dev_attr.attr,
&sensor_dev_attr_in4_max.dev_attr.attr,
&sensor_dev_attr_in5_max.dev_attr.attr,
&sensor_dev_attr_in6_max.dev_attr.attr,
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in2_input.dev_attr.attr,
&sensor_dev_attr_in3_input.dev_attr.attr,
&sensor_dev_attr_in4_input.dev_attr.attr,
&sensor_dev_attr_in5_input.dev_attr.attr,
&sensor_dev_attr_in6_input.dev_attr.attr,
&sensor_dev_attr_fan1_min.dev_attr.attr,
&sensor_dev_attr_fan2_min.dev_attr.attr,
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_fan2_input.dev_attr.attr,
&sensor_dev_attr_fan1_div.dev_attr.attr,
&sensor_dev_attr_fan2_div.dev_attr.attr,
&dev_attr_temp1_input.attr,
&dev_attr_temp1_max.attr,
&dev_attr_temp1_max_hyst.attr,
&dev_attr_temp1_crit.attr,
&dev_attr_temp1_crit_hyst.attr,
&dev_attr_alarms.attr,
&sensor_dev_attr_in0_alarm.dev_attr.attr,
&sensor_dev_attr_in1_alarm.dev_attr.attr,
&sensor_dev_attr_in2_alarm.dev_attr.attr,
&sensor_dev_attr_in3_alarm.dev_attr.attr,
&sensor_dev_attr_in4_alarm.dev_attr.attr,
&sensor_dev_attr_in5_alarm.dev_attr.attr,
&sensor_dev_attr_in6_alarm.dev_attr.attr,
&sensor_dev_attr_fan1_alarm.dev_attr.attr,
&sensor_dev_attr_fan2_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
NULL
};
static const struct attribute_group lm80_group = {
.attrs = lm80_attributes,
};
/* Return 0 if detection is successful, -ENODEV otherwise */
static int lm80_detect(struct i2c_client *client, struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
int i, cur, man_id, dev_id;
const char *name = NULL;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
/* First check for unused bits, common to both chip types */
if ((lm80_read_value(client, LM80_REG_ALARM2) & 0xc0)
|| (lm80_read_value(client, LM80_REG_CONFIG) & 0x80))
return -ENODEV;
/*
* The LM96080 has manufacturer and stepping/die rev registers so we
* can just check that. The LM80 does not have such registers so we
* have to use a more expensive trick.
*/
man_id = lm80_read_value(client, LM96080_REG_MAN_ID);
dev_id = lm80_read_value(client, LM96080_REG_DEV_ID);
if (man_id == 0x01 && dev_id == 0x08) {
/* Check more unused bits for confirmation */
if (lm80_read_value(client, LM96080_REG_CONV_RATE) & 0xfe)
return -ENODEV;
name = "lm96080";
} else {
/* Check 6-bit addressing */
for (i = 0x2a; i <= 0x3d; i++) {
cur = i2c_smbus_read_byte_data(client, i);
if ((i2c_smbus_read_byte_data(client, i + 0x40) != cur)
|| (i2c_smbus_read_byte_data(client, i + 0x80) != cur)
|| (i2c_smbus_read_byte_data(client, i + 0xc0) != cur))
return -ENODEV;
}
name = "lm80";
}
strlcpy(info->type, name, I2C_NAME_SIZE);
return 0;
}
static int lm80_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct lm80_data *data;
int err;
data = kzalloc(sizeof(struct lm80_data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto exit;
}
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
/* Initialize the LM80 chip */
lm80_init_client(client);
/* A few vars need to be filled upon startup */
data->fan_min[0] = lm80_read_value(client, LM80_REG_FAN_MIN(1));
data->fan_min[1] = lm80_read_value(client, LM80_REG_FAN_MIN(2));
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &lm80_group);
if (err)
goto error_free;
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
goto error_remove;
}
return 0;
error_remove:
sysfs_remove_group(&client->dev.kobj, &lm80_group);
error_free:
kfree(data);
exit:
return err;
}
static int lm80_remove(struct i2c_client *client)
{
struct lm80_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm80_group);
kfree(data);
return 0;
}
static int lm80_read_value(struct i2c_client *client, u8 reg)
{
return i2c_smbus_read_byte_data(client, reg);
}
static int lm80_write_value(struct i2c_client *client, u8 reg, u8 value)
{
return i2c_smbus_write_byte_data(client, reg, value);
}
/* Called when we have found a new LM80. */
static void lm80_init_client(struct i2c_client *client)
{
/*
* Reset all except Watchdog values and last conversion values.
* This sets fan-divs to 2, among others, and makes most other
* initializations unnecessary.
*/
lm80_write_value(client, LM80_REG_CONFIG, 0x80);
/* Set 11-bit temperature resolution */
lm80_write_value(client, LM80_REG_RES, 0x08);
/* Start monitoring */
lm80_write_value(client, LM80_REG_CONFIG, 0x01);
}
static struct lm80_data *lm80_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm80_data *data = i2c_get_clientdata(client);
int i;
int rv;
int prev_rv;
struct lm80_data *ret = data;
mutex_lock(&data->update_lock);
if (data->error)
lm80_init_client(client);
if (time_after(jiffies, data->last_updated + 2 * HZ) || !data->valid) {
dev_dbg(&client->dev, "Starting lm80 update\n");
for (i = 0; i <= 6; i++) {
rv = lm80_read_value(client, LM80_REG_IN(i));
if (rv < 0)
goto abort;
data->in[i] = rv;
rv = lm80_read_value(client, LM80_REG_IN_MIN(i));
if (rv < 0)
goto abort;
data->in_min[i] = rv;
rv = lm80_read_value(client, LM80_REG_IN_MAX(i));
if (rv < 0)
goto abort;
data->in_max[i] = rv;
}
rv = lm80_read_value(client, LM80_REG_FAN1);
if (rv < 0)
goto abort;
data->fan[0] = rv;
rv = lm80_read_value(client, LM80_REG_FAN_MIN(1));
if (rv < 0)
goto abort;
data->fan_min[0] = rv;
rv = lm80_read_value(client, LM80_REG_FAN2);
if (rv < 0)
goto abort;
data->fan[1] = rv;
rv = lm80_read_value(client, LM80_REG_FAN_MIN(2));
if (rv < 0)
goto abort;
data->fan_min[1] = rv;
prev_rv = rv = lm80_read_value(client, LM80_REG_TEMP);
if (rv < 0)
goto abort;
rv = lm80_read_value(client, LM80_REG_RES);
if (rv < 0)
goto abort;
data->temp = (prev_rv << 8) | (rv & 0xf0);
rv = lm80_read_value(client, LM80_REG_TEMP_OS_MAX);
if (rv < 0)
goto abort;
data->temp_os_max = rv;
rv = lm80_read_value(client, LM80_REG_TEMP_OS_HYST);
if (rv < 0)
goto abort;
data->temp_os_hyst = rv;
rv = lm80_read_value(client, LM80_REG_TEMP_HOT_MAX);
if (rv < 0)
goto abort;
data->temp_hot_max = rv;
rv = lm80_read_value(client, LM80_REG_TEMP_HOT_HYST);
if (rv < 0)
goto abort;
data->temp_hot_hyst = rv;
rv = lm80_read_value(client, LM80_REG_FANDIV);
if (rv < 0)
goto abort;
data->fan_div[0] = (rv >> 2) & 0x03;
data->fan_div[1] = (rv >> 4) & 0x03;
prev_rv = rv = lm80_read_value(client, LM80_REG_ALARM1);
if (rv < 0)
goto abort;
rv = lm80_read_value(client, LM80_REG_ALARM2);
if (rv < 0)
goto abort;
data->alarms = prev_rv + (rv << 8);
data->last_updated = jiffies;
data->valid = 1;
data->error = 0;
}
goto done;
abort:
ret = ERR_PTR(rv);
data->valid = 0;
data->error = 1;
done:
mutex_unlock(&data->update_lock);
return ret;
}
module_i2c_driver(lm80_driver);
MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and "
"Philip Edelbrock <phil@netroedge.com>");
MODULE_DESCRIPTION("LM80 driver");
MODULE_LICENSE("GPL");
|
gpl-2.0
|
javelinanddart/android_kernel_caf_ville
|
drivers/hwmon/lm83.c
|
4857
|
12807
|
/*
* lm83.c - Part of lm_sensors, Linux kernel modules for hardware
* monitoring
* Copyright (C) 2003-2009 Jean Delvare <khali@linux-fr.org>
*
* Heavily inspired from the lm78, lm75 and adm1021 drivers. The LM83 is
* a sensor chip made by National Semiconductor. It reports up to four
* temperatures (its own plus up to three external ones) with a 1 deg
* resolution and a 3-4 deg accuracy. Complete datasheet can be obtained
* from National's website at:
* http://www.national.com/pf/LM/LM83.html
* Since the datasheet omits to give the chip stepping code, I give it
* here: 0x03 (at register 0xff).
*
* Also supports the LM82 temp sensor, which is basically a stripped down
* model of the LM83. Datasheet is here:
* http://www.national.com/pf/LM/LM82.html
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon-sysfs.h>
#include <linux/hwmon.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
/*
* Addresses to scan
* Address is selected using 2 three-level pins, resulting in 9 possible
* addresses.
*/
static const unsigned short normal_i2c[] = {
0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
enum chips { lm83, lm82 };
/*
* The LM83 registers
* Manufacturer ID is 0x01 for National Semiconductor.
*/
#define LM83_REG_R_MAN_ID 0xFE
#define LM83_REG_R_CHIP_ID 0xFF
#define LM83_REG_R_CONFIG 0x03
#define LM83_REG_W_CONFIG 0x09
#define LM83_REG_R_STATUS1 0x02
#define LM83_REG_R_STATUS2 0x35
#define LM83_REG_R_LOCAL_TEMP 0x00
#define LM83_REG_R_LOCAL_HIGH 0x05
#define LM83_REG_W_LOCAL_HIGH 0x0B
#define LM83_REG_R_REMOTE1_TEMP 0x30
#define LM83_REG_R_REMOTE1_HIGH 0x38
#define LM83_REG_W_REMOTE1_HIGH 0x50
#define LM83_REG_R_REMOTE2_TEMP 0x01
#define LM83_REG_R_REMOTE2_HIGH 0x07
#define LM83_REG_W_REMOTE2_HIGH 0x0D
#define LM83_REG_R_REMOTE3_TEMP 0x31
#define LM83_REG_R_REMOTE3_HIGH 0x3A
#define LM83_REG_W_REMOTE3_HIGH 0x52
#define LM83_REG_R_TCRIT 0x42
#define LM83_REG_W_TCRIT 0x5A
/*
* Conversions and various macros
* The LM83 uses signed 8-bit values with LSB = 1 degree Celsius.
*/
#define TEMP_FROM_REG(val) ((val) * 1000)
#define TEMP_TO_REG(val) ((val) <= -128000 ? -128 : \
(val) >= 127000 ? 127 : \
(val) < 0 ? ((val) - 500) / 1000 : \
((val) + 500) / 1000)
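/*
 * Worked example (not part of the original file): TEMP_TO_REG(26500)
 * rounds to the nearest degree, (26500 + 500) / 1000 = 27, while
 * TEMP_TO_REG(-26500) yields (-26500 - 500) / 1000 = -27; inputs
 * beyond the chip's range saturate at the register limits 127/-128.
 */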
static const u8 LM83_REG_R_TEMP[] = {
LM83_REG_R_LOCAL_TEMP,
LM83_REG_R_REMOTE1_TEMP,
LM83_REG_R_REMOTE2_TEMP,
LM83_REG_R_REMOTE3_TEMP,
LM83_REG_R_LOCAL_HIGH,
LM83_REG_R_REMOTE1_HIGH,
LM83_REG_R_REMOTE2_HIGH,
LM83_REG_R_REMOTE3_HIGH,
LM83_REG_R_TCRIT,
};
static const u8 LM83_REG_W_HIGH[] = {
LM83_REG_W_LOCAL_HIGH,
LM83_REG_W_REMOTE1_HIGH,
LM83_REG_W_REMOTE2_HIGH,
LM83_REG_W_REMOTE3_HIGH,
LM83_REG_W_TCRIT,
};
/*
* Functions declaration
*/
static int lm83_detect(struct i2c_client *new_client,
struct i2c_board_info *info);
static int lm83_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int lm83_remove(struct i2c_client *client);
static struct lm83_data *lm83_update_device(struct device *dev);
/*
* Driver data (common to all clients)
*/
static const struct i2c_device_id lm83_id[] = {
{ "lm83", lm83 },
{ "lm82", lm82 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm83_id);
static struct i2c_driver lm83_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "lm83",
},
.probe = lm83_probe,
.remove = lm83_remove,
.id_table = lm83_id,
.detect = lm83_detect,
.address_list = normal_i2c,
};
/*
* Client data (each client gets its own)
*/
struct lm83_data {
struct device *hwmon_dev;
struct mutex update_lock;
char valid; /* zero until following fields are valid */
unsigned long last_updated; /* in jiffies */
/* registers values */
s8 temp[9]; /* 0..3: input 1-4,
4..7: high limit 1-4,
8 : critical limit */
u16 alarms; /* bitvector, combined */
};
/*
* Sysfs stuff
*/
static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm83_data *data = lm83_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[attr->index]));
}
static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct lm83_data *data = i2c_get_clientdata(client);
long val;
int nr = attr->index;
int err;
err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
mutex_lock(&data->update_lock);
data->temp[nr] = TEMP_TO_REG(val);
i2c_smbus_write_byte_data(client, LM83_REG_W_HIGH[nr - 4],
data->temp[nr]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_alarms(struct device *dev, struct device_attribute *dummy,
char *buf)
{
struct lm83_data *data = lm83_update_device(dev);
return sprintf(buf, "%d\n", data->alarms);
}
static ssize_t show_alarm(struct device *dev,
struct device_attribute *devattr, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm83_data *data = lm83_update_device(dev);
int bitnr = attr->index;
return sprintf(buf, "%d\n", (data->alarms >> bitnr) & 1);
}
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3);
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp,
set_temp, 4);
static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp,
set_temp, 5);
static SENSOR_DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_temp,
set_temp, 6);
static SENSOR_DEVICE_ATTR(temp4_max, S_IWUSR | S_IRUGO, show_temp,
set_temp, 7);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp, NULL, 8);
static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_temp, NULL, 8);
static SENSOR_DEVICE_ATTR(temp3_crit, S_IWUSR | S_IRUGO, show_temp,
set_temp, 8);
static SENSOR_DEVICE_ATTR(temp4_crit, S_IRUGO, show_temp, NULL, 8);
/* Individual alarm files */
static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(temp3_crit_alarm, S_IRUGO, show_alarm, NULL, 1);
static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 2);
static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_alarm, NULL, 4);
static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 8);
static SENSOR_DEVICE_ATTR(temp4_crit_alarm, S_IRUGO, show_alarm, NULL, 9);
static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_alarm, NULL, 10);
static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_alarm, NULL, 12);
static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 13);
static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 15);
/* Raw alarm file for compatibility */
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
static struct attribute *lm83_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp3_input.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp3_max.dev_attr.attr,
&sensor_dev_attr_temp1_crit.dev_attr.attr,
&sensor_dev_attr_temp3_crit.dev_attr.attr,
&sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
&sensor_dev_attr_temp3_crit_alarm.dev_attr.attr,
&sensor_dev_attr_temp3_fault.dev_attr.attr,
&sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
&dev_attr_alarms.attr,
NULL
};
static const struct attribute_group lm83_group = {
.attrs = lm83_attributes,
};
static struct attribute *lm83_attributes_opt[] = {
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp4_input.dev_attr.attr,
&sensor_dev_attr_temp2_max.dev_attr.attr,
&sensor_dev_attr_temp4_max.dev_attr.attr,
&sensor_dev_attr_temp2_crit.dev_attr.attr,
&sensor_dev_attr_temp4_crit.dev_attr.attr,
&sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
&sensor_dev_attr_temp4_crit_alarm.dev_attr.attr,
&sensor_dev_attr_temp4_fault.dev_attr.attr,
&sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_fault.dev_attr.attr,
&sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
NULL
};
static const struct attribute_group lm83_group_opt = {
.attrs = lm83_attributes_opt,
};
/*
* Real code
*/
/* Return 0 if detection is successful, -ENODEV otherwise */
static int lm83_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
const char *name;
u8 man_id, chip_id;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
/* Detection */
if ((i2c_smbus_read_byte_data(new_client, LM83_REG_R_STATUS1) & 0xA8) ||
(i2c_smbus_read_byte_data(new_client, LM83_REG_R_STATUS2) & 0x48) ||
(i2c_smbus_read_byte_data(new_client, LM83_REG_R_CONFIG) & 0x41)) {
dev_dbg(&adapter->dev, "LM83 detection failed at 0x%02x\n",
new_client->addr);
return -ENODEV;
}
/* Identification */
man_id = i2c_smbus_read_byte_data(new_client, LM83_REG_R_MAN_ID);
if (man_id != 0x01) /* National Semiconductor */
return -ENODEV;
chip_id = i2c_smbus_read_byte_data(new_client, LM83_REG_R_CHIP_ID);
switch (chip_id) {
case 0x03:
name = "lm83";
break;
case 0x01:
name = "lm82";
break;
default:
/* identification failed */
dev_info(&adapter->dev,
"Unsupported chip (man_id=0x%02X, chip_id=0x%02X)\n",
man_id, chip_id);
return -ENODEV;
}
strlcpy(info->type, name, I2C_NAME_SIZE);
return 0;
}
static int lm83_probe(struct i2c_client *new_client,
const struct i2c_device_id *id)
{
struct lm83_data *data;
int err;
data = kzalloc(sizeof(struct lm83_data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto exit;
}
i2c_set_clientdata(new_client, data);
data->valid = 0;
mutex_init(&data->update_lock);
/*
* Register sysfs hooks
* The LM82 can only monitor one external diode which is
* at the same register as the LM83 temp3 entry - so we
* declare 1 and 3 common, and then 2 and 4 only for the LM83.
*/
err = sysfs_create_group(&new_client->dev.kobj, &lm83_group);
if (err)
goto exit_free;
if (id->driver_data == lm83) {
err = sysfs_create_group(&new_client->dev.kobj,
&lm83_group_opt);
if (err)
goto exit_remove_files;
}
data->hwmon_dev = hwmon_device_register(&new_client->dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
goto exit_remove_files;
}
return 0;
exit_remove_files:
sysfs_remove_group(&new_client->dev.kobj, &lm83_group);
sysfs_remove_group(&new_client->dev.kobj, &lm83_group_opt);
exit_free:
kfree(data);
exit:
return err;
}
static int lm83_remove(struct i2c_client *client)
{
struct lm83_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm83_group);
sysfs_remove_group(&client->dev.kobj, &lm83_group_opt);
kfree(data);
return 0;
}
static struct lm83_data *lm83_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm83_data *data = i2c_get_clientdata(client);
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ * 2) || !data->valid) {
int nr;
dev_dbg(&client->dev, "Updating lm83 data.\n");
for (nr = 0; nr < 9; nr++) {
data->temp[nr] =
i2c_smbus_read_byte_data(client,
LM83_REG_R_TEMP[nr]);
}
data->alarms =
i2c_smbus_read_byte_data(client, LM83_REG_R_STATUS1)
+ (i2c_smbus_read_byte_data(client, LM83_REG_R_STATUS2)
<< 8);
data->last_updated = jiffies;
data->valid = 1;
}
mutex_unlock(&data->update_lock);
return data;
}
module_i2c_driver(lm83_driver);
MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
MODULE_DESCRIPTION("LM83 driver");
MODULE_LICENSE("GPL");
|
gpl-2.0
|
TREX-ROM/android_kernel_lge_hammerhead
|
drivers/hwmon/smsc47m192.c
|
4857
|
21295
|
/*
* smsc47m192.c - Support for hardware monitoring block of
* SMSC LPC47M192 and compatible Super I/O chips
*
* Copyright (C) 2006 Hartmut Rick <linux@rick.claranet.de>
*
* Derived from lm78.c and other chip drivers.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/hwmon-vid.h>
#include <linux/err.h>
#include <linux/sysfs.h>
#include <linux/mutex.h>
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, I2C_CLIENT_END };
/* SMSC47M192 registers */
#define SMSC47M192_REG_IN(nr) ((nr) < 6 ? (0x20 + (nr)) : \
(0x50 + (nr) - 6))
#define SMSC47M192_REG_IN_MAX(nr) ((nr) < 6 ? (0x2b + (nr) * 2) : \
(0x54 + (((nr) - 6) * 2)))
#define SMSC47M192_REG_IN_MIN(nr) ((nr) < 6 ? (0x2c + (nr) * 2) : \
(0x55 + (((nr) - 6) * 2)))
static u8 SMSC47M192_REG_TEMP[3] = { 0x27, 0x26, 0x52 };
static u8 SMSC47M192_REG_TEMP_MAX[3] = { 0x39, 0x37, 0x58 };
static u8 SMSC47M192_REG_TEMP_MIN[3] = { 0x3A, 0x38, 0x59 };
#define SMSC47M192_REG_TEMP_OFFSET(nr) ((nr) == 2 ? 0x1e : 0x1f)
#define SMSC47M192_REG_ALARM1 0x41
#define SMSC47M192_REG_ALARM2 0x42
#define SMSC47M192_REG_VID 0x47
#define SMSC47M192_REG_VID4 0x49
#define SMSC47M192_REG_CONFIG 0x40
#define SMSC47M192_REG_SFR 0x4f
#define SMSC47M192_REG_COMPANY_ID 0x3e
#define SMSC47M192_REG_VERSION 0x3f
/* generalised scaling with integer rounding */
static inline int SCALE(long val, int mul, int div)
{
if (val < 0)
return (val * mul - div / 2) / div;
else
return (val * mul + div / 2) / div;
}
/* Conversions */
/* smsc47m192 internally scales voltage measurements */
static const u16 nom_mv[] = { 2500, 2250, 3300, 5000, 12000, 3300, 1500, 1800 };
static inline unsigned int IN_FROM_REG(u8 reg, int n)
{
return SCALE(reg, nom_mv[n], 192);
}
static inline u8 IN_TO_REG(unsigned long val, int n)
{
return SENSORS_LIMIT(SCALE(val, 192, nom_mv[n]), 0, 255);
}
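/*
 * Worked example (not part of the original file): a register count of
 * 192 always corresponds to the nominal rail voltage, so for in4
 * (nominally 12000 mV) IN_FROM_REG(192, 4) = 192 * 12000 / 192 =
 * 12000 mV, and IN_TO_REG(12000, 4) maps back to 192.
 */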
/*
* TEMP: 0.001 degC units (-128C to +127C)
* REG: 1C/bit, two's complement
*/
static inline s8 TEMP_TO_REG(int val)
{
return SCALE(SENSORS_LIMIT(val, -128000, 127000), 1, 1000);
}
static inline int TEMP_FROM_REG(s8 val)
{
return val * 1000;
}
struct smsc47m192_data {
struct device *hwmon_dev;
struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
u8 in[8]; /* Register value */
u8 in_max[8]; /* Register value */
u8 in_min[8]; /* Register value */
s8 temp[3]; /* Register value */
s8 temp_max[3]; /* Register value */
s8 temp_min[3]; /* Register value */
s8 temp_offset[3]; /* Register value */
u16 alarms; /* Register encoding, combined */
u8 vid; /* Register encoding, combined */
u8 vrm;
};
static int smsc47m192_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int smsc47m192_detect(struct i2c_client *client,
struct i2c_board_info *info);
static int smsc47m192_remove(struct i2c_client *client);
static struct smsc47m192_data *smsc47m192_update_device(struct device *dev);
static const struct i2c_device_id smsc47m192_id[] = {
{ "smsc47m192", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, smsc47m192_id);
static struct i2c_driver smsc47m192_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "smsc47m192",
},
.probe = smsc47m192_probe,
.remove = smsc47m192_remove,
.id_table = smsc47m192_id,
.detect = smsc47m192_detect,
.address_list = normal_i2c,
};
/* Voltages */
static ssize_t show_in(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct smsc47m192_data *data = smsc47m192_update_device(dev);
return sprintf(buf, "%d\n", IN_FROM_REG(data->in[nr], nr));
}
static ssize_t show_in_min(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct smsc47m192_data *data = smsc47m192_update_device(dev);
return sprintf(buf, "%d\n", IN_FROM_REG(data->in_min[nr], nr));
}
static ssize_t show_in_max(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct smsc47m192_data *data = smsc47m192_update_device(dev);
return sprintf(buf, "%d\n", IN_FROM_REG(data->in_max[nr], nr));
}
static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct i2c_client *client = to_i2c_client(dev);
struct smsc47m192_data *data = i2c_get_clientdata(client);
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
data->in_min[nr] = IN_TO_REG(val, nr);
i2c_smbus_write_byte_data(client, SMSC47M192_REG_IN_MIN(nr),
data->in_min[nr]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct i2c_client *client = to_i2c_client(dev);
struct smsc47m192_data *data = i2c_get_clientdata(client);
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
data->in_max[nr] = IN_TO_REG(val, nr);
i2c_smbus_write_byte_data(client, SMSC47M192_REG_IN_MAX(nr),
data->in_max[nr]);
mutex_unlock(&data->update_lock);
return count;
}
#define show_in_offset(offset) \
static SENSOR_DEVICE_ATTR(in##offset##_input, S_IRUGO, \
show_in, NULL, offset); \
static SENSOR_DEVICE_ATTR(in##offset##_min, S_IRUGO | S_IWUSR, \
show_in_min, set_in_min, offset); \
static SENSOR_DEVICE_ATTR(in##offset##_max, S_IRUGO | S_IWUSR, \
show_in_max, set_in_max, offset);
show_in_offset(0)
show_in_offset(1)
show_in_offset(2)
show_in_offset(3)
show_in_offset(4)
show_in_offset(5)
show_in_offset(6)
show_in_offset(7)
/* Temperatures */
static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct smsc47m192_data *data = smsc47m192_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr]));
}
static ssize_t show_temp_min(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct smsc47m192_data *data = smsc47m192_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_min[nr]));
}
static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct smsc47m192_data *data = smsc47m192_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_max[nr]));
}
static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct i2c_client *client = to_i2c_client(dev);
struct smsc47m192_data *data = i2c_get_clientdata(client);
long val;
int err;
err = kstrtol(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
data->temp_min[nr] = TEMP_TO_REG(val);
i2c_smbus_write_byte_data(client, SMSC47M192_REG_TEMP_MIN[nr],
data->temp_min[nr]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct i2c_client *client = to_i2c_client(dev);
struct smsc47m192_data *data = i2c_get_clientdata(client);
long val;
int err;
err = kstrtol(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
data->temp_max[nr] = TEMP_TO_REG(val);
i2c_smbus_write_byte_data(client, SMSC47M192_REG_TEMP_MAX[nr],
data->temp_max[nr]);
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t show_temp_offset(struct device *dev, struct device_attribute
*attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct smsc47m192_data *data = smsc47m192_update_device(dev);
return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp_offset[nr]));
}
static ssize_t set_temp_offset(struct device *dev, struct device_attribute
*attr, const char *buf, size_t count)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct i2c_client *client = to_i2c_client(dev);
struct smsc47m192_data *data = i2c_get_clientdata(client);
u8 sfr = i2c_smbus_read_byte_data(client, SMSC47M192_REG_SFR);
long val;
int err;
err = kstrtol(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
data->temp_offset[nr] = TEMP_TO_REG(val);
if (nr > 1)
i2c_smbus_write_byte_data(client,
SMSC47M192_REG_TEMP_OFFSET(nr), data->temp_offset[nr]);
else if (data->temp_offset[nr] != 0) {
/*
* offset[0] and offset[1] share the same register,
* SFR bit 4 activates offset[0]
*/
i2c_smbus_write_byte_data(client, SMSC47M192_REG_SFR,
(sfr & 0xef) | (nr == 0 ? 0x10 : 0));
data->temp_offset[1-nr] = 0;
i2c_smbus_write_byte_data(client,
SMSC47M192_REG_TEMP_OFFSET(nr), data->temp_offset[nr]);
} else if ((sfr & 0x10) == (nr == 0 ? 0x10 : 0))
i2c_smbus_write_byte_data(client,
SMSC47M192_REG_TEMP_OFFSET(nr), 0);
mutex_unlock(&data->update_lock);
return count;
}
#define show_temp_index(index) \
static SENSOR_DEVICE_ATTR(temp##index##_input, S_IRUGO, \
show_temp, NULL, index-1); \
static SENSOR_DEVICE_ATTR(temp##index##_min, S_IRUGO | S_IWUSR, \
show_temp_min, set_temp_min, index-1); \
static SENSOR_DEVICE_ATTR(temp##index##_max, S_IRUGO | S_IWUSR, \
show_temp_max, set_temp_max, index-1); \
static SENSOR_DEVICE_ATTR(temp##index##_offset, S_IRUGO | S_IWUSR, \
show_temp_offset, set_temp_offset, index-1);
show_temp_index(1)
show_temp_index(2)
show_temp_index(3)
/* VID */
static ssize_t show_vid(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct smsc47m192_data *data = smsc47m192_update_device(dev);
return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm));
}
static DEVICE_ATTR(cpu0_vid, S_IRUGO, show_vid, NULL);
static ssize_t show_vrm(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct smsc47m192_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", data->vrm);
}
static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct smsc47m192_data *data = dev_get_drvdata(dev);
unsigned long val;
int err;
err = kstrtoul(buf, 10, &val);
if (err)
return err;
data->vrm = val;
return count;
}
static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm, set_vrm);
/* Alarms */
static ssize_t show_alarm(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct smsc47m192_data *data = smsc47m192_update_device(dev);
return sprintf(buf, "%u\n", (data->alarms & nr) ? 1 : 0);
}
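/*
* the attribute index below is a bit mask into the combined 16-bit
* alarm word (ALARM1 in the low byte, ALARM2 in the high byte)
*/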
static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 0x0010);
static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 0x0020);
static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 0x0040);
static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 0x4000);
static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_alarm, NULL, 0x8000);
static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0x0001);
static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 0x0002);
static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 0x0004);
static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 0x0008);
static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 0x0100);
static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 0x0200);
static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 0x0400);
static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 0x0800);
static struct attribute *smsc47m192_attributes[] = {
&sensor_dev_attr_in0_input.dev_attr.attr,
&sensor_dev_attr_in0_min.dev_attr.attr,
&sensor_dev_attr_in0_max.dev_attr.attr,
&sensor_dev_attr_in0_alarm.dev_attr.attr,
&sensor_dev_attr_in1_input.dev_attr.attr,
&sensor_dev_attr_in1_min.dev_attr.attr,
&sensor_dev_attr_in1_max.dev_attr.attr,
&sensor_dev_attr_in1_alarm.dev_attr.attr,
&sensor_dev_attr_in2_input.dev_attr.attr,
&sensor_dev_attr_in2_min.dev_attr.attr,
&sensor_dev_attr_in2_max.dev_attr.attr,
&sensor_dev_attr_in2_alarm.dev_attr.attr,
&sensor_dev_attr_in3_input.dev_attr.attr,
&sensor_dev_attr_in3_min.dev_attr.attr,
&sensor_dev_attr_in3_max.dev_attr.attr,
&sensor_dev_attr_in3_alarm.dev_attr.attr,
&sensor_dev_attr_in5_input.dev_attr.attr,
&sensor_dev_attr_in5_min.dev_attr.attr,
&sensor_dev_attr_in5_max.dev_attr.attr,
&sensor_dev_attr_in5_alarm.dev_attr.attr,
&sensor_dev_attr_in6_input.dev_attr.attr,
&sensor_dev_attr_in6_min.dev_attr.attr,
&sensor_dev_attr_in6_max.dev_attr.attr,
&sensor_dev_attr_in6_alarm.dev_attr.attr,
&sensor_dev_attr_in7_input.dev_attr.attr,
&sensor_dev_attr_in7_min.dev_attr.attr,
&sensor_dev_attr_in7_max.dev_attr.attr,
&sensor_dev_attr_in7_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp1_min.dev_attr.attr,
&sensor_dev_attr_temp1_offset.dev_attr.attr,
&sensor_dev_attr_temp1_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp2_max.dev_attr.attr,
&sensor_dev_attr_temp2_min.dev_attr.attr,
&sensor_dev_attr_temp2_offset.dev_attr.attr,
&sensor_dev_attr_temp2_alarm.dev_attr.attr,
&sensor_dev_attr_temp2_fault.dev_attr.attr,
&sensor_dev_attr_temp3_input.dev_attr.attr,
&sensor_dev_attr_temp3_max.dev_attr.attr,
&sensor_dev_attr_temp3_min.dev_attr.attr,
&sensor_dev_attr_temp3_offset.dev_attr.attr,
&sensor_dev_attr_temp3_alarm.dev_attr.attr,
&sensor_dev_attr_temp3_fault.dev_attr.attr,
&dev_attr_cpu0_vid.attr,
&dev_attr_vrm.attr,
NULL
};
static const struct attribute_group smsc47m192_group = {
.attrs = smsc47m192_attributes,
};
static struct attribute *smsc47m192_attributes_in4[] = {
&sensor_dev_attr_in4_input.dev_attr.attr,
&sensor_dev_attr_in4_min.dev_attr.attr,
&sensor_dev_attr_in4_max.dev_attr.attr,
&sensor_dev_attr_in4_alarm.dev_attr.attr,
NULL
};
static const struct attribute_group smsc47m192_group_in4 = {
.attrs = smsc47m192_attributes_in4,
};
static void smsc47m192_init_client(struct i2c_client *client)
{
int i;
u8 config = i2c_smbus_read_byte_data(client, SMSC47M192_REG_CONFIG);
u8 sfr = i2c_smbus_read_byte_data(client, SMSC47M192_REG_SFR);
/* select cycle mode (pause 1 sec between updates) */
i2c_smbus_write_byte_data(client, SMSC47M192_REG_SFR,
(sfr & 0xfd) | 0x02);
if (!(config & 0x01)) {
/* initialize alarm limits */
for (i = 0; i < 8; i++) {
i2c_smbus_write_byte_data(client,
SMSC47M192_REG_IN_MIN(i), 0);
i2c_smbus_write_byte_data(client,
SMSC47M192_REG_IN_MAX(i), 0xff);
}
for (i = 0; i < 3; i++) {
i2c_smbus_write_byte_data(client,
SMSC47M192_REG_TEMP_MIN[i], 0x80);
i2c_smbus_write_byte_data(client,
SMSC47M192_REG_TEMP_MAX[i], 0x7f);
}
/* start monitoring */
i2c_smbus_write_byte_data(client, SMSC47M192_REG_CONFIG,
(config & 0xf7) | 0x01);
}
}
/* Return 0 if detection is successful, -ENODEV otherwise */
static int smsc47m192_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
int version;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
/* Detection criteria from sensors_detect script */
version = i2c_smbus_read_byte_data(client, SMSC47M192_REG_VERSION);
if (i2c_smbus_read_byte_data(client,
SMSC47M192_REG_COMPANY_ID) == 0x55
&& (version & 0xf0) == 0x20
&& (i2c_smbus_read_byte_data(client,
SMSC47M192_REG_VID) & 0x70) == 0x00
&& (i2c_smbus_read_byte_data(client,
SMSC47M192_REG_VID4) & 0xfe) == 0x80) {
dev_info(&adapter->dev,
"found SMSC47M192 or compatible, "
"version 2, stepping A%d\n", version & 0x0f);
} else {
dev_dbg(&adapter->dev,
"SMSC47M192 detection failed at 0x%02x\n",
client->addr);
return -ENODEV;
}
strlcpy(info->type, "smsc47m192", I2C_NAME_SIZE);
return 0;
}
static int smsc47m192_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct smsc47m192_data *data;
int config;
int err;
data = kzalloc(sizeof(struct smsc47m192_data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto exit;
}
i2c_set_clientdata(client, data);
data->vrm = vid_which_vrm();
mutex_init(&data->update_lock);
/* Initialize the SMSC47M192 chip */
smsc47m192_init_client(client);
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &smsc47m192_group);
if (err)
goto exit_free;
/* Pin 110 is either in4 (+12V) or VID4 */
config = i2c_smbus_read_byte_data(client, SMSC47M192_REG_CONFIG);
if (!(config & 0x20)) {
err = sysfs_create_group(&client->dev.kobj,
&smsc47m192_group_in4);
if (err)
goto exit_remove_files;
}
data->hwmon_dev = hwmon_device_register(&client->dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
goto exit_remove_files;
}
return 0;
exit_remove_files:
sysfs_remove_group(&client->dev.kobj, &smsc47m192_group);
sysfs_remove_group(&client->dev.kobj, &smsc47m192_group_in4);
exit_free:
kfree(data);
exit:
return err;
}
static int smsc47m192_remove(struct i2c_client *client)
{
struct smsc47m192_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &smsc47m192_group);
sysfs_remove_group(&client->dev.kobj, &smsc47m192_group_in4);
kfree(data);
return 0;
}
static struct smsc47m192_data *smsc47m192_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct smsc47m192_data *data = i2c_get_clientdata(client);
int i, config;
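/* re-read the chip registers at most every 1.5 seconds */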
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
|| !data->valid) {
u8 sfr = i2c_smbus_read_byte_data(client, SMSC47M192_REG_SFR);
dev_dbg(&client->dev, "Starting smsc47m192 update\n");
for (i = 0; i <= 7; i++) {
data->in[i] = i2c_smbus_read_byte_data(client,
SMSC47M192_REG_IN(i));
data->in_min[i] = i2c_smbus_read_byte_data(client,
SMSC47M192_REG_IN_MIN(i));
data->in_max[i] = i2c_smbus_read_byte_data(client,
SMSC47M192_REG_IN_MAX(i));
}
for (i = 0; i < 3; i++) {
data->temp[i] = i2c_smbus_read_byte_data(client,
SMSC47M192_REG_TEMP[i]);
data->temp_max[i] = i2c_smbus_read_byte_data(client,
SMSC47M192_REG_TEMP_MAX[i]);
data->temp_min[i] = i2c_smbus_read_byte_data(client,
SMSC47M192_REG_TEMP_MIN[i]);
}
for (i = 1; i < 3; i++)
data->temp_offset[i] = i2c_smbus_read_byte_data(client,
SMSC47M192_REG_TEMP_OFFSET(i));
/*
* first offset is temp_offset[0] if SFR bit 4 is set,
* temp_offset[1] otherwise
*/
if (sfr & 0x10) {
data->temp_offset[0] = data->temp_offset[1];
data->temp_offset[1] = 0;
} else
data->temp_offset[0] = 0;
data->vid = i2c_smbus_read_byte_data(client, SMSC47M192_REG_VID)
& 0x0f;
config = i2c_smbus_read_byte_data(client,
SMSC47M192_REG_CONFIG);
if (config & 0x20)
data->vid |= (i2c_smbus_read_byte_data(client,
SMSC47M192_REG_VID4) & 0x01) << 4;
data->alarms = i2c_smbus_read_byte_data(client,
SMSC47M192_REG_ALARM1) |
(i2c_smbus_read_byte_data(client,
SMSC47M192_REG_ALARM2) << 8);
data->last_updated = jiffies;
data->valid = 1;
}
mutex_unlock(&data->update_lock);
return data;
}
module_i2c_driver(smsc47m192_driver);
MODULE_AUTHOR("Hartmut Rick <linux@rick.claranet.de>");
MODULE_DESCRIPTION("SMSC47M192 driver");
MODULE_LICENSE("GPL");
|
gpl-2.0
|
tjarnold/jewel_3.4.49
|
arch/arm/mach-shark/core.c
|
4857
|
3586
|
/*
* linux/arch/arm/mach-shark/core.c
*
* Architecture specific stuff.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/sched.h>
#include <linux/serial_8250.h>
#include <linux/io.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/leds.h>
#include <asm/param.h>
#include <asm/system_misc.h>
#include <asm/mach/map.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#define IO_BASE 0xe0000000
#define IO_SIZE 0x08000000
#define IO_START 0x40000000
#define ROMCARD_SIZE 0x08000000
#define ROMCARD_START 0x10000000
static void shark_restart(char mode, const char *cmd)
{
short temp;
/* Reset the Machine via pc[3] of the sequoia chipset */
outw(0x09,0x24);
temp=inw(0x26);
temp = temp | (1<<3) | (1<<10);
outw(0x09,0x24);
outw(temp,0x26);
}
static struct plat_serial8250_port serial_platform_data[] = {
{
.iobase = 0x3f8,
.irq = 4,
.uartclk = 1843200,
.regshift = 0,
.iotype = UPIO_PORT,
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
},
{
.iobase = 0x2f8,
.irq = 3,
.uartclk = 1843200,
.regshift = 0,
.iotype = UPIO_PORT,
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
},
{ },
};
static struct platform_device serial_device = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM,
.dev = {
.platform_data = serial_platform_data,
},
};
static struct resource rtc_resources[] = {
[0] = {
.start = 0x70,
.end = 0x73,
.flags = IORESOURCE_IO,
},
[1] = {
.start = IRQ_ISA_RTC_ALARM,
.end = IRQ_ISA_RTC_ALARM,
.flags = IORESOURCE_IRQ,
}
};
static struct platform_device rtc_device = {
.name = "rtc_cmos",
.id = -1,
.resource = rtc_resources,
.num_resources = ARRAY_SIZE(rtc_resources),
};
static int __init shark_init(void)
{
int ret;
if (machine_is_shark())
{
ret = platform_device_register(&rtc_device);
if (ret) printk(KERN_ERR "Unable to register RTC device: %d\n", ret);
ret = platform_device_register(&serial_device);
if (ret) printk(KERN_ERR "Unable to register Serial device: %d\n", ret);
}
return 0;
}
arch_initcall(shark_init);
extern void shark_init_irq(void);
static struct map_desc shark_io_desc[] __initdata = {
{
.virtual = IO_BASE,
.pfn = __phys_to_pfn(IO_START),
.length = IO_SIZE,
.type = MT_DEVICE
}
};
static void __init shark_map_io(void)
{
iotable_init(shark_io_desc, ARRAY_SIZE(shark_io_desc));
}
#define IRQ_TIMER 0
#define HZ_TIME ((1193180 + HZ/2) / HZ)
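/* 8254 PIT divisor: bring the 1.19318 MHz input clock down to HZ ticks */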
static irqreturn_t
shark_timer_interrupt(int irq, void *dev_id)
{
timer_tick();
return IRQ_HANDLED;
}
static struct irqaction shark_timer_irq = {
.name = "Shark Timer Tick",
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
.handler = shark_timer_interrupt,
};
/*
* Set up the timer interrupt.
*/
static void __init shark_timer_init(void)
{
outb(0x34, 0x43); /* binary, mode 0, LSB/MSB, Ch 0 */
outb(HZ_TIME & 0xff, 0x40); /* LSB of count */
outb(HZ_TIME >> 8, 0x40);
setup_irq(IRQ_TIMER, &shark_timer_irq);
}
static struct sys_timer shark_timer = {
.init = shark_timer_init,
};
static void shark_init_early(void)
{
disable_hlt();
}
MACHINE_START(SHARK, "Shark")
/* Maintainer: Alexander Schulz */
.atag_offset = 0x3000,
.map_io = shark_map_io,
.init_early = shark_init_early,
.init_irq = shark_init_irq,
.timer = &shark_timer,
.dma_zone_size = SZ_4M,
.restart = shark_restart,
MACHINE_END
|
gpl-2.0
|
msfkonsole/android_kernel_xiaomi_dior
|
drivers/hwmon/atxp1.c
|
4857
|
9400
|
/*
* atxp1.c - kernel module for setting CPU VID and general purpose
* I/Os using the Attansic ATXP1 chip.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-vid.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("System voltages control via Attansic ATXP1");
MODULE_VERSION("0.6.3");
MODULE_AUTHOR("Sebastian Witt <se.witt@gmx.net>");
#define ATXP1_VID 0x00
#define ATXP1_CVID 0x01
#define ATXP1_GPIO1 0x06
#define ATXP1_GPIO2 0x0a
#define ATXP1_VIDENA 0x20
#define ATXP1_VIDMASK 0x1f
#define ATXP1_GPIO1MASK 0x0f
static const unsigned short normal_i2c[] = { 0x37, 0x4e, I2C_CLIENT_END };
static int atxp1_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static int atxp1_remove(struct i2c_client *client);
static struct atxp1_data *atxp1_update_device(struct device *dev);
static int atxp1_detect(struct i2c_client *client, struct i2c_board_info *info);
static const struct i2c_device_id atxp1_id[] = {
{ "atxp1", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, atxp1_id);
static struct i2c_driver atxp1_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "atxp1",
},
.probe = atxp1_probe,
.remove = atxp1_remove,
.id_table = atxp1_id,
.detect = atxp1_detect,
.address_list = normal_i2c,
};
struct atxp1_data {
struct device *hwmon_dev;
struct mutex update_lock;
unsigned long last_updated;
u8 valid;
struct {
u8 vid; /* VID output register */
u8 cpu_vid; /* VID input from CPU */
u8 gpio1; /* General purpose I/O register 1 */
u8 gpio2; /* General purpose I/O register 2 */
} reg;
u8 vrm; /* Detected CPU VRM */
};
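/* refresh the cached registers at most once per second, or when the
* cache has been invalidated by a write */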
static struct atxp1_data *atxp1_update_device(struct device *dev)
{
struct i2c_client *client;
struct atxp1_data *data;
client = to_i2c_client(dev);
data = i2c_get_clientdata(client);
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
/* Update local register data */
data->reg.vid = i2c_smbus_read_byte_data(client, ATXP1_VID);
data->reg.cpu_vid = i2c_smbus_read_byte_data(client,
ATXP1_CVID);
data->reg.gpio1 = i2c_smbus_read_byte_data(client, ATXP1_GPIO1);
data->reg.gpio2 = i2c_smbus_read_byte_data(client, ATXP1_GPIO2);
data->valid = 1;
}
mutex_unlock(&data->update_lock);
return data;
}
/* sys file functions for cpu0_vid */
static ssize_t atxp1_showvcore(struct device *dev,
struct device_attribute *attr, char *buf)
{
int size;
struct atxp1_data *data;
data = atxp1_update_device(dev);
size = sprintf(buf, "%d\n", vid_from_reg(data->reg.vid & ATXP1_VIDMASK,
data->vrm));
return size;
}
static ssize_t atxp1_storevcore(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct atxp1_data *data;
struct i2c_client *client;
int vid, cvid;
unsigned long vcore;
int err;
client = to_i2c_client(dev);
data = atxp1_update_device(dev);
err = kstrtoul(buf, 10, &vcore);
if (err)
return err;
vcore /= 25;
vcore *= 25;
/* Calculate VID */
vid = vid_to_reg(vcore, data->vrm);
if (vid < 0) {
dev_err(dev, "VID calculation failed.\n");
return vid;
}
/*
* If output enabled, use control register value.
* Otherwise original CPU VID
*/
if (data->reg.vid & ATXP1_VIDENA)
cvid = data->reg.vid & ATXP1_VIDMASK;
else
cvid = data->reg.cpu_vid;
/* Nothing changed, aborting */
if (vid == cvid)
return count;
dev_dbg(dev, "Setting VCore to %d mV (0x%02x)\n", (int)vcore, vid);
/* Write every 25 mV step to increase stability */
if (cvid > vid) {
for (; cvid >= vid; cvid--)
i2c_smbus_write_byte_data(client,
ATXP1_VID, cvid | ATXP1_VIDENA);
} else {
for (; cvid <= vid; cvid++)
i2c_smbus_write_byte_data(client,
ATXP1_VID, cvid | ATXP1_VIDENA);
}
data->valid = 0;
return count;
}
/*
* CPU core reference voltage
* unit: millivolt
*/
static DEVICE_ATTR(cpu0_vid, S_IRUGO | S_IWUSR, atxp1_showvcore,
atxp1_storevcore);
/* sys file functions for GPIO1 */
static ssize_t atxp1_showgpio1(struct device *dev,
struct device_attribute *attr, char *buf)
{
int size;
struct atxp1_data *data;
data = atxp1_update_device(dev);
size = sprintf(buf, "0x%02x\n", data->reg.gpio1 & ATXP1_GPIO1MASK);
return size;
}
static ssize_t atxp1_storegpio1(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
struct atxp1_data *data;
struct i2c_client *client;
unsigned long value;
int err;
client = to_i2c_client(dev);
data = atxp1_update_device(dev);
err = kstrtoul(buf, 16, &value);
if (err)
return err;
value &= ATXP1_GPIO1MASK;
if (value != (data->reg.gpio1 & ATXP1_GPIO1MASK)) {
dev_info(dev, "Writing 0x%x to GPIO1.\n", (unsigned int)value);
i2c_smbus_write_byte_data(client, ATXP1_GPIO1, value);
data->valid = 0;
}
return count;
}
/*
* GPIO1 data register
* unit: Four bit as hex (e.g. 0x0f)
*/
static DEVICE_ATTR(gpio1, S_IRUGO | S_IWUSR, atxp1_showgpio1, atxp1_storegpio1);
/* sys file functions for GPIO2 */
static ssize_t atxp1_showgpio2(struct device *dev,
struct device_attribute *attr, char *buf)
{
int size;
struct atxp1_data *data;
data = atxp1_update_device(dev);
size = sprintf(buf, "0x%02x\n", data->reg.gpio2);
return size;
}
static ssize_t atxp1_storegpio2(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct atxp1_data *data = atxp1_update_device(dev);
struct i2c_client *client = to_i2c_client(dev);
unsigned long value;
int err;
err = kstrtoul(buf, 16, &value);
if (err)
return err;
value &= 0xff;
if (value != data->reg.gpio2) {
dev_info(dev, "Writing 0x%x to GPIO1.\n", (unsigned int)value);
i2c_smbus_write_byte_data(client, ATXP1_GPIO2, value);
data->valid = 0;
}
return count;
}
/*
* GPIO2 data register
* unit: Eight bit as hex (e.g. 0xff)
*/
static DEVICE_ATTR(gpio2, S_IRUGO | S_IWUSR, atxp1_showgpio2, atxp1_storegpio2);
static struct attribute *atxp1_attributes[] = {
&dev_attr_gpio1.attr,
&dev_attr_gpio2.attr,
&dev_attr_cpu0_vid.attr,
NULL
};
static const struct attribute_group atxp1_group = {
.attrs = atxp1_attributes,
};
/* Return 0 if detection is successful, -ENODEV otherwise */
static int atxp1_detect(struct i2c_client *new_client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = new_client->adapter;
u8 temp;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
/* Detect ATXP1, checking if vendor ID registers are all zero */
if (!((i2c_smbus_read_byte_data(new_client, 0x3e) == 0) &&
(i2c_smbus_read_byte_data(new_client, 0x3f) == 0) &&
(i2c_smbus_read_byte_data(new_client, 0xfe) == 0) &&
(i2c_smbus_read_byte_data(new_client, 0xff) == 0)))
return -ENODEV;
/*
* No vendor ID; now check whether the non-existent registers 0x10 and
* 0x11 read back the same value as register 0x00
*/
temp = i2c_smbus_read_byte_data(new_client, 0x00);
if (!((i2c_smbus_read_byte_data(new_client, 0x10) == temp) &&
(i2c_smbus_read_byte_data(new_client, 0x11) == temp)))
return -ENODEV;
/* Get VRM */
temp = vid_which_vrm();
if ((temp != 90) && (temp != 91)) {
dev_err(&adapter->dev, "atxp1: Not supporting VRM %d.%d\n",
temp / 10, temp % 10);
return -ENODEV;
}
strlcpy(info->type, "atxp1", I2C_NAME_SIZE);
return 0;
}
static int atxp1_probe(struct i2c_client *new_client,
const struct i2c_device_id *id)
{
struct atxp1_data *data;
int err;
data = kzalloc(sizeof(struct atxp1_data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto exit;
}
/* Get VRM */
data->vrm = vid_which_vrm();
i2c_set_clientdata(new_client, data);
data->valid = 0;
mutex_init(&data->update_lock);
/* Register sysfs hooks */
err = sysfs_create_group(&new_client->dev.kobj, &atxp1_group);
if (err)
goto exit_free;
data->hwmon_dev = hwmon_device_register(&new_client->dev);
if (IS_ERR(data->hwmon_dev)) {
err = PTR_ERR(data->hwmon_dev);
goto exit_remove_files;
}
dev_info(&new_client->dev, "Using VRM: %d.%d\n",
data->vrm / 10, data->vrm % 10);
return 0;
exit_remove_files:
sysfs_remove_group(&new_client->dev.kobj, &atxp1_group);
exit_free:
kfree(data);
exit:
return err;
};
static int atxp1_remove(struct i2c_client *client)
{
struct atxp1_data *data = i2c_get_clientdata(client);
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &atxp1_group);
kfree(data);
return 0;
};
module_i2c_driver(atxp1_driver);
|
gpl-2.0
|
Pafcholini/emotion_beta_511_no_updates
|
drivers/media/dvb-frontends/dib7000m.c
|
7929
|
42198
|
/*
* Linux-DVB Driver for DiBcom's DiB7000M and
* first generation DiB7000P-demodulator-family.
*
* Copyright (C) 2005-7 DiBcom (http://www.dibcom.fr/)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
#include "dvb_frontend.h"
#include "dib7000m.h"
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "turn on debugging (default: 0)");
#define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB7000M: "); printk(args); printk("\n"); } } while (0)
struct dib7000m_state {
struct dvb_frontend demod;
struct dib7000m_config cfg;
u8 i2c_addr;
struct i2c_adapter *i2c_adap;
struct dibx000_i2c_master i2c_master;
/* offset is 1 in case of the 7000MC */
u8 reg_offs;
u16 wbd_ref;
u8 current_band;
u32 current_bandwidth;
struct dibx000_agc_config *current_agc;
u32 timf;
u32 timf_default;
u32 internal_clk;
u8 div_force_off : 1;
u8 div_state : 1;
u16 div_sync_wait;
u16 revision;
u8 agc_state;
/* for the I2C transfer */
struct i2c_msg msg[2];
u8 i2c_write_buffer[4];
u8 i2c_read_buffer[2];
struct mutex i2c_buffer_lock;
};
enum dib7000m_power_mode {
DIB7000M_POWER_ALL = 0,
DIB7000M_POWER_NO,
DIB7000M_POWER_INTERF_ANALOG_AGC,
DIB7000M_POWER_COR4_DINTLV_ICIRM_EQUAL_CFROD,
DIB7000M_POWER_COR4_CRY_ESRAM_MOUT_NUD,
DIB7000M_POWER_INTERFACE_ONLY,
};
static u16 dib7000m_read_word(struct dib7000m_state *state, u16 reg)
{
u16 ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
dprintk("could not acquire lock");
return 0;
}
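/* read: send the 16-bit register address (MSB first, with the top bit
* set), then fetch the 16-bit value back in a second I2C message */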
state->i2c_write_buffer[0] = (reg >> 8) | 0x80;
state->i2c_write_buffer[1] = reg & 0xff;
memset(state->msg, 0, 2 * sizeof(struct i2c_msg));
state->msg[0].addr = state->i2c_addr >> 1;
state->msg[0].flags = 0;
state->msg[0].buf = state->i2c_write_buffer;
state->msg[0].len = 2;
state->msg[1].addr = state->i2c_addr >> 1;
state->msg[1].flags = I2C_M_RD;
state->msg[1].buf = state->i2c_read_buffer;
state->msg[1].len = 2;
if (i2c_transfer(state->i2c_adap, state->msg, 2) != 2)
dprintk("i2c read error on %d",reg);
ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
mutex_unlock(&state->i2c_buffer_lock);
return ret;
}
static int dib7000m_write_word(struct dib7000m_state *state, u16 reg, u16 val)
{
int ret;
if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
dprintk("could not acquire lock");
return -EINVAL;
}
state->i2c_write_buffer[0] = (reg >> 8) & 0xff;
state->i2c_write_buffer[1] = reg & 0xff;
state->i2c_write_buffer[2] = (val >> 8) & 0xff;
state->i2c_write_buffer[3] = val & 0xff;
memset(&state->msg[0], 0, sizeof(struct i2c_msg));
state->msg[0].addr = state->i2c_addr >> 1;
state->msg[0].flags = 0;
state->msg[0].buf = state->i2c_write_buffer;
state->msg[0].len = 4;
ret = (i2c_transfer(state->i2c_adap, state->msg, 1) != 1 ?
-EREMOTEIO : 0);
mutex_unlock(&state->i2c_buffer_lock);
return ret;
}
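/*
* write a default-value table: the table is a sequence of
* { count, start_register, <count> values } groups, terminated by a
* count of 0 (see dib7000m_defaults_common below)
*/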
static void dib7000m_write_tab(struct dib7000m_state *state, u16 *buf)
{
u16 l = 0, r, *n;
n = buf;
l = *n++;
while (l) {
r = *n++;
if (state->reg_offs && (r >= 112 && r <= 331)) // compensate for 7000MC
r++;
do {
dib7000m_write_word(state, r, *n++);
r++;
} while (--l);
l = *n++;
}
}
static int dib7000m_set_output_mode(struct dib7000m_state *state, int mode)
{
int ret = 0;
u16 outreg, fifo_threshold, smo_mode,
sram = 0x0005; /* by default SRAM output is disabled */
outreg = 0;
fifo_threshold = 1792;
smo_mode = (dib7000m_read_word(state, 294 + state->reg_offs) & 0x0010) | (1 << 1);
dprintk( "setting output mode for demod %p to %d", &state->demod, mode);
switch (mode) {
case OUTMODE_MPEG2_PAR_GATED_CLK: // STBs with parallel gated clock
outreg = (1 << 10); /* 0x0400 */
break;
case OUTMODE_MPEG2_PAR_CONT_CLK: // STBs with parallel continuous clock
outreg = (1 << 10) | (1 << 6); /* 0x0440 */
break;
case OUTMODE_MPEG2_SERIAL: // STBs with serial input
outreg = (1 << 10) | (2 << 6) | (0 << 1); /* 0x0482 */
break;
case OUTMODE_DIVERSITY:
if (state->cfg.hostbus_diversity)
outreg = (1 << 10) | (4 << 6); /* 0x0500 */
else
sram |= 0x0c00;
break;
case OUTMODE_MPEG2_FIFO: // e.g. USB feeding
smo_mode |= (3 << 1);
fifo_threshold = 512;
outreg = (1 << 10) | (5 << 6);
break;
case OUTMODE_HIGH_Z: // disable
outreg = 0;
break;
default:
dprintk( "Unhandled output_mode passed to be set for demod %p",&state->demod);
break;
}
if (state->cfg.output_mpeg2_in_188_bytes)
smo_mode |= (1 << 5) ;
ret |= dib7000m_write_word(state, 294 + state->reg_offs, smo_mode);
ret |= dib7000m_write_word(state, 295 + state->reg_offs, fifo_threshold); /* synchronous fread */
ret |= dib7000m_write_word(state, 1795, outreg);
ret |= dib7000m_write_word(state, 1805, sram);
if (state->revision == 0x4003) {
u16 clk_cfg1 = dib7000m_read_word(state, 909) & 0xfffd;
if (mode == OUTMODE_DIVERSITY)
clk_cfg1 |= (1 << 1); // P_O_CLK_en
dib7000m_write_word(state, 909, clk_cfg1);
}
return ret;
}
static void dib7000m_set_power_mode(struct dib7000m_state *state, enum dib7000m_power_mode mode)
{
/* by default everything is going to be powered off */
u16 reg_903 = 0xffff, reg_904 = 0xffff, reg_905 = 0xffff, reg_906 = 0x3fff;
u8 offset = 0;
/* now, depending on the requested mode, we power on */
switch (mode) {
/* power up everything in the demod */
case DIB7000M_POWER_ALL:
reg_903 = 0x0000; reg_904 = 0x0000; reg_905 = 0x0000; reg_906 = 0x0000;
break;
/* just leave power on the control-interfaces: GPIO and (I2C or SDIO or SRAM) */
case DIB7000M_POWER_INTERFACE_ONLY: /* TODO power up either SDIO or I2C or SRAM */
reg_905 &= ~((1 << 7) | (1 << 6) | (1 << 5) | (1 << 2));
break;
case DIB7000M_POWER_INTERF_ANALOG_AGC:
reg_903 &= ~((1 << 15) | (1 << 14) | (1 << 11) | (1 << 10));
reg_905 &= ~((1 << 7) | (1 << 6) | (1 << 5) | (1 << 4) | (1 << 2));
reg_906 &= ~((1 << 0));
break;
case DIB7000M_POWER_COR4_DINTLV_ICIRM_EQUAL_CFROD:
reg_903 = 0x0000; reg_904 = 0x801f; reg_905 = 0x0000; reg_906 = 0x0000;
break;
case DIB7000M_POWER_COR4_CRY_ESRAM_MOUT_NUD:
reg_903 = 0x0000; reg_904 = 0x8000; reg_905 = 0x010b; reg_906 = 0x0000;
break;
case DIB7000M_POWER_NO:
break;
}
/* always power down unused parts */
if (!state->cfg.mobile_mode)
reg_904 |= (1 << 7) | (1 << 6) | (1 << 4) | (1 << 2) | (1 << 1);
/* P_sdio_select_clk = 0 on MC and after*/
if (state->revision != 0x4000)
reg_906 <<= 1;
if (state->revision == 0x4003)
offset = 1;
dib7000m_write_word(state, 903 + offset, reg_903);
dib7000m_write_word(state, 904 + offset, reg_904);
dib7000m_write_word(state, 905 + offset, reg_905);
dib7000m_write_word(state, 906 + offset, reg_906);
}
static int dib7000m_set_adc_state(struct dib7000m_state *state, enum dibx000_adc_states no)
{
int ret = 0;
u16 reg_913 = dib7000m_read_word(state, 913),
reg_914 = dib7000m_read_word(state, 914);
switch (no) {
case DIBX000_SLOW_ADC_ON:
reg_914 |= (1 << 1) | (1 << 0);
ret |= dib7000m_write_word(state, 914, reg_914);
reg_914 &= ~(1 << 1);
break;
case DIBX000_SLOW_ADC_OFF:
reg_914 |= (1 << 1) | (1 << 0);
break;
case DIBX000_ADC_ON:
if (state->revision == 0x4000) { // workaround for PA/MA
// power-up ADC
dib7000m_write_word(state, 913, 0);
dib7000m_write_word(state, 914, reg_914 & 0x3);
// power-down bandgap
dib7000m_write_word(state, 913, (1 << 15));
dib7000m_write_word(state, 914, reg_914 & 0x3);
}
reg_913 &= 0x0fff;
reg_914 &= 0x0003;
break;
case DIBX000_ADC_OFF: // leave the VBG voltage on
reg_913 |= (1 << 14) | (1 << 13) | (1 << 12);
reg_914 |= (1 << 5) | (1 << 4) | (1 << 3) | (1 << 2);
break;
case DIBX000_VBG_ENABLE:
reg_913 &= ~(1 << 15);
break;
case DIBX000_VBG_DISABLE:
reg_913 |= (1 << 15);
break;
default:
break;
}
// dprintk( "913: %x, 914: %x", reg_913, reg_914);
ret |= dib7000m_write_word(state, 913, reg_913);
ret |= dib7000m_write_word(state, 914, reg_914);
return ret;
}
static int dib7000m_set_bandwidth(struct dib7000m_state *state, u32 bw)
{
u32 timf;
if (!bw)
bw = 8000;
// store the current bandwidth for later use
state->current_bandwidth = bw;
if (state->timf == 0) {
dprintk( "using default timf");
timf = state->timf_default;
} else {
dprintk( "using updated timf");
timf = state->timf;
}
timf = timf * (bw / 50) / 160;
dib7000m_write_word(state, 23, (u16) ((timf >> 16) & 0xffff));
dib7000m_write_word(state, 24, (u16) ((timf ) & 0xffff));
return 0;
}
static int dib7000m_set_diversity_in(struct dvb_frontend *demod, int onoff)
{
struct dib7000m_state *state = demod->demodulator_priv;
if (state->div_force_off) {
dprintk( "diversity combination deactivated - forced by COFDM parameters");
onoff = 0;
}
state->div_state = (u8)onoff;
if (onoff) {
dib7000m_write_word(state, 263 + state->reg_offs, 6);
dib7000m_write_word(state, 264 + state->reg_offs, 6);
dib7000m_write_word(state, 266 + state->reg_offs, (state->div_sync_wait << 4) | (1 << 2) | (2 << 0));
} else {
dib7000m_write_word(state, 263 + state->reg_offs, 1);
dib7000m_write_word(state, 264 + state->reg_offs, 0);
dib7000m_write_word(state, 266 + state->reg_offs, 0);
}
return 0;
}
static int dib7000m_sad_calib(struct dib7000m_state *state)
{
/* internal */
// dib7000m_write_word(state, 928, (3 << 14) | (1 << 12) | (524 << 0)); // the sampling clock of the SAD is written in set_bandwidth
dib7000m_write_word(state, 929, (0 << 1) | (0 << 0));
dib7000m_write_word(state, 930, 776); // 0.625*3.3 / 4096
/* do the calibration */
dib7000m_write_word(state, 929, (1 << 0));
dib7000m_write_word(state, 929, (0 << 0));
msleep(1);
return 0;
}
static void dib7000m_reset_pll_common(struct dib7000m_state *state, const struct dibx000_bandwidth_config *bw)
{
dib7000m_write_word(state, 18, (u16) (((bw->internal*1000) >> 16) & 0xffff));
dib7000m_write_word(state, 19, (u16) ( (bw->internal*1000) & 0xffff));
dib7000m_write_word(state, 21, (u16) ( (bw->ifreq >> 16) & 0xffff));
dib7000m_write_word(state, 22, (u16) ( bw->ifreq & 0xffff));
dib7000m_write_word(state, 928, bw->sad_cfg);
}
static void dib7000m_reset_pll(struct dib7000m_state *state)
{
const struct dibx000_bandwidth_config *bw = state->cfg.bw;
u16 reg_907,reg_910;
/* default */
reg_907 = (bw->pll_bypass << 15) | (bw->modulo << 7) |
(bw->ADClkSrc << 6) | (bw->IO_CLK_en_core << 5) | (bw->bypclk_div << 2) |
(bw->enable_refdiv << 1) | (0 << 0);
reg_910 = (((bw->pll_ratio >> 6) & 0x3) << 3) | (bw->pll_range << 1) | bw->pll_reset;
// the oscillator frequency should be 30 MHz for the master (the default board_parameters give that value)
// this only works for 30 MHz crystals
if (!state->cfg.quartz_direct) {
reg_910 |= (1 << 5); // forcing the predivider to 1
// if the previous front-end is baseband, its output frequency is 15 MHz (prev freq divided by 2)
if(state->cfg.input_clk_is_div_2)
reg_907 |= (16 << 9);
else // otherwise the previous front-end puts out its input (default 30MHz) - no extra division necessary
reg_907 |= (8 << 9);
} else {
reg_907 |= (bw->pll_ratio & 0x3f) << 9;
reg_910 |= (bw->pll_prediv << 5);
}
dib7000m_write_word(state, 910, reg_910); // pll cfg
dib7000m_write_word(state, 907, reg_907); // clk cfg0
dib7000m_write_word(state, 908, 0x0006); // clk_cfg1
dib7000m_reset_pll_common(state, bw);
}
static void dib7000mc_reset_pll(struct dib7000m_state *state)
{
const struct dibx000_bandwidth_config *bw = state->cfg.bw;
u16 clk_cfg1;
// clk_cfg0
dib7000m_write_word(state, 907, (bw->pll_prediv << 8) | (bw->pll_ratio << 0));
// clk_cfg1
//dib7000m_write_word(state, 908, (1 << 14) | (3 << 12) |(0 << 11) |
clk_cfg1 = (0 << 14) | (3 << 12) |(0 << 11) |
(bw->IO_CLK_en_core << 10) | (bw->bypclk_div << 5) | (bw->enable_refdiv << 4) |
(1 << 3) | (bw->pll_range << 1) | (bw->pll_reset << 0);
dib7000m_write_word(state, 908, clk_cfg1);
clk_cfg1 = (clk_cfg1 & 0xfff7) | (bw->pll_bypass << 3);
dib7000m_write_word(state, 908, clk_cfg1);
// smpl_cfg
dib7000m_write_word(state, 910, (1 << 12) | (2 << 10) | (bw->modulo << 8) | (bw->ADClkSrc << 7));
dib7000m_reset_pll_common(state, bw);
}
static int dib7000m_reset_gpio(struct dib7000m_state *st)
{
/* reset the GPIOs */
dib7000m_write_word(st, 773, st->cfg.gpio_dir);
dib7000m_write_word(st, 774, st->cfg.gpio_val);
/* TODO 782 is P_gpio_od */
dib7000m_write_word(st, 775, st->cfg.gpio_pwm_pos);
dib7000m_write_word(st, 780, st->cfg.pwm_freq_div);
return 0;
}
static u16 dib7000m_defaults_common[] =
{
// auto search configuration
3, 2,
0x0004,
0x1000,
0x0814,
12, 6,
0x001b,
0x7740,
0x005b,
0x8d80,
0x01c9,
0xc380,
0x0000,
0x0080,
0x0000,
0x0090,
0x0001,
0xd4c0,
1, 26,
0x6680, // P_corm_thres Lock algorithms configuration
1, 170,
0x0410, // P_palf_alpha_regul, P_palf_filter_freeze, P_palf_filter_on
8, 173,
0,
0,
0,
0,
0,
0,
0,
0,
1, 182,
8192, // P_fft_nb_to_cut
2, 195,
0x0ccd, // P_pha3_thres
0, // P_cti_use_cpe, P_cti_use_prog
1, 205,
0x200f, // P_cspu_regul, P_cspu_win_cut
5, 214,
0x023d, // P_adp_regul_cnt
0x00a4, // P_adp_noise_cnt
0x00a4, // P_adp_regul_ext
0x7ff0, // P_adp_noise_ext
0x3ccc, // P_adp_fil
1, 226,
0, // P_2d_byp_ti_num
1, 255,
0x800, // P_equal_thres_wgn
1, 263,
0x0001,
1, 281,
0x0010, // P_fec_*
1, 294,
0x0062, // P_smo_mode, P_smo_rs_discard, P_smo_fifo_flush, P_smo_pid_parse, P_smo_error_discard
0
};
static u16 dib7000m_defaults[] =
{
/* set ADC level to -16 */
11, 76,
(1 << 13) - 825 - 117,
(1 << 13) - 837 - 117,
(1 << 13) - 811 - 117,
(1 << 13) - 766 - 117,
(1 << 13) - 737 - 117,
(1 << 13) - 693 - 117,
(1 << 13) - 648 - 117,
(1 << 13) - 619 - 117,
(1 << 13) - 575 - 117,
(1 << 13) - 531 - 117,
(1 << 13) - 501 - 117,
// Tuner IO bank: max drive (14mA)
1, 912,
0x2c8a,
1, 1817,
1,
0,
};
static int dib7000m_demod_reset(struct dib7000m_state *state)
{
dib7000m_set_power_mode(state, DIB7000M_POWER_ALL);
/* always leave the VBG voltage on - it consumes almost nothing but takes a long time to start */
dib7000m_set_adc_state(state, DIBX000_VBG_ENABLE);
/* restart all parts */
dib7000m_write_word(state, 898, 0xffff);
dib7000m_write_word(state, 899, 0xffff);
dib7000m_write_word(state, 900, 0xff0f);
dib7000m_write_word(state, 901, 0xfffc);
dib7000m_write_word(state, 898, 0);
dib7000m_write_word(state, 899, 0);
dib7000m_write_word(state, 900, 0);
dib7000m_write_word(state, 901, 0);
if (state->revision == 0x4000)
dib7000m_reset_pll(state);
else
dib7000mc_reset_pll(state);
if (dib7000m_reset_gpio(state) != 0)
dprintk( "GPIO reset was not successful.");
if (dib7000m_set_output_mode(state, OUTMODE_HIGH_Z) != 0)
dprintk( "OUTPUT_MODE could not be reset.");
/* unforce divstr regardless of whether i2c enumeration was done or not */
dib7000m_write_word(state, 1794, dib7000m_read_word(state, 1794) & ~(1 << 1) );
dib7000m_set_bandwidth(state, 8000);
dib7000m_set_adc_state(state, DIBX000_SLOW_ADC_ON);
dib7000m_sad_calib(state);
dib7000m_set_adc_state(state, DIBX000_SLOW_ADC_OFF);
if (state->cfg.dvbt_mode)
dib7000m_write_word(state, 1796, 0x0); // select DVB-T output
if (state->cfg.mobile_mode)
dib7000m_write_word(state, 261 + state->reg_offs, 2);
else
dib7000m_write_word(state, 224 + state->reg_offs, 1);
// P_iqc_alpha_pha, P_iqc_alpha_amp, P_iqc_dcc_alpha, ...
if(state->cfg.tuner_is_baseband)
dib7000m_write_word(state, 36, 0x0755);
else
dib7000m_write_word(state, 36, 0x1f55);
// P_divclksel=3 P_divbitsel=1
if (state->revision == 0x4000)
dib7000m_write_word(state, 909, (3 << 10) | (1 << 6));
else
dib7000m_write_word(state, 909, (3 << 4) | 1);
dib7000m_write_tab(state, dib7000m_defaults_common);
dib7000m_write_tab(state, dib7000m_defaults);
dib7000m_set_power_mode(state, DIB7000M_POWER_INTERFACE_ONLY);
state->internal_clk = state->cfg.bw->internal;
return 0;
}
static void dib7000m_restart_agc(struct dib7000m_state *state)
{
// P_restart_iqc & P_restart_agc
dib7000m_write_word(state, 898, 0x0c00);
dib7000m_write_word(state, 898, 0x0000);
}
static int dib7000m_agc_soft_split(struct dib7000m_state *state)
{
u16 agc,split_offset;
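/* derive the split offset from the measured AGC gain: clamp it to
* split.min/split.max outside the threshold window and interpolate
* linearly inside it */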
if(!state->current_agc || !state->current_agc->perform_agc_softsplit || state->current_agc->split.max == 0)
return 0;
// n_agc_global
agc = dib7000m_read_word(state, 390);
if (agc > state->current_agc->split.min_thres)
split_offset = state->current_agc->split.min;
else if (agc < state->current_agc->split.max_thres)
split_offset = state->current_agc->split.max;
else
split_offset = state->current_agc->split.max *
(agc - state->current_agc->split.min_thres) /
(state->current_agc->split.max_thres - state->current_agc->split.min_thres);
dprintk( "AGC split_offset: %d",split_offset);
// P_agc_force_split and P_agc_split_offset
return dib7000m_write_word(state, 103, (dib7000m_read_word(state, 103) & 0xff00) | split_offset);
}
static int dib7000m_update_lna(struct dib7000m_state *state)
{
u16 dyn_gain;
if (state->cfg.update_lna) {
// read dyn_gain here (because it is demod-dependent and not fe)
dyn_gain = dib7000m_read_word(state, 390);
if (state->cfg.update_lna(&state->demod,dyn_gain)) { // LNA has changed
dib7000m_restart_agc(state);
return 1;
}
}
return 0;
}
static int dib7000m_set_agc_config(struct dib7000m_state *state, u8 band)
{
struct dibx000_agc_config *agc = NULL;
int i;
if (state->current_band == band && state->current_agc != NULL)
return 0;
state->current_band = band;
for (i = 0; i < state->cfg.agc_config_count; i++)
if (state->cfg.agc[i].band_caps & band) {
agc = &state->cfg.agc[i];
break;
}
if (agc == NULL) {
dprintk( "no valid AGC configuration found for band 0x%02x",band);
return -EINVAL;
}
state->current_agc = agc;
/* AGC */
dib7000m_write_word(state, 72 , agc->setup);
dib7000m_write_word(state, 73 , agc->inv_gain);
dib7000m_write_word(state, 74 , agc->time_stabiliz);
dib7000m_write_word(state, 97 , (agc->alpha_level << 12) | agc->thlock);
// Demod AGC loop configuration
dib7000m_write_word(state, 98, (agc->alpha_mant << 5) | agc->alpha_exp);
dib7000m_write_word(state, 99, (agc->beta_mant << 6) | agc->beta_exp);
dprintk( "WBD: ref: %d, sel: %d, active: %d, alpha: %d",
state->wbd_ref != 0 ? state->wbd_ref : agc->wbd_ref, agc->wbd_sel, !agc->perform_agc_softsplit, agc->wbd_sel);
/* AGC continued */
if (state->wbd_ref != 0)
dib7000m_write_word(state, 102, state->wbd_ref);
else // use default
dib7000m_write_word(state, 102, agc->wbd_ref);
dib7000m_write_word(state, 103, (agc->wbd_alpha << 9) | (agc->perform_agc_softsplit << 8) );
dib7000m_write_word(state, 104, agc->agc1_max);
dib7000m_write_word(state, 105, agc->agc1_min);
dib7000m_write_word(state, 106, agc->agc2_max);
dib7000m_write_word(state, 107, agc->agc2_min);
dib7000m_write_word(state, 108, (agc->agc1_pt1 << 8) | agc->agc1_pt2 );
dib7000m_write_word(state, 109, (agc->agc1_slope1 << 8) | agc->agc1_slope2);
dib7000m_write_word(state, 110, (agc->agc2_pt1 << 8) | agc->agc2_pt2);
dib7000m_write_word(state, 111, (agc->agc2_slope1 << 8) | agc->agc2_slope2);
if (state->revision > 0x4000) { // settings for the MC
dib7000m_write_word(state, 71, agc->agc1_pt3);
// dprintk( "929: %x %d %d",
// (dib7000m_read_word(state, 929) & 0xffe3) | (agc->wbd_inv << 4) | (agc->wbd_sel << 2), agc->wbd_inv, agc->wbd_sel);
dib7000m_write_word(state, 929, (dib7000m_read_word(state, 929) & 0xffe3) | (agc->wbd_inv << 4) | (agc->wbd_sel << 2));
} else {
// override the chip's wrong default values
u16 b[9] = { 676, 696, 717, 737, 758, 778, 799, 819, 840 };
for (i = 0; i < 9; i++)
dib7000m_write_word(state, 88 + i, b[i]);
}
return 0;
}
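/* store the measured timing frequency normalized back to 8 MHz
* bandwidth - the inverse of the scaling in dib7000m_set_bandwidth */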
static void dib7000m_update_timf(struct dib7000m_state *state)
{
u32 timf = (dib7000m_read_word(state, 436) << 16) | dib7000m_read_word(state, 437);
state->timf = timf * 160 / (state->current_bandwidth / 50);
dib7000m_write_word(state, 23, (u16) (timf >> 16));
dib7000m_write_word(state, 24, (u16) (timf & 0xffff));
dprintk( "updated timf_frequency: %d (default: %d)",state->timf, state->timf_default);
}
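/*
* AGC startup state machine: each call performs one step and returns
* the time to wait before the next call, or -1 on error or once
* startup is complete
*/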
static int dib7000m_agc_startup(struct dvb_frontend *demod)
{
struct dtv_frontend_properties *ch = &demod->dtv_property_cache;
struct dib7000m_state *state = demod->demodulator_priv;
u16 cfg_72 = dib7000m_read_word(state, 72);
int ret = -1;
u8 *agc_state = &state->agc_state;
u8 agc_split;
switch (state->agc_state) {
case 0:
// set power-up level: interf+analog+AGC
dib7000m_set_power_mode(state, DIB7000M_POWER_INTERF_ANALOG_AGC);
dib7000m_set_adc_state(state, DIBX000_ADC_ON);
if (dib7000m_set_agc_config(state, BAND_OF_FREQUENCY(ch->frequency/1000)) != 0)
return -1;
ret = 7; /* ADC power up */
(*agc_state)++;
break;
case 1:
/* AGC initialization */
if (state->cfg.agc_control)
state->cfg.agc_control(&state->demod, 1);
dib7000m_write_word(state, 75, 32768);
if (!state->current_agc->perform_agc_softsplit) {
/* we are using the wbd - so slow AGC startup */
dib7000m_write_word(state, 103, 1 << 8); /* force 0 split on WBD and restart AGC */
(*agc_state)++;
ret = 5;
} else {
/* default AGC startup */
(*agc_state) = 4;
/* wait AGC rough lock time */
ret = 7;
}
dib7000m_restart_agc(state);
break;
case 2: /* fast split search path after 5sec */
dib7000m_write_word(state, 72, cfg_72 | (1 << 4)); /* freeze AGC loop */
dib7000m_write_word(state, 103, 2 << 9); /* fast split search 0.25kHz */
(*agc_state)++;
ret = 14;
break;
case 3: /* split search ended */
agc_split = (u8)dib7000m_read_word(state, 392); /* store the split value for the next time */
dib7000m_write_word(state, 75, dib7000m_read_word(state, 390)); /* set AGC gain start value */
dib7000m_write_word(state, 72, cfg_72 & ~(1 << 4)); /* std AGC loop */
dib7000m_write_word(state, 103, (state->current_agc->wbd_alpha << 9) | agc_split); /* standard split search */
dib7000m_restart_agc(state);
dprintk( "SPLIT %p: %hd", demod, agc_split);
(*agc_state)++;
ret = 5;
break;
case 4: /* LNA startup */
/* wait AGC accurate lock time */
ret = 7;
if (dib7000m_update_lna(state))
// wait only AGC rough lock time
ret = 5;
else
(*agc_state)++;
break;
case 5:
dib7000m_agc_soft_split(state);
if (state->cfg.agc_control)
state->cfg.agc_control(&state->demod, 0);
(*agc_state)++;
break;
default:
break;
}
return ret;
}
static void dib7000m_set_channel(struct dib7000m_state *state, struct dtv_frontend_properties *ch,
u8 seq)
{
u16 value, est[4];
dib7000m_set_bandwidth(state, BANDWIDTH_TO_KHZ(ch->bandwidth_hz));
/* nfft, guard, qam, alpha */
value = 0;
switch (ch->transmission_mode) {
case TRANSMISSION_MODE_2K: value |= (0 << 7); break;
case TRANSMISSION_MODE_4K: value |= (2 << 7); break;
default:
case TRANSMISSION_MODE_8K: value |= (1 << 7); break;
}
switch (ch->guard_interval) {
case GUARD_INTERVAL_1_32: value |= (0 << 5); break;
case GUARD_INTERVAL_1_16: value |= (1 << 5); break;
case GUARD_INTERVAL_1_4: value |= (3 << 5); break;
default:
case GUARD_INTERVAL_1_8: value |= (2 << 5); break;
}
switch (ch->modulation) {
case QPSK: value |= (0 << 3); break;
case QAM_16: value |= (1 << 3); break;
default:
case QAM_64: value |= (2 << 3); break;
}
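/* hierarchy is hardwired here: switching on HIERARCHY_1 always takes
* the default branch and sets the low bit */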
switch (HIERARCHY_1) {
case HIERARCHY_2: value |= 2; break;
case HIERARCHY_4: value |= 4; break;
default:
case HIERARCHY_1: value |= 1; break;
}
dib7000m_write_word(state, 0, value);
dib7000m_write_word(state, 5, (seq << 4));
/* P_dintl_native, P_dintlv_inv, P_hrch, P_code_rate, P_select_hp */
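/* the constant conditions below appear to be remnants of the DVB API
* conversion: native interleaving and HP stream selection are
* effectively hardwired on */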
value = 0;
if (1 != 0)
value |= (1 << 6);
if (ch->hierarchy == 1)
value |= (1 << 4);
if (1 == 1)
value |= 1;
switch ((ch->hierarchy == 0 || 1 == 1) ? ch->code_rate_HP : ch->code_rate_LP) {
case FEC_2_3: value |= (2 << 1); break;
case FEC_3_4: value |= (3 << 1); break;
case FEC_5_6: value |= (5 << 1); break;
case FEC_7_8: value |= (7 << 1); break;
default:
case FEC_1_2: value |= (1 << 1); break;
}
dib7000m_write_word(state, 267 + state->reg_offs, value);
/* offset loop parameters */
/* P_timf_alpha = 6, P_corm_alpha=6, P_corm_thres=0x80 */
dib7000m_write_word(state, 26, (6 << 12) | (6 << 8) | 0x80);
/* P_ctrl_inh_cor=0, P_ctrl_alpha_cor=4, P_ctrl_inh_isi=1, P_ctrl_alpha_isi=3, P_ctrl_inh_cor4=1, P_ctrl_alpha_cor4=3 */
dib7000m_write_word(state, 29, (0 << 14) | (4 << 10) | (1 << 9) | (3 << 5) | (1 << 4) | (0x3));
/* P_ctrl_freeze_pha_shift=0, P_ctrl_pha_off_max=3 */
dib7000m_write_word(state, 32, (0 << 4) | 0x3);
/* P_ctrl_sfreq_inh=0, P_ctrl_sfreq_step=5 */
dib7000m_write_word(state, 33, (0 << 4) | 0x5);
/* P_dvsy_sync_wait */
switch (ch->transmission_mode) {
case TRANSMISSION_MODE_8K: value = 256; break;
case TRANSMISSION_MODE_4K: value = 128; break;
case TRANSMISSION_MODE_2K:
default: value = 64; break;
}
switch (ch->guard_interval) {
case GUARD_INTERVAL_1_16: value *= 2; break;
case GUARD_INTERVAL_1_8: value *= 4; break;
case GUARD_INTERVAL_1_4: value *= 8; break;
default:
case GUARD_INTERVAL_1_32: value *= 1; break;
}
state->div_sync_wait = (value * 3) / 2 + 32; // add 50% SFN margin + compensate for one DVSY-fifo TODO
/* deactivate the possibility of diversity reception if extended interleave - not for 7000MC */
/* P_dvsy_sync_mode = 0, P_dvsy_sync_enable=1, P_dvcb_comb_mode=2 */
if (1 == 1 || state->revision > 0x4000)
state->div_force_off = 0;
else
state->div_force_off = 1;
dib7000m_set_diversity_in(&state->demod, state->div_state);
/* channel estimation fine configuration */
switch (ch->modulation) {
case QAM_64:
est[0] = 0x0148; /* P_adp_regul_cnt 0.04 */
est[1] = 0xfff0; /* P_adp_noise_cnt -0.002 */
est[2] = 0x00a4; /* P_adp_regul_ext 0.02 */
est[3] = 0xfff8; /* P_adp_noise_ext -0.001 */
break;
case QAM_16:
est[0] = 0x023d; /* P_adp_regul_cnt 0.07 */
est[1] = 0xffdf; /* P_adp_noise_cnt -0.004 */
est[2] = 0x00a4; /* P_adp_regul_ext 0.02 */
est[3] = 0xfff0; /* P_adp_noise_ext -0.002 */
break;
default:
est[0] = 0x099a; /* P_adp_regul_cnt 0.3 */
est[1] = 0xffae; /* P_adp_noise_cnt -0.01 */
est[2] = 0x0333; /* P_adp_regul_ext 0.1 */
est[3] = 0xfff8; /* P_adp_noise_ext -0.002 */
break;
}
for (value = 0; value < 4; value++)
dib7000m_write_word(state, 214 + value + state->reg_offs, est[value]);
// set power-up level: autosearch
dib7000m_set_power_mode(state, DIB7000M_POWER_COR4_DINTLV_ICIRM_EQUAL_CFROD);
}
static int dib7000m_autosearch_start(struct dvb_frontend *demod)
{
struct dtv_frontend_properties *ch = &demod->dtv_property_cache;
struct dib7000m_state *state = demod->demodulator_priv;
struct dtv_frontend_properties schan;
int ret = 0;
u32 value, factor;
schan = *ch;
schan.modulation = QAM_64;
schan.guard_interval = GUARD_INTERVAL_1_32;
schan.transmission_mode = TRANSMISSION_MODE_8K;
schan.code_rate_HP = FEC_2_3;
schan.code_rate_LP = FEC_3_4;
schan.hierarchy = 0;
dib7000m_set_channel(state, &schan, 7);
factor = BANDWIDTH_TO_KHZ(schan.bandwidth_hz);
if (factor >= 5000)
factor = 1;
else
factor = 6;
// always use the 8 MHz settings here; the lock times for 7 and 6 MHz are longer
value = 30 * state->internal_clk * factor;
ret |= dib7000m_write_word(state, 6, (u16) ((value >> 16) & 0xffff)); // lock0 wait time
ret |= dib7000m_write_word(state, 7, (u16) (value & 0xffff)); // lock0 wait time
value = 100 * state->internal_clk * factor;
ret |= dib7000m_write_word(state, 8, (u16) ((value >> 16) & 0xffff)); // lock1 wait time
ret |= dib7000m_write_word(state, 9, (u16) (value & 0xffff)); // lock1 wait time
value = 500 * state->internal_clk * factor;
ret |= dib7000m_write_word(state, 10, (u16) ((value >> 16) & 0xffff)); // lock2 wait time
ret |= dib7000m_write_word(state, 11, (u16) (value & 0xffff)); // lock2 wait time
// start search
value = dib7000m_read_word(state, 0);
ret |= dib7000m_write_word(state, 0, (u16) (value | (1 << 9)));
/* clear n_irq_pending */
if (state->revision == 0x4000)
dib7000m_write_word(state, 1793, 0);
else
dib7000m_read_word(state, 537);
ret |= dib7000m_write_word(state, 0, (u16) value);
return ret;
}
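/*
 * Illustrative, standalone user-space sketch (not part of this driver):
 * shows how a 32-bit lock wait time is split across a high/low pair of
 * 16-bit register words, as done for registers 6..11 in
 * dib7000m_autosearch_start() above. The internal_clk and factor values
 * are made-up placeholders.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t internal_clk = 60000;	/* hypothetical clock value */
	uint32_t factor = 1;		/* the bandwidth >= 5 MHz case above */
	uint32_t value = 30 * internal_clk * factor;

	/* the driver writes the high half to the even register and the
	 * low half to the following odd register */
	printf("value=0x%08x -> hi=0x%04x lo=0x%04x\n", value,
	       (value >> 16) & 0xffff, value & 0xffff);
	return 0;
}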
static int dib7000m_autosearch_irq(struct dib7000m_state *state, u16 reg)
{
u16 irq_pending = dib7000m_read_word(state, reg);
if (irq_pending & 0x1) { // failed
dprintk( "autosearch failed");
return 1;
}
if (irq_pending & 0x2) { // succeeded
dprintk( "autosearch succeeded");
return 2;
}
return 0; // still pending
}
static int dib7000m_autosearch_is_irq(struct dvb_frontend *demod)
{
struct dib7000m_state *state = demod->demodulator_priv;
if (state->revision == 0x4000)
return dib7000m_autosearch_irq(state, 1793);
else
return dib7000m_autosearch_irq(state, 537);
}
static int dib7000m_tune(struct dvb_frontend *demod)
{
struct dtv_frontend_properties *ch = &demod->dtv_property_cache;
struct dib7000m_state *state = demod->demodulator_priv;
int ret = 0;
u16 value;
// apply the channel configuration; without one there is nothing to tune
if (ch != NULL)
dib7000m_set_channel(state, ch, 0);
else
return -EINVAL;
// restart demod
ret |= dib7000m_write_word(state, 898, 0x4000);
ret |= dib7000m_write_word(state, 898, 0x0000);
msleep(45);
dib7000m_set_power_mode(state, DIB7000M_POWER_COR4_CRY_ESRAM_MOUT_NUD);
/* P_ctrl_inh_cor=0, P_ctrl_alpha_cor=4, P_ctrl_inh_isi=0, P_ctrl_alpha_isi=3, P_ctrl_inh_cor4=1, P_ctrl_alpha_cor4=3 */
ret |= dib7000m_write_word(state, 29, (0 << 14) | (4 << 10) | (0 << 9) | (3 << 5) | (1 << 4) | (0x3));
// never achieved a lock before - wait for timfreq to update
if (state->timf == 0)
msleep(200);
//dump_reg(state);
/* P_timf_alpha, P_corm_alpha=6, P_corm_thres=0x80 */
value = (6 << 8) | 0x80;
switch (ch->transmission_mode) {
case TRANSMISSION_MODE_2K: value |= (7 << 12); break;
case TRANSMISSION_MODE_4K: value |= (8 << 12); break;
default:
case TRANSMISSION_MODE_8K: value |= (9 << 12); break;
}
ret |= dib7000m_write_word(state, 26, value);
/* P_ctrl_freeze_pha_shift=0, P_ctrl_pha_off_max */
value = (0 << 4);
switch (ch->transmission_mode) {
case TRANSMISSION_MODE_2K: value |= 0x6; break;
case TRANSMISSION_MODE_4K: value |= 0x7; break;
default:
case TRANSMISSION_MODE_8K: value |= 0x8; break;
}
ret |= dib7000m_write_word(state, 32, value);
/* P_ctrl_sfreq_inh=0, P_ctrl_sfreq_step */
value = (0 << 4);
switch (ch->transmission_mode) {
case TRANSMISSION_MODE_2K: value |= 0x6; break;
case TRANSMISSION_MODE_4K: value |= 0x7; break;
default:
case TRANSMISSION_MODE_8K: value |= 0x8; break;
}
ret |= dib7000m_write_word(state, 33, value);
// we achieved a lock - it's time to update the timf freq
if ((dib7000m_read_word(state, 535) >> 6) & 0x1)
dib7000m_update_timf(state);
dib7000m_set_bandwidth(state, BANDWIDTH_TO_KHZ(ch->bandwidth_hz));
return ret;
}
static int dib7000m_wakeup(struct dvb_frontend *demod)
{
struct dib7000m_state *state = demod->demodulator_priv;
dib7000m_set_power_mode(state, DIB7000M_POWER_ALL);
if (dib7000m_set_adc_state(state, DIBX000_SLOW_ADC_ON) != 0)
dprintk( "could not start Slow ADC");
return 0;
}
static int dib7000m_sleep(struct dvb_frontend *demod)
{
struct dib7000m_state *st = demod->demodulator_priv;
dib7000m_set_output_mode(st, OUTMODE_HIGH_Z);
dib7000m_set_power_mode(st, DIB7000M_POWER_INTERFACE_ONLY);
return dib7000m_set_adc_state(st, DIBX000_SLOW_ADC_OFF) |
dib7000m_set_adc_state(st, DIBX000_ADC_OFF);
}
static int dib7000m_identify(struct dib7000m_state *state)
{
u16 value;
if ((value = dib7000m_read_word(state, 896)) != 0x01b3) {
dprintk( "wrong Vendor ID (0x%x)",value);
return -EREMOTEIO;
}
state->revision = dib7000m_read_word(state, 897);
if (state->revision != 0x4000 &&
state->revision != 0x4001 &&
state->revision != 0x4002 &&
state->revision != 0x4003) {
dprintk( "wrong Device ID (0x%x)",value);
return -EREMOTEIO;
}
/* protect this driver to be used with 7000PC */
if (state->revision == 0x4000 && dib7000m_read_word(state, 769) == 0x4000) {
dprintk( "this driver does not work with DiB7000PC");
return -EREMOTEIO;
}
switch (state->revision) {
case 0x4000: dprintk( "found DiB7000MA/PA/MB/PB"); break;
case 0x4001: state->reg_offs = 1; dprintk( "found DiB7000HC"); break;
case 0x4002: state->reg_offs = 1; dprintk( "found DiB7000MC"); break;
case 0x4003: state->reg_offs = 1; dprintk( "found DiB9000"); break;
}
return 0;
}
static int dib7000m_get_frontend(struct dvb_frontend* fe)
{
struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct dib7000m_state *state = fe->demodulator_priv;
u16 tps = dib7000m_read_word(state,480);
fep->inversion = INVERSION_AUTO;
fep->bandwidth_hz = BANDWIDTH_TO_HZ(state->current_bandwidth);
switch ((tps >> 8) & 0x3) {
case 0: fep->transmission_mode = TRANSMISSION_MODE_2K; break;
case 1: fep->transmission_mode = TRANSMISSION_MODE_8K; break;
/* case 2: fep->transmission_mode = TRANSMISSION_MODE_4K; break; */
}
switch (tps & 0x3) {
case 0: fep->guard_interval = GUARD_INTERVAL_1_32; break;
case 1: fep->guard_interval = GUARD_INTERVAL_1_16; break;
case 2: fep->guard_interval = GUARD_INTERVAL_1_8; break;
case 3: fep->guard_interval = GUARD_INTERVAL_1_4; break;
}
switch ((tps >> 14) & 0x3) {
case 0: fep->modulation = QPSK; break;
case 1: fep->modulation = QAM_16; break;
case 2:
default: fep->modulation = QAM_64; break;
}
/* as long as the frontend_param structure is fixed for hierarchical transmission I refuse to use it */
/* (tps >> 13) & 0x1 == hrch is used, (tps >> 10) & 0x7 == alpha */
fep->hierarchy = HIERARCHY_NONE;
switch ((tps >> 5) & 0x7) {
case 1: fep->code_rate_HP = FEC_1_2; break;
case 2: fep->code_rate_HP = FEC_2_3; break;
case 3: fep->code_rate_HP = FEC_3_4; break;
case 5: fep->code_rate_HP = FEC_5_6; break;
case 7:
default: fep->code_rate_HP = FEC_7_8; break;
}
switch ((tps >> 2) & 0x7) {
case 1: fep->code_rate_LP = FEC_1_2; break;
case 2: fep->code_rate_LP = FEC_2_3; break;
case 3: fep->code_rate_LP = FEC_3_4; break;
case 5: fep->code_rate_LP = FEC_5_6; break;
case 7:
default: fep->code_rate_LP = FEC_7_8; break;
}
/* native interleaver: (dib7000m_read_word(state, 481) >> 5) & 0x1 */
return 0;
}
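/*
 * Illustrative, standalone user-space sketch (not part of this driver):
 * extracts the same TPS bit-fields that dib7000m_get_frontend() reads
 * from register 480. The sample word is arbitrary; only the shifts and
 * masks are taken from the code above.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t tps = 0x8145;	/* arbitrary example value */

	printf("transmission mode field: %d\n", (tps >> 8) & 0x3);
	printf("guard interval field:    %d\n", tps & 0x3);
	printf("modulation field:        %d\n", (tps >> 14) & 0x3);
	printf("hierarchy in use:        %d\n", (tps >> 13) & 0x1);
	printf("HP code rate field:      %d\n", (tps >> 5) & 0x7);
	printf("LP code rate field:      %d\n", (tps >> 2) & 0x7);
	return 0;
}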
static int dib7000m_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *fep = &fe->dtv_property_cache;
struct dib7000m_state *state = fe->demodulator_priv;
int time, ret;
dib7000m_set_output_mode(state, OUTMODE_HIGH_Z);
dib7000m_set_bandwidth(state, BANDWIDTH_TO_KHZ(fep->bandwidth_hz));
if (fe->ops.tuner_ops.set_params)
fe->ops.tuner_ops.set_params(fe);
/* start up the AGC */
state->agc_state = 0;
do {
time = dib7000m_agc_startup(fe);
if (time != -1)
msleep(time);
} while (time != -1);
if (fep->transmission_mode == TRANSMISSION_MODE_AUTO ||
fep->guard_interval == GUARD_INTERVAL_AUTO ||
fep->modulation == QAM_AUTO ||
fep->code_rate_HP == FEC_AUTO) {
int i = 800, found;
dib7000m_autosearch_start(fe);
do {
msleep(1);
found = dib7000m_autosearch_is_irq(fe);
} while (found == 0 && i--);
dprintk("autosearch returns: %d",found);
if (found == 0 || found == 1)
return 0; // no channel found
dib7000m_get_frontend(fe);
}
ret = dib7000m_tune(fe);
/* make this a config parameter */
dib7000m_set_output_mode(state, OUTMODE_MPEG2_FIFO);
return ret;
}
static int dib7000m_read_status(struct dvb_frontend *fe, fe_status_t *stat)
{
struct dib7000m_state *state = fe->demodulator_priv;
u16 lock = dib7000m_read_word(state, 535);
*stat = 0;
if (lock & 0x8000)
*stat |= FE_HAS_SIGNAL;
if (lock & 0x3000)
*stat |= FE_HAS_CARRIER;
if (lock & 0x0100)
*stat |= FE_HAS_VITERBI;
if (lock & 0x0010)
*stat |= FE_HAS_SYNC;
if (lock & 0x0008)
*stat |= FE_HAS_LOCK;
return 0;
}
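/*
 * Illustrative, standalone user-space sketch (not part of this driver):
 * maps the lock word tested in dib7000m_read_status() onto its five
 * status flags. The sample value is arbitrary; the bit masks are the
 * ones used above.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t lock = 0x9118;	/* arbitrary example value */

	printf("signal:  %d\n", !!(lock & 0x8000));
	printf("carrier: %d\n", !!(lock & 0x3000));
	printf("viterbi: %d\n", !!(lock & 0x0100));
	printf("sync:    %d\n", !!(lock & 0x0010));
	printf("lock:    %d\n", !!(lock & 0x0008));
	return 0;
}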
static int dib7000m_read_ber(struct dvb_frontend *fe, u32 *ber)
{
struct dib7000m_state *state = fe->demodulator_priv;
*ber = (dib7000m_read_word(state, 526) << 16) | dib7000m_read_word(state, 527);
return 0;
}
static int dib7000m_read_unc_blocks(struct dvb_frontend *fe, u32 *unc)
{
struct dib7000m_state *state = fe->demodulator_priv;
*unc = dib7000m_read_word(state, 534);
return 0;
}
static int dib7000m_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
{
struct dib7000m_state *state = fe->demodulator_priv;
u16 val = dib7000m_read_word(state, 390);
*strength = 65535 - val;
return 0;
}
static int dib7000m_read_snr(struct dvb_frontend* fe, u16 *snr)
{
*snr = 0x0000;
return 0;
}
static int dib7000m_fe_get_tune_settings(struct dvb_frontend* fe, struct dvb_frontend_tune_settings *tune)
{
tune->min_delay_ms = 1000;
return 0;
}
static void dib7000m_release(struct dvb_frontend *demod)
{
struct dib7000m_state *st = demod->demodulator_priv;
dibx000_exit_i2c_master(&st->i2c_master);
kfree(st);
}
struct i2c_adapter * dib7000m_get_i2c_master(struct dvb_frontend *demod, enum dibx000_i2c_interface intf, int gating)
{
struct dib7000m_state *st = demod->demodulator_priv;
return dibx000_get_i2c_adapter(&st->i2c_master, intf, gating);
}
EXPORT_SYMBOL(dib7000m_get_i2c_master);
int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
{
struct dib7000m_state *state = fe->demodulator_priv;
u16 val = dib7000m_read_word(state, 294 + state->reg_offs) & 0xffef;
val |= (onoff & 0x1) << 4;
dprintk("PID filter enabled %d", onoff);
return dib7000m_write_word(state, 294 + state->reg_offs, val);
}
EXPORT_SYMBOL(dib7000m_pid_filter_ctrl);
int dib7000m_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
{
struct dib7000m_state *state = fe->demodulator_priv;
dprintk("PID filter: index %x, PID %d, OnOff %d", id, pid, onoff);
return dib7000m_write_word(state, 300 + state->reg_offs + id,
onoff ? (1 << 13) | pid : 0);
}
EXPORT_SYMBOL(dib7000m_pid_filter);
#if 0
/* used with some prototype boards */
int dib7000m_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods,
u8 default_addr, struct dib7000m_config cfg[])
{
struct dib7000m_state st = { .i2c_adap = i2c };
int k = 0;
u8 new_addr = 0;
for (k = no_of_demods-1; k >= 0; k--) {
st.cfg = cfg[k];
/* designated i2c address */
new_addr = (0x40 + k) << 1;
st.i2c_addr = new_addr;
if (dib7000m_identify(&st) != 0) {
st.i2c_addr = default_addr;
if (dib7000m_identify(&st) != 0) {
dprintk("DiB7000M #%d: not identified", k);
return -EIO;
}
}
/* start diversity to pull_down div_str - just for i2c-enumeration */
dib7000m_set_output_mode(&st, OUTMODE_DIVERSITY);
dib7000m_write_word(&st, 1796, 0x0); // select DVB-T output
/* set new i2c address and force divstart */
dib7000m_write_word(&st, 1794, (new_addr << 2) | 0x2);
dprintk("IC %d initialized (to i2c_address 0x%x)", k, new_addr);
}
for (k = 0; k < no_of_demods; k++) {
st.cfg = cfg[k];
st.i2c_addr = (0x40 + k) << 1;
// unforce divstr
dib7000m_write_word(&st,1794, st.i2c_addr << 2);
/* deactivate div - it was just for i2c-enumeration */
dib7000m_set_output_mode(&st, OUTMODE_HIGH_Z);
}
return 0;
}
EXPORT_SYMBOL(dib7000m_i2c_enumeration);
#endif
static struct dvb_frontend_ops dib7000m_ops;
struct dvb_frontend * dib7000m_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000m_config *cfg)
{
struct dvb_frontend *demod;
struct dib7000m_state *st;
st = kzalloc(sizeof(struct dib7000m_state), GFP_KERNEL);
if (st == NULL)
return NULL;
memcpy(&st->cfg, cfg, sizeof(struct dib7000m_config));
st->i2c_adap = i2c_adap;
st->i2c_addr = i2c_addr;
demod = &st->demod;
demod->demodulator_priv = st;
memcpy(&st->demod.ops, &dib7000m_ops, sizeof(struct dvb_frontend_ops));
mutex_init(&st->i2c_buffer_lock);
st->timf_default = cfg->bw->timf;
if (dib7000m_identify(st) != 0)
goto error;
if (st->revision == 0x4000)
dibx000_init_i2c_master(&st->i2c_master, DIB7000, st->i2c_adap, st->i2c_addr);
else
dibx000_init_i2c_master(&st->i2c_master, DIB7000MC, st->i2c_adap, st->i2c_addr);
dib7000m_demod_reset(st);
return demod;
error:
kfree(st);
return NULL;
}
EXPORT_SYMBOL(dib7000m_attach);
static struct dvb_frontend_ops dib7000m_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "DiBcom 7000MA/MB/PA/PB/MC",
.frequency_min = 44250000,
.frequency_max = 867250000,
.frequency_stepsize = 62500,
.caps = FE_CAN_INVERSION_AUTO |
FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO |
FE_CAN_TRANSMISSION_MODE_AUTO |
FE_CAN_GUARD_INTERVAL_AUTO |
FE_CAN_RECOVER |
FE_CAN_HIERARCHY_AUTO,
},
.release = dib7000m_release,
.init = dib7000m_wakeup,
.sleep = dib7000m_sleep,
.set_frontend = dib7000m_set_frontend,
.get_tune_settings = dib7000m_fe_get_tune_settings,
.get_frontend = dib7000m_get_frontend,
.read_status = dib7000m_read_status,
.read_ber = dib7000m_read_ber,
.read_signal_strength = dib7000m_read_signal_strength,
.read_snr = dib7000m_read_snr,
.read_ucblocks = dib7000m_read_unc_blocks,
};
MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
MODULE_DESCRIPTION("Driver for the DiBcom 7000MA/MB/PA/PB/MC COFDM demodulator");
MODULE_LICENSE("GPL");
|
gpl-2.0
|
GuneetAtwal/kernel_klte
|
fs/udf/misc.c
|
8697
|
7963
|
/*
* misc.c
*
* PURPOSE
* Miscellaneous routines for the OSTA-UDF(tm) filesystem.
*
* COPYRIGHT
* This file is distributed under the terms of the GNU General Public
* License (GPL). Copies of the GPL can be obtained from:
* ftp://prep.ai.mit.edu/pub/gnu/GPL
* Each contributing author retains all rights to their own work.
*
* (C) 1998 Dave Boynton
* (C) 1998-2004 Ben Fennema
* (C) 1999-2000 Stelias Computing Inc
*
* HISTORY
*
* 04/19/99 blf partial support for reading/writing specific EA's
*/
#include "udfdecl.h"
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/crc-itu-t.h>
#include "udf_i.h"
#include "udf_sb.h"
struct buffer_head *udf_tgetblk(struct super_block *sb, int block)
{
if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV))
return sb_getblk(sb, udf_fixed_to_variable(block));
else
return sb_getblk(sb, block);
}
struct buffer_head *udf_tread(struct super_block *sb, int block)
{
if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV))
return sb_bread(sb, udf_fixed_to_variable(block));
else
return sb_bread(sb, block);
}
struct genericFormat *udf_add_extendedattr(struct inode *inode, uint32_t size,
uint32_t type, uint8_t loc)
{
uint8_t *ea = NULL, *ad = NULL;
int offset;
uint16_t crclen;
struct udf_inode_info *iinfo = UDF_I(inode);
ea = iinfo->i_ext.i_data;
if (iinfo->i_lenEAttr) {
ad = iinfo->i_ext.i_data + iinfo->i_lenEAttr;
} else {
ad = ea;
size += sizeof(struct extendedAttrHeaderDesc);
}
offset = inode->i_sb->s_blocksize - udf_file_entry_alloc_offset(inode) -
iinfo->i_lenAlloc;
/* TODO - Check for FreeEASpace */
if (loc & 0x01 && offset >= size) {
struct extendedAttrHeaderDesc *eahd;
eahd = (struct extendedAttrHeaderDesc *)ea;
if (iinfo->i_lenAlloc)
memmove(&ad[size], ad, iinfo->i_lenAlloc);
if (iinfo->i_lenEAttr) {
/* check checksum/crc */
if (eahd->descTag.tagIdent !=
cpu_to_le16(TAG_IDENT_EAHD) ||
le32_to_cpu(eahd->descTag.tagLocation) !=
iinfo->i_location.logicalBlockNum)
return NULL;
} else {
struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
size -= sizeof(struct extendedAttrHeaderDesc);
iinfo->i_lenEAttr +=
sizeof(struct extendedAttrHeaderDesc);
eahd->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EAHD);
if (sbi->s_udfrev >= 0x0200)
eahd->descTag.descVersion = cpu_to_le16(3);
else
eahd->descTag.descVersion = cpu_to_le16(2);
eahd->descTag.tagSerialNum =
cpu_to_le16(sbi->s_serial_number);
eahd->descTag.tagLocation = cpu_to_le32(
iinfo->i_location.logicalBlockNum);
eahd->impAttrLocation = cpu_to_le32(0xFFFFFFFF);
eahd->appAttrLocation = cpu_to_le32(0xFFFFFFFF);
}
offset = iinfo->i_lenEAttr;
if (type < 2048) {
if (le32_to_cpu(eahd->appAttrLocation) <
iinfo->i_lenEAttr) {
uint32_t aal =
le32_to_cpu(eahd->appAttrLocation);
memmove(&ea[offset - aal + size],
&ea[aal], offset - aal);
offset -= aal;
eahd->appAttrLocation =
cpu_to_le32(aal + size);
}
if (le32_to_cpu(eahd->impAttrLocation) <
iinfo->i_lenEAttr) {
uint32_t ial =
le32_to_cpu(eahd->impAttrLocation);
memmove(&ea[offset - ial + size],
&ea[ial], offset - ial);
offset -= ial;
eahd->impAttrLocation =
cpu_to_le32(ial + size);
}
} else if (type < 65536) {
if (le32_to_cpu(eahd->appAttrLocation) <
iinfo->i_lenEAttr) {
uint32_t aal =
le32_to_cpu(eahd->appAttrLocation);
memmove(&ea[offset - aal + size],
&ea[aal], offset - aal);
offset -= aal;
eahd->appAttrLocation =
cpu_to_le32(aal + size);
}
}
/* rewrite CRC + checksum of eahd */
crclen = sizeof(struct extendedAttrHeaderDesc) - sizeof(struct tag);
eahd->descTag.descCRCLength = cpu_to_le16(crclen);
eahd->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)eahd +
sizeof(struct tag), crclen));
eahd->descTag.tagChecksum = udf_tag_checksum(&eahd->descTag);
iinfo->i_lenEAttr += size;
return (struct genericFormat *)&ea[offset];
}
if (loc & 0x02)
;
return NULL;
}
struct genericFormat *udf_get_extendedattr(struct inode *inode, uint32_t type,
uint8_t subtype)
{
struct genericFormat *gaf;
uint8_t *ea = NULL;
uint32_t offset;
struct udf_inode_info *iinfo = UDF_I(inode);
ea = iinfo->i_ext.i_data;
if (iinfo->i_lenEAttr) {
struct extendedAttrHeaderDesc *eahd;
eahd = (struct extendedAttrHeaderDesc *)ea;
/* check checksum/crc */
if (eahd->descTag.tagIdent !=
cpu_to_le16(TAG_IDENT_EAHD) ||
le32_to_cpu(eahd->descTag.tagLocation) !=
iinfo->i_location.logicalBlockNum)
return NULL;
if (type < 2048)
offset = sizeof(struct extendedAttrHeaderDesc);
else if (type < 65536)
offset = le32_to_cpu(eahd->impAttrLocation);
else
offset = le32_to_cpu(eahd->appAttrLocation);
while (offset < iinfo->i_lenEAttr) {
gaf = (struct genericFormat *)&ea[offset];
if (le32_to_cpu(gaf->attrType) == type &&
gaf->attrSubtype == subtype)
return gaf;
else
offset += le32_to_cpu(gaf->attrLength);
}
}
return NULL;
}
/*
* udf_read_tagged
*
* PURPOSE
* Read the first block of a tagged descriptor.
*
* HISTORY
* July 1, 1997 - Andrew E. Mileski
* Written, tested, and released.
*/
struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block,
uint32_t location, uint16_t *ident)
{
struct tag *tag_p;
struct buffer_head *bh = NULL;
u8 checksum;
/* Read the block */
if (block == 0xFFFFFFFF)
return NULL;
bh = udf_tread(sb, block);
if (!bh) {
udf_err(sb, "read failed, block=%u, location=%d\n",
block, location);
return NULL;
}
tag_p = (struct tag *)(bh->b_data);
*ident = le16_to_cpu(tag_p->tagIdent);
if (location != le32_to_cpu(tag_p->tagLocation)) {
udf_debug("location mismatch block %u, tag %u != %u\n",
block, le32_to_cpu(tag_p->tagLocation), location);
goto error_out;
}
/* Verify the tag checksum */
checksum = udf_tag_checksum(tag_p);
if (checksum != tag_p->tagChecksum) {
udf_err(sb, "tag checksum failed, block %u: 0x%02x != 0x%02x\n",
block, checksum, tag_p->tagChecksum);
goto error_out;
}
/* Verify the tag version */
if (tag_p->descVersion != cpu_to_le16(0x0002U) &&
tag_p->descVersion != cpu_to_le16(0x0003U)) {
udf_err(sb, "tag version 0x%04x != 0x0002 || 0x0003, block %u\n",
le16_to_cpu(tag_p->descVersion), block);
goto error_out;
}
/* Verify the descriptor CRC */
if (le16_to_cpu(tag_p->descCRCLength) + sizeof(struct tag) > sb->s_blocksize ||
le16_to_cpu(tag_p->descCRC) == crc_itu_t(0,
bh->b_data + sizeof(struct tag),
le16_to_cpu(tag_p->descCRCLength)))
return bh;
udf_debug("Crc failure block %d: crc = %d, crclen = %d\n", block,
le16_to_cpu(tag_p->descCRC),
le16_to_cpu(tag_p->descCRCLength));
error_out:
brelse(bh);
return NULL;
}
struct buffer_head *udf_read_ptagged(struct super_block *sb,
struct kernel_lb_addr *loc,
uint32_t offset, uint16_t *ident)
{
return udf_read_tagged(sb, udf_get_lb_pblock(sb, loc, offset),
loc->logicalBlockNum + offset, ident);
}
void udf_update_tag(char *data, int length)
{
struct tag *tptr = (struct tag *)data;
length -= sizeof(struct tag);
tptr->descCRCLength = cpu_to_le16(length);
tptr->descCRC = cpu_to_le16(crc_itu_t(0, data + sizeof(struct tag), length));
tptr->tagChecksum = udf_tag_checksum(tptr);
}
void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
uint32_t loc, int length)
{
struct tag *tptr = (struct tag *)data;
tptr->tagIdent = cpu_to_le16(ident);
tptr->descVersion = cpu_to_le16(version);
tptr->tagSerialNum = cpu_to_le16(snum);
tptr->tagLocation = cpu_to_le32(loc);
udf_update_tag(data, length);
}
u8 udf_tag_checksum(const struct tag *t)
{
u8 *data = (u8 *)t;
u8 checksum = 0;
int i;
for (i = 0; i < sizeof(struct tag); ++i)
if (i != 4) /* position of checksum */
checksum += data[i];
return checksum;
}
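/*
 * Illustrative, standalone user-space sketch (not part of the filesystem):
 * re-implements the tag checksum above together with a bitwise CRC-ITU-T
 * (polynomial 0x1021, the same function the kernel provides via
 * <linux/crc-itu-t.h>) to show how a 16-byte descriptor tag is sealed.
 * The sample block contents are made up.
 */
#include <stdio.h>
#include <stdint.h>

#define TAG_SIZE 16	/* on-disk size of struct tag */

static uint16_t crc16_itu_t(uint16_t crc, const uint8_t *buf, unsigned int len)
{
	while (len--) {
		crc ^= (uint16_t)*buf++ << 8;
		for (int i = 0; i < 8; i++)
			crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x1021)
					     : (uint16_t)(crc << 1);
	}
	return crc;
}

static uint8_t tag_checksum(const uint8_t *tag)
{
	uint8_t sum = 0;

	for (int i = 0; i < TAG_SIZE; i++)
		if (i != 4)	/* byte 4 holds the checksum itself */
			sum += tag[i];
	return sum;
}

int main(void)
{
	uint8_t block[32] = { 0x08, 0x00, 0x03, 0x00 };	/* fake tagged block */

	block[4] = tag_checksum(block);
	printf("tagChecksum=0x%02x descCRC=0x%04x\n", block[4],
	       crc16_itu_t(0, block + TAG_SIZE, sizeof(block) - TAG_SIZE));
	return 0;
}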
|
gpl-2.0
|
Krabappel2548/kernel_msm8x60
|
drivers/ide/pdc202xx_new.c
|
9209
|
14896
|
/*
* Promise TX2/TX4/TX2000/133 IDE driver
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Split from:
* linux/drivers/ide/pdc202xx.c Version 0.35 Mar. 30, 2002
* Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>
* Copyright (C) 2005-2007 MontaVista Software, Inc.
* Portions Copyright (C) 1999 Promise Technology, Inc.
* Author: Frank Tiernan (frankt@promise.com)
* Released under terms of General Public License
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ide.h>
#include <asm/io.h>
#ifdef CONFIG_PPC_PMAC
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#endif
#define DRV_NAME "pdc202xx_new"
#undef DEBUG
#ifdef DEBUG
#define DBG(fmt, args...) printk("%s: " fmt, __func__, ## args)
#else
#define DBG(fmt, args...)
#endif
static u8 max_dma_rate(struct pci_dev *pdev)
{
u8 mode;
switch(pdev->device) {
case PCI_DEVICE_ID_PROMISE_20277:
case PCI_DEVICE_ID_PROMISE_20276:
case PCI_DEVICE_ID_PROMISE_20275:
case PCI_DEVICE_ID_PROMISE_20271:
case PCI_DEVICE_ID_PROMISE_20269:
mode = 4;
break;
case PCI_DEVICE_ID_PROMISE_20270:
case PCI_DEVICE_ID_PROMISE_20268:
mode = 3;
break;
default:
return 0;
}
return mode;
}
/**
* get_indexed_reg - Get indexed register
* @hwif: for the port address
* @index: index of the indexed register
*/
static u8 get_indexed_reg(ide_hwif_t *hwif, u8 index)
{
u8 value;
outb(index, hwif->dma_base + 1);
value = inb(hwif->dma_base + 3);
DBG("index[%02X] value[%02X]\n", index, value);
return value;
}
/**
* set_indexed_reg - Set indexed register
* @hwif: for the port address
* @index: index of the indexed register
*/
static void set_indexed_reg(ide_hwif_t *hwif, u8 index, u8 value)
{
outb(index, hwif->dma_base + 1);
outb(value, hwif->dma_base + 3);
DBG("index[%02X] value[%02X]\n", index, value);
}
/*
* ATA Timing Tables based on 133 MHz PLL output clock.
*
* If the PLL outputs 100 MHz clock, the ASIC hardware will set
* the timing registers automatically when "set features" command is
* issued to the device. However, if the PLL output clock is 133 MHz,
* the following tables must be used.
*/
static struct pio_timing {
u8 reg0c, reg0d, reg13;
} pio_timings [] = {
{ 0xfb, 0x2b, 0xac }, /* PIO mode 0, IORDY off, Prefetch off */
{ 0x46, 0x29, 0xa4 }, /* PIO mode 1, IORDY off, Prefetch off */
{ 0x23, 0x26, 0x64 }, /* PIO mode 2, IORDY off, Prefetch off */
{ 0x27, 0x0d, 0x35 }, /* PIO mode 3, IORDY on, Prefetch off */
{ 0x23, 0x09, 0x25 }, /* PIO mode 4, IORDY on, Prefetch off */
};
static struct mwdma_timing {
u8 reg0e, reg0f;
} mwdma_timings [] = {
{ 0xdf, 0x5f }, /* MWDMA mode 0 */
{ 0x6b, 0x27 }, /* MWDMA mode 1 */
{ 0x69, 0x25 }, /* MWDMA mode 2 */
};
static struct udma_timing {
u8 reg10, reg11, reg12;
} udma_timings [] = {
{ 0x4a, 0x0f, 0xd5 }, /* UDMA mode 0 */
{ 0x3a, 0x0a, 0xd0 }, /* UDMA mode 1 */
{ 0x2a, 0x07, 0xcd }, /* UDMA mode 2 */
{ 0x1a, 0x05, 0xcd }, /* UDMA mode 3 */
{ 0x1a, 0x03, 0xcd }, /* UDMA mode 4 */
{ 0x1a, 0x02, 0xcb }, /* UDMA mode 5 */
{ 0x1a, 0x01, 0xcb }, /* UDMA mode 6 */
};
static void pdcnew_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
u8 adj = (drive->dn & 1) ? 0x08 : 0x00;
const u8 speed = drive->dma_mode;
/*
* IDE core issues SETFEATURES_XFER to the drive first (thanks to
* IDE_HFLAG_POST_SET_MODE in ->host_flags). PDC202xx hardware will
* automatically set the timing registers based on 100 MHz PLL output.
*
* As we set up the PLL to output 133 MHz for UltraDMA/133 capable
* chips, we must override the default register settings...
*/
if (max_dma_rate(dev) == 4) {
u8 mode = speed & 0x07;
if (speed >= XFER_UDMA_0) {
set_indexed_reg(hwif, 0x10 + adj,
udma_timings[mode].reg10);
set_indexed_reg(hwif, 0x11 + adj,
udma_timings[mode].reg11);
set_indexed_reg(hwif, 0x12 + adj,
udma_timings[mode].reg12);
} else {
set_indexed_reg(hwif, 0x0e + adj,
mwdma_timings[mode].reg0e);
set_indexed_reg(hwif, 0x0f + adj,
mwdma_timings[mode].reg0f);
}
} else if (speed == XFER_UDMA_2) {
/* Set tHOLD bit to 0 if using UDMA mode 2 */
u8 tmp = get_indexed_reg(hwif, 0x10 + adj);
set_indexed_reg(hwif, 0x10 + adj, tmp & 0x7f);
}
}
static void pdcnew_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
u8 adj = (drive->dn & 1) ? 0x08 : 0x00;
const u8 pio = drive->pio_mode - XFER_PIO_0;
if (max_dma_rate(dev) == 4) {
set_indexed_reg(hwif, 0x0c + adj, pio_timings[pio].reg0c);
set_indexed_reg(hwif, 0x0d + adj, pio_timings[pio].reg0d);
set_indexed_reg(hwif, 0x13 + adj, pio_timings[pio].reg13);
}
}
static u8 pdcnew_cable_detect(ide_hwif_t *hwif)
{
if (get_indexed_reg(hwif, 0x0b) & 0x04)
return ATA_CBL_PATA40;
else
return ATA_CBL_PATA80;
}
static void pdcnew_reset(ide_drive_t *drive)
{
/*
* The caller performs the actual reset; all that is left to do here is log it.
*/
printk(KERN_WARNING "pdc202xx_new: %s channel reset.\n",
drive->hwif->channel ? "Secondary" : "Primary");
}
/**
* read_counter - Read the byte count registers
* @dma_base: for the port address
*/
static long read_counter(u32 dma_base)
{
u32 pri_dma_base = dma_base, sec_dma_base = dma_base + 0x08;
u8 cnt0, cnt1, cnt2, cnt3;
long count = 0, last;
int retry = 3;
do {
last = count;
/* Read the current count */
outb(0x20, pri_dma_base + 0x01);
cnt0 = inb(pri_dma_base + 0x03);
outb(0x21, pri_dma_base + 0x01);
cnt1 = inb(pri_dma_base + 0x03);
outb(0x20, sec_dma_base + 0x01);
cnt2 = inb(sec_dma_base + 0x03);
outb(0x21, sec_dma_base + 0x01);
cnt3 = inb(sec_dma_base + 0x03);
count = (cnt3 << 23) | (cnt2 << 15) | (cnt1 << 8) | cnt0;
/*
* The 30-bit decrementing counter is read in 4 pieces.
* Incorrect value may be read when the most significant bytes
* are changing...
*/
} while (retry-- && (((last ^ count) & 0x3fff8000) || last < count));
DBG("cnt0[%02X] cnt1[%02X] cnt2[%02X] cnt3[%02X]\n",
cnt0, cnt1, cnt2, cnt3);
return count;
}
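/*
 * Illustrative, standalone user-space sketch (not part of this driver):
 * merges four counter bytes with the same shifts used in read_counter()
 * above. Note the unusual 23/15 shift positions used to assemble the
 * 30-bit value; the sample bytes are arbitrary.
 */
#include <stdio.h>

int main(void)
{
	unsigned char cnt0 = 0x12, cnt1 = 0x34, cnt2 = 0x56, cnt3 = 0x08;
	long count = ((long)cnt3 << 23) | ((long)cnt2 << 15) |
		     ((long)cnt1 << 8) | cnt0;

	printf("count = 0x%08lx (30-bit masked: 0x%08lx)\n",
	       count, count & 0x3fffffff);
	return 0;
}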
/**
* detect_pll_input_clock - Detect the PLL input clock in Hz.
* @dma_base: for the port address
*
* E.g. 16949000 on a 33 MHz PCI bus, i.e. half of the PCI clock.
*/
static long detect_pll_input_clock(unsigned long dma_base)
{
struct timeval start_time, end_time;
long start_count, end_count;
long pll_input, usec_elapsed;
u8 scr1;
start_count = read_counter(dma_base);
do_gettimeofday(&start_time);
/* Start the test mode */
outb(0x01, dma_base + 0x01);
scr1 = inb(dma_base + 0x03);
DBG("scr1[%02X]\n", scr1);
outb(scr1 | 0x40, dma_base + 0x03);
/* Let the counter run for 10 ms. */
mdelay(10);
end_count = read_counter(dma_base);
do_gettimeofday(&end_time);
/* Stop the test mode */
outb(0x01, dma_base + 0x01);
scr1 = inb(dma_base + 0x03);
DBG("scr1[%02X]\n", scr1);
outb(scr1 & ~0x40, dma_base + 0x03);
/*
* Calculate the input clock in Hz
* (the clock counter is 30 bit wide and counts down)
*/
usec_elapsed = (end_time.tv_sec - start_time.tv_sec) * 1000000 +
(end_time.tv_usec - start_time.tv_usec);
pll_input = ((start_count - end_count) & 0x3fffffff) / 10 *
(10000000 / usec_elapsed);
DBG("start[%ld] end[%ld]\n", start_count, end_count);
return pll_input;
}
#ifdef CONFIG_PPC_PMAC
static void apple_kiwi_init(struct pci_dev *pdev)
{
struct device_node *np = pci_device_to_OF_node(pdev);
u8 conf;
if (np == NULL || !of_device_is_compatible(np, "kiwi-root"))
return;
if (pdev->revision >= 0x03) {
/* Setup chip magic config stuff (from darwin) */
pci_read_config_byte (pdev, 0x40, &conf);
pci_write_config_byte(pdev, 0x40, (conf | 0x01));
}
}
#endif /* CONFIG_PPC_PMAC */
static int init_chipset_pdcnew(struct pci_dev *dev)
{
const char *name = DRV_NAME;
unsigned long dma_base = pci_resource_start(dev, 4);
unsigned long sec_dma_base = dma_base + 0x08;
long pll_input, pll_output, ratio;
int f, r;
u8 pll_ctl0, pll_ctl1;
if (dma_base == 0)
return -EFAULT;
#ifdef CONFIG_PPC_PMAC
apple_kiwi_init(dev);
#endif
/* Calculate the required PLL output frequency */
switch(max_dma_rate(dev)) {
case 4: /* it's 133 MHz for Ultra133 chips */
pll_output = 133333333;
break;
case 3: /* and 100 MHz for Ultra100 chips */
default:
pll_output = 100000000;
break;
}
/*
* Detect PLL input clock.
* On some systems, where the PCI bus runs at a non-standard clock rate
* (e.g. 25 or 40 MHz), we have to adjust the cycle time.
* PDC20268 and newer chips employ a PLL circuit to help correct the
* timing register settings.
*/
pll_input = detect_pll_input_clock(dma_base);
printk(KERN_INFO "%s %s: PLL input clock is %ld kHz\n",
name, pci_name(dev), pll_input / 1000);
/* Sanity check */
if (unlikely(pll_input < 5000000L || pll_input > 70000000L)) {
printk(KERN_ERR "%s %s: Bad PLL input clock %ld Hz, giving up!"
"\n", name, pci_name(dev), pll_input);
goto out;
}
#ifdef DEBUG
DBG("pll_output is %ld Hz\n", pll_output);
/* Show the current clock value of PLL control register
* (maybe already configured by the BIOS)
*/
outb(0x02, sec_dma_base + 0x01);
pll_ctl0 = inb(sec_dma_base + 0x03);
outb(0x03, sec_dma_base + 0x01);
pll_ctl1 = inb(sec_dma_base + 0x03);
DBG("pll_ctl[%02X][%02X]\n", pll_ctl0, pll_ctl1);
#endif
/*
* Calculate the ratio of F, R and NO
* POUT = (F + 2) / (( R + 2) * NO)
*/
ratio = pll_output / (pll_input / 1000);
if (ratio < 8600L) { /* 8.6x */
/* Using NO = 0x01, R = 0x0d */
r = 0x0d;
} else if (ratio < 12900L) { /* 12.9x */
/* Using NO = 0x01, R = 0x08 */
r = 0x08;
} else if (ratio < 16100L) { /* 16.1x */
/* Using NO = 0x01, R = 0x06 */
r = 0x06;
} else if (ratio < 64000L) { /* 64x */
r = 0x00;
} else {
/* Invalid ratio */
printk(KERN_ERR "%s %s: Bad ratio %ld, giving up!\n",
name, pci_name(dev), ratio);
goto out;
}
f = (ratio * (r + 2)) / 1000 - 2;
DBG("F[%d] R[%d] ratio*1000[%ld]\n", f, r, ratio);
if (unlikely(f < 0 || f > 127)) {
/* Invalid F */
printk(KERN_ERR "%s %s: F[%d] invalid!\n",
name, pci_name(dev), f);
goto out;
}
pll_ctl0 = (u8) f;
pll_ctl1 = (u8) r;
DBG("Writing pll_ctl[%02X][%02X]\n", pll_ctl0, pll_ctl1);
outb(0x02, sec_dma_base + 0x01);
outb(pll_ctl0, sec_dma_base + 0x03);
outb(0x03, sec_dma_base + 0x01);
outb(pll_ctl1, sec_dma_base + 0x03);
/* Wait for the PLL circuit to stabilize */
mdelay(30);
#ifdef DEBUG
/*
* Show the current clock value of PLL control register
*/
outb(0x02, sec_dma_base + 0x01);
pll_ctl0 = inb(sec_dma_base + 0x03);
outb(0x03, sec_dma_base + 0x01);
pll_ctl1 = inb(sec_dma_base + 0x03);
DBG("pll_ctl[%02X][%02X]\n", pll_ctl0, pll_ctl1);
#endif
out:
return 0;
}
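/*
 * Illustrative, standalone user-space sketch (not part of this driver):
 * walks the F/R selection from init_chipset_pdcnew() above for a PLL
 * input of 16949 kHz (33 MHz PCI bus) and the 133.33 MHz Ultra133
 * target, following POUT = (F + 2) / ((R + 2) * NO) with NO = 1.
 */
#include <stdio.h>

int main(void)
{
	long pll_input = 16949000;	/* Hz, example from the comment above */
	long pll_output = 133333333;	/* Hz, Ultra133 target */
	long ratio = pll_output / (pll_input / 1000);	/* x1000 fixed point */
	int r, f;

	if (ratio < 8600L)		/* < 8.6x */
		r = 0x0d;
	else if (ratio < 12900L)	/* < 12.9x */
		r = 0x08;
	else if (ratio < 16100L)	/* < 16.1x */
		r = 0x06;
	else				/* < 64x */
		r = 0x00;

	f = (ratio * (r + 2)) / 1000 - 2;

	/* for this input: ratio = 7866, R = 0x0d, F = 115 (valid: 0..127) */
	printf("ratio=%ld (x1000), R=0x%02x, F=%d\n", ratio, r, f);
	return 0;
}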
static struct pci_dev * __devinit pdc20270_get_dev2(struct pci_dev *dev)
{
struct pci_dev *dev2;
dev2 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn) + 1,
PCI_FUNC(dev->devfn)));
if (dev2 &&
dev2->vendor == dev->vendor &&
dev2->device == dev->device) {
if (dev2->irq != dev->irq) {
dev2->irq = dev->irq;
printk(KERN_INFO DRV_NAME " %s: PCI config space "
"interrupt fixed\n", pci_name(dev));
}
return dev2;
}
return NULL;
}
static const struct ide_port_ops pdcnew_port_ops = {
.set_pio_mode = pdcnew_set_pio_mode,
.set_dma_mode = pdcnew_set_dma_mode,
.resetproc = pdcnew_reset,
.cable_detect = pdcnew_cable_detect,
};
#define DECLARE_PDCNEW_DEV(udma) \
{ \
.name = DRV_NAME, \
.init_chipset = init_chipset_pdcnew, \
.port_ops = &pdcnew_port_ops, \
.host_flags = IDE_HFLAG_POST_SET_MODE | \
IDE_HFLAG_ERROR_STOPS_FIFO | \
IDE_HFLAG_OFF_BOARD, \
.pio_mask = ATA_PIO4, \
.mwdma_mask = ATA_MWDMA2, \
.udma_mask = udma, \
}
static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
/* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
/* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
};
/**
* pdc202new_init_one - called when a pdc202xx is found
* @dev: the pdc202new device
* @id: the matching pci id
*
* Called when the PCI registration layer (or the IDE initialization)
* finds a device matching our IDE device tables.
*/
static int __devinit pdc202new_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
const struct ide_port_info *d = &pdcnew_chipsets[id->driver_data];
struct pci_dev *bridge = dev->bus->self;
if (dev->device == PCI_DEVICE_ID_PROMISE_20270 && bridge &&
bridge->vendor == PCI_VENDOR_ID_DEC &&
bridge->device == PCI_DEVICE_ID_DEC_21150) {
struct pci_dev *dev2;
if (PCI_SLOT(dev->devfn) & 2)
return -ENODEV;
dev2 = pdc20270_get_dev2(dev);
if (dev2) {
int ret = ide_pci_init_two(dev, dev2, d, NULL);
if (ret < 0)
pci_dev_put(dev2);
return ret;
}
}
if (dev->device == PCI_DEVICE_ID_PROMISE_20276 && bridge &&
bridge->vendor == PCI_VENDOR_ID_INTEL &&
(bridge->device == PCI_DEVICE_ID_INTEL_I960 ||
bridge->device == PCI_DEVICE_ID_INTEL_I960RM)) {
printk(KERN_INFO DRV_NAME " %s: attached to I2O RAID controller,"
" skipping\n", pci_name(dev));
return -ENODEV;
}
return ide_pci_init_one(dev, d, NULL);
}
static void __devexit pdc202new_remove(struct pci_dev *dev)
{
struct ide_host *host = pci_get_drvdata(dev);
struct pci_dev *dev2 = host->dev[1] ? to_pci_dev(host->dev[1]) : NULL;
ide_pci_remove(dev);
pci_dev_put(dev2);
}
static const struct pci_device_id pdc202new_pci_tbl[] = {
{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20268), 0 },
{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20269), 1 },
{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20270), 0 },
{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20271), 1 },
{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20275), 1 },
{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20276), 1 },
{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20277), 1 },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, pdc202new_pci_tbl);
static struct pci_driver pdc202new_pci_driver = {
.name = "Promise_IDE",
.id_table = pdc202new_pci_tbl,
.probe = pdc202new_init_one,
.remove = __devexit_p(pdc202new_remove),
.suspend = ide_pci_suspend,
.resume = ide_pci_resume,
};
static int __init pdc202new_ide_init(void)
{
return ide_pci_register_driver(&pdc202new_pci_driver);
}
static void __exit pdc202new_ide_exit(void)
{
pci_unregister_driver(&pdc202new_pci_driver);
}
module_init(pdc202new_ide_init);
module_exit(pdc202new_ide_exit);
MODULE_AUTHOR("Andre Hedrick, Frank Tiernan");
MODULE_DESCRIPTION("PCI driver module for Promise PDC20268 and higher");
MODULE_LICENSE("GPL");
|
gpl-2.0
|
PatrikKT/android_kernel_huawei_y550
|
arch/m68k/platform/coldfire/intc-2.c
|
10233
|
5369
|
/*
* intc-2.c
*
* General interrupt controller code for the many ColdFire cores that use
* interrupt controllers with 63 interrupt sources, organized as 56 fully-
* programmable + 7 fixed-level interrupt sources. This includes the 523x
* family, the 5270, 5271, 5274, 5275, and the 528x family which have two such
* controllers, and the 547x and 548x families which have only one of them.
*
* The 7 external fixed interrupts are part of the Edge Port unit of these
* ColdFire parts. They can be configured as level or edge triggered.
*
* (C) Copyright 2009-2011, Greg Ungerer <gerg@snapgear.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/traps.h>
/*
* Bit definitions for the ICR family of registers.
*/
#define MCFSIM_ICR_LEVEL(l) ((l)<<3) /* Level l intr */
#define MCFSIM_ICR_PRI(p) (p) /* Priority p intr */
/*
* The EDGE Port interrupts are the fixed 7 external interrupts.
* They need some special treatment, for example they need to be acked.
*/
#define EINT0 64 /* not actually used, but a spot is reserved for it */
#define EINT1 65 /* EDGE Port interrupt 1 */
#define EINT7 71 /* EDGE Port interrupt 7 */
#ifdef MCFICM_INTC1
#define NR_VECS 128
#else
#define NR_VECS 64
#endif
static void intc_irq_mask(struct irq_data *d)
{
unsigned int irq = d->irq - MCFINT_VECBASE;
unsigned long imraddr;
u32 val, imrbit;
#ifdef MCFICM_INTC1
imraddr = (irq & 0x40) ? MCFICM_INTC1 : MCFICM_INTC0;
#else
imraddr = MCFICM_INTC0;
#endif
imraddr += (irq & 0x20) ? MCFINTC_IMRH : MCFINTC_IMRL;
imrbit = 0x1 << (irq & 0x1f);
val = __raw_readl(imraddr);
__raw_writel(val | imrbit, imraddr);
}
static void intc_irq_unmask(struct irq_data *d)
{
unsigned int irq = d->irq - MCFINT_VECBASE;
unsigned long imraddr;
u32 val, imrbit;
#ifdef MCFICM_INTC1
imraddr = (irq & 0x40) ? MCFICM_INTC1 : MCFICM_INTC0;
#else
imraddr = MCFICM_INTC0;
#endif
imraddr += ((irq & 0x20) ? MCFINTC_IMRH : MCFINTC_IMRL);
imrbit = 0x1 << (irq & 0x1f);
/* Don't set the "maskall" bit! */
if ((irq & 0x20) == 0)
imrbit |= 0x1;
val = __raw_readl(imraddr);
__raw_writel(val & ~imrbit, imraddr);
}
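/*
 * Illustrative, standalone user-space sketch (not part of the kernel):
 * decomposes a vector number into controller / IMR half / bit exactly as
 * intc_irq_mask() and intc_irq_unmask() do above. The vector base of 64
 * and the sample IRQ are assumptions for demonstration only.
 */
#include <stdio.h>

int main(void)
{
	unsigned int vecbase = 64;	/* stand-in for MCFINT_VECBASE */
	unsigned int vector = 97;	/* hypothetical IRQ number */
	unsigned int irq = vector - vecbase;

	printf("controller: INTC%d\n", (irq & 0x40) ? 1 : 0);
	printf("register:   IMR%c\n", (irq & 0x20) ? 'H' : 'L');
	printf("bit:        %u (mask 0x%08x)\n", irq & 0x1f, 0x1 << (irq & 0x1f));
	return 0;
}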
/*
* Only the external (or EDGE Port) interrupts need to be acknowledged
* here, as part of the IRQ handler. They only really need to be ack'ed
* if they are in edge triggered mode, but there is no harm in doing it
* for all types.
*/
static void intc_irq_ack(struct irq_data *d)
{
unsigned int irq = d->irq;
__raw_writeb(0x1 << (irq - EINT0), MCFEPORT_EPFR);
}
/*
* Each vector needs a unique priority and level associated with it.
* We don't really care so much what they are, we don't rely on the
* traditional priority interrupt scheme of the m68k/ColdFire. This
* only needs to be set once for an interrupt, and we will never change
* these values once we have set them.
*/
static u8 intc_intpri = MCFSIM_ICR_LEVEL(6) | MCFSIM_ICR_PRI(6);
static unsigned int intc_irq_startup(struct irq_data *d)
{
unsigned int irq = d->irq - MCFINT_VECBASE;
unsigned long icraddr;
#ifdef MCFICM_INTC1
icraddr = (irq & 0x40) ? MCFICM_INTC1 : MCFICM_INTC0;
#else
icraddr = MCFICM_INTC0;
#endif
icraddr += MCFINTC_ICR0 + (irq & 0x3f);
if (__raw_readb(icraddr) == 0)
__raw_writeb(intc_intpri--, icraddr);
irq = d->irq;
if ((irq >= EINT1) && (irq <= EINT7)) {
u8 v;
irq -= EINT0;
/* Set EPORT line as input */
v = __raw_readb(MCFEPORT_EPDDR);
__raw_writeb(v & ~(0x1 << irq), MCFEPORT_EPDDR);
/* Set EPORT line as interrupt source */
v = __raw_readb(MCFEPORT_EPIER);
__raw_writeb(v | (0x1 << irq), MCFEPORT_EPIER);
}
intc_irq_unmask(d);
return 0;
}
static int intc_irq_set_type(struct irq_data *d, unsigned int type)
{
unsigned int irq = d->irq;
u16 pa, tb;
switch (type) {
case IRQ_TYPE_EDGE_RISING:
tb = 0x1;
break;
case IRQ_TYPE_EDGE_FALLING:
tb = 0x2;
break;
case IRQ_TYPE_EDGE_BOTH:
tb = 0x3;
break;
default:
/* Level triggered */
tb = 0;
break;
}
if (tb)
irq_set_handler(irq, handle_edge_irq);
irq -= EINT0;
pa = __raw_readw(MCFEPORT_EPPAR);
pa = (pa & ~(0x3 << (irq * 2))) | (tb << (irq * 2));
__raw_writew(pa, MCFEPORT_EPPAR);
return 0;
}
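/*
 * Illustrative, standalone user-space sketch (not part of the kernel):
 * repeats the read-modify-write that intc_irq_set_type() performs on
 * MCFEPORT_EPPAR above, where each EDGE Port line owns a 2-bit field.
 * The register contents and line number are arbitrary samples.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t pa = 0xaaaa;	/* pretend current EPPAR contents */
	unsigned int line = 3;	/* EDGE Port line (irq - EINT0) */
	uint16_t tb = 0x1;	/* rising edge, per the switch above */

	pa = (pa & ~(0x3 << (line * 2))) | (tb << (line * 2));
	printf("new EPPAR = 0x%04x\n", pa);
	return 0;
}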
static struct irq_chip intc_irq_chip = {
.name = "CF-INTC",
.irq_startup = intc_irq_startup,
.irq_mask = intc_irq_mask,
.irq_unmask = intc_irq_unmask,
};
static struct irq_chip intc_irq_chip_edge_port = {
.name = "CF-INTC-EP",
.irq_startup = intc_irq_startup,
.irq_mask = intc_irq_mask,
.irq_unmask = intc_irq_unmask,
.irq_ack = intc_irq_ack,
.irq_set_type = intc_irq_set_type,
};
void __init init_IRQ(void)
{
int irq;
/* Mask all interrupt sources */
__raw_writel(0x1, MCFICM_INTC0 + MCFINTC_IMRL);
#ifdef MCFICM_INTC1
__raw_writel(0x1, MCFICM_INTC1 + MCFINTC_IMRL);
#endif
for (irq = MCFINT_VECBASE; (irq < MCFINT_VECBASE + NR_VECS); irq++) {
if ((irq >= EINT1) && (irq <=EINT7))
irq_set_chip(irq, &intc_irq_chip_edge_port);
else
irq_set_chip(irq, &intc_irq_chip);
irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
irq_set_handler(irq, handle_level_irq);
}
}
|
gpl-2.0
|
mariogrip/android_kernel_oneplus_msm8974
|
arch/x86/kernel/crash_dump_64.c
|
12537
|
1327
|
/*
* Memory preserving reboot related code.
*
* Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
* Copyright (C) IBM Corporation, 2004. All rights reserved
*/
#include <linux/errno.h>
#include <linux/crash_dump.h>
#include <linux/uaccess.h>
#include <linux/io.h>
/**
* copy_oldmem_page - copy one page from "oldmem"
* @pfn: page frame number to be copied
* @buf: target memory address for the copy; this can be in kernel address
* space or user address space (see @userbuf)
* @csize: number of bytes to copy
* @offset: offset in bytes into the page (based on pfn) to begin the copy
* @userbuf: if set, @buf is in user address space, use copy_to_user(),
* otherwise @buf is in kernel address space, use memcpy().
*
* Copy a page from "oldmem". For this page, there is no pte mapped
* in the current kernel. We stitch up a pte, similar to kmap_atomic.
*/
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset, int userbuf)
{
void *vaddr;
if (!csize)
return 0;
vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
if (!vaddr)
return -ENOMEM;
if (userbuf) {
if (copy_to_user(buf, vaddr + offset, csize)) {
iounmap(vaddr);
return -EFAULT;
}
} else
memcpy(buf, vaddr + offset, csize);
set_iounmap_nonlazy();
iounmap(vaddr);
return csize;
}
|
gpl-2.0
|
smaccm/odroid-3.14.y-linaro
|
fs/ceph/strings.c
|
250
|
4092
|
/*
* Ceph fs string constants
*/
#include <linux/module.h>
#include <linux/ceph/types.h>
const char *ceph_mds_state_name(int s)
{
switch (s) {
/* down and out */
case CEPH_MDS_STATE_DNE: return "down:dne";
case CEPH_MDS_STATE_STOPPED: return "down:stopped";
/* up and out */
case CEPH_MDS_STATE_BOOT: return "up:boot";
case CEPH_MDS_STATE_STANDBY: return "up:standby";
case CEPH_MDS_STATE_STANDBY_REPLAY: return "up:standby-replay";
case CEPH_MDS_STATE_REPLAYONCE: return "up:oneshot-replay";
case CEPH_MDS_STATE_CREATING: return "up:creating";
case CEPH_MDS_STATE_STARTING: return "up:starting";
/* up and in */
case CEPH_MDS_STATE_REPLAY: return "up:replay";
case CEPH_MDS_STATE_RESOLVE: return "up:resolve";
case CEPH_MDS_STATE_RECONNECT: return "up:reconnect";
case CEPH_MDS_STATE_REJOIN: return "up:rejoin";
case CEPH_MDS_STATE_CLIENTREPLAY: return "up:clientreplay";
case CEPH_MDS_STATE_ACTIVE: return "up:active";
case CEPH_MDS_STATE_STOPPING: return "up:stopping";
}
return "???";
}
const char *ceph_session_op_name(int op)
{
switch (op) {
case CEPH_SESSION_REQUEST_OPEN: return "request_open";
case CEPH_SESSION_OPEN: return "open";
case CEPH_SESSION_REQUEST_CLOSE: return "request_close";
case CEPH_SESSION_CLOSE: return "close";
case CEPH_SESSION_REQUEST_RENEWCAPS: return "request_renewcaps";
case CEPH_SESSION_RENEWCAPS: return "renewcaps";
case CEPH_SESSION_STALE: return "stale";
case CEPH_SESSION_RECALL_STATE: return "recall_state";
case CEPH_SESSION_FLUSHMSG: return "flushmsg";
case CEPH_SESSION_FLUSHMSG_ACK: return "flushmsg_ack";
}
return "???";
}
const char *ceph_mds_op_name(int op)
{
switch (op) {
case CEPH_MDS_OP_LOOKUP: return "lookup";
case CEPH_MDS_OP_LOOKUPHASH: return "lookuphash";
case CEPH_MDS_OP_LOOKUPPARENT: return "lookupparent";
case CEPH_MDS_OP_LOOKUPINO: return "lookupino";
case CEPH_MDS_OP_GETATTR: return "getattr";
case CEPH_MDS_OP_SETXATTR: return "setxattr";
case CEPH_MDS_OP_SETATTR: return "setattr";
case CEPH_MDS_OP_RMXATTR: return "rmxattr";
case CEPH_MDS_OP_SETLAYOUT: return "setlayout";
case CEPH_MDS_OP_SETDIRLAYOUT: return "setdirlayout";
case CEPH_MDS_OP_READDIR: return "readdir";
case CEPH_MDS_OP_MKNOD: return "mknod";
case CEPH_MDS_OP_LINK: return "link";
case CEPH_MDS_OP_UNLINK: return "unlink";
case CEPH_MDS_OP_RENAME: return "rename";
case CEPH_MDS_OP_MKDIR: return "mkdir";
case CEPH_MDS_OP_RMDIR: return "rmdir";
case CEPH_MDS_OP_SYMLINK: return "symlink";
case CEPH_MDS_OP_CREATE: return "create";
case CEPH_MDS_OP_OPEN: return "open";
case CEPH_MDS_OP_LOOKUPSNAP: return "lookupsnap";
case CEPH_MDS_OP_LSSNAP: return "lssnap";
case CEPH_MDS_OP_MKSNAP: return "mksnap";
case CEPH_MDS_OP_RMSNAP: return "rmsnap";
case CEPH_MDS_OP_SETFILELOCK: return "setfilelock";
case CEPH_MDS_OP_GETFILELOCK: return "getfilelock";
}
return "???";
}
const char *ceph_cap_op_name(int op)
{
switch (op) {
case CEPH_CAP_OP_GRANT: return "grant";
case CEPH_CAP_OP_REVOKE: return "revoke";
case CEPH_CAP_OP_TRUNC: return "trunc";
case CEPH_CAP_OP_EXPORT: return "export";
case CEPH_CAP_OP_IMPORT: return "import";
case CEPH_CAP_OP_UPDATE: return "update";
case CEPH_CAP_OP_DROP: return "drop";
case CEPH_CAP_OP_FLUSH: return "flush";
case CEPH_CAP_OP_FLUSH_ACK: return "flush_ack";
case CEPH_CAP_OP_FLUSHSNAP: return "flushsnap";
case CEPH_CAP_OP_FLUSHSNAP_ACK: return "flushsnap_ack";
case CEPH_CAP_OP_RELEASE: return "release";
case CEPH_CAP_OP_RENEW: return "renew";
}
return "???";
}
const char *ceph_lease_op_name(int o)
{
switch (o) {
case CEPH_MDS_LEASE_REVOKE: return "revoke";
case CEPH_MDS_LEASE_RELEASE: return "release";
case CEPH_MDS_LEASE_RENEW: return "renew";
case CEPH_MDS_LEASE_REVOKE_ACK: return "revoke_ack";
}
return "???";
}
const char *ceph_snap_op_name(int o)
{
switch (o) {
case CEPH_SNAP_OP_UPDATE: return "update";
case CEPH_SNAP_OP_CREATE: return "create";
case CEPH_SNAP_OP_DESTROY: return "destroy";
case CEPH_SNAP_OP_SPLIT: return "split";
}
return "???";
}
|
gpl-2.0
|
YUPlayGodDev/android_kernel_cyanogen_msm8916
|
net/sctp/associola.c
|
506
|
47937
|
/* SCTP kernel implementation
* (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001 La Monte H.P. Yarroll
*
* This file is part of the SCTP kernel implementation
*
* This module provides the abstraction for an SCTP association.
*
* This SCTP implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This SCTP implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, write to
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <lksctp-developers@lists.sourceforge.net>
*
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
* Jon Grimm <jgrimm@us.ibm.com>
* Xingang Guo <xingang.guo@intel.com>
* Hui Huang <hui.huang@nokia.com>
* Sridhar Samudrala <sri@us.ibm.com>
* Daisy Chang <daisyc@us.ibm.com>
* Ryan Layer <rmlayer@us.ibm.com>
* Kevin Gao <kevin.gao@intel.com>
*
* Any bugs reported given to us we will try to fix... any fixes shared will
* be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <net/ipv6.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* Forward declarations for internal functions. */
static void sctp_assoc_bh_rcv(struct work_struct *work);
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
/* 1st Level Abstractions. */
/* Initialize a new association from provided memory. */
static struct sctp_association *sctp_association_init(struct sctp_association *asoc,
const struct sctp_endpoint *ep,
const struct sock *sk,
sctp_scope_t scope,
gfp_t gfp)
{
struct net *net = sock_net(sk);
struct sctp_sock *sp;
int i;
sctp_paramhdr_t *p;
int err;
/* Retrieve the SCTP per socket area. */
sp = sctp_sk((struct sock *)sk);
/* Discarding const is appropriate here. */
asoc->ep = (struct sctp_endpoint *)ep;
sctp_endpoint_hold(asoc->ep);
/* Hold the sock. */
asoc->base.sk = (struct sock *)sk;
sock_hold(asoc->base.sk);
/* Initialize the common base substructure. */
asoc->base.type = SCTP_EP_TYPE_ASSOCIATION;
/* Initialize the object handling fields. */
atomic_set(&asoc->base.refcnt, 1);
asoc->base.dead = false;
/* Initialize the bind addr area. */
sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port);
asoc->state = SCTP_STATE_CLOSED;
/* Set these values from the socket values, a conversion between
* milliseconds to seconds/microseconds must also be done.
*/
asoc->cookie_life.tv_sec = sp->assocparams.sasoc_cookie_life / 1000;
asoc->cookie_life.tv_usec = (sp->assocparams.sasoc_cookie_life % 1000)
* 1000;
asoc->frag_point = 0;
asoc->user_frag = sp->user_frag;
/* Set the association max_retrans and RTO values from the
* socket values.
*/
asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt;
asoc->pf_retrans = net->sctp.pf_retrans;
asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial);
asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max);
asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min);
asoc->overall_error_count = 0;
/* Initialize the association's heartbeat interval based on the
* sock configured value.
*/
asoc->hbinterval = msecs_to_jiffies(sp->hbinterval);
/* Initialize path max retrans value. */
asoc->pathmaxrxt = sp->pathmaxrxt;
/* Initialize default path MTU. */
asoc->pathmtu = sp->pathmtu;
/* Set association default SACK delay */
asoc->sackdelay = msecs_to_jiffies(sp->sackdelay);
asoc->sackfreq = sp->sackfreq;
/* Set the association default flags controlling
* Heartbeat, SACK delay, and Path MTU Discovery.
*/
asoc->param_flags = sp->param_flags;
/* Initialize the maximum number of new data packets that can be sent
* in a burst.
*/
asoc->max_burst = sp->max_burst;
/* initialize association timers */
asoc->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0;
asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial;
asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial;
asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial;
asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] = 0;
asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = 0;
/* sctpimpguide Section 2.12.2
* If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the
* recommended value of 5 times 'RTO.Max'.
*/
asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]
= 5 * asoc->rto_max;
asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
min_t(unsigned long, sp->autoclose, net->sctp.max_autoclose) * HZ;
/* Initializes the timers */
for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
setup_timer(&asoc->timers[i], sctp_timer_events[i],
(unsigned long)asoc);
/* Pull default initialization values from the sock options.
* Note: This assumes that the values have already been
* validated in the sock.
*/
asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams;
asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams;
asoc->max_init_attempts = sp->initmsg.sinit_max_attempts;
asoc->max_init_timeo =
msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo);
/* Allocate storage for the ssnmap after the inbound and outbound
* streams have been negotiated during Init.
*/
asoc->ssnmap = NULL;
/* Set the local window size for receive.
* This is also the rcvbuf space per association.
* RFC 2960 - A SCTP receiver MUST be able to receive a minimum of
* 1500 bytes in one SCTP packet.
*/
if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW)
asoc->rwnd = SCTP_DEFAULT_MINWINDOW;
else
asoc->rwnd = sk->sk_rcvbuf/2;
asoc->a_rwnd = asoc->rwnd;
asoc->rwnd_over = 0;
asoc->rwnd_press = 0;
/* Use my own max window until I learn something better. */
asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;
/* Set the sndbuf size for transmit. */
asoc->sndbuf_used = 0;
/* Initialize the receive memory counter */
atomic_set(&asoc->rmem_alloc, 0);
init_waitqueue_head(&asoc->wait);
asoc->c.my_vtag = sctp_generate_tag(ep);
asoc->peer.i.init_tag = 0; /* INIT needs a vtag of 0. */
asoc->c.peer_vtag = 0;
asoc->c.my_ttag = 0;
asoc->c.peer_ttag = 0;
asoc->c.my_port = ep->base.bind_addr.port;
asoc->c.initial_tsn = sctp_generate_tsn(ep);
asoc->next_tsn = asoc->c.initial_tsn;
asoc->ctsn_ack_point = asoc->next_tsn - 1;
asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
asoc->highest_sacked = asoc->ctsn_ack_point;
asoc->last_cwr_tsn = asoc->ctsn_ack_point;
asoc->unack_data = 0;
/* ADDIP Section 4.1 Asconf Chunk Procedures
*
* When an endpoint has an ASCONF signaled change to be sent to the
* remote endpoint it should do the following:
* ...
* A2) a serial number should be assigned to the chunk. The serial
* number SHOULD be a monotonically increasing number. The serial
* numbers SHOULD be initialized at the start of the
* association to the same value as the initial TSN.
*/
asoc->addip_serial = asoc->c.initial_tsn;
INIT_LIST_HEAD(&asoc->addip_chunk_list);
INIT_LIST_HEAD(&asoc->asconf_ack_list);
/* Make an empty list of remote transport addresses. */
INIT_LIST_HEAD(&asoc->peer.transport_addr_list);
asoc->peer.transport_count = 0;
/* RFC 2960 5.1 Normal Establishment of an Association
*
* After the reception of the first data chunk in an
* association the endpoint must immediately respond with a
* sack to acknowledge the data chunk. Subsequent
* acknowledgements should be done as described in Section
* 6.2.
*
* [We implement this by telling a new association that it
* already received one packet.]
*/
asoc->peer.sack_needed = 1;
asoc->peer.sack_cnt = 0;
asoc->peer.sack_generation = 1;
/* Assume that the peer will tell us if he recognizes ASCONF
* as part of INIT exchange.
* The sctp_addip_noauth option is there for backward compatibility
* and will revert to the old behavior.
*/
asoc->peer.asconf_capable = 0;
if (net->sctp.addip_noauth)
asoc->peer.asconf_capable = 1;
asoc->asconf_addr_del_pending = NULL;
asoc->src_out_of_asoc_ok = 0;
asoc->new_transport = NULL;
/* Create an input queue. */
sctp_inq_init(&asoc->base.inqueue);
sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv);
/* Create an output queue. */
sctp_outq_init(asoc, &asoc->outqueue);
if (!sctp_ulpq_init(&asoc->ulpq, asoc))
goto fail_init;
memset(&asoc->peer.tsn_map, 0, sizeof(struct sctp_tsnmap));
asoc->need_ecne = 0;
asoc->assoc_id = 0;
/* Assume that peer would support both address types unless we are
* told otherwise.
*/
asoc->peer.ipv4_address = 1;
if (asoc->base.sk->sk_family == PF_INET6)
asoc->peer.ipv6_address = 1;
INIT_LIST_HEAD(&asoc->asocs);
asoc->autoclose = sp->autoclose;
asoc->default_stream = sp->default_stream;
asoc->default_ppid = sp->default_ppid;
asoc->default_flags = sp->default_flags;
asoc->default_context = sp->default_context;
asoc->default_timetolive = sp->default_timetolive;
asoc->default_rcv_context = sp->default_rcv_context;
/* SCTP_GET_ASSOC_STATS COUNTERS */
memset(&asoc->stats, 0, sizeof(struct sctp_priv_assoc_stats));
/* AUTH related initializations */
INIT_LIST_HEAD(&asoc->endpoint_shared_keys);
err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp);
if (err)
goto fail_init;
asoc->active_key_id = ep->active_key_id;
asoc->asoc_shared_key = NULL;
asoc->default_hmac_id = 0;
/* Save the hmacs and chunks list into this association */
if (ep->auth_hmacs_list)
memcpy(asoc->c.auth_hmacs, ep->auth_hmacs_list,
ntohs(ep->auth_hmacs_list->param_hdr.length));
if (ep->auth_chunk_list)
memcpy(asoc->c.auth_chunks, ep->auth_chunk_list,
ntohs(ep->auth_chunk_list->param_hdr.length));
/* Get the AUTH random number for this association */
p = (sctp_paramhdr_t *)asoc->c.auth_random;
p->type = SCTP_PARAM_RANDOM;
p->length = htons(sizeof(sctp_paramhdr_t) + SCTP_AUTH_RANDOM_LENGTH);
get_random_bytes(p+1, SCTP_AUTH_RANDOM_LENGTH);
return asoc;
fail_init:
sctp_endpoint_put(asoc->ep);
sock_put(asoc->base.sk);
return NULL;
}
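/*
 * Illustrative, standalone user-space sketch (not part of the SCTP
 * stack): the cookie life setup in sctp_association_init() above splits
 * a millisecond value into the seconds/microseconds pair of a timeval.
 * The sample value is arbitrary.
 */
#include <stdio.h>

int main(void)
{
	unsigned long cookie_life_ms = 60500;	/* e.g. 60.5 seconds */
	long tv_sec = cookie_life_ms / 1000;
	long tv_usec = (cookie_life_ms % 1000) * 1000;

	printf("%lu ms -> %ld s + %ld us\n", cookie_life_ms, tv_sec, tv_usec);
	return 0;
}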
/* Allocate and initialize a new association */
struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep,
const struct sock *sk,
sctp_scope_t scope,
gfp_t gfp)
{
struct sctp_association *asoc;
asoc = t_new(struct sctp_association, gfp);
if (!asoc)
goto fail;
if (!sctp_association_init(asoc, ep, sk, scope, gfp))
goto fail_init;
SCTP_DBG_OBJCNT_INC(assoc);
SCTP_DEBUG_PRINTK("Created asoc %p\n", asoc);
return asoc;
fail_init:
kfree(asoc);
fail:
return NULL;
}
/* Free this association if possible. There may still be users, so
* the actual deallocation may be delayed.
*/
void sctp_association_free(struct sctp_association *asoc)
{
struct sock *sk = asoc->base.sk;
struct sctp_transport *transport;
struct list_head *pos, *temp;
int i;
/* Only real associations count against the endpoint, so
* don't bother for if this is a temporary association.
*/
if (!list_empty(&asoc->asocs)) {
list_del(&asoc->asocs);
/* Decrement the backlog value for a TCP-style listening
* socket.
*/
if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
sk->sk_ack_backlog--;
}
/* Mark as dead, so other users can know this structure is
* going away.
*/
asoc->base.dead = true;
/* Dispose of any data lying around in the outqueue. */
sctp_outq_free(&asoc->outqueue);
/* Dispose of any pending messages for the upper layer. */
sctp_ulpq_free(&asoc->ulpq);
/* Dispose of any pending chunks on the inqueue. */
sctp_inq_free(&asoc->base.inqueue);
sctp_tsnmap_free(&asoc->peer.tsn_map);
/* Free ssnmap storage. */
sctp_ssnmap_free(asoc->ssnmap);
/* Clean up the bound address list. */
sctp_bind_addr_free(&asoc->base.bind_addr);
/* Do we need to go through all of our timers and
* delete them? To be safe we will try to delete all, but we
* should be able to go through and make a guess based
* on our state.
*/
for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) {
if (del_timer(&asoc->timers[i]))
sctp_association_put(asoc);
}
/* Free peer's cached cookie. */
kfree(asoc->peer.cookie);
kfree(asoc->peer.peer_random);
kfree(asoc->peer.peer_chunks);
kfree(asoc->peer.peer_hmacs);
/* Release the transport structures. */
list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
transport = list_entry(pos, struct sctp_transport, transports);
list_del_rcu(pos);
sctp_transport_free(transport);
}
asoc->peer.transport_count = 0;
sctp_asconf_queue_teardown(asoc);
/* Free pending address space being deleted */
if (asoc->asconf_addr_del_pending != NULL)
kfree(asoc->asconf_addr_del_pending);
/* AUTH - Free the endpoint shared keys */
sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
/* AUTH - Free the association shared key */
sctp_auth_key_put(asoc->asoc_shared_key);
sctp_association_put(asoc);
}
/* Cleanup and free up an association. */
static void sctp_association_destroy(struct sctp_association *asoc)
{
SCTP_ASSERT(asoc->base.dead, "Assoc is not dead", return);
sctp_endpoint_put(asoc->ep);
sock_put(asoc->base.sk);
if (asoc->assoc_id != 0) {
spin_lock_bh(&sctp_assocs_id_lock);
idr_remove(&sctp_assocs_id, asoc->assoc_id);
spin_unlock_bh(&sctp_assocs_id_lock);
}
WARN_ON(atomic_read(&asoc->rmem_alloc));
kfree(asoc);
SCTP_DBG_OBJCNT_DEC(assoc);
}
/* Change the primary destination address for the peer. */
void sctp_assoc_set_primary(struct sctp_association *asoc,
struct sctp_transport *transport)
{
int changeover = 0;
/* it's a changeover only if we already have a primary path
* that we are changing
*/
if (asoc->peer.primary_path != NULL &&
asoc->peer.primary_path != transport)
changeover = 1;
asoc->peer.primary_path = transport;
/* Set a default msg_name for events. */
memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
sizeof(union sctp_addr));
/* If the primary path is changing, assume that the
* user wants to use this new path.
*/
if ((transport->state == SCTP_ACTIVE) ||
(transport->state == SCTP_UNKNOWN))
asoc->peer.active_path = transport;
/*
* SFR-CACC algorithm:
* Upon the receipt of a request to change the primary
* destination address, on the data structure for the new
* primary destination, the sender MUST do the following:
*
* 1) If CHANGEOVER_ACTIVE is set, then there was a switch
* to this destination address earlier. The sender MUST set
* CYCLING_CHANGEOVER to indicate that this switch is a
* double switch to the same destination address.
*
* Really, only bother if we have data queued or outstanding on
* the association.
*/
if (!asoc->outqueue.outstanding_bytes && !asoc->outqueue.out_qlen)
return;
if (transport->cacc.changeover_active)
transport->cacc.cycling_changeover = changeover;
/* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that
* a changeover has occurred.
*/
transport->cacc.changeover_active = changeover;
/* 3) The sender MUST store the next TSN to be sent in
* next_tsn_at_change.
*/
transport->cacc.next_tsn_at_change = asoc->next_tsn;
}
/* Remove a transport from an association. */
void sctp_assoc_rm_peer(struct sctp_association *asoc,
struct sctp_transport *peer)
{
struct list_head *pos;
struct sctp_transport *transport;
SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_rm_peer:association %p addr: ",
" port: %d\n",
asoc,
(&peer->ipaddr),
ntohs(peer->ipaddr.v4.sin_port));
/* If we are to remove the current retran_path, update it
* to the next peer before removing this peer from the list.
*/
if (asoc->peer.retran_path == peer)
sctp_assoc_update_retran_path(asoc);
/* Remove this peer from the list. */
list_del_rcu(&peer->transports);
/* Get the first transport of asoc. */
pos = asoc->peer.transport_addr_list.next;
transport = list_entry(pos, struct sctp_transport, transports);
/* Update any entries that match the peer to be deleted. */
if (asoc->peer.primary_path == peer)
sctp_assoc_set_primary(asoc, transport);
if (asoc->peer.active_path == peer)
asoc->peer.active_path = transport;
if (asoc->peer.retran_path == peer)
asoc->peer.retran_path = transport;
if (asoc->peer.last_data_from == peer)
asoc->peer.last_data_from = transport;
/* If we remove the transport an INIT was last sent to, set it to
* NULL. Combined with the update of the retran path above, this
* will cause the next INIT to be sent to the next available
* transport, maintaining the cycle.
*/
if (asoc->init_last_sent_to == peer)
asoc->init_last_sent_to = NULL;
/* If we remove the transport an SHUTDOWN was last sent to, set it
* to NULL. Combined with the update of the retran path above, this
* will cause the next SHUTDOWN to be sent to the next available
* transport, maintaining the cycle.
*/
if (asoc->shutdown_last_sent_to == peer)
asoc->shutdown_last_sent_to = NULL;
/* If we remove the transport an ASCONF was last sent to, set it to
* NULL.
*/
if (asoc->addip_last_asconf &&
asoc->addip_last_asconf->transport == peer)
asoc->addip_last_asconf->transport = NULL;
/* If we have something on the transmitted list, we have to
* save it off. The best place is the active path.
*/
if (!list_empty(&peer->transmitted)) {
struct sctp_transport *active = asoc->peer.active_path;
struct sctp_chunk *ch;
/* Reset the transport of each chunk on this list */
list_for_each_entry(ch, &peer->transmitted,
transmitted_list) {
ch->transport = NULL;
ch->rtt_in_progress = 0;
}
list_splice_tail_init(&peer->transmitted,
&active->transmitted);
/* Start a T3 timer here in case it wasn't running so
* that these migrated packets have a chance to get
* retransmitted.
*/
if (!timer_pending(&active->T3_rtx_timer))
if (!mod_timer(&active->T3_rtx_timer,
jiffies + active->rto))
sctp_transport_hold(active);
}
asoc->peer.transport_count--;
sctp_transport_free(peer);
}
/* Add a transport address to an association. */
struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
const union sctp_addr *addr,
const gfp_t gfp,
const int peer_state)
{
struct net *net = sock_net(asoc->base.sk);
struct sctp_transport *peer;
struct sctp_sock *sp;
unsigned short port;
sp = sctp_sk(asoc->base.sk);
/* AF_INET and AF_INET6 share common port field. */
port = ntohs(addr->v4.sin_port);
SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_add_peer:association %p addr: ",
" port: %d state:%d\n",
asoc,
addr,
port,
peer_state);
/* Set the port if it has not been set yet. */
if (0 == asoc->peer.port)
asoc->peer.port = port;
/* Check to see if this is a duplicate. */
peer = sctp_assoc_lookup_paddr(asoc, addr);
if (peer) {
/* An UNKNOWN state is only set on transports added by
* user in sctp_connectx() call. Such transports should be
* considered CONFIRMED per RFC 4960, Section 5.4.
*/
if (peer->state == SCTP_UNKNOWN) {
peer->state = SCTP_ACTIVE;
}
return peer;
}
peer = sctp_transport_new(net, addr, gfp);
if (!peer)
return NULL;
sctp_transport_set_owner(peer, asoc);
/* Initialize the peer's heartbeat interval based on the
* association configured value.
*/
peer->hbinterval = asoc->hbinterval;
/* Set the path max_retrans. */
peer->pathmaxrxt = asoc->pathmaxrxt;
/* And the partial failure retrans threshold */
peer->pf_retrans = asoc->pf_retrans;
/* Initialize the peer's SACK delay timeout based on the
* association configured value.
*/
peer->sackdelay = asoc->sackdelay;
peer->sackfreq = asoc->sackfreq;
/* Enable/disable heartbeat, SACK delay, and path MTU discovery
* based on association setting.
*/
peer->param_flags = asoc->param_flags;
sctp_transport_route(peer, NULL, sp);
/* Initialize the pmtu of the transport. */
if (peer->param_flags & SPP_PMTUD_DISABLE) {
if (asoc->pathmtu)
peer->pathmtu = asoc->pathmtu;
else
peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}
/* If this is the first transport addr on this association,
* initialize the association PMTU to the peer's PMTU.
* If not and the current association PMTU is higher than the new
* peer's PMTU, reset the association PMTU to the new peer's PMTU.
*/
if (asoc->pathmtu)
asoc->pathmtu = min_t(int, peer->pathmtu, asoc->pathmtu);
else
asoc->pathmtu = peer->pathmtu;
SCTP_DEBUG_PRINTK("sctp_assoc_add_peer:association %p PMTU set to "
"%d\n", asoc, asoc->pathmtu);
peer->pmtu_pending = 0;
asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
/* The asoc->peer.port might not be meaningful yet, but
* initialize the packet structure anyway.
*/
sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
asoc->peer.port);
/* 7.2.1 Slow-Start
*
* o The initial cwnd before DATA transmission or after a sufficiently
* long idle period MUST be set to
* min(4*MTU, max(2*MTU, 4380 bytes))
*
* o The initial value of ssthresh MAY be arbitrarily high
* (for example, implementations MAY use the size of the
* receiver advertised window).
*/
peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
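/* Worked example: with a 1500 byte PMTU this evaluates to
* min(6000, max(3000, 4380)) = 4380 bytes, so the 4380 byte clamp
* dominates for common Ethernet-sized paths.
*/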
/* At this point, we may not have the receiver's advertised window,
* so initialize ssthresh to the default value and it will be set
* later when we process the INIT.
*/
peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;
peer->partial_bytes_acked = 0;
peer->flight_size = 0;
peer->burst_limited = 0;
/* Set the transport's RTO.initial value */
peer->rto = asoc->rto_initial;
sctp_max_rto(asoc, peer);
/* Set the peer's active state. */
peer->state = peer_state;
/* Attach the remote transport to our asoc. */
list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
asoc->peer.transport_count++;
/* If we do not yet have a primary path, set one. */
if (!asoc->peer.primary_path) {
sctp_assoc_set_primary(asoc, peer);
asoc->peer.retran_path = peer;
}
if (asoc->peer.active_path == asoc->peer.retran_path &&
peer->state != SCTP_UNCONFIRMED) {
asoc->peer.retran_path = peer;
}
return peer;
}
/* Delete a transport address from an association. */
void sctp_assoc_del_peer(struct sctp_association *asoc,
const union sctp_addr *addr)
{
struct list_head *pos;
struct list_head *temp;
struct sctp_transport *transport;
list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
transport = list_entry(pos, struct sctp_transport, transports);
if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) {
/* Do bookkeeping for removing the peer and free it. */
sctp_assoc_rm_peer(asoc, transport);
break;
}
}
}
/* Lookup a transport by address. */
struct sctp_transport *sctp_assoc_lookup_paddr(
const struct sctp_association *asoc,
const union sctp_addr *address)
{
struct sctp_transport *t;
/* Cycle through all transports searching for a peer address. */
list_for_each_entry(t, &asoc->peer.transport_addr_list,
transports) {
if (sctp_cmp_addr_exact(address, &t->ipaddr))
return t;
}
return NULL;
}
/* Remove all transports except a given one */
void sctp_assoc_del_nonprimary_peers(struct sctp_association *asoc,
struct sctp_transport *primary)
{
struct sctp_transport *temp;
struct sctp_transport *t;
list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
transports) {
/* if the current transport is not the primary one, delete it */
if (t != primary)
sctp_assoc_rm_peer(asoc, t);
}
}
/* Engage in transport control operations.
* Mark the transport up or down and send a notification to the user.
* Select and update the new active and retran paths.
*/
void sctp_assoc_control_transport(struct sctp_association *asoc,
struct sctp_transport *transport,
sctp_transport_cmd_t command,
sctp_sn_error_t error)
{
struct sctp_transport *t = NULL;
struct sctp_transport *first;
struct sctp_transport *second;
struct sctp_ulpevent *event;
struct sockaddr_storage addr;
int spc_state = 0;
bool ulp_notify = true;
/* Record the transition on the transport. */
switch (command) {
case SCTP_TRANSPORT_UP:
/* If we are moving from UNCONFIRMED state due
* to heartbeat success, report the SCTP_ADDR_CONFIRMED
* state to the user, otherwise report SCTP_ADDR_AVAILABLE.
*/
if (SCTP_UNCONFIRMED == transport->state &&
SCTP_HEARTBEAT_SUCCESS == error)
spc_state = SCTP_ADDR_CONFIRMED;
else
spc_state = SCTP_ADDR_AVAILABLE;
/* Don't inform ULP about transition from PF to
* active state and set cwnd to 1, see SCTP
* Quick failover draft section 5.1, point 5
*/
if (transport->state == SCTP_PF) {
ulp_notify = false;
transport->cwnd = 1;
}
transport->state = SCTP_ACTIVE;
break;
case SCTP_TRANSPORT_DOWN:
/* If the transport was never confirmed, do not transition it
* to inactive state. Also, release the cached route since
* there may be a better route next time.
*/
if (transport->state != SCTP_UNCONFIRMED)
transport->state = SCTP_INACTIVE;
else {
dst_release(transport->dst);
transport->dst = NULL;
}
spc_state = SCTP_ADDR_UNREACHABLE;
break;
case SCTP_TRANSPORT_PF:
transport->state = SCTP_PF;
ulp_notify = false;
break;
default:
return;
}
/* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the
* user.
*/
if (ulp_notify) {
memset(&addr, 0, sizeof(struct sockaddr_storage));
memcpy(&addr, &transport->ipaddr,
transport->af_specific->sockaddr_len);
event = sctp_ulpevent_make_peer_addr_change(asoc, &addr,
0, spc_state, error, GFP_ATOMIC);
if (event)
sctp_ulpq_tail_event(&asoc->ulpq, event);
}
/* Select new active and retran paths. */
/* Look for the two most recently used active transports.
*
* This code produces the wrong ordering whenever jiffies
* rolls over, but we still get usable transports, so we don't
* worry about it.
*/
first = NULL; second = NULL;
list_for_each_entry(t, &asoc->peer.transport_addr_list,
transports) {
if ((t->state == SCTP_INACTIVE) ||
(t->state == SCTP_UNCONFIRMED) ||
(t->state == SCTP_PF))
continue;
if (!first || t->last_time_heard > first->last_time_heard) {
second = first;
first = t;
}
if (!second || t->last_time_heard > second->last_time_heard)
second = t;
}
/* RFC 2960 6.4 Multi-Homed SCTP Endpoints
*
* By default, an endpoint should always transmit to the
* primary path, unless the SCTP user explicitly specifies the
* destination transport address (and possibly source
* transport address) to use.
*
* [If the primary is active but not most recent, bump the most
* recently used transport.]
*/
if (((asoc->peer.primary_path->state == SCTP_ACTIVE) ||
(asoc->peer.primary_path->state == SCTP_UNKNOWN)) &&
first != asoc->peer.primary_path) {
second = first;
first = asoc->peer.primary_path;
}
/* If we failed to find a usable transport, just camp on the
* primary, even if it is inactive.
*/
if (!first) {
first = asoc->peer.primary_path;
second = asoc->peer.primary_path;
}
/* Set the active and retran transports. */
asoc->peer.active_path = first;
asoc->peer.retran_path = second;
}
/* Hold a reference to an association. */
void sctp_association_hold(struct sctp_association *asoc)
{
atomic_inc(&asoc->base.refcnt);
}
/* Release a reference to an association and cleanup
* if there are no more references.
*/
void sctp_association_put(struct sctp_association *asoc)
{
if (atomic_dec_and_test(&asoc->base.refcnt))
sctp_association_destroy(asoc);
}
/* Allocate the next TSN, Transmission Sequence Number, for the given
* association.
*/
__u32 sctp_association_get_next_tsn(struct sctp_association *asoc)
{
/* From Section 1.6 Serial Number Arithmetic:
* Transmission Sequence Numbers wrap around when they reach
* 2**32 - 1. That is, the next TSN a DATA chunk MUST use
* after transmitting TSN = 2**32 - 1 is TSN = 0.
*/
__u32 retval = asoc->next_tsn;
asoc->next_tsn++;
asoc->unack_data++;
return retval;
}
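/*
* Illustrative sketch (not part of the original file): TSN comparisons
* must honor the same modulo-2**32 serial number arithmetic. A
* hypothetical helper showing the usual idiom (the real code uses the
* TSN_lt()/TSN_lte() macros from the SCTP headers):
*/
static inline int tsn_before_example(__u32 a, __u32 b)
{
/* True iff 'a' precedes 'b' in serial order, even across the
* 2**32 - 1 -> 0 wrap.
*/
return (__s32)(a - b) < 0;
}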
/* Compare two addresses to see if they match. Wildcard addresses
* only match themselves.
*/
int sctp_cmp_addr_exact(const union sctp_addr *ss1,
const union sctp_addr *ss2)
{
struct sctp_af *af;
af = sctp_get_af_specific(ss1->sa.sa_family);
if (unlikely(!af))
return 0;
return af->cmp_addr(ss1, ss2);
}
/* Return an ecne chunk to get prepended to a packet.
* Note: We are sly and return a shared, prealloced chunk. FIXME:
* No we don't, but we could/should.
*/
struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc)
{
struct sctp_chunk *chunk;
/* Send ECNE if needed.
* Not being able to allocate a chunk here is not deadly.
*/
if (asoc->need_ecne)
chunk = sctp_make_ecne(asoc, asoc->last_ecne_tsn);
else
chunk = NULL;
return chunk;
}
/*
* Find which transport this TSN was sent on.
*/
struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,
__u32 tsn)
{
struct sctp_transport *active;
struct sctp_transport *match;
struct sctp_transport *transport;
struct sctp_chunk *chunk;
__be32 key = htonl(tsn);
match = NULL;
/*
* FIXME: In general, find a more efficient data structure for
* searching.
*/
/*
* The general strategy is to search each transport's transmitted
* list. Return which transport this TSN lives on.
*
* Let's be hopeful and check the active_path first.
* Another optimization would be to know if there is only one
* outbound path and not have to look for the TSN at all.
*
*/
active = asoc->peer.active_path;
list_for_each_entry(chunk, &active->transmitted,
transmitted_list) {
if (key == chunk->subh.data_hdr->tsn) {
match = active;
goto out;
}
}
/* If not found, go search all the other transports. */
list_for_each_entry(transport, &asoc->peer.transport_addr_list,
transports) {
if (transport == active)
continue;
list_for_each_entry(chunk, &transport->transmitted,
transmitted_list) {
if (key == chunk->subh.data_hdr->tsn) {
match = transport;
goto out;
}
}
}
out:
return match;
}
/* Is this the association we are looking for? */
struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc,
struct net *net,
const union sctp_addr *laddr,
const union sctp_addr *paddr)
{
struct sctp_transport *transport;
if ((htons(asoc->base.bind_addr.port) == laddr->v4.sin_port) &&
(htons(asoc->peer.port) == paddr->v4.sin_port) &&
net_eq(sock_net(asoc->base.sk), net)) {
transport = sctp_assoc_lookup_paddr(asoc, paddr);
if (!transport)
goto out;
if (sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
sctp_sk(asoc->base.sk)))
goto out;
}
transport = NULL;
out:
return transport;
}
/* Do delayed input processing. This is scheduled by sctp_rcv(). */
static void sctp_assoc_bh_rcv(struct work_struct *work)
{
struct sctp_association *asoc =
container_of(work, struct sctp_association,
base.inqueue.immediate);
struct net *net = sock_net(asoc->base.sk);
struct sctp_endpoint *ep;
struct sctp_chunk *chunk;
struct sctp_inq *inqueue;
int state;
sctp_subtype_t subtype;
int error = 0;
/* The association should be held so we should be safe. */
ep = asoc->ep;
inqueue = &asoc->base.inqueue;
sctp_association_hold(asoc);
while (NULL != (chunk = sctp_inq_pop(inqueue))) {
state = asoc->state;
subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
/* SCTP-AUTH, Section 6.3:
* The receiver has a list of chunk types which it expects
* to be received only after an AUTH-chunk. This list has
* been sent to the peer during the association setup. It
* MUST silently discard these chunks if they are not placed
* after an AUTH chunk in the packet.
*/
if (sctp_auth_recv_cid(subtype.chunk, asoc) && !chunk->auth)
continue;
/* Remember where the last DATA chunk came from so we
* know where to send the SACK.
*/
if (sctp_chunk_is_data(chunk))
asoc->peer.last_data_from = chunk->transport;
else {
SCTP_INC_STATS(net, SCTP_MIB_INCTRLCHUNKS);
asoc->stats.ictrlchunks++;
if (chunk->chunk_hdr->type == SCTP_CID_SACK)
asoc->stats.isacks++;
}
if (chunk->transport)
chunk->transport->last_time_heard = jiffies;
/* Run through the state machine. */
error = sctp_do_sm(net, SCTP_EVENT_T_CHUNK, subtype,
state, ep, asoc, chunk, GFP_ATOMIC);
/* Check to see if the association is freed in response to
* the incoming chunk. If so, get out of the while loop.
*/
if (asoc->base.dead)
break;
/* If there is an error on chunk, discard this packet. */
if (error && chunk)
chunk->pdiscard = 1;
}
sctp_association_put(asoc);
}
/* This routine moves an association from its old sk to a new sk. */
void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk)
{
struct sctp_sock *newsp = sctp_sk(newsk);
struct sock *oldsk = assoc->base.sk;
/* Delete the association from the old endpoint's list of
* associations.
*/
list_del_init(&assoc->asocs);
/* Decrement the backlog value for a TCP-style socket. */
if (sctp_style(oldsk, TCP))
oldsk->sk_ack_backlog--;
/* Release references to the old endpoint and the sock. */
sctp_endpoint_put(assoc->ep);
sock_put(assoc->base.sk);
/* Get a reference to the new endpoint. */
assoc->ep = newsp->ep;
sctp_endpoint_hold(assoc->ep);
/* Get a reference to the new sock. */
assoc->base.sk = newsk;
sock_hold(assoc->base.sk);
/* Add the association to the new endpoint's list of associations. */
sctp_endpoint_add_asoc(newsp->ep, assoc);
}
/* Update an association (possibly from unexpected COOKIE-ECHO processing). */
void sctp_assoc_update(struct sctp_association *asoc,
struct sctp_association *new)
{
struct sctp_transport *trans;
struct list_head *pos, *temp;
/* Copy in new parameters of peer. */
asoc->c = new->c;
asoc->peer.rwnd = new->peer.rwnd;
asoc->peer.sack_needed = new->peer.sack_needed;
asoc->peer.i = new->peer.i;
sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
asoc->peer.i.initial_tsn, GFP_ATOMIC);
/* Remove any peer addresses not present in the new association. */
list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
trans = list_entry(pos, struct sctp_transport, transports);
if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) {
sctp_assoc_rm_peer(asoc, trans);
continue;
}
if (asoc->state >= SCTP_STATE_ESTABLISHED)
sctp_transport_reset(trans);
}
/* If the case is A (association restart), use
* initial_tsn as next_tsn. If the case is B, use
* current next_tsn in case data sent to peer
* has been discarded and needs retransmission.
*/
if (asoc->state >= SCTP_STATE_ESTABLISHED) {
asoc->next_tsn = new->next_tsn;
asoc->ctsn_ack_point = new->ctsn_ack_point;
asoc->adv_peer_ack_point = new->adv_peer_ack_point;
/* Reinitialize SSN for both local streams
* and peer's streams.
*/
sctp_ssnmap_clear(asoc->ssnmap);
/* Flush the ULP reassembly and ordered queue.
* Any data there will now be stale and will
* cause problems.
*/
sctp_ulpq_flush(&asoc->ulpq);
/* reset the overall association error count so
* that the restarted association doesn't get torn
* down on the next retransmission timer.
*/
asoc->overall_error_count = 0;
} else {
/* Add any peer addresses from the new association. */
list_for_each_entry(trans, &new->peer.transport_addr_list,
transports) {
if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr))
sctp_assoc_add_peer(asoc, &trans->ipaddr,
GFP_ATOMIC, trans->state);
}
asoc->ctsn_ack_point = asoc->next_tsn - 1;
asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
if (!asoc->ssnmap) {
/* Move the ssnmap. */
asoc->ssnmap = new->ssnmap;
new->ssnmap = NULL;
}
if (!asoc->assoc_id) {
/* get a new association id since we don't have one
* yet.
*/
sctp_assoc_set_id(asoc, GFP_ATOMIC);
}
}
/* SCTP-AUTH: Save the peer parameters from the new associations
* and also move the association shared keys over
*/
kfree(asoc->peer.peer_random);
asoc->peer.peer_random = new->peer.peer_random;
new->peer.peer_random = NULL;
kfree(asoc->peer.peer_chunks);
asoc->peer.peer_chunks = new->peer.peer_chunks;
new->peer.peer_chunks = NULL;
kfree(asoc->peer.peer_hmacs);
asoc->peer.peer_hmacs = new->peer.peer_hmacs;
new->peer.peer_hmacs = NULL;
sctp_auth_key_put(asoc->asoc_shared_key);
sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
}
/* Update the retran path for sending a retransmitted packet.
* Round-robin through the active transports, else round-robin
* through the inactive transports as this is the next best thing
* we can try.
*/
void sctp_assoc_update_retran_path(struct sctp_association *asoc)
{
struct sctp_transport *t, *next;
struct list_head *head = &asoc->peer.transport_addr_list;
struct list_head *pos;
if (asoc->peer.transport_count == 1)
return;
/* Find the next transport in a round-robin fashion. */
t = asoc->peer.retran_path;
pos = &t->transports;
next = NULL;
while (1) {
/* Skip the head. */
if (pos->next == head)
pos = head->next;
else
pos = pos->next;
t = list_entry(pos, struct sctp_transport, transports);
/* We have cycled through the whole list without
* finding another active transport; fall back to
* the next usable transport recorded below.
*/
if (t == asoc->peer.retran_path) {
t = next;
break;
}
/* Try to find an active transport. */
if ((t->state == SCTP_ACTIVE) ||
(t->state == SCTP_UNKNOWN)) {
break;
} else {
/* Keep track of the next transport in case
* we don't find any active transport.
*/
if (t->state != SCTP_UNCONFIRMED && !next)
next = t;
}
}
if (t)
asoc->peer.retran_path = t;
else
t = asoc->peer.retran_path;
SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association"
" %p addr: ",
" port: %d\n",
asoc,
(&t->ipaddr),
ntohs(t->ipaddr.v4.sin_port));
}
/* Choose the transport for sending retransmit packet. */
struct sctp_transport *sctp_assoc_choose_alter_transport(
struct sctp_association *asoc, struct sctp_transport *last_sent_to)
{
/* If this is the first time packet is sent, use the active path,
* else use the retran path. If the last packet was sent over the
* retran path, update the retran path and use it.
*/
if (!last_sent_to)
return asoc->peer.active_path;
else {
if (last_sent_to == asoc->peer.retran_path)
sctp_assoc_update_retran_path(asoc);
return asoc->peer.retran_path;
}
}
/* Update the association's pmtu and frag_point by going through all the
* transports. This routine is called when a transport's PMTU has changed.
*/
void sctp_assoc_sync_pmtu(struct sock *sk, struct sctp_association *asoc)
{
struct sctp_transport *t;
__u32 pmtu = 0;
if (!asoc)
return;
/* Get the lowest pmtu of all the transports. */
list_for_each_entry(t, &asoc->peer.transport_addr_list,
transports) {
if (t->pmtu_pending && t->dst) {
sctp_transport_update_pmtu(sk, t, dst_mtu(t->dst));
t->pmtu_pending = 0;
}
if (!pmtu || (t->pathmtu < pmtu))
pmtu = t->pathmtu;
}
if (pmtu) {
asoc->pathmtu = pmtu;
asoc->frag_point = sctp_frag_point(asoc, pmtu);
}
SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n",
__func__, asoc, asoc->pathmtu, asoc->frag_point);
}
/* Should we send a SACK to update our peer? */
static inline int sctp_peer_needs_update(struct sctp_association *asoc)
{
struct net *net = sock_net(asoc->base.sk);
switch (asoc->state) {
case SCTP_STATE_ESTABLISHED:
case SCTP_STATE_SHUTDOWN_PENDING:
case SCTP_STATE_SHUTDOWN_RECEIVED:
case SCTP_STATE_SHUTDOWN_SENT:
if ((asoc->rwnd > asoc->a_rwnd) &&
((asoc->rwnd - asoc->a_rwnd) >= max_t(__u32,
(asoc->base.sk->sk_rcvbuf >> net->sctp.rwnd_upd_shift),
asoc->pathmtu)))
return 1;
break;
default:
break;
}
return 0;
}
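/* Worked example (assuming the default rwnd_upd_shift of 4): with a
* 256 KiB sk_rcvbuf, a window update SACK is warranted once rwnd
* exceeds a_rwnd by max(256 KiB >> 4, pathmtu) = max(16 KiB, pathmtu)
* bytes.
*/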
/* Increase asoc's rwnd by len and send any window update SACK if needed. */
void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
{
struct sctp_chunk *sack;
struct timer_list *timer;
if (asoc->rwnd_over) {
if (asoc->rwnd_over >= len) {
asoc->rwnd_over -= len;
} else {
asoc->rwnd += (len - asoc->rwnd_over);
asoc->rwnd_over = 0;
}
} else {
asoc->rwnd += len;
}
/* If we had window pressure, start recovering it
* once our rwnd had reached the accumulated pressure
* threshold. The idea is to recover slowly, but up
* to the initial advertised window.
*/
if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
int change = min(asoc->pathmtu, asoc->rwnd_press);
asoc->rwnd += change;
asoc->rwnd_press -= change;
}
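/* Worked example: with rwnd_press == 3000 and a 1500 byte PMTU,
* each qualifying increase recovers min(1500, 3000) = 1500 bytes,
* so the pressed window drains back over successive calls.
*/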
SCTP_DEBUG_PRINTK("%s: asoc %p rwnd increased by %d to (%u, %u) "
"- %u\n", __func__, asoc, len, asoc->rwnd,
asoc->rwnd_over, asoc->a_rwnd);
/* Send a window update SACK if the rwnd has increased by at least the
* minimum of the association's PMTU and half of the receive buffer.
* The algorithm used is similar to the one described in
* Section 4.2.3.3 of RFC 1122.
*/
if (sctp_peer_needs_update(asoc)) {
asoc->a_rwnd = asoc->rwnd;
SCTP_DEBUG_PRINTK("%s: Sending window update SACK- asoc: %p "
"rwnd: %u a_rwnd: %u\n", __func__,
asoc, asoc->rwnd, asoc->a_rwnd);
sack = sctp_make_sack(asoc);
if (!sack)
return;
asoc->peer.sack_needed = 0;
sctp_outq_tail(&asoc->outqueue, sack);
/* Stop the SACK timer. */
timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
if (del_timer(timer))
sctp_association_put(asoc);
}
}
/* Decrease asoc's rwnd by len. */
void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
{
int rx_count;
int over = 0;
SCTP_ASSERT(asoc->rwnd, "rwnd zero", return);
SCTP_ASSERT(!asoc->rwnd_over, "rwnd_over not zero", return);
if (asoc->ep->rcvbuf_policy)
rx_count = atomic_read(&asoc->rmem_alloc);
else
rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
/* If we've reached or overflowed our receive buffer, announce
* a 0 rwnd even if rwnd would otherwise still be positive. Store
* the potential pressure overflow so that the window can later be
* restored to its original value.
*/
if (rx_count >= asoc->base.sk->sk_rcvbuf)
over = 1;
if (asoc->rwnd >= len) {
asoc->rwnd -= len;
if (over) {
asoc->rwnd_press += asoc->rwnd;
asoc->rwnd = 0;
}
} else {
asoc->rwnd_over = len - asoc->rwnd;
asoc->rwnd = 0;
}
SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u, %u)\n",
__func__, asoc, len, asoc->rwnd,
asoc->rwnd_over, asoc->rwnd_press);
}
/* Build the bind address list for the association based on info from the
* local endpoint and the remote peer.
*/
int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
sctp_scope_t scope, gfp_t gfp)
{
int flags;
/* Use scoping rules to determine the subset of addresses from
* the endpoint.
*/
flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
if (asoc->peer.ipv4_address)
flags |= SCTP_ADDR4_PEERSUPP;
if (asoc->peer.ipv6_address)
flags |= SCTP_ADDR6_PEERSUPP;
return sctp_bind_addr_copy(sock_net(asoc->base.sk),
&asoc->base.bind_addr,
&asoc->ep->base.bind_addr,
scope, gfp, flags);
}
/* Build the association's bind address list from the cookie. */
int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc,
struct sctp_cookie *cookie,
gfp_t gfp)
{
int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length);
int var_size3 = cookie->raw_addr_list_len;
__u8 *raw = (__u8 *)cookie->peer_init + var_size2;
return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3,
asoc->ep->base.bind_addr.port, gfp);
}
/* Lookup laddr in the bind address list of an association. */
int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
const union sctp_addr *laddr)
{
int found = 0;
if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) &&
sctp_bind_addr_match(&asoc->base.bind_addr, laddr,
sctp_sk(asoc->base.sk)))
found = 1;
return found;
}
/* Set an association id for a given association */
int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
{
bool preload = gfp & __GFP_WAIT;
int ret;
/* If the id is already assigned, keep it. */
if (asoc->assoc_id)
return 0;
if (preload)
idr_preload(gfp);
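/* idr_preload() pre-allocates IDR nodes per-CPU and disables
* preemption, which is what lets the GFP_NOWAIT allocation below
* proceed safely under the spinlock.
*/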
spin_lock_bh(&sctp_assocs_id_lock);
/* 0 is not a valid assoc_id, must be >= 1 */
ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, 1, 0, GFP_NOWAIT);
spin_unlock_bh(&sctp_assocs_id_lock);
if (preload)
idr_preload_end();
if (ret < 0)
return ret;
asoc->assoc_id = (sctp_assoc_t)ret;
return 0;
}
/* Free the ASCONF queue */
static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
{
struct sctp_chunk *asconf;
struct sctp_chunk *tmp;
list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
list_del_init(&asconf->list);
sctp_chunk_free(asconf);
}
}
/* Free asconf_ack cache */
static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
{
struct sctp_chunk *ack;
struct sctp_chunk *tmp;
list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
transmitted_list) {
list_del_init(&ack->transmitted_list);
sctp_chunk_free(ack);
}
}
/* Clean up the ASCONF_ACK queue */
void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
{
struct sctp_chunk *ack;
struct sctp_chunk *tmp;
/* We can remove all the entries from the queue up to
* the "Peer-Sequence-Number".
*/
list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
transmitted_list) {
if (ack->subh.addip_hdr->serial ==
htonl(asoc->peer.addip_serial))
break;
list_del_init(&ack->transmitted_list);
sctp_chunk_free(ack);
}
}
/* Find the ASCONF_ACK whose serial number matches ASCONF */
struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
const struct sctp_association *asoc,
__be32 serial)
{
struct sctp_chunk *ack;
/* Walk through the list of cached ASCONF-ACKs and find the
* ack chunk whose serial number matches that of the request.
*/
list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
if (ack->subh.addip_hdr->serial == serial) {
sctp_chunk_hold(ack);
return ack;
}
}
return NULL;
}
void sctp_asconf_queue_teardown(struct sctp_association *asoc)
{
/* Free any cached ASCONF_ACK chunk. */
sctp_assoc_free_asconf_acks(asoc);
/* Free the ASCONF queue. */
sctp_assoc_free_asconf_queue(asoc);
/* Free any cached ASCONF chunk. */
if (asoc->addip_last_asconf)
sctp_chunk_free(asoc->addip_last_asconf);
}
|
gpl-2.0
|
gabwerkz/bproj
|
drivers/video/xilinxfb.c
|
762
|
14069
|
/*
* Xilinx TFT frame buffer driver
*
* Author: MontaVista Software, Inc.
* source@mvista.com
*
* 2002-2007 (c) MontaVista Software, Inc.
* 2007 (c) Secret Lab Technologies, Ltd.
* 2009 (c) Xilinx Inc.
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
/*
* This driver was based on au1100fb.c by MontaVista rewritten for 2.6
* by Embedded Alley Solutions <source@embeddedalley.com>, which in turn
* was based on skeletonfb.c, Skeleton for a frame buffer device by
* Geert Uytterhoeven.
*/
#include <linux/device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/io.h>
#include <linux/xilinxfb.h>
#include <linux/slab.h>
#include <asm/dcr.h>
#define DRIVER_NAME "xilinxfb"
/*
* Xilinx calls it "PLB TFT LCD Controller" though it can also be used for
* the VGA port on the Xilinx ML40x board. This is a hardware display
* controller for a 640x480 resolution TFT or VGA screen.
*
* The interface to the framebuffer is nice and simple. There are two
* control registers. The first tells the LCD interface where in memory
* the frame buffer is (only the 11 most significant bits are used, so
* don't start thinking about scrolling). The second allows the LCD to
* be turned on or off as well as rotated 180 degrees.
*
* With direct PLB access the second control register sits at a byte
* offset of 4, whereas with DCR access it is register offset 1
* (REG_CTRL). xilinx_fb_out_be32() takes care of this by left-shifting
* the register offset by 2 (i.e. multiplying by 4) for direct PLB
* access.
*/
#define NUM_REGS 2
#define REG_FB_ADDR 0
#define REG_CTRL 1
#define REG_CTRL_ENABLE 0x0001
#define REG_CTRL_ROTATE 0x0002
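/*
* Worked example: with direct PLB access, REG_CTRL (register offset 1)
* lives at byte offset 1 << 2 = 4 from the register base; with DCR
* access it is addressed directly as DCR register 1.
*/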
/*
* The hardware only handles a single mode: 640x480 24 bit true
* color. Each pixel gets a word (32 bits) of memory. Within each word,
* the 8 most significant bits are ignored, the next 8 bits are the red
* level, the next 8 bits are the green level and the 8 least
* significant bits are the blue level. Each row of the LCD uses 1024
* words, but only the first 640 pixels are displayed with the other 384
* words being ignored. There are 480 rows.
*/
#define BYTES_PER_PIXEL 4
#define BITS_PER_PIXEL (BYTES_PER_PIXEL * 8)
#define RED_SHIFT 16
#define GREEN_SHIFT 8
#define BLUE_SHIFT 0
#define PALETTE_ENTRIES_NO 16 /* passed to fb_alloc_cmap() */
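/*
* Illustrative sketch (not part of the original driver): how one
* 32-bit framebuffer word is packed from 8-bit colour components with
* the shifts above. The helper name is hypothetical; the driver does
* the same packing inline in xilinx_fb_setcolreg().
*/
static inline u32 xilinxfb_pack_pixel_example(u32 red, u32 green, u32 blue)
{
return (red << RED_SHIFT) | (green << GREEN_SHIFT) | (blue << BLUE_SHIFT);
}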
/*
* Default xilinxfb configuration
*/
static struct xilinxfb_platform_data xilinx_fb_default_pdata = {
.xres = 640,
.yres = 480,
.xvirt = 1024,
.yvirt = 480,
};
/*
* Here are the default fb_fix_screeninfo and fb_var_screeninfo structures
*/
static struct fb_fix_screeninfo xilinx_fb_fix = {
.id = "Xilinx",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_TRUECOLOR,
.accel = FB_ACCEL_NONE
};
static struct fb_var_screeninfo xilinx_fb_var = {
.bits_per_pixel = BITS_PER_PIXEL,
.red = { RED_SHIFT, 8, 0 },
.green = { GREEN_SHIFT, 8, 0 },
.blue = { BLUE_SHIFT, 8, 0 },
.transp = { 0, 0, 0 },
.activate = FB_ACTIVATE_NOW
};
#define PLB_ACCESS_FLAG 0x1 /* 1 = PLB, 0 = DCR */
struct xilinxfb_drvdata {
struct fb_info info; /* FB driver info record */
phys_addr_t regs_phys; /* phys. address of the control
registers */
void __iomem *regs; /* virt. address of the control
registers */
dcr_host_t dcr_host;
unsigned int dcr_len;
void *fb_virt; /* virt. address of the frame buffer */
dma_addr_t fb_phys; /* phys. address of the frame buffer */
int fb_alloced; /* Flag, was the fb memory allocated? */
u8 flags; /* features of the driver */
u32 reg_ctrl_default;
u32 pseudo_palette[PALETTE_ENTRIES_NO];
/* Fake palette of 16 colors */
};
#define to_xilinxfb_drvdata(_info) \
container_of(_info, struct xilinxfb_drvdata, info)
/*
* The XPS TFT Controller can be accessed through the PLB or the DCR
* interface. To read or write the registers we need to check which
* bus it is connected to and call the appropriate access routine.
*/
static void xilinx_fb_out_be32(struct xilinxfb_drvdata *drvdata, u32 offset,
u32 val)
{
if (drvdata->flags & PLB_ACCESS_FLAG)
out_be32(drvdata->regs + (offset << 2), val);
else
dcr_write(drvdata->dcr_host, offset, val);
}
static int
xilinx_fb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue,
unsigned transp, struct fb_info *fbi)
{
u32 *palette = fbi->pseudo_palette;
if (regno >= PALETTE_ENTRIES_NO)
return -EINVAL;
if (fbi->var.grayscale) {
/* Convert color to grayscale.
* grayscale = 0.30*R + 0.59*G + 0.11*B */
red = green = blue =
(red * 77 + green * 151 + blue * 28 + 127) >> 8;
}
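/* The coefficients work because 77 + 151 + 28 = 256, so the >> 8
* rescales the weighted sum back to the original range; 77/256,
* 151/256 and 28/256 approximate the 0.30, 0.59 and 0.11 weights.
*/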
/* fbi->fix.visual is always FB_VISUAL_TRUECOLOR */
/* We only handle 8 bits of each color. */
red >>= 8;
green >>= 8;
blue >>= 8;
palette[regno] = (red << RED_SHIFT) | (green << GREEN_SHIFT) |
(blue << BLUE_SHIFT);
return 0;
}
static int
xilinx_fb_blank(int blank_mode, struct fb_info *fbi)
{
struct xilinxfb_drvdata *drvdata = to_xilinxfb_drvdata(fbi);
switch (blank_mode) {
case FB_BLANK_UNBLANK:
/* turn on panel */
xilinx_fb_out_be32(drvdata, REG_CTRL, drvdata->reg_ctrl_default);
break;
case FB_BLANK_NORMAL:
case FB_BLANK_VSYNC_SUSPEND:
case FB_BLANK_HSYNC_SUSPEND:
case FB_BLANK_POWERDOWN:
/* turn off panel */
xilinx_fb_out_be32(drvdata, REG_CTRL, 0);
break;
default:
break;
}
return 0; /* success */
}
static struct fb_ops xilinxfb_ops =
{
.owner = THIS_MODULE,
.fb_setcolreg = xilinx_fb_setcolreg,
.fb_blank = xilinx_fb_blank,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
};
/* ---------------------------------------------------------------------
* Bus independent setup/teardown
*/
static int xilinxfb_assign(struct device *dev,
struct xilinxfb_drvdata *drvdata,
unsigned long physaddr,
struct xilinxfb_platform_data *pdata)
{
int rc;
int fbsize = pdata->xvirt * pdata->yvirt * BYTES_PER_PIXEL;
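/* Worked example: the default 1024x480 virtual resolution at 4 bytes
* per pixel needs 1024 * 480 * 4 = 1966080 bytes of framebuffer
* memory, i.e. just under 2 MB.
*/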
if (drvdata->flags & PLB_ACCESS_FLAG) {
/*
* Map the control registers in if the controller
* is on direct PLB interface.
*/
if (!request_mem_region(physaddr, 8, DRIVER_NAME)) {
dev_err(dev, "Couldn't lock memory region at 0x%08lX\n",
physaddr);
rc = -ENODEV;
goto err_region;
}
drvdata->regs_phys = physaddr;
drvdata->regs = ioremap(physaddr, 8);
if (!drvdata->regs) {
dev_err(dev, "Couldn't lock memory region at 0x%08lX\n",
physaddr);
rc = -ENODEV;
goto err_map;
}
}
/* Allocate the framebuffer memory */
if (pdata->fb_phys) {
drvdata->fb_phys = pdata->fb_phys;
drvdata->fb_virt = ioremap(pdata->fb_phys, fbsize);
} else {
drvdata->fb_alloced = 1;
drvdata->fb_virt = dma_alloc_coherent(dev, PAGE_ALIGN(fbsize),
&drvdata->fb_phys, GFP_KERNEL);
}
if (!drvdata->fb_virt) {
dev_err(dev, "Could not allocate frame buffer memory\n");
rc = -ENOMEM;
if (drvdata->flags & PLB_ACCESS_FLAG)
goto err_fbmem;
else
goto err_region;
}
/* Clear (turn to black) the framebuffer */
memset_io((void __iomem *)drvdata->fb_virt, 0, fbsize);
/* Tell the hardware where the frame buffer is */
xilinx_fb_out_be32(drvdata, REG_FB_ADDR, drvdata->fb_phys);
/* Turn on the display */
drvdata->reg_ctrl_default = REG_CTRL_ENABLE;
if (pdata->rotate_screen)
drvdata->reg_ctrl_default |= REG_CTRL_ROTATE;
xilinx_fb_out_be32(drvdata, REG_CTRL,
drvdata->reg_ctrl_default);
/* Fill struct fb_info */
drvdata->info.device = dev;
drvdata->info.screen_base = (void __iomem *)drvdata->fb_virt;
drvdata->info.fbops = &xilinxfb_ops;
drvdata->info.fix = xilinx_fb_fix;
drvdata->info.fix.smem_start = drvdata->fb_phys;
drvdata->info.fix.smem_len = fbsize;
drvdata->info.fix.line_length = pdata->xvirt * BYTES_PER_PIXEL;
drvdata->info.pseudo_palette = drvdata->pseudo_palette;
drvdata->info.flags = FBINFO_DEFAULT;
drvdata->info.var = xilinx_fb_var;
drvdata->info.var.height = pdata->screen_height_mm;
drvdata->info.var.width = pdata->screen_width_mm;
drvdata->info.var.xres = pdata->xres;
drvdata->info.var.yres = pdata->yres;
drvdata->info.var.xres_virtual = pdata->xvirt;
drvdata->info.var.yres_virtual = pdata->yvirt;
/* Allocate a colour map */
rc = fb_alloc_cmap(&drvdata->info.cmap, PALETTE_ENTRIES_NO, 0);
if (rc) {
dev_err(dev, "Fail to allocate colormap (%d entries)\n",
PALETTE_ENTRIES_NO);
goto err_cmap;
}
/* Register new frame buffer */
rc = register_framebuffer(&drvdata->info);
if (rc) {
dev_err(dev, "Could not register frame buffer\n");
goto err_regfb;
}
if (drvdata->flags & PLB_ACCESS_FLAG) {
/* Put a banner in the log (for DEBUG) */
dev_dbg(dev, "regs: phys=%lx, virt=%p\n", physaddr,
drvdata->regs);
}
/* Put a banner in the log (for DEBUG) */
dev_dbg(dev, "fb: phys=%llx, virt=%p, size=%x\n",
(unsigned long long)drvdata->fb_phys, drvdata->fb_virt, fbsize);
return 0; /* success */
err_regfb:
fb_dealloc_cmap(&drvdata->info.cmap);
err_cmap:
if (drvdata->fb_alloced)
dma_free_coherent(dev, PAGE_ALIGN(fbsize), drvdata->fb_virt,
drvdata->fb_phys);
else
iounmap(drvdata->fb_virt);
/* Turn off the display */
xilinx_fb_out_be32(drvdata, REG_CTRL, 0);
err_fbmem:
if (drvdata->flags & PLB_ACCESS_FLAG)
iounmap(drvdata->regs);
err_map:
if (drvdata->flags & PLB_ACCESS_FLAG)
release_mem_region(physaddr, 8);
err_region:
kfree(drvdata);
dev_set_drvdata(dev, NULL);
return rc;
}
static int xilinxfb_release(struct device *dev)
{
struct xilinxfb_drvdata *drvdata = dev_get_drvdata(dev);
#if !defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_LOGO)
xilinx_fb_blank(VESA_POWERDOWN, &drvdata->info);
#endif
unregister_framebuffer(&drvdata->info);
fb_dealloc_cmap(&drvdata->info.cmap);
if (drvdata->fb_alloced)
dma_free_coherent(dev, PAGE_ALIGN(drvdata->info.fix.smem_len),
drvdata->fb_virt, drvdata->fb_phys);
else
iounmap(drvdata->fb_virt);
/* Turn off the display */
xilinx_fb_out_be32(drvdata, REG_CTRL, 0);
/* Release the resources, as allocated based on interface */
if (drvdata->flags & PLB_ACCESS_FLAG) {
iounmap(drvdata->regs);
release_mem_region(drvdata->regs_phys, 8);
} else
dcr_unmap(drvdata->dcr_host, drvdata->dcr_len);
kfree(drvdata);
dev_set_drvdata(dev, NULL);
return 0;
}
/* ---------------------------------------------------------------------
* OF bus binding
*/
static int __devinit
xilinxfb_of_probe(struct of_device *op, const struct of_device_id *match)
{
const u32 *prop;
u32 *p;
u32 tft_access;
struct xilinxfb_platform_data pdata;
struct resource res;
int size, rc, start;
struct xilinxfb_drvdata *drvdata;
/* Start from the default pdata (copied by value, not a ptr reference!) */
pdata = xilinx_fb_default_pdata;
dev_dbg(&op->dev, "xilinxfb_of_probe(%p, %p)\n", op, match);
/* Allocate the driver data region */
drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
if (!drvdata) {
dev_err(&op->dev, "Couldn't allocate device private record\n");
return -ENOMEM;
}
/*
* Check whether the core is connected directly to the DCR or the
* PLB interface and initialize tft_access accordingly.
*/
p = (u32 *)of_get_property(op->dev.of_node, "xlnx,dcr-splb-slave-if", NULL);
tft_access = p ? *p : 0;
/*
* Fill the resource structure if it is a direct PLB interface;
* otherwise fill the dcr_host structure.
*/
if (tft_access) {
drvdata->flags |= PLB_ACCESS_FLAG;
rc = of_address_to_resource(op->dev.of_node, 0, &res);
if (rc) {
dev_err(&op->dev, "invalid address\n");
goto err;
}
} else {
res.start = 0;
start = dcr_resource_start(op->dev.of_node, 0);
drvdata->dcr_len = dcr_resource_len(op->dev.of_node, 0);
drvdata->dcr_host = dcr_map(op->dev.of_node, start, drvdata->dcr_len);
if (!DCR_MAP_OK(drvdata->dcr_host)) {
dev_err(&op->dev, "invalid DCR address\n");
goto err;
}
}
prop = of_get_property(op->dev.of_node, "phys-size", &size);
if ((prop) && (size >= sizeof(u32)*2)) {
pdata.screen_width_mm = prop[0];
pdata.screen_height_mm = prop[1];
}
prop = of_get_property(op->dev.of_node, "resolution", &size);
if ((prop) && (size >= sizeof(u32)*2)) {
pdata.xres = prop[0];
pdata.yres = prop[1];
}
prop = of_get_property(op->dev.of_node, "virtual-resolution", &size);
if ((prop) && (size >= sizeof(u32)*2)) {
pdata.xvirt = prop[0];
pdata.yvirt = prop[1];
}
if (of_find_property(op->dev.of_node, "rotate-display", NULL))
pdata.rotate_screen = 1;
dev_set_drvdata(&op->dev, drvdata);
return xilinxfb_assign(&op->dev, drvdata, res.start, &pdata);
err:
kfree(drvdata);
return -ENODEV;
}
static int __devexit xilinxfb_of_remove(struct of_device *op)
{
return xilinxfb_release(&op->dev);
}
/* Match table for of_platform binding */
static struct of_device_id xilinxfb_of_match[] __devinitdata = {
{ .compatible = "xlnx,xps-tft-1.00.a", },
{ .compatible = "xlnx,plb-tft-cntlr-ref-1.00.a", },
{ .compatible = "xlnx,plb-dvi-cntlr-ref-1.00.c", },
{},
};
MODULE_DEVICE_TABLE(of, xilinxfb_of_match);
static struct of_platform_driver xilinxfb_of_driver = {
.probe = xilinxfb_of_probe,
.remove = __devexit_p(xilinxfb_of_remove),
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
.of_match_table = xilinxfb_of_match,
},
};
/* ---------------------------------------------------------------------
* Module setup and teardown
*/
static int __init
xilinxfb_init(void)
{
return of_register_platform_driver(&xilinxfb_of_driver);
}
static void __exit
xilinxfb_cleanup(void)
{
of_unregister_platform_driver(&xilinxfb_of_driver);
}
module_init(xilinxfb_init);
module_exit(xilinxfb_cleanup);
MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
MODULE_DESCRIPTION("Xilinx TFT frame buffer driver");
MODULE_LICENSE("GPL");
|
gpl-2.0
|
mstfkaratas/kernel_htc_msm7227
|
fs/cifs/cifsencrypt.c
|
762
|
12826
|
/*
* fs/cifs/cifsencrypt.c
*
* Copyright (C) International Business Machines Corp., 2005,2006
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
#include <linux/slab.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifs_debug.h"
#include "md5.h"
#include "cifs_unicode.h"
#include "cifsproto.h"
#include <linux/ctype.h>
#include <linux/random.h>
/* Calculate and return the CIFS signature based on the mac key and SMB PDU */
/* the 16 byte signature must be allocated by the caller */
/* Note we only use the first eight bytes */
/* Note that the smb header signature field on input contains the
sequence number before this function is called */
extern void mdfour(unsigned char *out, unsigned char *in, int n);
extern void E_md4hash(const unsigned char *passwd, unsigned char *p16);
extern void SMBencrypt(unsigned char *passwd, const unsigned char *c8,
unsigned char *p24);
static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu,
const struct mac_key *key, char *signature)
{
struct MD5Context context;
if ((cifs_pdu == NULL) || (signature == NULL) || (key == NULL))
return -EINVAL;
cifs_MD5_init(&context);
cifs_MD5_update(&context, (char *)&key->data, key->len);
cifs_MD5_update(&context, cifs_pdu->Protocol, cifs_pdu->smb_buf_length);
cifs_MD5_final(signature, &context);
return 0;
}
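/* In other words: signature = MD5(mac_key || entire SMB PDU), with
* the PDU's signature field holding the sequence number at hash time;
* only the first 8 of the 16 MD5 output bytes go on the wire.
*/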
int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
__u32 *pexpected_response_sequence_number)
{
int rc = 0;
char smb_signature[20];
if ((cifs_pdu == NULL) || (server == NULL))
return -EINVAL;
if ((cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) == 0)
return rc;
spin_lock(&GlobalMid_Lock);
cifs_pdu->Signature.Sequence.SequenceNumber =
cpu_to_le32(server->sequence_number);
cifs_pdu->Signature.Sequence.Reserved = 0;
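/* Each signed exchange consumes two sequence numbers, one for this
* request and one for the server's signed response, hence the
* counter is advanced twice below.
*/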
*pexpected_response_sequence_number = server->sequence_number++;
server->sequence_number++;
spin_unlock(&GlobalMid_Lock);
rc = cifs_calculate_signature(cifs_pdu, &server->mac_signing_key,
smb_signature);
if (rc)
memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
else
memcpy(cifs_pdu->Signature.SecuritySignature, smb_signature, 8);
return rc;
}
static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
const struct mac_key *key, char *signature)
{
struct MD5Context context;
int i;
if ((iov == NULL) || (signature == NULL) || (key == NULL))
return -EINVAL;
cifs_MD5_init(&context);
cifs_MD5_update(&context, (char *)&key->data, key->len);
for (i = 0; i < n_vec; i++) {
if (iov[i].iov_len == 0)
continue;
if (iov[i].iov_base == NULL) {
cERROR(1, "null iovec entry");
return -EIO;
}
/* The first entry includes a length field (which does not get
signed) that occupies the first 4 bytes before the header */
if (i == 0) {
if (iov[0].iov_len <= 8) /* cmd field at offset 9 */
break; /* nothing to sign or corrupt header */
cifs_MD5_update(&context, iov[0].iov_base+4,
iov[0].iov_len-4);
} else
cifs_MD5_update(&context, iov[i].iov_base, iov[i].iov_len);
}
cifs_MD5_final(signature, &context);
return 0;
}
int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
__u32 *pexpected_response_sequence_number)
{
int rc = 0;
char smb_signature[20];
struct smb_hdr *cifs_pdu = iov[0].iov_base;
if ((cifs_pdu == NULL) || (server == NULL))
return -EINVAL;
if ((cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) == 0)
return rc;
spin_lock(&GlobalMid_Lock);
cifs_pdu->Signature.Sequence.SequenceNumber =
cpu_to_le32(server->sequence_number);
cifs_pdu->Signature.Sequence.Reserved = 0;
*pexpected_response_sequence_number = server->sequence_number++;
server->sequence_number++;
spin_unlock(&GlobalMid_Lock);
rc = cifs_calc_signature2(iov, n_vec, &server->mac_signing_key,
smb_signature);
if (rc)
memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
else
memcpy(cifs_pdu->Signature.SecuritySignature, smb_signature, 8);
return rc;
}
int cifs_verify_signature(struct smb_hdr *cifs_pdu,
const struct mac_key *mac_key,
__u32 expected_sequence_number)
{
int rc;
char server_response_sig[8];
char what_we_think_sig_should_be[20];
if ((cifs_pdu == NULL) || (mac_key == NULL))
return -EINVAL;
if (cifs_pdu->Command == SMB_COM_NEGOTIATE)
return 0;
if (cifs_pdu->Command == SMB_COM_LOCKING_ANDX) {
struct smb_com_lock_req *pSMB =
(struct smb_com_lock_req *)cifs_pdu;
if (pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE)
return 0;
}
/* BB what if signatures are supposed to be on for session but
server does not send one? BB */
/* Do not need to verify session setups with signature "BSRSPYL " */
if (memcmp(cifs_pdu->Signature.SecuritySignature, "BSRSPYL ", 8) == 0)
cFYI(1, "dummy signature received for smb command 0x%x",
cifs_pdu->Command);
/* save off the original signature so we can modify the smb and check
its signature against what the server sent */
memcpy(server_response_sig, cifs_pdu->Signature.SecuritySignature, 8);
cifs_pdu->Signature.Sequence.SequenceNumber =
cpu_to_le32(expected_sequence_number);
cifs_pdu->Signature.Sequence.Reserved = 0;
rc = cifs_calculate_signature(cifs_pdu, mac_key,
what_we_think_sig_should_be);
if (rc)
return rc;
/* cifs_dump_mem("what we think it should be: ",
what_we_think_sig_should_be, 16); */
if (memcmp(server_response_sig, what_we_think_sig_should_be, 8))
return -EACCES;
else
return 0;
}
/* We fill in the key by writing into the 40-byte array allocated by the caller */
int cifs_calculate_mac_key(struct mac_key *key, const char *rn,
const char *password)
{
char temp_key[16];
if ((key == NULL) || (rn == NULL))
return -EINVAL;
E_md4hash(password, temp_key);
mdfour(key->data.ntlm, temp_key, 16);
memcpy(key->data.ntlm+16, rn, CIFS_SESS_KEY_SIZE);
key->len = 40;
return 0;
}
int CalcNTLMv2_partial_mac_key(struct cifsSesInfo *ses,
const struct nls_table *nls_info)
{
char temp_hash[16];
struct HMACMD5Context ctx;
char *ucase_buf;
__le16 *unicode_buf;
unsigned int i, user_name_len, dom_name_len;
if (ses == NULL)
return -EINVAL;
E_md4hash(ses->password, temp_hash);
hmac_md5_init_limK_to_64(temp_hash, 16, &ctx);
user_name_len = strlen(ses->userName);
if (user_name_len > MAX_USERNAME_SIZE)
return -EINVAL;
if (ses->domainName == NULL)
return -EINVAL; /* BB should we use CIFS_LINUX_DOM */
dom_name_len = strlen(ses->domainName);
if (dom_name_len > MAX_USERNAME_SIZE)
return -EINVAL;
ucase_buf = kmalloc((MAX_USERNAME_SIZE+1), GFP_KERNEL);
if (ucase_buf == NULL)
return -ENOMEM;
unicode_buf = kmalloc((MAX_USERNAME_SIZE+1)*4, GFP_KERNEL);
if (unicode_buf == NULL) {
kfree(ucase_buf);
return -ENOMEM;
}
for (i = 0; i < user_name_len; i++)
ucase_buf[i] = nls_info->charset2upper[(int)ses->userName[i]];
ucase_buf[i] = 0;
user_name_len = cifs_strtoUCS(unicode_buf, ucase_buf,
MAX_USERNAME_SIZE*2, nls_info);
unicode_buf[user_name_len] = 0;
user_name_len++;
for (i = 0; i < dom_name_len; i++)
ucase_buf[i] = nls_info->charset2upper[(int)ses->domainName[i]];
ucase_buf[i] = 0;
dom_name_len = cifs_strtoUCS(unicode_buf+user_name_len, ucase_buf,
MAX_USERNAME_SIZE*2, nls_info);
unicode_buf[user_name_len + dom_name_len] = 0;
hmac_md5_update((const unsigned char *) unicode_buf,
(user_name_len+dom_name_len)*2, &ctx);
hmac_md5_final(ses->server->ntlmv2_hash, &ctx);
kfree(ucase_buf);
kfree(unicode_buf);
return 0;
}
#ifdef CONFIG_CIFS_WEAK_PW_HASH
void calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt,
char *lnm_session_key)
{
int i;
char password_with_pad[CIFS_ENCPWD_SIZE];
memset(password_with_pad, 0, CIFS_ENCPWD_SIZE);
if (password)
strncpy(password_with_pad, password, CIFS_ENCPWD_SIZE);
if (!encrypt && global_secflags & CIFSSEC_MAY_PLNTXT) {
memset(lnm_session_key, 0, CIFS_SESS_KEY_SIZE);
memcpy(lnm_session_key, password_with_pad,
CIFS_ENCPWD_SIZE);
return;
}
/* calculate old style session key */
/* Calling toupper is less broken than repeatedly calling
nls_toupper would be, since the latter will never work for UTF8;
but neither handles multibyte code pages. The only alternative
would be converting to UCS-16 (Unicode) with a routine like
UniStrupr, uppercasing, and then converting back from Unicode -
which would only be worth doing if we knew the input were utf8.
Basically utf8 and other multibyte codepages each need their own
strupper function, since working a byte at a time will not work. */
for (i = 0; i < CIFS_ENCPWD_SIZE; i++)
password_with_pad[i] = toupper(password_with_pad[i]);
SMBencrypt(password_with_pad, cryptkey, lnm_session_key);
/* clear password before we return/free memory */
memset(password_with_pad, 0, CIFS_ENCPWD_SIZE);
}
#endif /* CIFS_WEAK_PW_HASH */
static int calc_ntlmv2_hash(struct cifsSesInfo *ses,
const struct nls_table *nls_cp)
{
int rc = 0;
int len;
char nt_hash[16];
struct HMACMD5Context *pctxt;
wchar_t *user;
wchar_t *domain;
pctxt = kmalloc(sizeof(struct HMACMD5Context), GFP_KERNEL);
if (pctxt == NULL)
return -ENOMEM;
/* calculate md4 hash of password */
E_md4hash(ses->password, nt_hash);
/* convert Domainname to unicode and uppercase */
hmac_md5_init_limK_to_64(nt_hash, 16, pctxt);
/* convert ses->userName to unicode and uppercase */
len = strlen(ses->userName);
user = kmalloc(2 + (len * 2), GFP_KERNEL);
if (user == NULL)
goto calc_exit_2;
len = cifs_strtoUCS((__le16 *)user, ses->userName, len, nls_cp);
UniStrupr(user);
hmac_md5_update((char *)user, 2*len, pctxt);
/* convert ses->domainName to unicode and uppercase */
if (ses->domainName) {
len = strlen(ses->domainName);
domain = kmalloc(2 + (len * 2), GFP_KERNEL);
		if (domain == NULL) {
			rc = -ENOMEM;
			goto calc_exit_1;
		}
len = cifs_strtoUCS((__le16 *)domain, ses->domainName, len,
nls_cp);
		/* the following line was removed since it did not work well
		with a lower-cased domain name that was passed in as an option.
		Maybe converting the domain name earlier makes sense */
/* UniStrupr(domain); */
hmac_md5_update((char *)domain, 2*len, pctxt);
kfree(domain);
}
calc_exit_1:
kfree(user);
calc_exit_2:
/* BB FIXME what about bytes 24 through 40 of the signing key?
compare with the NTLM example */
hmac_md5_final(ses->server->ntlmv2_hash, pctxt);
kfree(pctxt);
return rc;
}
void setup_ntlmv2_rsp(struct cifsSesInfo *ses, char *resp_buf,
const struct nls_table *nls_cp)
{
int rc;
struct ntlmv2_resp *buf = (struct ntlmv2_resp *)resp_buf;
struct HMACMD5Context context;
buf->blob_signature = cpu_to_le32(0x00000101);
buf->reserved = 0;
buf->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
get_random_bytes(&buf->client_chal, sizeof(buf->client_chal));
buf->reserved2 = 0;
buf->names[0].type = cpu_to_le16(NTLMSSP_DOMAIN_TYPE);
buf->names[0].length = 0;
buf->names[1].type = 0;
buf->names[1].length = 0;
/* calculate buf->ntlmv2_hash */
rc = calc_ntlmv2_hash(ses, nls_cp);
if (rc)
cERROR(1, "could not get v2 hash rc %d", rc);
CalcNTLMv2_response(ses, resp_buf);
/* now calculate the MAC key for NTLMv2 */
hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context);
hmac_md5_update(resp_buf, 16, &context);
hmac_md5_final(ses->server->mac_signing_key.data.ntlmv2.key, &context);
memcpy(&ses->server->mac_signing_key.data.ntlmv2.resp, resp_buf,
sizeof(struct ntlmv2_resp));
ses->server->mac_signing_key.len = 16 + sizeof(struct ntlmv2_resp);
}
void CalcNTLMv2_response(const struct cifsSesInfo *ses,
char *v2_session_response)
{
struct HMACMD5Context context;
/* rest of v2 struct already generated */
memcpy(v2_session_response + 8, ses->server->cryptKey, 8);
hmac_md5_init_limK_to_64(ses->server->ntlmv2_hash, 16, &context);
hmac_md5_update(v2_session_response+8,
sizeof(struct ntlmv2_resp) - 8, &context);
hmac_md5_final(v2_session_response, &context);
/* cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); */
}
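/*
 * Added illustration (not part of the original source): on the wire the
 * NTLMv2 response is MAC(16 bytes) || blob, where
 * MAC = HMAC-MD5(ntlmv2_hash, server_challenge(8 bytes) || blob).
 * CalcNTLMv2_response() stages the challenge at offset 8 so one contiguous
 * HMAC pass covers challenge+blob, then overwrites bytes 0-15 with the MAC.
 * A userspace sketch of the same computation, assuming OpenSSL 1.1+:
 */
#if 0
#include <openssl/evp.h>
#include <openssl/hmac.h>

static void ntlmv2_mac_sketch(const unsigned char v2_hash[16],
			      const unsigned char challenge[8],
			      const unsigned char *blob, size_t blob_len,
			      unsigned char mac[16])
{
	HMAC_CTX *ctx = HMAC_CTX_new();
	unsigned int len = 16;

	HMAC_Init_ex(ctx, v2_hash, 16, EVP_md5(), NULL);
	HMAC_Update(ctx, challenge, 8);
	HMAC_Update(ctx, blob, blob_len);
	HMAC_Final(ctx, mac, &len);
	HMAC_CTX_free(ctx);
}
#endif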
|
gpl-2.0
|
rhcp011235/sch-i405_kernel
|
drivers/pcmcia/rsrc_iodyn.c
|
762
|
3849
|
/*
 * rsrc_iodyn.c -- Resource management routines for IO-dynamic sockets.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* The initial developer of the original code is David A. Hinds
* <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
* are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
*
* (C) 1999 David A. Hinds
*/
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include "cs_internal.h"
struct pcmcia_align_data {
unsigned long mask;
unsigned long offset;
};
static resource_size_t pcmcia_align(void *align_data,
const struct resource *res,
resource_size_t size, resource_size_t align)
{
struct pcmcia_align_data *data = align_data;
resource_size_t start;
start = (res->start & ~data->mask) + data->offset;
if (start < res->start)
start += data->mask + 1;
#ifdef CONFIG_X86
if (res->flags & IORESOURCE_IO) {
if (start & 0x300)
start = (start + 0x3ff) & ~0x3ff;
}
#endif
#ifdef CONFIG_M68K
if (res->flags & IORESOURCE_IO) {
if ((res->start + size - 1) >= 1024)
start = res->end;
}
#endif
return start;
}
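/*
 * Added illustration (not part of the original source): pcmcia_align()
 * forces the low bits of the candidate address to equal "offset" modulo
 * the alignment, bumping up one alignment unit if that lands below the
 * window.  A standalone sketch of the arithmetic with hypothetical values:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long base = 0x3e8, align = 0x100;	/* hypothetical */
	unsigned long mask = align - 1, offset = base & mask;
	unsigned long res_start = 0x400;
	unsigned long start = (res_start & ~mask) + offset;

	if (start < res_start)	/* fell below the window: bump up */
		start += mask + 1;
	printf("aligned start = 0x%lx\n", start);	/* prints 0x4e8 */
	return 0;
}
#endif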
static struct resource *__iodyn_find_io_region(struct pcmcia_socket *s,
unsigned long base, int num,
unsigned long align)
{
struct resource *res = pcmcia_make_resource(0, num, IORESOURCE_IO,
dev_name(&s->dev));
struct pcmcia_align_data data;
unsigned long min = base;
	int ret;

	if (!res)
		return NULL;

	data.mask = align - 1;
	data.offset = base & data.mask;
#ifdef CONFIG_PCI
if (s->cb_dev) {
ret = pci_bus_alloc_resource(s->cb_dev->bus, res, num, 1,
min, 0, pcmcia_align, &data);
} else
#endif
ret = allocate_resource(&ioport_resource, res, num, min, ~0UL,
1, pcmcia_align, &data);
if (ret != 0) {
kfree(res);
res = NULL;
}
return res;
}
static int iodyn_find_io(struct pcmcia_socket *s, unsigned int attr,
unsigned int *base, unsigned int num,
unsigned int align)
{
int i, ret = 0;
/* Check for an already-allocated window that must conflict with
* what was asked for. It is a hack because it does not catch all
* potential conflicts, just the most obvious ones.
*/
for (i = 0; i < MAX_IO_WIN; i++) {
if (!s->io[i].res)
continue;
if (!*base)
continue;
if ((s->io[i].res->start & (align-1)) == *base)
return -EBUSY;
}
for (i = 0; i < MAX_IO_WIN; i++) {
struct resource *res = s->io[i].res;
unsigned int try;
if (res && (res->flags & IORESOURCE_BITS) !=
(attr & IORESOURCE_BITS))
continue;
if (!res) {
if (align == 0)
align = 0x10000;
res = s->io[i].res = __iodyn_find_io_region(s, *base,
num, align);
if (!res)
return -EINVAL;
*base = res->start;
s->io[i].res->flags =
((res->flags & ~IORESOURCE_BITS) |
(attr & IORESOURCE_BITS));
s->io[i].InUse = num;
return 0;
}
/* Try to extend top of window */
try = res->end + 1;
if ((*base == 0) || (*base == try)) {
if (adjust_resource(s->io[i].res, res->start,
res->end - res->start + num + 1))
continue;
*base = try;
s->io[i].InUse += num;
return 0;
}
/* Try to extend bottom of window */
try = res->start - num;
if ((*base == 0) || (*base == try)) {
if (adjust_resource(s->io[i].res,
res->start - num,
res->end - res->start + num + 1))
continue;
*base = try;
s->io[i].InUse += num;
return 0;
}
}
return -EINVAL;
}
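/*
 * Added illustration (not part of the original source): for an existing
 * window [start, end], iodyn_find_io() grants "num" more ports either at
 * end + 1 (extend the top) or at start - num (extend the bottom); in both
 * cases the adjusted window length is the old length plus num.  A tiny
 * standalone check of that arithmetic:
 */
#if 0
#include <assert.h>

int main(void)
{
	unsigned long start = 0x300, end = 0x31f, num = 0x10;
	unsigned long new_size = end - start + num + 1;	/* as in the driver */

	assert(new_size == 0x30);	/* 0x20 old ports + 0x10 new */
	assert(end + 1 == 0x320);	/* base when extending the top */
	assert(start - num == 0x2f0);	/* base when extending the bottom */
	return 0;
}
#endif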
struct pccard_resource_ops pccard_iodyn_ops = {
.validate_mem = NULL,
.find_io = iodyn_find_io,
.find_mem = NULL,
.add_io = NULL,
.add_mem = NULL,
.init = static_init,
.exit = NULL,
};
EXPORT_SYMBOL(pccard_iodyn_ops);
|
gpl-2.0
|
hwoarang/linux
|
drivers/usb/core/sysfs.c
|
762
|
25308
|
/*
* drivers/usb/core/sysfs.c
*
* (C) Copyright 2002 David Brownell
* (C) Copyright 2002,2004 Greg Kroah-Hartman
* (C) Copyright 2002,2004 IBM Corp.
*
* All of the sysfs file attributes for usb devices and interfaces.
*
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/usb.h>
#include <linux/usb/quirks.h>
#include "usb.h"
/* Active configuration fields */
#define usb_actconfig_show(field, format_string) \
static ssize_t field##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct usb_device *udev; \
struct usb_host_config *actconfig; \
ssize_t rc = 0; \
\
udev = to_usb_device(dev); \
usb_lock_device(udev); \
actconfig = udev->actconfig; \
if (actconfig) \
rc = sprintf(buf, format_string, \
actconfig->desc.field); \
usb_unlock_device(udev); \
return rc; \
}
#define usb_actconfig_attr(field, format_string) \
usb_actconfig_show(field, format_string) \
static DEVICE_ATTR_RO(field)
usb_actconfig_attr(bNumInterfaces, "%2d\n");
usb_actconfig_attr(bmAttributes, "%2x\n");
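/*
 * Added illustration (not part of the original source): for reference,
 * usb_actconfig_attr(bNumInterfaces, "%2d\n") expands to roughly the
 * following show routine plus a read-only device attribute:
 */
#if 0
static ssize_t bNumInterfaces_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct usb_device *udev;
	struct usb_host_config *actconfig;
	ssize_t rc = 0;

	udev = to_usb_device(dev);
	usb_lock_device(udev);
	actconfig = udev->actconfig;
	if (actconfig)
		rc = sprintf(buf, "%2d\n", actconfig->desc.bNumInterfaces);
	usb_unlock_device(udev);
	return rc;
}
static DEVICE_ATTR_RO(bNumInterfaces);
#endif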
static ssize_t bMaxPower_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_device *udev;
struct usb_host_config *actconfig;
ssize_t rc = 0;
udev = to_usb_device(dev);
usb_lock_device(udev);
actconfig = udev->actconfig;
if (actconfig)
rc = sprintf(buf, "%dmA\n", usb_get_max_power(udev, actconfig));
usb_unlock_device(udev);
return rc;
}
static DEVICE_ATTR_RO(bMaxPower);
static ssize_t configuration_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_device *udev;
struct usb_host_config *actconfig;
ssize_t rc = 0;
udev = to_usb_device(dev);
usb_lock_device(udev);
actconfig = udev->actconfig;
if (actconfig && actconfig->string)
rc = sprintf(buf, "%s\n", actconfig->string);
usb_unlock_device(udev);
return rc;
}
static DEVICE_ATTR_RO(configuration);
/* configuration value is always present, and r/w */
usb_actconfig_show(bConfigurationValue, "%u\n");
static ssize_t bConfigurationValue_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
int config, value;
if (sscanf(buf, "%d", &config) != 1 || config < -1 || config > 255)
return -EINVAL;
usb_lock_device(udev);
value = usb_set_configuration(udev, config);
usb_unlock_device(udev);
return (value < 0) ? value : count;
}
static DEVICE_ATTR_IGNORE_LOCKDEP(bConfigurationValue, S_IRUGO | S_IWUSR,
bConfigurationValue_show, bConfigurationValue_store);
/* String fields */
#define usb_string_attr(name) \
static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct usb_device *udev; \
int retval; \
\
udev = to_usb_device(dev); \
usb_lock_device(udev); \
retval = sprintf(buf, "%s\n", udev->name); \
usb_unlock_device(udev); \
return retval; \
} \
static DEVICE_ATTR_RO(name)
usb_string_attr(product);
usb_string_attr(manufacturer);
usb_string_attr(serial);
static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_device *udev;
char *speed;
udev = to_usb_device(dev);
switch (udev->speed) {
case USB_SPEED_LOW:
speed = "1.5";
break;
case USB_SPEED_UNKNOWN:
case USB_SPEED_FULL:
speed = "12";
break;
case USB_SPEED_HIGH:
speed = "480";
break;
case USB_SPEED_WIRELESS:
speed = "480";
break;
case USB_SPEED_SUPER:
speed = "5000";
break;
default:
speed = "unknown";
}
return sprintf(buf, "%s\n", speed);
}
static DEVICE_ATTR_RO(speed);
static ssize_t busnum_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_device *udev;
udev = to_usb_device(dev);
return sprintf(buf, "%d\n", udev->bus->busnum);
}
static DEVICE_ATTR_RO(busnum);
static ssize_t devnum_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_device *udev;
udev = to_usb_device(dev);
return sprintf(buf, "%d\n", udev->devnum);
}
static DEVICE_ATTR_RO(devnum);
static ssize_t devpath_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_device *udev;
udev = to_usb_device(dev);
return sprintf(buf, "%s\n", udev->devpath);
}
static DEVICE_ATTR_RO(devpath);
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_device *udev;
u16 bcdUSB;
udev = to_usb_device(dev);
bcdUSB = le16_to_cpu(udev->descriptor.bcdUSB);
return sprintf(buf, "%2x.%02x\n", bcdUSB >> 8, bcdUSB & 0xff);
}
static DEVICE_ATTR_RO(version);
static ssize_t maxchild_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_device *udev;
udev = to_usb_device(dev);
return sprintf(buf, "%d\n", udev->maxchild);
}
static DEVICE_ATTR_RO(maxchild);
static ssize_t quirks_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_device *udev;
udev = to_usb_device(dev);
return sprintf(buf, "0x%x\n", udev->quirks);
}
static DEVICE_ATTR_RO(quirks);
static ssize_t avoid_reset_quirk_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_device *udev;
udev = to_usb_device(dev);
return sprintf(buf, "%d\n", !!(udev->quirks & USB_QUIRK_RESET));
}
static ssize_t avoid_reset_quirk_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
int val;
if (sscanf(buf, "%d", &val) != 1 || val < 0 || val > 1)
return -EINVAL;
usb_lock_device(udev);
if (val)
udev->quirks |= USB_QUIRK_RESET;
else
udev->quirks &= ~USB_QUIRK_RESET;
usb_unlock_device(udev);
return count;
}
static DEVICE_ATTR_RW(avoid_reset_quirk);
static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_device *udev;
udev = to_usb_device(dev);
return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
}
static DEVICE_ATTR_RO(urbnum);
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_device *udev;
char *state;
udev = to_usb_device(dev);
switch (udev->removable) {
case USB_DEVICE_REMOVABLE:
state = "removable";
break;
case USB_DEVICE_FIXED:
state = "fixed";
break;
default:
state = "unknown";
}
return sprintf(buf, "%s\n", state);
}
static DEVICE_ATTR_RO(removable);
static ssize_t ltm_capable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
if (usb_device_supports_ltm(to_usb_device(dev)))
return sprintf(buf, "%s\n", "yes");
return sprintf(buf, "%s\n", "no");
}
static DEVICE_ATTR_RO(ltm_capable);
#ifdef CONFIG_PM
static ssize_t persist_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_device *udev = to_usb_device(dev);
return sprintf(buf, "%d\n", udev->persist_enabled);
}
static ssize_t persist_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
int value;
/* Hubs are always enabled for USB_PERSIST */
if (udev->descriptor.bDeviceClass == USB_CLASS_HUB)
return -EPERM;
if (sscanf(buf, "%d", &value) != 1)
return -EINVAL;
usb_lock_device(udev);
udev->persist_enabled = !!value;
usb_unlock_device(udev);
return count;
}
static DEVICE_ATTR_RW(persist);
static int add_persist_attributes(struct device *dev)
{
int rc = 0;
if (is_usb_device(dev)) {
struct usb_device *udev = to_usb_device(dev);
/* Hubs are automatically enabled for USB_PERSIST,
* no point in creating the attribute file.
*/
if (udev->descriptor.bDeviceClass != USB_CLASS_HUB)
rc = sysfs_add_file_to_group(&dev->kobj,
&dev_attr_persist.attr,
power_group_name);
}
return rc;
}
static void remove_persist_attributes(struct device *dev)
{
sysfs_remove_file_from_group(&dev->kobj,
&dev_attr_persist.attr,
power_group_name);
}
#else
#define add_persist_attributes(dev) 0
#define remove_persist_attributes(dev) do {} while (0)
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_RUNTIME
static ssize_t connected_duration_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_device *udev = to_usb_device(dev);
return sprintf(buf, "%u\n",
jiffies_to_msecs(jiffies - udev->connect_time));
}
static DEVICE_ATTR_RO(connected_duration);
/*
* If the device is resumed, the last time the device was suspended has
* been pre-subtracted from active_duration. We add the current time to
* get the duration that the device was actually active.
*
* If the device is suspended, the active_duration is up-to-date.
*/
static ssize_t active_duration_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_device *udev = to_usb_device(dev);
int duration;
if (udev->state != USB_STATE_SUSPENDED)
duration = jiffies_to_msecs(jiffies + udev->active_duration);
else
duration = jiffies_to_msecs(udev->active_duration);
return sprintf(buf, "%u\n", duration);
}
static DEVICE_ATTR_RO(active_duration);
static ssize_t autosuspend_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", dev->power.autosuspend_delay / 1000);
}
static ssize_t autosuspend_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
int value;
if (sscanf(buf, "%d", &value) != 1 || value >= INT_MAX/1000 ||
value <= -INT_MAX/1000)
return -EINVAL;
pm_runtime_set_autosuspend_delay(dev, value * 1000);
return count;
}
static DEVICE_ATTR_RW(autosuspend);
static const char on_string[] = "on";
static const char auto_string[] = "auto";
static void warn_level(void)
{
static int level_warned;
if (!level_warned) {
level_warned = 1;
printk(KERN_WARNING "WARNING! power/level is deprecated; "
"use power/control instead\n");
}
}
static ssize_t level_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_device *udev = to_usb_device(dev);
const char *p = auto_string;
warn_level();
if (udev->state != USB_STATE_SUSPENDED && !udev->dev.power.runtime_auto)
p = on_string;
return sprintf(buf, "%s\n", p);
}
static ssize_t level_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
int len = count;
char *cp;
int rc = count;
warn_level();
cp = memchr(buf, '\n', count);
if (cp)
len = cp - buf;
usb_lock_device(udev);
if (len == sizeof on_string - 1 &&
strncmp(buf, on_string, len) == 0)
usb_disable_autosuspend(udev);
else if (len == sizeof auto_string - 1 &&
strncmp(buf, auto_string, len) == 0)
usb_enable_autosuspend(udev);
else
rc = -EINVAL;
usb_unlock_device(udev);
return rc;
}
static DEVICE_ATTR_RW(level);
static ssize_t usb2_hardware_lpm_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_device *udev = to_usb_device(dev);
const char *p;
if (udev->usb2_hw_lpm_allowed == 1)
p = "enabled";
else
p = "disabled";
return sprintf(buf, "%s\n", p);
}
static ssize_t usb2_hardware_lpm_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
bool value;
int ret;
usb_lock_device(udev);
ret = strtobool(buf, &value);
if (!ret) {
udev->usb2_hw_lpm_allowed = value;
ret = usb_set_usb2_hardware_lpm(udev, value);
}
usb_unlock_device(udev);
if (!ret)
return count;
return ret;
}
static DEVICE_ATTR_RW(usb2_hardware_lpm);
static ssize_t usb2_lpm_l1_timeout_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct usb_device *udev = to_usb_device(dev);
return sprintf(buf, "%d\n", udev->l1_params.timeout);
}
static ssize_t usb2_lpm_l1_timeout_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
u16 timeout;
if (kstrtou16(buf, 0, &timeout))
return -EINVAL;
udev->l1_params.timeout = timeout;
return count;
}
static DEVICE_ATTR_RW(usb2_lpm_l1_timeout);
static ssize_t usb2_lpm_besl_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_device *udev = to_usb_device(dev);
return sprintf(buf, "%d\n", udev->l1_params.besl);
}
static ssize_t usb2_lpm_besl_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
u8 besl;
if (kstrtou8(buf, 0, &besl) || besl > 15)
return -EINVAL;
udev->l1_params.besl = besl;
return count;
}
static DEVICE_ATTR_RW(usb2_lpm_besl);
static struct attribute *usb2_hardware_lpm_attr[] = {
&dev_attr_usb2_hardware_lpm.attr,
&dev_attr_usb2_lpm_l1_timeout.attr,
&dev_attr_usb2_lpm_besl.attr,
NULL,
};
static struct attribute_group usb2_hardware_lpm_attr_group = {
.name = power_group_name,
.attrs = usb2_hardware_lpm_attr,
};
static struct attribute *power_attrs[] = {
&dev_attr_autosuspend.attr,
&dev_attr_level.attr,
&dev_attr_connected_duration.attr,
&dev_attr_active_duration.attr,
NULL,
};
static struct attribute_group power_attr_group = {
.name = power_group_name,
.attrs = power_attrs,
};
static int add_power_attributes(struct device *dev)
{
int rc = 0;
if (is_usb_device(dev)) {
struct usb_device *udev = to_usb_device(dev);
rc = sysfs_merge_group(&dev->kobj, &power_attr_group);
if (udev->usb2_hw_lpm_capable == 1)
rc = sysfs_merge_group(&dev->kobj,
&usb2_hardware_lpm_attr_group);
}
return rc;
}
static void remove_power_attributes(struct device *dev)
{
sysfs_unmerge_group(&dev->kobj, &usb2_hardware_lpm_attr_group);
sysfs_unmerge_group(&dev->kobj, &power_attr_group);
}
#else
#define add_power_attributes(dev) 0
#define remove_power_attributes(dev) do {} while (0)
#endif /* CONFIG_PM_RUNTIME */
/* Descriptor fields */
#define usb_descriptor_attr_le16(field, format_string) \
static ssize_t \
field##_show(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct usb_device *udev; \
\
udev = to_usb_device(dev); \
return sprintf(buf, format_string, \
le16_to_cpu(udev->descriptor.field)); \
} \
static DEVICE_ATTR_RO(field)
usb_descriptor_attr_le16(idVendor, "%04x\n");
usb_descriptor_attr_le16(idProduct, "%04x\n");
usb_descriptor_attr_le16(bcdDevice, "%04x\n");
#define usb_descriptor_attr(field, format_string) \
static ssize_t \
field##_show(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct usb_device *udev; \
\
udev = to_usb_device(dev); \
return sprintf(buf, format_string, udev->descriptor.field); \
} \
static DEVICE_ATTR_RO(field)
usb_descriptor_attr(bDeviceClass, "%02x\n");
usb_descriptor_attr(bDeviceSubClass, "%02x\n");
usb_descriptor_attr(bDeviceProtocol, "%02x\n");
usb_descriptor_attr(bNumConfigurations, "%d\n");
usb_descriptor_attr(bMaxPacketSize0, "%d\n");
/* show if the device is authorized (1) or not (0) */
static ssize_t authorized_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_device *usb_dev = to_usb_device(dev);
return snprintf(buf, PAGE_SIZE, "%u\n", usb_dev->authorized);
}
/*
* Authorize a device to be used in the system
*
* Writing a 0 deauthorizes the device, writing a 1 authorizes it.
*/
static ssize_t authorized_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t size)
{
ssize_t result;
struct usb_device *usb_dev = to_usb_device(dev);
unsigned val;
result = sscanf(buf, "%u\n", &val);
if (result != 1)
result = -EINVAL;
else if (val == 0)
result = usb_deauthorize_device(usb_dev);
else
result = usb_authorize_device(usb_dev);
return result < 0 ? result : size;
}
static DEVICE_ATTR_IGNORE_LOCKDEP(authorized, S_IRUGO | S_IWUSR,
authorized_show, authorized_store);
/* "Safely remove a device" */
static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
int rc = 0;
usb_lock_device(udev);
if (udev->state != USB_STATE_NOTATTACHED) {
/* To avoid races, first unconfigure and then remove */
usb_set_configuration(udev, -1);
rc = usb_remove_device(udev);
}
if (rc == 0)
rc = count;
usb_unlock_device(udev);
return rc;
}
static DEVICE_ATTR_IGNORE_LOCKDEP(remove, S_IWUSR, NULL, remove_store);
static struct attribute *dev_attrs[] = {
/* current configuration's attributes */
&dev_attr_configuration.attr,
&dev_attr_bNumInterfaces.attr,
&dev_attr_bConfigurationValue.attr,
&dev_attr_bmAttributes.attr,
&dev_attr_bMaxPower.attr,
/* device attributes */
&dev_attr_urbnum.attr,
&dev_attr_idVendor.attr,
&dev_attr_idProduct.attr,
&dev_attr_bcdDevice.attr,
&dev_attr_bDeviceClass.attr,
&dev_attr_bDeviceSubClass.attr,
&dev_attr_bDeviceProtocol.attr,
&dev_attr_bNumConfigurations.attr,
&dev_attr_bMaxPacketSize0.attr,
&dev_attr_speed.attr,
&dev_attr_busnum.attr,
&dev_attr_devnum.attr,
&dev_attr_devpath.attr,
&dev_attr_version.attr,
&dev_attr_maxchild.attr,
&dev_attr_quirks.attr,
&dev_attr_avoid_reset_quirk.attr,
&dev_attr_authorized.attr,
&dev_attr_remove.attr,
&dev_attr_removable.attr,
&dev_attr_ltm_capable.attr,
NULL,
};
static struct attribute_group dev_attr_grp = {
.attrs = dev_attrs,
};
/* When modifying this list, be sure to modify dev_string_attrs_are_visible()
* accordingly.
*/
static struct attribute *dev_string_attrs[] = {
&dev_attr_manufacturer.attr,
&dev_attr_product.attr,
&dev_attr_serial.attr,
NULL
};
static umode_t dev_string_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct usb_device *udev = to_usb_device(dev);
if (a == &dev_attr_manufacturer.attr) {
if (udev->manufacturer == NULL)
return 0;
} else if (a == &dev_attr_product.attr) {
if (udev->product == NULL)
return 0;
} else if (a == &dev_attr_serial.attr) {
if (udev->serial == NULL)
return 0;
}
return a->mode;
}
static struct attribute_group dev_string_attr_grp = {
.attrs = dev_string_attrs,
.is_visible = dev_string_attrs_are_visible,
};
const struct attribute_group *usb_device_groups[] = {
&dev_attr_grp,
&dev_string_attr_grp,
NULL
};
/* Binary descriptors */
static ssize_t
read_descriptors(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct usb_device *udev = to_usb_device(dev);
size_t nleft = count;
size_t srclen, n;
int cfgno;
void *src;
/* The binary attribute begins with the device descriptor.
* Following that are the raw descriptor entries for all the
* configurations (config plus subsidiary descriptors).
*/
usb_lock_device(udev);
for (cfgno = -1; cfgno < udev->descriptor.bNumConfigurations &&
nleft > 0; ++cfgno) {
if (cfgno < 0) {
src = &udev->descriptor;
srclen = sizeof(struct usb_device_descriptor);
} else {
src = udev->rawdescriptors[cfgno];
srclen = __le16_to_cpu(udev->config[cfgno].desc.
wTotalLength);
}
if (off < srclen) {
n = min(nleft, srclen - (size_t) off);
memcpy(buf, src + off, n);
nleft -= n;
buf += n;
off = 0;
} else {
off -= srclen;
}
}
usb_unlock_device(udev);
return count - nleft;
}
static struct bin_attribute dev_bin_attr_descriptors = {
.attr = {.name = "descriptors", .mode = 0444},
.read = read_descriptors,
.size = 18 + 65535, /* dev descr + max-size raw descriptor */
};
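/*
 * Added illustration (not part of the original source): the "descriptors"
 * binary attribute above serves the raw device descriptor followed by each
 * configuration's raw descriptors.  A minimal userspace reader that walks
 * the bLength/bDescriptorType headers, assuming a hypothetical device path:
 */
#if 0
#include <stdio.h>

int main(void)
{
	/* hypothetical device; the attribute name itself is fixed */
	FILE *f = fopen("/sys/bus/usb/devices/1-1/descriptors", "rb");
	unsigned char hdr[2];

	if (!f)
		return 1;
	while (fread(hdr, 1, 2, f) == 2) {	/* bLength, bDescriptorType */
		printf("len=%u type=0x%02x\n", hdr[0], hdr[1]);
		if (hdr[0] < 2 || fseek(f, hdr[0] - 2, SEEK_CUR))
			break;
	}
	fclose(f);
	return 0;
}
#endif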
int usb_create_sysfs_dev_files(struct usb_device *udev)
{
struct device *dev = &udev->dev;
int retval;
retval = device_create_bin_file(dev, &dev_bin_attr_descriptors);
if (retval)
goto error;
retval = add_persist_attributes(dev);
if (retval)
goto error;
retval = add_power_attributes(dev);
if (retval)
goto error;
return retval;
error:
usb_remove_sysfs_dev_files(udev);
return retval;
}
void usb_remove_sysfs_dev_files(struct usb_device *udev)
{
struct device *dev = &udev->dev;
remove_power_attributes(dev);
remove_persist_attributes(dev);
device_remove_bin_file(dev, &dev_bin_attr_descriptors);
}
/* Interface Association Descriptor fields */
#define usb_intf_assoc_attr(field, format_string) \
static ssize_t \
iad_##field##_show(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct usb_interface *intf = to_usb_interface(dev); \
\
return sprintf(buf, format_string, \
intf->intf_assoc->field); \
} \
static DEVICE_ATTR_RO(iad_##field)
usb_intf_assoc_attr(bFirstInterface, "%02x\n");
usb_intf_assoc_attr(bInterfaceCount, "%02d\n");
usb_intf_assoc_attr(bFunctionClass, "%02x\n");
usb_intf_assoc_attr(bFunctionSubClass, "%02x\n");
usb_intf_assoc_attr(bFunctionProtocol, "%02x\n");
/* Interface fields */
#define usb_intf_attr(field, format_string) \
static ssize_t \
field##_show(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct usb_interface *intf = to_usb_interface(dev); \
\
return sprintf(buf, format_string, \
intf->cur_altsetting->desc.field); \
} \
static DEVICE_ATTR_RO(field)
usb_intf_attr(bInterfaceNumber, "%02x\n");
usb_intf_attr(bAlternateSetting, "%2d\n");
usb_intf_attr(bNumEndpoints, "%02x\n");
usb_intf_attr(bInterfaceClass, "%02x\n");
usb_intf_attr(bInterfaceSubClass, "%02x\n");
usb_intf_attr(bInterfaceProtocol, "%02x\n");
static ssize_t interface_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_interface *intf;
char *string;
intf = to_usb_interface(dev);
string = ACCESS_ONCE(intf->cur_altsetting->string);
if (!string)
return 0;
return sprintf(buf, "%s\n", string);
}
static DEVICE_ATTR_RO(interface);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_interface *intf;
struct usb_device *udev;
struct usb_host_interface *alt;
intf = to_usb_interface(dev);
udev = interface_to_usbdev(intf);
alt = ACCESS_ONCE(intf->cur_altsetting);
return sprintf(buf, "usb:v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02X"
"ic%02Xisc%02Xip%02Xin%02X\n",
le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct),
le16_to_cpu(udev->descriptor.bcdDevice),
udev->descriptor.bDeviceClass,
udev->descriptor.bDeviceSubClass,
udev->descriptor.bDeviceProtocol,
alt->desc.bInterfaceClass,
alt->desc.bInterfaceSubClass,
alt->desc.bInterfaceProtocol,
alt->desc.bInterfaceNumber);
}
static DEVICE_ATTR_RO(modalias);
static ssize_t supports_autosuspend_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
int s;
device_lock(dev);
/* Devices will be autosuspended even when an interface isn't claimed */
s = (!dev->driver || to_usb_driver(dev->driver)->supports_autosuspend);
device_unlock(dev);
return sprintf(buf, "%u\n", s);
}
static DEVICE_ATTR_RO(supports_autosuspend);
static struct attribute *intf_attrs[] = {
&dev_attr_bInterfaceNumber.attr,
&dev_attr_bAlternateSetting.attr,
&dev_attr_bNumEndpoints.attr,
&dev_attr_bInterfaceClass.attr,
&dev_attr_bInterfaceSubClass.attr,
&dev_attr_bInterfaceProtocol.attr,
&dev_attr_modalias.attr,
&dev_attr_supports_autosuspend.attr,
NULL,
};
static struct attribute_group intf_attr_grp = {
.attrs = intf_attrs,
};
static struct attribute *intf_assoc_attrs[] = {
&dev_attr_iad_bFirstInterface.attr,
&dev_attr_iad_bInterfaceCount.attr,
&dev_attr_iad_bFunctionClass.attr,
&dev_attr_iad_bFunctionSubClass.attr,
&dev_attr_iad_bFunctionProtocol.attr,
NULL,
};
static umode_t intf_assoc_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct usb_interface *intf = to_usb_interface(dev);
if (intf->intf_assoc == NULL)
return 0;
return a->mode;
}
static struct attribute_group intf_assoc_attr_grp = {
.attrs = intf_assoc_attrs,
.is_visible = intf_assoc_attrs_are_visible,
};
const struct attribute_group *usb_interface_groups[] = {
&intf_attr_grp,
&intf_assoc_attr_grp,
NULL
};
void usb_create_sysfs_intf_files(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
struct usb_host_interface *alt = intf->cur_altsetting;
if (intf->sysfs_files_created || intf->unregistering)
return;
if (!alt->string && !(udev->quirks & USB_QUIRK_CONFIG_INTF_STRINGS))
alt->string = usb_cache_string(udev, alt->desc.iInterface);
if (alt->string && device_create_file(&intf->dev, &dev_attr_interface))
; /* We don't actually care if the function fails. */
intf->sysfs_files_created = 1;
}
void usb_remove_sysfs_intf_files(struct usb_interface *intf)
{
if (!intf->sysfs_files_created)
return;
device_remove_file(&intf->dev, &dev_attr_interface);
intf->sysfs_files_created = 0;
}
|
gpl-2.0
|
CoRfr/linux
|
drivers/media/i2c/adv7170.c
|
1018
|
10506
|
/*
* adv7170 - adv7170, adv7171 video encoder driver version 0.0.1
*
* Copyright (C) 2002 Maxim Yevtyushkin <max@linuxmedialabs.com>
*
* Based on adv7176 driver by:
*
* Copyright (C) 1998 Dave Perks <dperks@ibm.net>
* Copyright (C) 1999 Wolfgang Scherr <scherr@net4you.net>
* Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx>
* - some corrections for Pinnacle Systems Inc. DC10plus card.
*
* Changes by Ronald Bultje <rbultje@ronald.bitfreak.net>
* - moved over to linux>=2.4.x i2c protocol (1/1/2003)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
MODULE_DESCRIPTION("Analog Devices ADV7170 video encoder driver");
MODULE_AUTHOR("Maxim Yevtyushkin");
MODULE_LICENSE("GPL");
static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0-1)");
/* ----------------------------------------------------------------------- */
struct adv7170 {
struct v4l2_subdev sd;
unsigned char reg[128];
v4l2_std_id norm;
int input;
};
static inline struct adv7170 *to_adv7170(struct v4l2_subdev *sd)
{
return container_of(sd, struct adv7170, sd);
}
static char *inputs[] = { "pass_through", "play_back" };
static enum v4l2_mbus_pixelcode adv7170_codes[] = {
V4L2_MBUS_FMT_UYVY8_2X8,
V4L2_MBUS_FMT_UYVY8_1X16,
};
/* ----------------------------------------------------------------------- */
static inline int adv7170_write(struct v4l2_subdev *sd, u8 reg, u8 value)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct adv7170 *encoder = to_adv7170(sd);
encoder->reg[reg] = value;
return i2c_smbus_write_byte_data(client, reg, value);
}
static inline int adv7170_read(struct v4l2_subdev *sd, u8 reg)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
return i2c_smbus_read_byte_data(client, reg);
}
static int adv7170_write_block(struct v4l2_subdev *sd,
const u8 *data, unsigned int len)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct adv7170 *encoder = to_adv7170(sd);
int ret = -1;
u8 reg;
/* the adv7170 has an autoincrement function, use it if
* the adapter understands raw I2C */
if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
/* do raw I2C, not smbus compatible */
u8 block_data[32];
int block_len;
while (len >= 2) {
block_len = 0;
block_data[block_len++] = reg = data[0];
do {
block_data[block_len++] =
encoder->reg[reg++] = data[1];
len -= 2;
data += 2;
} while (len >= 2 && data[0] == reg && block_len < 32);
ret = i2c_master_send(client, block_data, block_len);
if (ret < 0)
break;
}
} else {
/* do some slow I2C emulation kind of thing */
while (len >= 2) {
reg = *data++;
ret = adv7170_write(sd, reg, *data++);
if (ret < 0)
break;
len -= 2;
}
}
return ret;
}
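/*
 * Added illustration (not part of the original source): adv7170_write_block()
 * consumes a flat (register, value) pair table such as init_NTSC below,
 * coalescing runs of consecutive register addresses into a single
 * autoincrement transfer when the adapter supports raw I2C.  A hypothetical
 * minimal table and call:
 */
#if 0
static const unsigned char example_regs[] = {
	0x00, 0x10,	/* MR0: consecutive with the next pair... */
	0x01, 0x20,	/* MR1: ...so both go out in one burst */
	0x07, 0xcc,	/* TM0: non-consecutive, starts a new transfer */
};
/* adv7170_write_block(sd, example_regs, sizeof(example_regs)); */
#endif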
/* ----------------------------------------------------------------------- */
#define TR0MODE 0x4c
#define TR0RST 0x80
#define TR1CAPT 0x00
#define TR1PLAY 0x00
static const unsigned char init_NTSC[] = {
0x00, 0x10, /* MR0 */
0x01, 0x20, /* MR1 */
0x02, 0x0e, /* MR2 RTC control: bits 2 and 1 */
0x03, 0x80, /* MR3 */
0x04, 0x30, /* MR4 */
0x05, 0x00, /* Reserved */
0x06, 0x00, /* Reserved */
0x07, TR0MODE, /* TM0 */
0x08, TR1CAPT, /* TM1 */
0x09, 0x16, /* Fsc0 */
0x0a, 0x7c, /* Fsc1 */
0x0b, 0xf0, /* Fsc2 */
0x0c, 0x21, /* Fsc3 */
0x0d, 0x00, /* Subcarrier Phase */
0x0e, 0x00, /* Closed Capt. Ext 0 */
0x0f, 0x00, /* Closed Capt. Ext 1 */
0x10, 0x00, /* Closed Capt. 0 */
0x11, 0x00, /* Closed Capt. 1 */
0x12, 0x00, /* Pedestal Ctl 0 */
0x13, 0x00, /* Pedestal Ctl 1 */
0x14, 0x00, /* Pedestal Ctl 2 */
0x15, 0x00, /* Pedestal Ctl 3 */
0x16, 0x00, /* CGMS_WSS_0 */
0x17, 0x00, /* CGMS_WSS_1 */
0x18, 0x00, /* CGMS_WSS_2 */
0x19, 0x00, /* Teletext Ctl */
};
static const unsigned char init_PAL[] = {
0x00, 0x71, /* MR0 */
0x01, 0x20, /* MR1 */
0x02, 0x0e, /* MR2 RTC control: bits 2 and 1 */
0x03, 0x80, /* MR3 */
0x04, 0x30, /* MR4 */
0x05, 0x00, /* Reserved */
0x06, 0x00, /* Reserved */
0x07, TR0MODE, /* TM0 */
0x08, TR1CAPT, /* TM1 */
0x09, 0xcb, /* Fsc0 */
0x0a, 0x8a, /* Fsc1 */
0x0b, 0x09, /* Fsc2 */
0x0c, 0x2a, /* Fsc3 */
0x0d, 0x00, /* Subcarrier Phase */
0x0e, 0x00, /* Closed Capt. Ext 0 */
0x0f, 0x00, /* Closed Capt. Ext 1 */
0x10, 0x00, /* Closed Capt. 0 */
0x11, 0x00, /* Closed Capt. 1 */
0x12, 0x00, /* Pedestal Ctl 0 */
0x13, 0x00, /* Pedestal Ctl 1 */
0x14, 0x00, /* Pedestal Ctl 2 */
0x15, 0x00, /* Pedestal Ctl 3 */
0x16, 0x00, /* CGMS_WSS_0 */
0x17, 0x00, /* CGMS_WSS_1 */
0x18, 0x00, /* CGMS_WSS_2 */
0x19, 0x00, /* Teletext Ctl */
};
static int adv7170_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
{
struct adv7170 *encoder = to_adv7170(sd);
v4l2_dbg(1, debug, sd, "set norm %llx\n", (unsigned long long)std);
if (std & V4L2_STD_NTSC) {
adv7170_write_block(sd, init_NTSC, sizeof(init_NTSC));
if (encoder->input == 0)
adv7170_write(sd, 0x02, 0x0e); /* Enable genlock */
adv7170_write(sd, 0x07, TR0MODE | TR0RST);
adv7170_write(sd, 0x07, TR0MODE);
} else if (std & V4L2_STD_PAL) {
adv7170_write_block(sd, init_PAL, sizeof(init_PAL));
if (encoder->input == 0)
adv7170_write(sd, 0x02, 0x0e); /* Enable genlock */
adv7170_write(sd, 0x07, TR0MODE | TR0RST);
adv7170_write(sd, 0x07, TR0MODE);
} else {
v4l2_dbg(1, debug, sd, "illegal norm: %llx\n",
(unsigned long long)std);
return -EINVAL;
}
v4l2_dbg(1, debug, sd, "switched to %llx\n", (unsigned long long)std);
encoder->norm = std;
return 0;
}
static int adv7170_s_routing(struct v4l2_subdev *sd,
u32 input, u32 output, u32 config)
{
struct adv7170 *encoder = to_adv7170(sd);
/* RJ: input = 0: input is from decoder
input = 1: input is from ZR36060
input = 2: color bar */
v4l2_dbg(1, debug, sd, "set input from %s\n",
input == 0 ? "decoder" : "ZR36060");
switch (input) {
case 0:
adv7170_write(sd, 0x01, 0x20);
adv7170_write(sd, 0x08, TR1CAPT); /* TR1 */
adv7170_write(sd, 0x02, 0x0e); /* Enable genlock */
adv7170_write(sd, 0x07, TR0MODE | TR0RST);
adv7170_write(sd, 0x07, TR0MODE);
/* udelay(10); */
break;
case 1:
adv7170_write(sd, 0x01, 0x00);
adv7170_write(sd, 0x08, TR1PLAY); /* TR1 */
adv7170_write(sd, 0x02, 0x08);
adv7170_write(sd, 0x07, TR0MODE | TR0RST);
adv7170_write(sd, 0x07, TR0MODE);
/* udelay(10); */
break;
default:
v4l2_dbg(1, debug, sd, "illegal input: %d\n", input);
return -EINVAL;
}
v4l2_dbg(1, debug, sd, "switched to %s\n", inputs[input]);
encoder->input = input;
return 0;
}
static int adv7170_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
enum v4l2_mbus_pixelcode *code)
{
if (index >= ARRAY_SIZE(adv7170_codes))
return -EINVAL;
*code = adv7170_codes[index];
return 0;
}
static int adv7170_g_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
u8 val = adv7170_read(sd, 0x7);
if ((val & 0x40) == (1 << 6))
mf->code = V4L2_MBUS_FMT_UYVY8_1X16;
else
mf->code = V4L2_MBUS_FMT_UYVY8_2X8;
mf->colorspace = V4L2_COLORSPACE_SMPTE170M;
mf->width = 0;
mf->height = 0;
mf->field = V4L2_FIELD_ANY;
return 0;
}
static int adv7170_s_fmt(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *mf)
{
u8 val = adv7170_read(sd, 0x7);
int ret;
switch (mf->code) {
case V4L2_MBUS_FMT_UYVY8_2X8:
val &= ~0x40;
break;
case V4L2_MBUS_FMT_UYVY8_1X16:
val |= 0x40;
break;
default:
v4l2_dbg(1, debug, sd,
"illegal v4l2_mbus_framefmt code: %d\n", mf->code);
return -EINVAL;
}
ret = adv7170_write(sd, 0x7, val);
return ret;
}
/* ----------------------------------------------------------------------- */
static const struct v4l2_subdev_video_ops adv7170_video_ops = {
.s_std_output = adv7170_s_std_output,
.s_routing = adv7170_s_routing,
.s_mbus_fmt = adv7170_s_fmt,
.g_mbus_fmt = adv7170_g_fmt,
.enum_mbus_fmt = adv7170_enum_fmt,
};
static const struct v4l2_subdev_ops adv7170_ops = {
.video = &adv7170_video_ops,
};
/* ----------------------------------------------------------------------- */
static int adv7170_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct adv7170 *encoder;
struct v4l2_subdev *sd;
int i;
/* Check if the adapter supports the needed features */
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
v4l_info(client, "chip found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);
encoder = devm_kzalloc(&client->dev, sizeof(*encoder), GFP_KERNEL);
if (encoder == NULL)
return -ENOMEM;
sd = &encoder->sd;
v4l2_i2c_subdev_init(sd, client, &adv7170_ops);
encoder->norm = V4L2_STD_NTSC;
encoder->input = 0;
i = adv7170_write_block(sd, init_NTSC, sizeof(init_NTSC));
if (i >= 0) {
i = adv7170_write(sd, 0x07, TR0MODE | TR0RST);
i = adv7170_write(sd, 0x07, TR0MODE);
i = adv7170_read(sd, 0x12);
v4l2_dbg(1, debug, sd, "revision %d\n", i & 1);
}
if (i < 0)
v4l2_dbg(1, debug, sd, "init error 0x%x\n", i);
return 0;
}
static int adv7170_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
return 0;
}
/* ----------------------------------------------------------------------- */
static const struct i2c_device_id adv7170_id[] = {
{ "adv7170", 0 },
{ "adv7171", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adv7170_id);
static struct i2c_driver adv7170_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "adv7170",
},
.probe = adv7170_probe,
.remove = adv7170_remove,
.id_table = adv7170_id,
};
module_i2c_driver(adv7170_driver);
|
gpl-2.0
|
markfasheh/linux-4.1-dedupe_fixes
|
drivers/video/fbdev/neofb.c
|
1274
|
56494
|
/*
* linux/drivers/video/neofb.c -- NeoMagic Framebuffer Driver
*
* Copyright (c) 2001-2002 Denis Oliver Kropp <dok@directfb.org>
*
*
* Card specific code is based on XFree86's neomagic driver.
* Framebuffer framework code is based on code of cyber2000fb.
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of this
* archive for more details.
*
*
* 0.4.1
* - Cosmetic changes (dok)
*
* 0.4
* - Toshiba Libretto support, allow modes larger than LCD size if
* LCD is disabled, keep BIOS settings if internal/external display
* haven't been enabled explicitly
* (Thomas J. Moore <dark@mama.indstate.edu>)
*
* 0.3.3
* - Porting over to new fbdev api. (jsimmons)
*
* 0.3.2
* - got rid of all floating point (dok)
*
* 0.3.1
* - added module license (dok)
*
* 0.3
* - hardware accelerated clear and move for 2200 and above (dok)
* - maximum allowed dotclock is handled now (dok)
*
* 0.2.1
* - correct panning after X usage (dok)
* - added module and kernel parameters (dok)
* - no stretching if external display is enabled (dok)
*
* 0.2
* - initial version (dok)
*
*
* TODO
* - ioctl for internal/external switching
* - blanking
* - 32bit depth support, maybe impossible
* - disable pan-on-sync, need specs
*
* BUGS
* - white margin on bootup like with tdfxfb (colormap problem?)
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/pci.h>
#include <linux/init.h>
#ifdef CONFIG_TOSHIBA
#include <linux/toshiba.h>
#endif
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pgtable.h>
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
#include <video/vga.h>
#include <video/neomagic.h>
#define NEOFB_VERSION "0.4.2"
/* --------------------------------------------------------------------- */
static bool internal;
static bool external;
static bool libretto;
static bool nostretch;
static bool nopciburst;
static char *mode_option = NULL;
#ifdef MODULE
MODULE_AUTHOR("(c) 2001-2002 Denis Oliver Kropp <dok@convergence.de>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FBDev driver for NeoMagic PCI Chips");
module_param(internal, bool, 0);
MODULE_PARM_DESC(internal, "Enable output on internal LCD Display.");
module_param(external, bool, 0);
MODULE_PARM_DESC(external, "Enable output on external CRT.");
module_param(libretto, bool, 0);
MODULE_PARM_DESC(libretto, "Force Libretto 100/110 800x480 LCD.");
module_param(nostretch, bool, 0);
MODULE_PARM_DESC(nostretch,
"Disable stretching of modes smaller than LCD.");
module_param(nopciburst, bool, 0);
MODULE_PARM_DESC(nopciburst, "Disable PCI burst mode.");
module_param(mode_option, charp, 0);
MODULE_PARM_DESC(mode_option, "Preferred video mode ('640x480-8@60', etc)");
#endif
/* --------------------------------------------------------------------- */
static biosMode bios8[] = {
{320, 240, 0x40},
{300, 400, 0x42},
{640, 400, 0x20},
{640, 480, 0x21},
{800, 600, 0x23},
{1024, 768, 0x25},
};
static biosMode bios16[] = {
{320, 200, 0x2e},
{320, 240, 0x41},
{300, 400, 0x43},
{640, 480, 0x31},
{800, 600, 0x34},
{1024, 768, 0x37},
};
static biosMode bios24[] = {
{640, 480, 0x32},
{800, 600, 0x35},
{1024, 768, 0x38}
};
#ifdef NO_32BIT_SUPPORT_YET
/* FIXME: guessed values, wrong */
static biosMode bios32[] = {
{640, 480, 0x33},
{800, 600, 0x36},
{1024, 768, 0x39}
};
#endif
static inline void write_le32(int regindex, u32 val, const struct neofb_par *par)
{
writel(val, par->neo2200 + par->cursorOff + regindex);
}
static int neoFindMode(int xres, int yres, int depth)
{
int xres_s;
int i, size;
biosMode *mode;
switch (depth) {
case 8:
size = ARRAY_SIZE(bios8);
mode = bios8;
break;
case 16:
size = ARRAY_SIZE(bios16);
mode = bios16;
break;
case 24:
size = ARRAY_SIZE(bios24);
mode = bios24;
break;
#ifdef NO_32BIT_SUPPORT_YET
case 32:
size = ARRAY_SIZE(bios32);
mode = bios32;
break;
#endif
default:
return 0;
}
for (i = 0; i < size; i++) {
if (xres <= mode[i].x_res) {
xres_s = mode[i].x_res;
for (; i < size; i++) {
if (mode[i].x_res != xres_s)
return mode[i - 1].mode;
if (yres <= mode[i].y_res)
return mode[i].mode;
}
}
}
return mode[size - 1].mode;
}
/*
* neoCalcVCLK --
*
* Determine the closest clock frequency to the one requested.
*/
#define MAX_N 127
#define MAX_D 31
#define MAX_F 1
static void neoCalcVCLK(const struct fb_info *info,
struct neofb_par *par, long freq)
{
int n, d, f;
int n_best = 0, d_best = 0, f_best = 0;
long f_best_diff = 0x7ffff;
for (f = 0; f <= MAX_F; f++)
for (d = 0; d <= MAX_D; d++)
for (n = 0; n <= MAX_N; n++) {
long f_out;
long f_diff;
f_out = ((14318 * (n + 1)) / (d + 1)) >> f;
f_diff = abs(f_out - freq);
if (f_diff <= f_best_diff) {
f_best_diff = f_diff;
n_best = n;
d_best = d;
f_best = f;
}
if (f_out > freq)
break;
}
if (info->fix.accel == FB_ACCEL_NEOMAGIC_NM2200 ||
info->fix.accel == FB_ACCEL_NEOMAGIC_NM2230 ||
info->fix.accel == FB_ACCEL_NEOMAGIC_NM2360 ||
info->fix.accel == FB_ACCEL_NEOMAGIC_NM2380) {
/* NOT_DONE: We are trying the full range of the 2200 clock.
We should be able to try n up to 2047 */
par->VCLK3NumeratorLow = n_best;
par->VCLK3NumeratorHigh = (f_best << 7);
} else
par->VCLK3NumeratorLow = n_best | (f_best << 7);
par->VCLK3Denominator = d_best;
#ifdef NEOFB_DEBUG
printk(KERN_DEBUG "neoVCLK: f:%ld NumLow=%d NumHi=%d Den=%d Df=%ld\n",
freq,
par->VCLK3NumeratorLow,
par->VCLK3NumeratorHigh,
par->VCLK3Denominator, f_best_diff);
#endif
}
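/*
 * Added illustration (not part of the original source): the search above
 * scores f_out = (14318 * (n + 1) / (d + 1)) >> f, in kHz, against the
 * requested dot clock.  A standalone brute-force sketch of the same loop:
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	long freq = 25175;	/* hypothetical 640x480@60 dot clock, kHz */
	long best_diff = 0x7ffff;
	int n, d, f, nb = 0, db = 0, fb = 0;

	for (f = 0; f <= 1; f++)
		for (d = 0; d <= 31; d++)
			for (n = 0; n <= 127; n++) {
				long out = ((14318 * (n + 1)) / (d + 1)) >> f;
				long diff = labs(out - freq);

				if (diff <= best_diff) {
					best_diff = diff;
					nb = n;
					db = d;
					fb = f;
				}
				if (out > freq)
					break;
			}
	printf("n=%d d=%d f=%d off by %ld kHz\n", nb, db, fb, best_diff);
	return 0;
}
#endif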
/*
* vgaHWInit --
* Handle the initialization, etc. of a screen.
* Return FALSE on failure.
*/
static int vgaHWInit(const struct fb_var_screeninfo *var,
struct neofb_par *par)
{
int hsync_end = var->xres + var->right_margin + var->hsync_len;
int htotal = (hsync_end + var->left_margin) >> 3;
int vsync_start = var->yres + var->lower_margin;
int vsync_end = vsync_start + var->vsync_len;
int vtotal = vsync_end + var->upper_margin;
par->MiscOutReg = 0x23;
if (!(var->sync & FB_SYNC_HOR_HIGH_ACT))
par->MiscOutReg |= 0x40;
if (!(var->sync & FB_SYNC_VERT_HIGH_ACT))
par->MiscOutReg |= 0x80;
/*
* Time Sequencer
*/
par->Sequencer[0] = 0x00;
par->Sequencer[1] = 0x01;
par->Sequencer[2] = 0x0F;
par->Sequencer[3] = 0x00; /* Font select */
par->Sequencer[4] = 0x0E; /* Misc */
/*
* CRTC Controller
*/
par->CRTC[0] = htotal - 5;
par->CRTC[1] = (var->xres >> 3) - 1;
par->CRTC[2] = (var->xres >> 3) - 1;
par->CRTC[3] = ((htotal - 1) & 0x1F) | 0x80;
par->CRTC[4] = ((var->xres + var->right_margin) >> 3);
par->CRTC[5] = (((htotal - 1) & 0x20) << 2)
| (((hsync_end >> 3)) & 0x1F);
par->CRTC[6] = (vtotal - 2) & 0xFF;
par->CRTC[7] = (((vtotal - 2) & 0x100) >> 8)
| (((var->yres - 1) & 0x100) >> 7)
| ((vsync_start & 0x100) >> 6)
| (((var->yres - 1) & 0x100) >> 5)
| 0x10 | (((vtotal - 2) & 0x200) >> 4)
| (((var->yres - 1) & 0x200) >> 3)
| ((vsync_start & 0x200) >> 2);
par->CRTC[8] = 0x00;
par->CRTC[9] = (((var->yres - 1) & 0x200) >> 4) | 0x40;
if (var->vmode & FB_VMODE_DOUBLE)
par->CRTC[9] |= 0x80;
par->CRTC[10] = 0x00;
par->CRTC[11] = 0x00;
par->CRTC[12] = 0x00;
par->CRTC[13] = 0x00;
par->CRTC[14] = 0x00;
par->CRTC[15] = 0x00;
par->CRTC[16] = vsync_start & 0xFF;
par->CRTC[17] = (vsync_end & 0x0F) | 0x20;
par->CRTC[18] = (var->yres - 1) & 0xFF;
par->CRTC[19] = var->xres_virtual >> 4;
par->CRTC[20] = 0x00;
par->CRTC[21] = (var->yres - 1) & 0xFF;
par->CRTC[22] = (vtotal - 1) & 0xFF;
par->CRTC[23] = 0xC3;
par->CRTC[24] = 0xFF;
/*
* are these unnecessary?
* vgaHWHBlankKGA(mode, regp, 0, KGA_FIX_OVERSCAN | KGA_ENABLE_ON_ZERO);
* vgaHWVBlankKGA(mode, regp, 0, KGA_FIX_OVERSCAN | KGA_ENABLE_ON_ZERO);
*/
/*
* Graphics Display Controller
*/
par->Graphics[0] = 0x00;
par->Graphics[1] = 0x00;
par->Graphics[2] = 0x00;
par->Graphics[3] = 0x00;
par->Graphics[4] = 0x00;
par->Graphics[5] = 0x40;
par->Graphics[6] = 0x05; /* only map 64k VGA memory !!!! */
par->Graphics[7] = 0x0F;
par->Graphics[8] = 0xFF;
par->Attribute[0] = 0x00; /* standard colormap translation */
par->Attribute[1] = 0x01;
par->Attribute[2] = 0x02;
par->Attribute[3] = 0x03;
par->Attribute[4] = 0x04;
par->Attribute[5] = 0x05;
par->Attribute[6] = 0x06;
par->Attribute[7] = 0x07;
par->Attribute[8] = 0x08;
par->Attribute[9] = 0x09;
par->Attribute[10] = 0x0A;
par->Attribute[11] = 0x0B;
par->Attribute[12] = 0x0C;
par->Attribute[13] = 0x0D;
par->Attribute[14] = 0x0E;
par->Attribute[15] = 0x0F;
par->Attribute[16] = 0x41;
par->Attribute[17] = 0xFF;
par->Attribute[18] = 0x0F;
par->Attribute[19] = 0x00;
par->Attribute[20] = 0x00;
return 0;
}
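/*
 * Added worked example (not part of the original source): with hypothetical
 * VGA 640x480 timings (right_margin 16, hsync_len 96, left_margin 48) the
 * horizontal fields above come out, in character clocks (pixels / 8), as:
 *   hsync_end = 640 + 16 + 96 = 752
 *   htotal    = (752 + 48) >> 3 = 100
 *   CRTC[0]   = htotal - 5 = 95
 *   CRTC[1]   = CRTC[2] = (640 >> 3) - 1 = 79
 *   CRTC[4]   = (640 + 16) >> 3 = 82
 */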
static void vgaHWLock(struct vgastate *state)
{
/* Protect CRTC[0-7] */
vga_wcrt(state->vgabase, 0x11, vga_rcrt(state->vgabase, 0x11) | 0x80);
}
static void vgaHWUnlock(void)
{
/* Unprotect CRTC[0-7] */
vga_wcrt(NULL, 0x11, vga_rcrt(NULL, 0x11) & ~0x80);
}
static void neoLock(struct vgastate *state)
{
vga_wgfx(state->vgabase, 0x09, 0x00);
vgaHWLock(state);
}
static void neoUnlock(void)
{
vgaHWUnlock();
vga_wgfx(NULL, 0x09, 0x26);
}
/*
* VGA Palette management
*/
static int paletteEnabled = 0;
static inline void VGAenablePalette(void)
{
vga_r(NULL, VGA_IS1_RC);
vga_w(NULL, VGA_ATT_W, 0x00);
paletteEnabled = 1;
}
static inline void VGAdisablePalette(void)
{
vga_r(NULL, VGA_IS1_RC);
vga_w(NULL, VGA_ATT_W, 0x20);
paletteEnabled = 0;
}
static inline void VGAwATTR(u8 index, u8 value)
{
if (paletteEnabled)
index &= ~0x20;
else
index |= 0x20;
vga_r(NULL, VGA_IS1_RC);
vga_wattr(NULL, index, value);
}
static void vgaHWProtect(int on)
{
unsigned char tmp;
tmp = vga_rseq(NULL, 0x01);
if (on) {
/*
* Turn off screen and disable sequencer.
*/
vga_wseq(NULL, 0x00, 0x01); /* Synchronous Reset */
vga_wseq(NULL, 0x01, tmp | 0x20); /* disable the display */
VGAenablePalette();
} else {
/*
* Reenable sequencer, then turn on screen.
*/
vga_wseq(NULL, 0x01, tmp & ~0x20); /* reenable display */
		vga_wseq(NULL, 0x00, 0x03); /* clear synchronous reset */
VGAdisablePalette();
}
}
static void vgaHWRestore(const struct fb_info *info,
const struct neofb_par *par)
{
int i;
vga_w(NULL, VGA_MIS_W, par->MiscOutReg);
for (i = 1; i < 5; i++)
vga_wseq(NULL, i, par->Sequencer[i]);
	/* Ensure CRTC registers 0-7 are unlocked by clearing bit 7 of CRTC[17] */
vga_wcrt(NULL, 17, par->CRTC[17] & ~0x80);
for (i = 0; i < 25; i++)
vga_wcrt(NULL, i, par->CRTC[i]);
for (i = 0; i < 9; i++)
vga_wgfx(NULL, i, par->Graphics[i]);
VGAenablePalette();
for (i = 0; i < 21; i++)
VGAwATTR(i, par->Attribute[i]);
VGAdisablePalette();
}
/* -------------------- Hardware specific routines ------------------------- */
/*
* Hardware Acceleration for Neo2200+
*/
static inline int neo2200_sync(struct fb_info *info)
{
struct neofb_par *par = info->par;
while (readl(&par->neo2200->bltStat) & 1)
cpu_relax();
return 0;
}
static inline void neo2200_wait_fifo(struct fb_info *info,
int requested_fifo_space)
{
// ndev->neo.waitfifo_calls++;
// ndev->neo.waitfifo_sum += requested_fifo_space;
/* FIXME: does not work
if (neo_fifo_space < requested_fifo_space)
{
neo_fifo_waitcycles++;
while (1)
{
neo_fifo_space = (neo2200->bltStat >> 8);
if (neo_fifo_space >= requested_fifo_space)
break;
}
}
else
{
neo_fifo_cache_hits++;
}
neo_fifo_space -= requested_fifo_space;
*/
neo2200_sync(info);
}
static inline void neo2200_accel_init(struct fb_info *info,
struct fb_var_screeninfo *var)
{
struct neofb_par *par = info->par;
Neo2200 __iomem *neo2200 = par->neo2200;
u32 bltMod, pitch;
neo2200_sync(info);
switch (var->bits_per_pixel) {
case 8:
bltMod = NEO_MODE1_DEPTH8;
pitch = var->xres_virtual;
break;
case 15:
case 16:
bltMod = NEO_MODE1_DEPTH16;
pitch = var->xres_virtual * 2;
break;
case 24:
bltMod = NEO_MODE1_DEPTH24;
pitch = var->xres_virtual * 3;
break;
default:
printk(KERN_ERR
"neofb: neo2200_accel_init: unexpected bits per pixel!\n");
return;
}
writel(bltMod << 16, &neo2200->bltStat);
writel((pitch << 16) | pitch, &neo2200->pitch);
}
/* --------------------------------------------------------------------- */
static int
neofb_open(struct fb_info *info, int user)
{
struct neofb_par *par = info->par;
if (!par->ref_count) {
memset(&par->state, 0, sizeof(struct vgastate));
par->state.flags = VGA_SAVE_MODE | VGA_SAVE_FONTS;
save_vga(&par->state);
}
par->ref_count++;
return 0;
}
static int
neofb_release(struct fb_info *info, int user)
{
struct neofb_par *par = info->par;
if (!par->ref_count)
return -EINVAL;
if (par->ref_count == 1) {
restore_vga(&par->state);
}
par->ref_count--;
return 0;
}
static int
neofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
struct neofb_par *par = info->par;
int memlen, vramlen;
int mode_ok = 0;
DBG("neofb_check_var");
if (PICOS2KHZ(var->pixclock) > par->maxClock)
return -EINVAL;
/* Is the mode larger than the LCD panel? */
if (par->internal_display &&
((var->xres > par->NeoPanelWidth) ||
(var->yres > par->NeoPanelHeight))) {
printk(KERN_INFO
"Mode (%dx%d) larger than the LCD panel (%dx%d)\n",
var->xres, var->yres, par->NeoPanelWidth,
par->NeoPanelHeight);
return -EINVAL;
}
/* Is the mode one of the acceptable sizes? */
if (!par->internal_display)
mode_ok = 1;
else {
switch (var->xres) {
case 1280:
if (var->yres == 1024)
mode_ok = 1;
break;
case 1024:
if (var->yres == 768)
mode_ok = 1;
break;
case 800:
if (var->yres == (par->libretto ? 480 : 600))
mode_ok = 1;
break;
case 640:
if (var->yres == 480)
mode_ok = 1;
break;
}
}
if (!mode_ok) {
printk(KERN_INFO
"Mode (%dx%d) won't display properly on LCD\n",
var->xres, var->yres);
return -EINVAL;
}
var->red.msb_right = 0;
var->green.msb_right = 0;
var->blue.msb_right = 0;
var->transp.msb_right = 0;
var->transp.offset = 0;
var->transp.length = 0;
switch (var->bits_per_pixel) {
case 8: /* PSEUDOCOLOUR, 256 */
var->red.offset = 0;
var->red.length = 8;
var->green.offset = 0;
var->green.length = 8;
var->blue.offset = 0;
var->blue.length = 8;
break;
case 16: /* DIRECTCOLOUR, 64k */
var->red.offset = 11;
var->red.length = 5;
var->green.offset = 5;
var->green.length = 6;
var->blue.offset = 0;
var->blue.length = 5;
break;
case 24: /* TRUECOLOUR, 16m */
var->red.offset = 16;
var->red.length = 8;
var->green.offset = 8;
var->green.length = 8;
var->blue.offset = 0;
var->blue.length = 8;
break;
#ifdef NO_32BIT_SUPPORT_YET
case 32: /* TRUECOLOUR, 16m */
var->transp.offset = 24;
var->transp.length = 8;
var->red.offset = 16;
var->red.length = 8;
var->green.offset = 8;
var->green.length = 8;
var->blue.offset = 0;
var->blue.length = 8;
break;
#endif
default:
printk(KERN_WARNING "neofb: no support for %dbpp\n",
var->bits_per_pixel);
return -EINVAL;
}
vramlen = info->fix.smem_len;
if (vramlen > 4 * 1024 * 1024)
vramlen = 4 * 1024 * 1024;
if (var->xres_virtual < var->xres)
var->xres_virtual = var->xres;
memlen = var->xres_virtual * var->bits_per_pixel * var->yres_virtual >> 3;
if (memlen > vramlen) {
var->yres_virtual = vramlen * 8 / (var->xres_virtual *
var->bits_per_pixel);
memlen = var->xres_virtual * var->bits_per_pixel *
var->yres_virtual / 8;
}
/* we must round yres/xres down, we already rounded y/xres_virtual up
if it was possible. We should return -EINVAL, but I disagree */
if (var->yres_virtual < var->yres)
var->yres = var->yres_virtual;
if (var->xoffset + var->xres > var->xres_virtual)
var->xoffset = var->xres_virtual - var->xres;
if (var->yoffset + var->yres > var->yres_virtual)
var->yoffset = var->yres_virtual - var->yres;
var->nonstd = 0;
var->height = -1;
var->width = -1;
if (var->bits_per_pixel >= 24 || !par->neo2200)
var->accel_flags &= ~FB_ACCELF_TEXT;
return 0;
}
static int neofb_set_par(struct fb_info *info)
{
struct neofb_par *par = info->par;
unsigned char temp;
int i, clock_hi = 0;
int lcd_stretch;
int hoffset, voffset;
int vsync_start, vtotal;
DBG("neofb_set_par");
neoUnlock();
vgaHWProtect(1); /* Blank the screen */
vsync_start = info->var.yres + info->var.lower_margin;
vtotal = vsync_start + info->var.vsync_len + info->var.upper_margin;
/*
* This will allocate the datastructure and initialize all of the
* generic VGA registers.
*/
if (vgaHWInit(&info->var, par))
return -EINVAL;
/*
* The default value assigned by vgaHW.c is 0x41, but this does
* not work for NeoMagic.
*/
par->Attribute[16] = 0x01;
switch (info->var.bits_per_pixel) {
case 8:
par->CRTC[0x13] = info->var.xres_virtual >> 3;
par->ExtCRTOffset = info->var.xres_virtual >> 11;
par->ExtColorModeSelect = 0x11;
break;
case 16:
par->CRTC[0x13] = info->var.xres_virtual >> 2;
par->ExtCRTOffset = info->var.xres_virtual >> 10;
par->ExtColorModeSelect = 0x13;
break;
case 24:
par->CRTC[0x13] = (info->var.xres_virtual * 3) >> 3;
par->ExtCRTOffset = (info->var.xres_virtual * 3) >> 11;
par->ExtColorModeSelect = 0x14;
break;
#ifdef NO_32BIT_SUPPORT_YET
case 32: /* FIXME: guessed values */
par->CRTC[0x13] = info->var.xres_virtual >> 1;
par->ExtCRTOffset = info->var.xres_virtual >> 9;
par->ExtColorModeSelect = 0x15;
break;
#endif
default:
break;
}
par->ExtCRTDispAddr = 0x10;
/* Vertical Extension */
par->VerticalExt = (((vtotal - 2) & 0x400) >> 10)
| (((info->var.yres - 1) & 0x400) >> 9)
| (((vsync_start) & 0x400) >> 8)
| (((vsync_start) & 0x400) >> 7);
/* Fast write bursts on unless disabled. */
if (par->pci_burst)
par->SysIfaceCntl1 = 0x30;
else
par->SysIfaceCntl1 = 0x00;
par->SysIfaceCntl2 = 0xc0; /* VESA Bios sets this to 0x80! */
/* Initialize: by default, we want display config register to be read */
par->PanelDispCntlRegRead = 1;
/* Enable any user specified display devices. */
par->PanelDispCntlReg1 = 0x00;
if (par->internal_display)
par->PanelDispCntlReg1 |= 0x02;
if (par->external_display)
par->PanelDispCntlReg1 |= 0x01;
/* If the user did not specify any display devices, then... */
if (par->PanelDispCntlReg1 == 0x00) {
/* Default to internal (i.e., LCD) only. */
par->PanelDispCntlReg1 = vga_rgfx(NULL, 0x20) & 0x03;
}
/* If we are using a fixed mode, then tell the chip we are. */
switch (info->var.xres) {
case 1280:
par->PanelDispCntlReg1 |= 0x60;
break;
case 1024:
par->PanelDispCntlReg1 |= 0x40;
break;
case 800:
par->PanelDispCntlReg1 |= 0x20;
break;
case 640:
default:
break;
}
/* Setup shadow register locking. */
switch (par->PanelDispCntlReg1 & 0x03) {
case 0x01: /* External CRT only mode: */
par->GeneralLockReg = 0x00;
/* We need to program the VCLK for external display only mode. */
par->ProgramVCLK = 1;
break;
case 0x02: /* Internal LCD only mode: */
case 0x03: /* Simultaneous internal/external (LCD/CRT) mode: */
par->GeneralLockReg = 0x01;
/* Don't program the VCLK when using the LCD. */
par->ProgramVCLK = 0;
break;
}
/*
* If the screen is to be stretched, turn on stretching for the
* various modes.
*
* OPTION_LCD_STRETCH means stretching should be turned off!
*/
par->PanelDispCntlReg2 = 0x00;
par->PanelDispCntlReg3 = 0x00;
if (par->lcd_stretch && (par->PanelDispCntlReg1 == 0x02) && /* LCD only */
(info->var.xres != par->NeoPanelWidth)) {
switch (info->var.xres) {
case 320: /* Needs testing. KEM -- 24 May 98 */
case 400: /* Needs testing. KEM -- 24 May 98 */
case 640:
case 800:
case 1024:
lcd_stretch = 1;
par->PanelDispCntlReg2 |= 0xC6;
break;
default:
lcd_stretch = 0;
/* No stretching in these modes. */
}
} else
lcd_stretch = 0;
/*
 * If the screen is to be centered, turn on the centering for the
* various modes.
*/
par->PanelVertCenterReg1 = 0x00;
par->PanelVertCenterReg2 = 0x00;
par->PanelVertCenterReg3 = 0x00;
par->PanelVertCenterReg4 = 0x00;
par->PanelVertCenterReg5 = 0x00;
par->PanelHorizCenterReg1 = 0x00;
par->PanelHorizCenterReg2 = 0x00;
par->PanelHorizCenterReg3 = 0x00;
par->PanelHorizCenterReg4 = 0x00;
par->PanelHorizCenterReg5 = 0x00;
if (par->PanelDispCntlReg1 & 0x02) {
if (info->var.xres == par->NeoPanelWidth) {
/*
* No centering required when the requested display width
* equals the panel width.
*/
} else {
par->PanelDispCntlReg2 |= 0x01;
par->PanelDispCntlReg3 |= 0x10;
/* Calculate the horizontal and vertical offsets. */
if (!lcd_stretch) {
hoffset =
((par->NeoPanelWidth -
info->var.xres) >> 4) - 1;
voffset =
((par->NeoPanelHeight -
info->var.yres) >> 1) - 2;
} else {
/* Stretched modes cannot be centered. */
hoffset = 0;
voffset = 0;
}
switch (info->var.xres) {
case 320: /* Needs testing. KEM -- 24 May 98 */
par->PanelHorizCenterReg3 = hoffset;
par->PanelVertCenterReg2 = voffset;
break;
case 400: /* Needs testing. KEM -- 24 May 98 */
par->PanelHorizCenterReg4 = hoffset;
par->PanelVertCenterReg1 = voffset;
break;
case 640:
par->PanelHorizCenterReg1 = hoffset;
par->PanelVertCenterReg3 = voffset;
break;
case 800:
par->PanelHorizCenterReg2 = hoffset;
par->PanelVertCenterReg4 = voffset;
break;
case 1024:
par->PanelHorizCenterReg5 = hoffset;
par->PanelVertCenterReg5 = voffset;
break;
case 1280:
default:
/* No centering in these modes. */
break;
}
}
}
par->biosMode =
neoFindMode(info->var.xres, info->var.yres,
info->var.bits_per_pixel);
/*
* Calculate the VCLK that most closely matches the requested dot
* clock.
*/
neoCalcVCLK(info, par, PICOS2KHZ(info->var.pixclock));
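/* Assuming the usual fbdev PICOS2KHZ definition (1e9 / pixclock), a
 * hypothetical pixclock of 25000 ps asks neoCalcVCLK for a
 * 40000 kHz (40 MHz) dot clock.
 */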
/* Since we program the clocks ourselves, always use VCLK3. */
par->MiscOutReg |= 0x0C;
/* already unlocked above */
/* BOGUS vga_wgfx(NULL, 0x09, 0x26); */
/* don't know what this is, but it's 0 from bootup anyway */
vga_wgfx(NULL, 0x15, 0x00);
/* was set to 0x01 by my bios in text and vesa modes */
vga_wgfx(NULL, 0x0A, par->GeneralLockReg);
/*
* The color mode needs to be set before calling vgaHWRestore
* to ensure the DAC is initialized properly.
*
 * NOTE: Make sure we don't change any reserved bits.
*/
temp = vga_rgfx(NULL, 0x90);
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2070:
temp &= 0xF0; /* Save bits 7:4 */
temp |= (par->ExtColorModeSelect & ~0xF0);
break;
case FB_ACCEL_NEOMAGIC_NM2090:
case FB_ACCEL_NEOMAGIC_NM2093:
case FB_ACCEL_NEOMAGIC_NM2097:
case FB_ACCEL_NEOMAGIC_NM2160:
case FB_ACCEL_NEOMAGIC_NM2200:
case FB_ACCEL_NEOMAGIC_NM2230:
case FB_ACCEL_NEOMAGIC_NM2360:
case FB_ACCEL_NEOMAGIC_NM2380:
temp &= 0x70; /* Save bits 6:4 */
temp |= (par->ExtColorModeSelect & ~0x70);
break;
}
vga_wgfx(NULL, 0x90, temp);
/*
* In some rare cases a lockup might occur if we don't delay
* here. (Reported by Miles Lane)
*/
//mdelay(200);
/*
* Disable horizontal and vertical graphics and text expansions so
* that vgaHWRestore works properly.
*/
temp = vga_rgfx(NULL, 0x25);
temp &= 0x39;
vga_wgfx(NULL, 0x25, temp);
/*
* Sleep for 200ms to make sure that the two operations above have
* had time to take effect.
*/
mdelay(200);
/*
 * This function handles restoring the generic VGA registers.
 */
vgaHWRestore(info, par);
/* linear colormap for non palettized modes */
switch (info->var.bits_per_pixel) {
case 8:
/* PseudoColor, 256 */
info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
break;
case 16:
/* TrueColor, 64k */
info->fix.visual = FB_VISUAL_TRUECOLOR;
for (i = 0; i < 64; i++) {
outb(i, 0x3c8);
outb(i << 1, 0x3c9);
outb(i, 0x3c9);
outb(i << 1, 0x3c9);
}
break;
case 24:
#ifdef NO_32BIT_SUPPORT_YET
case 32:
#endif
/* TrueColor, 16m */
info->fix.visual = FB_VISUAL_TRUECOLOR;
for (i = 0; i < 256; i++) {
outb(i, 0x3c8);
outb(i, 0x3c9);
outb(i, 0x3c9);
outb(i, 0x3c9);
}
break;
}
vga_wgfx(NULL, 0x0E, par->ExtCRTDispAddr);
vga_wgfx(NULL, 0x0F, par->ExtCRTOffset);
temp = vga_rgfx(NULL, 0x10);
temp &= 0x0F; /* Save bits 3:0 */
temp |= (par->SysIfaceCntl1 & ~0x0F); /* VESA Bios sets bit 1! */
vga_wgfx(NULL, 0x10, temp);
vga_wgfx(NULL, 0x11, par->SysIfaceCntl2);
vga_wgfx(NULL, 0x15, 0 /*par->SingleAddrPage */ );
vga_wgfx(NULL, 0x16, 0 /*par->DualAddrPage */ );
temp = vga_rgfx(NULL, 0x20);
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2070:
temp &= 0xFC; /* Save bits 7:2 */
temp |= (par->PanelDispCntlReg1 & ~0xFC);
break;
case FB_ACCEL_NEOMAGIC_NM2090:
case FB_ACCEL_NEOMAGIC_NM2093:
case FB_ACCEL_NEOMAGIC_NM2097:
case FB_ACCEL_NEOMAGIC_NM2160:
temp &= 0xDC; /* Save bits 7:6,4:2 */
temp |= (par->PanelDispCntlReg1 & ~0xDC);
break;
case FB_ACCEL_NEOMAGIC_NM2200:
case FB_ACCEL_NEOMAGIC_NM2230:
case FB_ACCEL_NEOMAGIC_NM2360:
case FB_ACCEL_NEOMAGIC_NM2380:
temp &= 0x98; /* Save bits 7,4:3 */
temp |= (par->PanelDispCntlReg1 & ~0x98);
break;
}
vga_wgfx(NULL, 0x20, temp);
temp = vga_rgfx(NULL, 0x25);
temp &= 0x38; /* Save bits 5:3 */
temp |= (par->PanelDispCntlReg2 & ~0x38);
vga_wgfx(NULL, 0x25, temp);
if (info->fix.accel != FB_ACCEL_NEOMAGIC_NM2070) {
temp = vga_rgfx(NULL, 0x30);
temp &= 0xEF; /* Save bits 7:5 and bits 3:0 */
temp |= (par->PanelDispCntlReg3 & ~0xEF);
vga_wgfx(NULL, 0x30, temp);
}
vga_wgfx(NULL, 0x28, par->PanelVertCenterReg1);
vga_wgfx(NULL, 0x29, par->PanelVertCenterReg2);
vga_wgfx(NULL, 0x2a, par->PanelVertCenterReg3);
if (info->fix.accel != FB_ACCEL_NEOMAGIC_NM2070) {
vga_wgfx(NULL, 0x32, par->PanelVertCenterReg4);
vga_wgfx(NULL, 0x33, par->PanelHorizCenterReg1);
vga_wgfx(NULL, 0x34, par->PanelHorizCenterReg2);
vga_wgfx(NULL, 0x35, par->PanelHorizCenterReg3);
}
if (info->fix.accel == FB_ACCEL_NEOMAGIC_NM2160)
vga_wgfx(NULL, 0x36, par->PanelHorizCenterReg4);
if (info->fix.accel == FB_ACCEL_NEOMAGIC_NM2200 ||
info->fix.accel == FB_ACCEL_NEOMAGIC_NM2230 ||
info->fix.accel == FB_ACCEL_NEOMAGIC_NM2360 ||
info->fix.accel == FB_ACCEL_NEOMAGIC_NM2380) {
vga_wgfx(NULL, 0x36, par->PanelHorizCenterReg4);
vga_wgfx(NULL, 0x37, par->PanelVertCenterReg5);
vga_wgfx(NULL, 0x38, par->PanelHorizCenterReg5);
clock_hi = 1;
}
/* Program VCLK3 if needed. */
if (par->ProgramVCLK && ((vga_rgfx(NULL, 0x9B) != par->VCLK3NumeratorLow)
|| (vga_rgfx(NULL, 0x9F) != par->VCLK3Denominator)
|| (clock_hi && ((vga_rgfx(NULL, 0x8F) & ~0x0f)
!= (par->VCLK3NumeratorHigh &
~0x0F))))) {
vga_wgfx(NULL, 0x9B, par->VCLK3NumeratorLow);
if (clock_hi) {
temp = vga_rgfx(NULL, 0x8F);
temp &= 0x0F; /* Save bits 3:0 */
temp |= (par->VCLK3NumeratorHigh & ~0x0F);
vga_wgfx(NULL, 0x8F, temp);
}
vga_wgfx(NULL, 0x9F, par->VCLK3Denominator);
}
if (par->biosMode)
vga_wcrt(NULL, 0x23, par->biosMode);
vga_wgfx(NULL, 0x93, 0xc0); /* Gives 5x faster framebuffer writes !!! */
/* Program vertical extension register */
if (info->fix.accel == FB_ACCEL_NEOMAGIC_NM2200 ||
info->fix.accel == FB_ACCEL_NEOMAGIC_NM2230 ||
info->fix.accel == FB_ACCEL_NEOMAGIC_NM2360 ||
info->fix.accel == FB_ACCEL_NEOMAGIC_NM2380) {
vga_wcrt(NULL, 0x70, par->VerticalExt);
}
vgaHWProtect(0); /* Turn on screen */
/* Calling this also locks offset registers required in update_start */
neoLock(&par->state);
info->fix.line_length =
info->var.xres_virtual * (info->var.bits_per_pixel >> 3);
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2200:
case FB_ACCEL_NEOMAGIC_NM2230:
case FB_ACCEL_NEOMAGIC_NM2360:
case FB_ACCEL_NEOMAGIC_NM2380:
neo2200_accel_init(info, &info->var);
break;
default:
break;
}
return 0;
}
/*
* Pan or Wrap the Display
*/
static int neofb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct neofb_par *par = info->par;
struct vgastate *state = &par->state;
int oldExtCRTDispAddr;
int Base;
DBG("neofb_pan_display");
Base = (var->yoffset * info->var.xres_virtual + var->xoffset) >> 2;
Base *= (info->var.bits_per_pixel + 7) / 8;
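/* Worked example (hypothetical values): yoffset = 16,
 * xres_virtual = 1024, xoffset = 0 at 8 bpp gives
 * Base = (16 * 1024) >> 2 = 4096 = 0x1000, so CR0C gets 0x10,
 * CR0D gets 0x00 and the low nibble of GR0E stays 0.
 */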
neoUnlock();
/*
* These are the generic starting address registers.
*/
vga_wcrt(state->vgabase, 0x0C, (Base & 0x00FF00) >> 8);
vga_wcrt(state->vgabase, 0x0D, (Base & 0x00FF));
/*
* Make sure we don't clobber some other bits that might already
* have been set. NOTE: NM2200 has a writable bit 3, but it shouldn't
* be needed.
*/
oldExtCRTDispAddr = vga_rgfx(NULL, 0x0E);
vga_wgfx(state->vgabase, 0x0E, (((Base >> 16) & 0x0f) | (oldExtCRTDispAddr & 0xf0)));
neoLock(state);
return 0;
}
static int neofb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *fb)
{
if (regno >= fb->cmap.len || regno > 255)
return -EINVAL;
if (fb->var.bits_per_pixel <= 8) {
outb(regno, 0x3c8);
outb(red >> 10, 0x3c9);
outb(green >> 10, 0x3c9);
outb(blue >> 10, 0x3c9);
} else if (regno < 16) {
switch (fb->var.bits_per_pixel) {
case 16:
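/* RGB565 packing; e.g. (hypothetical input) the 16-bit components
 * red = green = blue = 0xffff pack to
 * 0xf800 | (0xfc00 >> 5) | (0xf800 >> 11) = 0xffff, i.e. white.
 */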
((u32 *) fb->pseudo_palette)[regno] =
((red & 0xf800)) | ((green & 0xfc00) >> 5) |
((blue & 0xf800) >> 11);
break;
case 24:
((u32 *) fb->pseudo_palette)[regno] =
((red & 0xff00) << 8) | ((green & 0xff00)) |
((blue & 0xff00) >> 8);
break;
#ifdef NO_32BIT_SUPPORT_YET
case 32:
((u32 *) fb->pseudo_palette)[regno] =
((transp & 0xff00) << 16) | ((red & 0xff00) << 8) |
((green & 0xff00)) | ((blue & 0xff00) >> 8);
break;
#endif
default:
return 1;
}
}
return 0;
}
/*
* (Un)Blank the display.
*/
static int neofb_blank(int blank_mode, struct fb_info *info)
{
/*
* Blank the screen if blank_mode != 0, else unblank.
* Return 0 if blanking succeeded, != 0 if un-/blanking failed due to
* e.g. a video mode which doesn't support it. Implements VESA suspend
* and powerdown modes for monitors, and backlight control on LCDs.
* blank_mode == 0: unblanked (backlight on)
* blank_mode == 1: blank (backlight on)
* blank_mode == 2: suspend vsync (backlight off)
* blank_mode == 3: suspend hsync (backlight off)
* blank_mode == 4: powerdown (backlight off)
*
* wms...Enable VESA DPMS compatible powerdown mode
* run "setterm -powersave powerdown" to take advantage
*/
struct neofb_par *par = info->par;
int seqflags, lcdflags, dpmsflags, reg, tmpdisp;
/*
* Read back the register bits related to display configuration. They might
* have been changed underneath the driver via Fn key stroke.
*/
neoUnlock();
tmpdisp = vga_rgfx(NULL, 0x20) & 0x03;
neoLock(&par->state);
/* In case we blank the screen, we want to store the possibly new
* configuration in the driver. During un-blank, we re-apply this setting,
* since the LCD bit will be cleared in order to switch off the backlight.
*/
if (par->PanelDispCntlRegRead) {
par->PanelDispCntlReg1 = tmpdisp;
}
par->PanelDispCntlRegRead = !blank_mode;
switch (blank_mode) {
case FB_BLANK_POWERDOWN: /* powerdown - both sync lines down */
seqflags = VGA_SR01_SCREEN_OFF; /* Disable sequencer */
lcdflags = 0; /* LCD off */
dpmsflags = NEO_GR01_SUPPRESS_HSYNC |
NEO_GR01_SUPPRESS_VSYNC;
#ifdef CONFIG_TOSHIBA
/* Do we still need this ? */
/* attempt to turn off backlight on toshiba; also turns off external */
{
SMMRegisters regs;
regs.eax = 0xff00; /* HCI_SET */
regs.ebx = 0x0002; /* HCI_BACKLIGHT */
regs.ecx = 0x0000; /* HCI_DISABLE */
tosh_smm(&regs);
}
#endif
break;
case FB_BLANK_HSYNC_SUSPEND: /* hsync off */
seqflags = VGA_SR01_SCREEN_OFF; /* Disable sequencer */
lcdflags = 0; /* LCD off */
dpmsflags = NEO_GR01_SUPPRESS_HSYNC;
break;
case FB_BLANK_VSYNC_SUSPEND: /* vsync off */
seqflags = VGA_SR01_SCREEN_OFF; /* Disable sequencer */
lcdflags = 0; /* LCD off */
dpmsflags = NEO_GR01_SUPPRESS_VSYNC;
break;
case FB_BLANK_NORMAL: /* just blank screen (backlight stays on) */
seqflags = VGA_SR01_SCREEN_OFF; /* Disable sequencer */
/*
* During a blank operation with the LID shut, we might store "LCD off"
* by mistake. Due to timing issues, the BIOS may switch the lights
* back on, and we turn it back off once we "unblank".
*
* So here is an attempt to implement ">=" - if we are in the process
* of unblanking, and the LCD bit is unset in the driver but set in the
* register, we must keep it.
*/
lcdflags = ((par->PanelDispCntlReg1 | tmpdisp) & 0x02); /* LCD normal */
dpmsflags = 0x00; /* no hsync/vsync suppression */
break;
case FB_BLANK_UNBLANK: /* unblank */
seqflags = 0; /* Enable sequencer */
lcdflags = ((par->PanelDispCntlReg1 | tmpdisp) & 0x02); /* LCD normal */
dpmsflags = 0x00; /* no hsync/vsync suppression */
#ifdef CONFIG_TOSHIBA
/* Do we still need this ? */
/* attempt to re-enable backlight/external on toshiba */
{
SMMRegisters regs;
regs.eax = 0xff00; /* HCI_SET */
regs.ebx = 0x0002; /* HCI_BACKLIGHT */
regs.ecx = 0x0001; /* HCI_ENABLE */
tosh_smm(&regs);
}
#endif
break;
default: /* Anything else we don't understand; return 1 to tell
	  * fb_blank we didn't actually do anything */
return 1;
}
neoUnlock();
reg = (vga_rseq(NULL, 0x01) & ~0x20) | seqflags;
vga_wseq(NULL, 0x01, reg);
reg = (vga_rgfx(NULL, 0x20) & ~0x02) | lcdflags;
vga_wgfx(NULL, 0x20, reg);
reg = (vga_rgfx(NULL, 0x01) & ~0xF0) | 0x80 | dpmsflags;
vga_wgfx(NULL, 0x01, reg);
neoLock(&par->state);
return 0;
}
static void
neo2200_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
struct neofb_par *par = info->par;
u_long dst, rop;
dst = rect->dx + rect->dy * info->var.xres_virtual;
rop = rect->rop ? 0x060000 : 0x0c0000;
neo2200_wait_fifo(info, 4);
/* set blt control */
writel(NEO_BC3_FIFO_EN |
NEO_BC0_SRC_IS_FG | NEO_BC3_SKIP_MAPPING |
// NEO_BC3_DST_XY_ADDR |
// NEO_BC3_SRC_XY_ADDR |
rop, &par->neo2200->bltCntl);
switch (info->var.bits_per_pixel) {
case 8:
writel(rect->color, &par->neo2200->fgColor);
break;
case 16:
case 24:
writel(((u32 *) (info->pseudo_palette))[rect->color],
&par->neo2200->fgColor);
break;
}
writel(dst * ((info->var.bits_per_pixel + 7) >> 3),
&par->neo2200->dstStart);
writel((rect->height << 16) | (rect->width & 0xffff),
&par->neo2200->xyExt);
}
static void
neo2200_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
u32 sx = area->sx, sy = area->sy, dx = area->dx, dy = area->dy;
struct neofb_par *par = info->par;
u_long src, dst, bltCntl;
bltCntl = NEO_BC3_FIFO_EN | NEO_BC3_SKIP_MAPPING | 0x0C0000;
if ((dy > sy) || ((dy == sy) && (dx > sx))) {
/* Start with the lower right corner */
sy += (area->height - 1);
dy += (area->height - 1);
sx += (area->width - 1);
dx += (area->width - 1);
bltCntl |= NEO_BC0_X_DEC | NEO_BC0_DST_Y_DEC | NEO_BC0_SRC_Y_DEC;
}
src = sx * (info->var.bits_per_pixel >> 3) + sy*info->fix.line_length;
dst = dx * (info->var.bits_per_pixel >> 3) + dy*info->fix.line_length;
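/* Worked example (hypothetical values): at 16 bpp with
 * line_length = 2048, a source corner of (sx = 10, sy = 5) yields
 * src = 10 * 2 + 5 * 2048 = 10260 bytes into video memory.
 */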
neo2200_wait_fifo(info, 4);
/* set blt control */
writel(bltCntl, &par->neo2200->bltCntl);
writel(src, &par->neo2200->srcStart);
writel(dst, &par->neo2200->dstStart);
writel((area->height << 16) | (area->width & 0xffff),
&par->neo2200->xyExt);
}
static void
neo2200_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct neofb_par *par = info->par;
int s_pitch = (image->width * image->depth + 7) >> 3;
int scan_align = info->pixmap.scan_align - 1;
int buf_align = info->pixmap.buf_align - 1;
int bltCntl_flags, d_pitch, data_len;
// The data is padded for the hardware
d_pitch = (s_pitch + scan_align) & ~scan_align;
data_len = ((d_pitch * image->height) + buf_align) & ~buf_align;
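/* Worked example (hypothetical values): a 10-pixel-wide, depth-1
 * image has s_pitch = (10 * 1 + 7) >> 3 = 2 bytes; with
 * info->pixmap.scan_align = 4 this pads to
 * d_pitch = (2 + 3) & ~3 = 4.
 */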
neo2200_sync(info);
if (image->depth == 1) {
if (info->var.bits_per_pixel == 24 && image->width < 16) {
/* FIXME. There is a bug with accelerated color-expanded
* transfers in 24 bit mode if the image being transferred
* is less than 16 bits wide. This is due to insufficient
* padding when writing the image. We need to adjust
* struct fb_pixmap. Not yet done. */
cfb_imageblit(info, image);
return;
}
bltCntl_flags = NEO_BC0_SRC_MONO;
} else if (image->depth == info->var.bits_per_pixel) {
bltCntl_flags = 0;
} else {
/* We don't currently support hardware acceleration if image
* depth is different from display */
cfb_imageblit(info, image);
return;
}
switch (info->var.bits_per_pixel) {
case 8:
writel(image->fg_color, &par->neo2200->fgColor);
writel(image->bg_color, &par->neo2200->bgColor);
break;
case 16:
case 24:
writel(((u32 *) (info->pseudo_palette))[image->fg_color],
&par->neo2200->fgColor);
writel(((u32 *) (info->pseudo_palette))[image->bg_color],
&par->neo2200->bgColor);
break;
}
writel(NEO_BC0_SYS_TO_VID |
NEO_BC3_SKIP_MAPPING | bltCntl_flags |
// NEO_BC3_DST_XY_ADDR |
0x0c0000, &par->neo2200->bltCntl);
writel(0, &par->neo2200->srcStart);
// par->neo2200->dstStart = (image->dy << 16) | (image->dx & 0xffff);
writel(((image->dx & 0xffff) * (info->var.bits_per_pixel >> 3) +
image->dy * info->fix.line_length), &par->neo2200->dstStart);
writel((image->height << 16) | (image->width & 0xffff),
&par->neo2200->xyExt);
memcpy_toio(par->mmio_vbase + 0x100000, image->data, data_len);
}
static void
neofb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2200:
case FB_ACCEL_NEOMAGIC_NM2230:
case FB_ACCEL_NEOMAGIC_NM2360:
case FB_ACCEL_NEOMAGIC_NM2380:
neo2200_fillrect(info, rect);
break;
default:
cfb_fillrect(info, rect);
break;
}
}
static void
neofb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
{
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2200:
case FB_ACCEL_NEOMAGIC_NM2230:
case FB_ACCEL_NEOMAGIC_NM2360:
case FB_ACCEL_NEOMAGIC_NM2380:
neo2200_copyarea(info, area);
break;
default:
cfb_copyarea(info, area);
break;
}
}
static void
neofb_imageblit(struct fb_info *info, const struct fb_image *image)
{
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2200:
case FB_ACCEL_NEOMAGIC_NM2230:
case FB_ACCEL_NEOMAGIC_NM2360:
case FB_ACCEL_NEOMAGIC_NM2380:
neo2200_imageblit(info, image);
break;
default:
cfb_imageblit(info, image);
break;
}
}
static int
neofb_sync(struct fb_info *info)
{
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2200:
case FB_ACCEL_NEOMAGIC_NM2230:
case FB_ACCEL_NEOMAGIC_NM2360:
case FB_ACCEL_NEOMAGIC_NM2380:
neo2200_sync(info);
break;
default:
break;
}
return 0;
}
/*
static void
neofb_draw_cursor(struct fb_info *info, u8 *dst, u8 *src, unsigned int width)
{
//memset_io(info->sprite.addr, 0xff, 1);
}
static int
neofb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
struct neofb_par *par = (struct neofb_par *) info->par;
* Disable cursor *
write_le32(NEOREG_CURSCNTL, ~NEO_CURS_ENABLE, par);
if (cursor->set & FB_CUR_SETPOS) {
u32 x = cursor->image.dx;
u32 y = cursor->image.dy;
info->cursor.image.dx = x;
info->cursor.image.dy = y;
write_le32(NEOREG_CURSX, x, par);
write_le32(NEOREG_CURSY, y, par);
}
if (cursor->set & FB_CUR_SETSIZE) {
info->cursor.image.height = cursor->image.height;
info->cursor.image.width = cursor->image.width;
}
if (cursor->set & FB_CUR_SETHOT)
info->cursor.hot = cursor->hot;
if (cursor->set & FB_CUR_SETCMAP) {
if (cursor->image.depth == 1) {
u32 fg = cursor->image.fg_color;
u32 bg = cursor->image.bg_color;
info->cursor.image.fg_color = fg;
info->cursor.image.bg_color = bg;
fg = ((fg & 0xff0000) >> 16) | ((fg & 0xff) << 16) | (fg & 0xff00);
bg = ((bg & 0xff0000) >> 16) | ((bg & 0xff) << 16) | (bg & 0xff00);
write_le32(NEOREG_CURSFGCOLOR, fg, par);
write_le32(NEOREG_CURSBGCOLOR, bg, par);
}
}
if (cursor->set & FB_CUR_SETSHAPE)
fb_load_cursor_image(info);
if (info->cursor.enable)
write_le32(NEOREG_CURSCNTL, NEO_CURS_ENABLE, par);
return 0;
}
*/
static struct fb_ops neofb_ops = {
.owner = THIS_MODULE,
.fb_open = neofb_open,
.fb_release = neofb_release,
.fb_check_var = neofb_check_var,
.fb_set_par = neofb_set_par,
.fb_setcolreg = neofb_setcolreg,
.fb_pan_display = neofb_pan_display,
.fb_blank = neofb_blank,
.fb_sync = neofb_sync,
.fb_fillrect = neofb_fillrect,
.fb_copyarea = neofb_copyarea,
.fb_imageblit = neofb_imageblit,
};
/* --------------------------------------------------------------------- */
static struct fb_videomode mode800x480 = {
.xres = 800,
.yres = 480,
.pixclock = 25000,
.left_margin = 88,
.right_margin = 40,
.upper_margin = 23,
.lower_margin = 1,
.hsync_len = 128,
.vsync_len = 4,
.sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
.vmode = FB_VMODE_NONINTERLACED
};
static int neo_map_mmio(struct fb_info *info, struct pci_dev *dev)
{
struct neofb_par *par = info->par;
DBG("neo_map_mmio");
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2070:
info->fix.mmio_start = pci_resource_start(dev, 0)+
0x100000;
break;
case FB_ACCEL_NEOMAGIC_NM2090:
case FB_ACCEL_NEOMAGIC_NM2093:
info->fix.mmio_start = pci_resource_start(dev, 0)+
0x200000;
break;
case FB_ACCEL_NEOMAGIC_NM2160:
case FB_ACCEL_NEOMAGIC_NM2097:
case FB_ACCEL_NEOMAGIC_NM2200:
case FB_ACCEL_NEOMAGIC_NM2230:
case FB_ACCEL_NEOMAGIC_NM2360:
case FB_ACCEL_NEOMAGIC_NM2380:
info->fix.mmio_start = pci_resource_start(dev, 1);
break;
default:
info->fix.mmio_start = pci_resource_start(dev, 0);
}
info->fix.mmio_len = MMIO_SIZE;
if (!request_mem_region
(info->fix.mmio_start, MMIO_SIZE, "memory mapped I/O")) {
printk("neofb: memory mapped IO in use\n");
return -EBUSY;
}
par->mmio_vbase = ioremap(info->fix.mmio_start, MMIO_SIZE);
if (!par->mmio_vbase) {
printk("neofb: unable to map memory mapped IO\n");
release_mem_region(info->fix.mmio_start,
info->fix.mmio_len);
return -ENOMEM;
} else
printk(KERN_INFO "neofb: mapped io at %p\n",
par->mmio_vbase);
return 0;
}
static void neo_unmap_mmio(struct fb_info *info)
{
struct neofb_par *par = info->par;
DBG("neo_unmap_mmio");
iounmap(par->mmio_vbase);
par->mmio_vbase = NULL;
release_mem_region(info->fix.mmio_start,
info->fix.mmio_len);
}
static int neo_map_video(struct fb_info *info, struct pci_dev *dev,
int video_len)
{
//unsigned long addr;
DBG("neo_map_video");
info->fix.smem_start = pci_resource_start(dev, 0);
info->fix.smem_len = video_len;
if (!request_mem_region(info->fix.smem_start, info->fix.smem_len,
"frame buffer")) {
printk("neofb: frame buffer in use\n");
return -EBUSY;
}
info->screen_base =
ioremap(info->fix.smem_start, info->fix.smem_len);
if (!info->screen_base) {
printk("neofb: unable to map screen memory\n");
release_mem_region(info->fix.smem_start,
info->fix.smem_len);
return -ENOMEM;
} else
printk(KERN_INFO "neofb: mapped framebuffer at %p\n",
info->screen_base);
#ifdef CONFIG_MTRR
((struct neofb_par *)(info->par))->mtrr =
mtrr_add(info->fix.smem_start, pci_resource_len(dev, 0),
MTRR_TYPE_WRCOMB, 1);
#endif
/* Clear framebuffer, it's all white in memory after boot */
memset_io(info->screen_base, 0, info->fix.smem_len);
/* Allocate Cursor drawing pad.
info->fix.smem_len -= PAGE_SIZE;
addr = info->fix.smem_start + info->fix.smem_len;
write_le32(NEOREG_CURSMEMPOS, ((0x000f & (addr >> 10)) << 8) |
((0x0ff0 & (addr >> 10)) >> 4), par);
addr = (unsigned long) info->screen_base + info->fix.smem_len;
info->sprite.addr = (u8 *) addr; */
return 0;
}
static void neo_unmap_video(struct fb_info *info)
{
DBG("neo_unmap_video");
#ifdef CONFIG_MTRR
{
struct neofb_par *par = info->par;
mtrr_del(par->mtrr, info->fix.smem_start,
info->fix.smem_len);
}
#endif
iounmap(info->screen_base);
info->screen_base = NULL;
release_mem_region(info->fix.smem_start,
info->fix.smem_len);
}
static int neo_scan_monitor(struct fb_info *info)
{
struct neofb_par *par = info->par;
unsigned char type, display;
int w;
// Eventually we will have i2c support.
info->monspecs.modedb = kmalloc(sizeof(struct fb_videomode), GFP_KERNEL);
if (!info->monspecs.modedb)
return -ENOMEM;
info->monspecs.modedb_len = 1;
/* Determine the panel type */
vga_wgfx(NULL, 0x09, 0x26);
type = vga_rgfx(NULL, 0x21);
display = vga_rgfx(NULL, 0x20);
if (!par->internal_display && !par->external_display) {
par->internal_display = display & 2 || !(display & 3) ? 1 : 0;
par->external_display = display & 1;
printk (KERN_INFO "Autodetected %s display\n",
par->internal_display && par->external_display ? "simultaneous" :
par->internal_display ? "internal" : "external");
}
/* Determine panel width -- used in NeoValidMode. */
w = vga_rgfx(NULL, 0x20);
vga_wgfx(NULL, 0x09, 0x00);
switch ((w & 0x18) >> 3) {
case 0x00:
// 640x480@60
par->NeoPanelWidth = 640;
par->NeoPanelHeight = 480;
memcpy(info->monspecs.modedb, &vesa_modes[3], sizeof(struct fb_videomode));
break;
case 0x01:
par->NeoPanelWidth = 800;
if (par->libretto) {
par->NeoPanelHeight = 480;
memcpy(info->monspecs.modedb, &mode800x480, sizeof(struct fb_videomode));
} else {
// 800x600@60
par->NeoPanelHeight = 600;
memcpy(info->monspecs.modedb, &vesa_modes[8], sizeof(struct fb_videomode));
}
break;
case 0x02:
// 1024x768@60
par->NeoPanelWidth = 1024;
par->NeoPanelHeight = 768;
memcpy(info->monspecs.modedb, &vesa_modes[13], sizeof(struct fb_videomode));
break;
case 0x03:
/* 1280x1024@60 panel support needs to be added */
#ifdef NOT_DONE
par->NeoPanelWidth = 1280;
par->NeoPanelHeight = 1024;
memcpy(info->monspecs.modedb, &vesa_modes[20], sizeof(struct fb_videomode));
break;
#else
printk(KERN_ERR
"neofb: Only 640x480, 800x600/480 and 1024x768 panels are currently supported\n");
return -1;
#endif
default:
// 640x480@60
par->NeoPanelWidth = 640;
par->NeoPanelHeight = 480;
memcpy(info->monspecs.modedb, &vesa_modes[3], sizeof(struct fb_videomode));
break;
}
printk(KERN_INFO "Panel is a %dx%d %s %s display\n",
par->NeoPanelWidth,
par->NeoPanelHeight,
(type & 0x02) ? "color" : "monochrome",
(type & 0x10) ? "TFT" : "dual scan");
return 0;
}
static int neo_init_hw(struct fb_info *info)
{
struct neofb_par *par = info->par;
int videoRam = 896;
int maxClock = 65000;
int CursorMem = 1024;
int CursorOff = 0x100;
DBG("neo_init_hw");
neoUnlock();
#if 0
printk(KERN_DEBUG "--- Neo extended register dump ---\n");
for (int w = 0; w < 0x85; w++)
printk(KERN_DEBUG "CR %p: %p\n", (void *) w,
(void *) vga_rcrt(NULL, w));
for (int w = 0; w < 0xC7; w++)
printk(KERN_DEBUG "GR %p: %p\n", (void *) w,
(void *) vga_rgfx(NULL, w));
#endif
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2070:
videoRam = 896;
maxClock = 65000;
break;
case FB_ACCEL_NEOMAGIC_NM2090:
case FB_ACCEL_NEOMAGIC_NM2093:
case FB_ACCEL_NEOMAGIC_NM2097:
videoRam = 1152;
maxClock = 80000;
break;
case FB_ACCEL_NEOMAGIC_NM2160:
videoRam = 2048;
maxClock = 90000;
break;
case FB_ACCEL_NEOMAGIC_NM2200:
videoRam = 2560;
maxClock = 110000;
break;
case FB_ACCEL_NEOMAGIC_NM2230:
videoRam = 3008;
maxClock = 110000;
break;
case FB_ACCEL_NEOMAGIC_NM2360:
videoRam = 4096;
maxClock = 110000;
break;
case FB_ACCEL_NEOMAGIC_NM2380:
videoRam = 6144;
maxClock = 110000;
break;
}
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2070:
case FB_ACCEL_NEOMAGIC_NM2090:
case FB_ACCEL_NEOMAGIC_NM2093:
CursorMem = 2048;
CursorOff = 0x100;
break;
case FB_ACCEL_NEOMAGIC_NM2097:
case FB_ACCEL_NEOMAGIC_NM2160:
CursorMem = 1024;
CursorOff = 0x100;
break;
case FB_ACCEL_NEOMAGIC_NM2200:
case FB_ACCEL_NEOMAGIC_NM2230:
case FB_ACCEL_NEOMAGIC_NM2360:
case FB_ACCEL_NEOMAGIC_NM2380:
CursorMem = 1024;
CursorOff = 0x1000;
par->neo2200 = (Neo2200 __iomem *) par->mmio_vbase;
break;
}
/*
info->sprite.size = CursorMem;
info->sprite.scan_align = 1;
info->sprite.buf_align = 1;
info->sprite.flags = FB_PIXMAP_IO;
info->sprite.outbuf = neofb_draw_cursor;
*/
par->maxClock = maxClock;
par->cursorOff = CursorOff;
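/* The probe path expects the usable VRAM size in bytes; e.g. the
 * NM2200 entry above reports 2560 KiB = 2621440 bytes.
 */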
return videoRam * 1024;
}
static struct fb_info *neo_alloc_fb_info(struct pci_dev *dev,
const struct pci_device_id *id)
{
struct fb_info *info;
struct neofb_par *par;
info = framebuffer_alloc(sizeof(struct neofb_par), &dev->dev);
if (!info)
return NULL;
par = info->par;
info->fix.accel = id->driver_data;
par->pci_burst = !nopciburst;
par->lcd_stretch = !nostretch;
par->libretto = libretto;
par->internal_display = internal;
par->external_display = external;
info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
switch (info->fix.accel) {
case FB_ACCEL_NEOMAGIC_NM2070:
snprintf(info->fix.id, sizeof(info->fix.id),
"MagicGraph 128");
break;
case FB_ACCEL_NEOMAGIC_NM2090:
snprintf(info->fix.id, sizeof(info->fix.id),
"MagicGraph 128V");
break;
case FB_ACCEL_NEOMAGIC_NM2093:
snprintf(info->fix.id, sizeof(info->fix.id),
"MagicGraph 128ZV");
break;
case FB_ACCEL_NEOMAGIC_NM2097:
snprintf(info->fix.id, sizeof(info->fix.id),
"MagicGraph 128ZV+");
break;
case FB_ACCEL_NEOMAGIC_NM2160:
snprintf(info->fix.id, sizeof(info->fix.id),
"MagicGraph 128XD");
break;
case FB_ACCEL_NEOMAGIC_NM2200:
snprintf(info->fix.id, sizeof(info->fix.id),
"MagicGraph 256AV");
info->flags |= FBINFO_HWACCEL_IMAGEBLIT |
FBINFO_HWACCEL_COPYAREA |
FBINFO_HWACCEL_FILLRECT;
break;
case FB_ACCEL_NEOMAGIC_NM2230:
snprintf(info->fix.id, sizeof(info->fix.id),
"MagicGraph 256AV+");
info->flags |= FBINFO_HWACCEL_IMAGEBLIT |
FBINFO_HWACCEL_COPYAREA |
FBINFO_HWACCEL_FILLRECT;
break;
case FB_ACCEL_NEOMAGIC_NM2360:
snprintf(info->fix.id, sizeof(info->fix.id),
"MagicGraph 256ZX");
info->flags |= FBINFO_HWACCEL_IMAGEBLIT |
FBINFO_HWACCEL_COPYAREA |
FBINFO_HWACCEL_FILLRECT;
break;
case FB_ACCEL_NEOMAGIC_NM2380:
snprintf(info->fix.id, sizeof(info->fix.id),
"MagicGraph 256XL+");
info->flags |= FBINFO_HWACCEL_IMAGEBLIT |
FBINFO_HWACCEL_COPYAREA |
FBINFO_HWACCEL_FILLRECT;
break;
}
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.type_aux = 0;
info->fix.xpanstep = 0;
info->fix.ypanstep = 4;
info->fix.ywrapstep = 0;
info->fix.accel = id->driver_data;
info->fbops = &neofb_ops;
info->pseudo_palette = par->palette;
return info;
}
static void neo_free_fb_info(struct fb_info *info)
{
if (info) {
/*
* Free the colourmap
*/
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
}
/* --------------------------------------------------------------------- */
static int neofb_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct fb_info *info;
u_int h_sync, v_sync;
int video_len, err;
DBG("neofb_probe");
err = pci_enable_device(dev);
if (err)
return err;
err = -ENOMEM;
info = neo_alloc_fb_info(dev, id);
if (!info)
return err;
err = neo_map_mmio(info, dev);
if (err)
goto err_map_mmio;
err = neo_scan_monitor(info);
if (err)
goto err_scan_monitor;
video_len = neo_init_hw(info);
if (video_len < 0) {
err = video_len;
goto err_init_hw;
}
err = neo_map_video(info, dev, video_len);
if (err)
goto err_init_hw;
if (!fb_find_mode(&info->var, info, mode_option, NULL, 0,
info->monspecs.modedb, 16)) {
printk(KERN_ERR "neofb: Unable to find usable video mode.\n");
err = -EINVAL;
goto err_map_video;
}
/*
* Calculate the hsync and vsync frequencies. Note that
* we split the 1e12 constant up so that we can preserve
* the precision and fit the results into 32-bit registers.
* (1953125000 * 512 = 1e12)
*/
h_sync = 1953125000 / info->var.pixclock;
h_sync =
h_sync * 512 / (info->var.xres + info->var.left_margin +
info->var.right_margin + info->var.hsync_len);
v_sync =
h_sync / (info->var.yres + info->var.upper_margin +
info->var.lower_margin + info->var.vsync_len);
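/* Worked example with the standard VESA 640x480@60 timing
 * (pixclock = 39722 ps, htotal = 800, vtotal = 525):
 * 1953125000 / 39722 = 49169, then * 512 / 800 = 31468 Hz hsync
 * and 31468 / 525 = 59 Hz vsync -- the same result as
 * 1e12 / (39722 * 800) without overflowing 32 bits.
 */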
printk(KERN_INFO "neofb v" NEOFB_VERSION
": %dkB VRAM, using %dx%d, %d.%03dkHz, %dHz\n",
info->fix.smem_len >> 10, info->var.xres,
info->var.yres, h_sync / 1000, h_sync % 1000, v_sync);
err = fb_alloc_cmap(&info->cmap, 256, 0);
if (err < 0)
goto err_map_video;
err = register_framebuffer(info);
if (err < 0)
goto err_reg_fb;
fb_info(info, "%s frame buffer device\n", info->fix.id);
/*
* Our driver data
*/
pci_set_drvdata(dev, info);
return 0;
err_reg_fb:
fb_dealloc_cmap(&info->cmap);
err_map_video:
neo_unmap_video(info);
err_init_hw:
fb_destroy_modedb(info->monspecs.modedb);
err_scan_monitor:
neo_unmap_mmio(info);
err_map_mmio:
neo_free_fb_info(info);
return err;
}
static void neofb_remove(struct pci_dev *dev)
{
struct fb_info *info = pci_get_drvdata(dev);
DBG("neofb_remove");
if (info) {
/*
* If unregister_framebuffer fails, then
* we will be leaving hooks that could cause
 * oopsen lying around.
*/
if (unregister_framebuffer(info))
printk(KERN_WARNING
"neofb: danger danger! Oopsen imminent!\n");
neo_unmap_video(info);
fb_destroy_modedb(info->monspecs.modedb);
neo_unmap_mmio(info);
neo_free_fb_info(info);
}
}
static struct pci_device_id neofb_devices[] = {
{PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2070,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2070},
{PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2090,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2090},
{PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2093,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2093},
{PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2097,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2097},
{PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2160,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2160},
{PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2200,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2200},
{PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2230,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2230},
{PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2360,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2360},
{PCI_VENDOR_ID_NEOMAGIC, PCI_CHIP_NM2380,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, FB_ACCEL_NEOMAGIC_NM2380},
{0, 0, 0, 0, 0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, neofb_devices);
static struct pci_driver neofb_driver = {
.name = "neofb",
.id_table = neofb_devices,
.probe = neofb_probe,
.remove = neofb_remove,
};
/* ************************* init in-kernel code ************************** */
#ifndef MODULE
static int __init neofb_setup(char *options)
{
char *this_opt;
DBG("neofb_setup");
if (!options || !*options)
return 0;
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!*this_opt)
continue;
if (!strncmp(this_opt, "internal", 8))
internal = 1;
else if (!strncmp(this_opt, "external", 8))
external = 1;
else if (!strncmp(this_opt, "nostretch", 9))
nostretch = 1;
else if (!strncmp(this_opt, "nopciburst", 10))
nopciburst = 1;
else if (!strncmp(this_opt, "libretto", 8))
libretto = 1;
else
mode_option = this_opt;
}
return 0;
}
#endif /* MODULE */
static int __init neofb_init(void)
{
#ifndef MODULE
char *option = NULL;
if (fb_get_options("neofb", &option))
return -ENODEV;
neofb_setup(option);
#endif
return pci_register_driver(&neofb_driver);
}
module_init(neofb_init);
#ifdef MODULE
static void __exit neofb_exit(void)
{
pci_unregister_driver(&neofb_driver);
}
module_exit(neofb_exit);
#endif /* MODULE */
|
gpl-2.0
|
luisbg/btrfs-next
|
arch/arm/mach-shmobile/board-lager.c
|
2042
|
1401
|
/*
* Lager board support
*
* Copyright (C) 2013 Renesas Solutions Corp.
* Copyright (C) 2013 Magnus Damm
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/interrupt.h>
#include <linux/irqchip.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <mach/common.h>
#include <mach/r8a7790.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
static void __init lager_add_standard_devices(void)
{
r8a7790_clock_init();
r8a7790_add_standard_devices();
}
static const char *lager_boards_compat_dt[] __initdata = {
"renesas,lager",
NULL,
};
DT_MACHINE_START(LAGER_DT, "lager")
.init_irq = irqchip_init,
.init_time = r8a7790_timer_init,
.init_machine = lager_add_standard_devices,
.dt_compat = lager_boards_compat_dt,
MACHINE_END
|
gpl-2.0
|
TheNameIsNigel/android_kernel_huawei_msm8928
|
drivers/scsi/libsas/sas_scsi_host.c
|
4858
|
27697
|
/*
* Serial Attached SCSI (SAS) class SCSI Host glue.
*
* Copyright (C) 2005 Adaptec, Inc. All rights reserved.
* Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
*
* This file is licensed under GPLv2.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
*/
#include <linux/kthread.h>
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/ctype.h>
#include "sas_internal.h"
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/sas_ata.h>
#include "../scsi_sas_internal.h"
#include "../scsi_transport_api.h"
#include "../scsi_priv.h"
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/libata.h>
/* record final status and free the task */
static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
{
struct task_status_struct *ts = &task->task_status;
int hs = 0, stat = 0;
if (ts->resp == SAS_TASK_UNDELIVERED) {
/* transport error */
hs = DID_NO_CONNECT;
} else { /* ts->resp == SAS_TASK_COMPLETE */
/* task delivered, what happened afterwards? */
switch (ts->stat) {
case SAS_DEV_NO_RESPONSE:
case SAS_INTERRUPTED:
case SAS_PHY_DOWN:
case SAS_NAK_R_ERR:
case SAS_OPEN_TO:
hs = DID_NO_CONNECT;
break;
case SAS_DATA_UNDERRUN:
scsi_set_resid(sc, ts->residual);
if (scsi_bufflen(sc) - scsi_get_resid(sc) < sc->underflow)
hs = DID_ERROR;
break;
case SAS_DATA_OVERRUN:
hs = DID_ERROR;
break;
case SAS_QUEUE_FULL:
hs = DID_SOFT_ERROR; /* retry */
break;
case SAS_DEVICE_UNKNOWN:
hs = DID_BAD_TARGET;
break;
case SAS_SG_ERR:
hs = DID_PARITY;
break;
case SAS_OPEN_REJECT:
if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY)
hs = DID_SOFT_ERROR; /* retry */
else
hs = DID_ERROR;
break;
case SAS_PROTO_RESPONSE:
SAS_DPRINTK("LLDD:%s sent SAS_PROTO_RESP for an SSP "
"task; please report this\n",
task->dev->port->ha->sas_ha_name);
break;
case SAS_ABORTED_TASK:
hs = DID_ABORT;
break;
case SAM_STAT_CHECK_CONDITION:
memcpy(sc->sense_buffer, ts->buf,
min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
stat = SAM_STAT_CHECK_CONDITION;
break;
default:
stat = ts->stat;
break;
}
}
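/* The SCSI midlayer keeps the host byte in bits 23:16 and the SAM
 * status in the low byte; e.g. hs = DID_ERROR (0x07) yields
 * sc->result = 0x00070000.
 */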
sc->result = (hs << 16) | stat;
ASSIGN_SAS_TASK(sc, NULL);
list_del_init(&task->list);
sas_free_task(task);
}
static void sas_scsi_task_done(struct sas_task *task)
{
struct scsi_cmnd *sc = task->uldd_task;
struct domain_device *dev = task->dev;
struct sas_ha_struct *ha = dev->port->ha;
unsigned long flags;
spin_lock_irqsave(&dev->done_lock, flags);
if (test_bit(SAS_HA_FROZEN, &ha->state))
task = NULL;
else
ASSIGN_SAS_TASK(sc, NULL);
spin_unlock_irqrestore(&dev->done_lock, flags);
if (unlikely(!task)) {
/* task will be completed by the error handler */
SAS_DPRINTK("task done but aborted\n");
return;
}
if (unlikely(!sc)) {
SAS_DPRINTK("task_done called with nonexistent SCSI cmnd!\n");
list_del_init(&task->list);
sas_free_task(task);
return;
}
sas_end_task(sc, task);
sc->scsi_done(sc);
}
static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
struct domain_device *dev,
gfp_t gfp_flags)
{
struct sas_task *task = sas_alloc_task(gfp_flags);
struct scsi_lun lun;
if (!task)
return NULL;
task->uldd_task = cmd;
ASSIGN_SAS_TASK(cmd, task);
task->dev = dev;
task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */
task->ssp_task.retry_count = 1;
int_to_scsilun(cmd->device->lun, &lun);
memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
task->ssp_task.task_attr = TASK_ATTR_SIMPLE;
memcpy(task->ssp_task.cdb, cmd->cmnd, 16);
task->scatter = scsi_sglist(cmd);
task->num_scatter = scsi_sg_count(cmd);
task->total_xfer_len = scsi_bufflen(cmd);
task->data_dir = cmd->sc_data_direction;
task->task_done = sas_scsi_task_done;
return task;
}
int sas_queue_up(struct sas_task *task)
{
struct sas_ha_struct *sas_ha = task->dev->port->ha;
struct scsi_core *core = &sas_ha->core;
unsigned long flags;
LIST_HEAD(list);
spin_lock_irqsave(&core->task_queue_lock, flags);
if (sas_ha->lldd_queue_size < core->task_queue_size + 1) {
spin_unlock_irqrestore(&core->task_queue_lock, flags);
return -SAS_QUEUE_FULL;
}
list_add_tail(&task->list, &core->task_queue);
core->task_queue_size += 1;
spin_unlock_irqrestore(&core->task_queue_lock, flags);
wake_up_process(core->queue_thread);
return 0;
}
int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
struct sas_internal *i = to_sas_internal(host->transportt);
struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_ha_struct *sas_ha = dev->port->ha;
struct sas_task *task;
int res = 0;
/* If the device fell off, no sense in issuing commands */
if (test_bit(SAS_DEV_GONE, &dev->state)) {
cmd->result = DID_BAD_TARGET << 16;
goto out_done;
}
if (dev_is_sata(dev)) {
spin_lock_irq(dev->sata_dev.ap->lock);
res = ata_sas_queuecmd(cmd, dev->sata_dev.ap);
spin_unlock_irq(dev->sata_dev.ap->lock);
return res;
}
task = sas_create_task(cmd, dev, GFP_ATOMIC);
if (!task)
return SCSI_MLQUEUE_HOST_BUSY;
/* Queue up, Direct Mode or Task Collector Mode. */
if (sas_ha->lldd_max_execute_num < 2)
res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC);
else
res = sas_queue_up(task);
if (res)
goto out_free_task;
return 0;
out_free_task:
SAS_DPRINTK("lldd_execute_task returned: %d\n", res);
ASSIGN_SAS_TASK(cmd, NULL);
sas_free_task(task);
if (res == -SAS_QUEUE_FULL)
cmd->result = DID_SOFT_ERROR << 16; /* retry */
else
cmd->result = DID_ERROR << 16;
out_done:
cmd->scsi_done(cmd);
return 0;
}
static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
{
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
struct sas_task *task = TO_SAS_TASK(cmd);
/* At this point, we only get called following an actual abort
* of the task, so we should be guaranteed not to be racing with
* any completions from the LLD. Task is freed after this.
*/
sas_end_task(cmd, task);
/* now finish the command and move it on to the error
* handler done list, this also takes it off the
* error handler pending list.
*/
scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
}
static void sas_eh_defer_cmd(struct scsi_cmnd *cmd)
{
struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_ha_struct *ha = dev->port->ha;
struct sas_task *task = TO_SAS_TASK(cmd);
if (!dev_is_sata(dev)) {
sas_eh_finish_cmd(cmd);
return;
}
/* report the timeout to libata */
sas_end_task(cmd, task);
list_move_tail(&cmd->eh_entry, &ha->eh_ata_q);
}
static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
{
struct scsi_cmnd *cmd, *n;
list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
cmd->device->lun == my_cmd->device->lun)
sas_eh_defer_cmd(cmd);
}
}
static void sas_scsi_clear_queue_I_T(struct list_head *error_q,
struct domain_device *dev)
{
struct scsi_cmnd *cmd, *n;
list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
struct domain_device *x = cmd_to_domain_dev(cmd);
if (x == dev)
sas_eh_finish_cmd(cmd);
}
}
static void sas_scsi_clear_queue_port(struct list_head *error_q,
struct asd_sas_port *port)
{
struct scsi_cmnd *cmd, *n;
list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
struct domain_device *dev = cmd_to_domain_dev(cmd);
struct asd_sas_port *x = dev->port;
if (x == port)
sas_eh_finish_cmd(cmd);
}
}
enum task_disposition {
TASK_IS_DONE,
TASK_IS_ABORTED,
TASK_IS_AT_LU,
TASK_IS_NOT_AT_HA,
TASK_IS_NOT_AT_LU,
TASK_ABORT_FAILED,
};
static enum task_disposition sas_scsi_find_task(struct sas_task *task)
{
struct sas_ha_struct *ha = task->dev->port->ha;
unsigned long flags;
int i, res;
struct sas_internal *si =
to_sas_internal(task->dev->port->ha->core.shost->transportt);
if (ha->lldd_max_execute_num > 1) {
struct scsi_core *core = &ha->core;
struct sas_task *t, *n;
mutex_lock(&core->task_queue_flush);
spin_lock_irqsave(&core->task_queue_lock, flags);
list_for_each_entry_safe(t, n, &core->task_queue, list)
if (task == t) {
list_del_init(&t->list);
break;
}
spin_unlock_irqrestore(&core->task_queue_lock, flags);
mutex_unlock(&core->task_queue_flush);
if (task == t)
return TASK_IS_NOT_AT_HA;
}
for (i = 0; i < 5; i++) {
SAS_DPRINTK("%s: aborting task 0x%p\n", __func__, task);
res = si->dft->lldd_abort_task(task);
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
task);
return TASK_IS_DONE;
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
if (res == TMF_RESP_FUNC_COMPLETE) {
SAS_DPRINTK("%s: task 0x%p is aborted\n",
__func__, task);
return TASK_IS_ABORTED;
} else if (si->dft->lldd_query_task) {
SAS_DPRINTK("%s: querying task 0x%p\n",
__func__, task);
res = si->dft->lldd_query_task(task);
switch (res) {
case TMF_RESP_FUNC_SUCC:
SAS_DPRINTK("%s: task 0x%p at LU\n",
__func__, task);
return TASK_IS_AT_LU;
case TMF_RESP_FUNC_COMPLETE:
SAS_DPRINTK("%s: task 0x%p not at LU\n",
__func__, task);
return TASK_IS_NOT_AT_LU;
case TMF_RESP_FUNC_FAILED:
SAS_DPRINTK("%s: task 0x%p failed to abort\n",
__func__, task);
return TASK_ABORT_FAILED;
}
}
}
return res;
}
static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
{
int res = TMF_RESP_FUNC_FAILED;
struct scsi_lun lun;
struct sas_internal *i =
to_sas_internal(dev->port->ha->core.shost->transportt);
int_to_scsilun(cmd->device->lun, &lun);
SAS_DPRINTK("eh: device %llx LUN %x has the task\n",
SAS_ADDR(dev->sas_addr),
cmd->device->lun);
if (i->dft->lldd_abort_task_set)
res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun);
if (res == TMF_RESP_FUNC_FAILED) {
if (i->dft->lldd_clear_task_set)
res = i->dft->lldd_clear_task_set(dev, lun.scsi_lun);
}
if (res == TMF_RESP_FUNC_FAILED) {
if (i->dft->lldd_lu_reset)
res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
}
return res;
}
static int sas_recover_I_T(struct domain_device *dev)
{
int res = TMF_RESP_FUNC_FAILED;
struct sas_internal *i =
to_sas_internal(dev->port->ha->core.shost->transportt);
SAS_DPRINTK("I_T nexus reset for dev %016llx\n",
SAS_ADDR(dev->sas_addr));
if (i->dft->lldd_I_T_nexus_reset)
res = i->dft->lldd_I_T_nexus_reset(dev);
return res;
}
/* take a reference on the last known good phy for this device */
struct sas_phy *sas_get_local_phy(struct domain_device *dev)
{
struct sas_ha_struct *ha = dev->port->ha;
struct sas_phy *phy;
unsigned long flags;
/* a published domain device always has a valid phy, it may be
* stale, but it is never NULL
*/
BUG_ON(!dev->phy);
spin_lock_irqsave(&ha->phy_port_lock, flags);
phy = dev->phy;
get_device(&phy->dev);
spin_unlock_irqrestore(&ha->phy_port_lock, flags);
return phy;
}
EXPORT_SYMBOL_GPL(sas_get_local_phy);
/* Attempt to send a LUN reset message to a device */
int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_internal *i =
to_sas_internal(dev->port->ha->core.shost->transportt);
struct scsi_lun lun;
int res;
int_to_scsilun(cmd->device->lun, &lun);
if (!i->dft->lldd_lu_reset)
return FAILED;
res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
return SUCCESS;
return FAILED;
}
/* Attempt to send a phy (bus) reset */
int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_phy *phy = sas_get_local_phy(dev);
int res;
res = sas_phy_reset(phy, 1);
if (res)
SAS_DPRINTK("Bus reset of %s failed 0x%x\n",
kobject_name(&phy->dev.kobj),
res);
sas_put_local_phy(phy);
if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
return SUCCESS;
return FAILED;
}
/* Try to reset a device */
static int try_to_reset_cmd_device(struct scsi_cmnd *cmd)
{
int res;
struct Scsi_Host *shost = cmd->device->host;
if (!shost->hostt->eh_device_reset_handler)
goto try_bus_reset;
res = shost->hostt->eh_device_reset_handler(cmd);
if (res == SUCCESS)
return res;
try_bus_reset:
if (shost->hostt->eh_bus_reset_handler)
return shost->hostt->eh_bus_reset_handler(cmd);
return FAILED;
}
static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *work_q)
{
struct scsi_cmnd *cmd, *n;
enum task_disposition res = TASK_IS_DONE;
int tmf_resp, need_reset;
struct sas_internal *i = to_sas_internal(shost->transportt);
unsigned long flags;
struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
LIST_HEAD(done);
/* clean out any commands that won the completion vs eh race */
list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_task *task;
spin_lock_irqsave(&dev->done_lock, flags);
/* by this point the lldd has either observed
* SAS_HA_FROZEN and is leaving the task alone, or has
* won the race with eh and decided to complete it
*/
task = TO_SAS_TASK(cmd);
spin_unlock_irqrestore(&dev->done_lock, flags);
if (!task)
list_move_tail(&cmd->eh_entry, &done);
}
Again:
list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
struct sas_task *task = TO_SAS_TASK(cmd);
list_del_init(&cmd->eh_entry);
spin_lock_irqsave(&task->task_state_lock, flags);
need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET;
spin_unlock_irqrestore(&task->task_state_lock, flags);
if (need_reset) {
SAS_DPRINTK("%s: task 0x%p requests reset\n",
__func__, task);
goto reset;
}
SAS_DPRINTK("trying to find task 0x%p\n", task);
res = sas_scsi_find_task(task);
cmd->eh_eflags = 0;
switch (res) {
case TASK_IS_NOT_AT_HA:
SAS_DPRINTK("%s: task 0x%p is not at ha: %s\n",
__func__, task,
cmd->retries ? "retry" : "aborted");
if (cmd->retries)
cmd->retries--;
sas_eh_finish_cmd(cmd);
continue;
case TASK_IS_DONE:
SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
task);
sas_eh_defer_cmd(cmd);
continue;
case TASK_IS_ABORTED:
SAS_DPRINTK("%s: task 0x%p is aborted\n",
__func__, task);
sas_eh_defer_cmd(cmd);
continue;
case TASK_IS_AT_LU:
SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
reset:
tmf_resp = sas_recover_lu(task->dev, cmd);
if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
SAS_DPRINTK("dev %016llx LU %x is "
"recovered\n",
SAS_ADDR(task->dev),
cmd->device->lun);
sas_eh_defer_cmd(cmd);
sas_scsi_clear_queue_lu(work_q, cmd);
goto Again;
}
/* fallthrough */
case TASK_IS_NOT_AT_LU:
case TASK_ABORT_FAILED:
SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n",
task);
tmf_resp = sas_recover_I_T(task->dev);
if (tmf_resp == TMF_RESP_FUNC_COMPLETE ||
tmf_resp == -ENODEV) {
struct domain_device *dev = task->dev;
SAS_DPRINTK("I_T %016llx recovered\n",
SAS_ADDR(task->dev->sas_addr));
sas_eh_finish_cmd(cmd);
sas_scsi_clear_queue_I_T(work_q, dev);
goto Again;
}
/* Hammer time :-) */
try_to_reset_cmd_device(cmd);
if (i->dft->lldd_clear_nexus_port) {
struct asd_sas_port *port = task->dev->port;
SAS_DPRINTK("clearing nexus for port:%d\n",
port->id);
res = i->dft->lldd_clear_nexus_port(port);
if (res == TMF_RESP_FUNC_COMPLETE) {
SAS_DPRINTK("clear nexus port:%d "
"succeeded\n", port->id);
sas_eh_finish_cmd(cmd);
sas_scsi_clear_queue_port(work_q,
port);
goto Again;
}
}
if (i->dft->lldd_clear_nexus_ha) {
SAS_DPRINTK("clear nexus ha\n");
res = i->dft->lldd_clear_nexus_ha(ha);
if (res == TMF_RESP_FUNC_COMPLETE) {
SAS_DPRINTK("clear nexus ha "
"succeeded\n");
sas_eh_finish_cmd(cmd);
goto clear_q;
}
}
/* If we are here -- this means that no amount
* of effort could recover from errors. Quite
* possibly the HA just disappeared.
*/
SAS_DPRINTK("error from device %llx, LUN %x "
"couldn't be recovered in any way\n",
SAS_ADDR(task->dev->sas_addr),
cmd->device->lun);
sas_eh_finish_cmd(cmd);
goto clear_q;
}
}
out:
list_splice_tail(&done, work_q);
list_splice_tail_init(&ha->eh_ata_q, work_q);
return;
clear_q:
SAS_DPRINTK("--- Exit %s -- clear_q\n", __func__);
list_for_each_entry_safe(cmd, n, work_q, eh_entry)
sas_eh_finish_cmd(cmd);
goto out;
}
void sas_scsi_recover_host(struct Scsi_Host *shost)
{
struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
unsigned long flags;
LIST_HEAD(eh_work_q);
spin_lock_irqsave(shost->host_lock, flags);
list_splice_init(&shost->eh_cmd_q, &eh_work_q);
shost->host_eh_scheduled = 0;
spin_unlock_irqrestore(shost->host_lock, flags);
SAS_DPRINTK("Enter %s busy: %d failed: %d\n",
__func__, shost->host_busy, shost->host_failed);
/*
* Deal with commands that still have SAS tasks (i.e. they didn't
* complete via the normal sas_task completion mechanism),
* SAS_HA_FROZEN gives eh dominion over all sas_task completion.
*/
set_bit(SAS_HA_FROZEN, &ha->state);
sas_eh_handle_sas_errors(shost, &eh_work_q);
clear_bit(SAS_HA_FROZEN, &ha->state);
if (list_empty(&eh_work_q))
goto out;
/*
 * Now deal with SCSI commands that completed ok but have an error
* code (and hopefully sense data) attached. This is roughly what
* scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
* command we see here has no sas_task and is thus unknown to the HA.
*/
sas_ata_eh(shost, &eh_work_q, &ha->eh_done_q);
if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);
out:
if (ha->lldd_max_execute_num > 1)
wake_up_process(ha->core.queue_thread);
/* now link into libata eh --- if we have any ata devices */
sas_ata_strategy_handler(shost);
scsi_eh_flush_done_q(&ha->eh_done_q);
SAS_DPRINTK("--- Exit %s: busy: %d failed: %d\n",
__func__, shost->host_busy, shost->host_failed);
}
enum blk_eh_timer_return sas_scsi_timed_out(struct scsi_cmnd *cmd)
{
scmd_printk(KERN_DEBUG, cmd, "command %p timed out\n", cmd);
return BLK_EH_NOT_HANDLED;
}
int sas_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
struct domain_device *dev = sdev_to_domain_dev(sdev);
if (dev_is_sata(dev))
return ata_sas_scsi_ioctl(dev->sata_dev.ap, sdev, cmd, arg);
return -EINVAL;
}
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
{
struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent);
struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
struct domain_device *found_dev = NULL;
int i;
unsigned long flags;
spin_lock_irqsave(&ha->phy_port_lock, flags);
for (i = 0; i < ha->num_phys; i++) {
struct asd_sas_port *port = ha->sas_port[i];
struct domain_device *dev;
spin_lock(&port->dev_list_lock);
list_for_each_entry(dev, &port->dev_list, dev_list_node) {
if (rphy == dev->rphy) {
found_dev = dev;
spin_unlock(&port->dev_list_lock);
goto found;
}
}
spin_unlock(&port->dev_list_lock);
}
found:
spin_unlock_irqrestore(&ha->phy_port_lock, flags);
return found_dev;
}
int sas_target_alloc(struct scsi_target *starget)
{
struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
struct domain_device *found_dev = sas_find_dev_by_rphy(rphy);
if (!found_dev)
return -ENODEV;
kref_get(&found_dev->kref);
starget->hostdata = found_dev;
return 0;
}
#define SAS_DEF_QD 256
int sas_slave_configure(struct scsi_device *scsi_dev)
{
struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
struct sas_ha_struct *sas_ha;
BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);
if (dev_is_sata(dev)) {
ata_sas_slave_configure(scsi_dev, dev->sata_dev.ap);
return 0;
}
sas_ha = dev->port->ha;
sas_read_port_mode_page(scsi_dev);
if (scsi_dev->tagged_supported) {
scsi_set_tag_type(scsi_dev, MSG_SIMPLE_TAG);
scsi_activate_tcq(scsi_dev, SAS_DEF_QD);
} else {
SAS_DPRINTK("device %llx, LUN %x doesn't support "
"TCQ\n", SAS_ADDR(dev->sas_addr),
scsi_dev->lun);
scsi_dev->tagged_supported = 0;
scsi_set_tag_type(scsi_dev, 0);
scsi_deactivate_tcq(scsi_dev, 1);
}
scsi_dev->allow_restart = 1;
return 0;
}
int sas_change_queue_depth(struct scsi_device *sdev, int depth, int reason)
{
struct domain_device *dev = sdev_to_domain_dev(sdev);
if (dev_is_sata(dev))
return __ata_change_queue_depth(dev->sata_dev.ap, sdev, depth,
reason);
switch (reason) {
case SCSI_QDEPTH_DEFAULT:
case SCSI_QDEPTH_RAMP_UP:
if (!sdev->tagged_supported)
depth = 1;
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
break;
case SCSI_QDEPTH_QFULL:
scsi_track_queue_full(sdev, depth);
break;
default:
return -EOPNOTSUPP;
}
return depth;
}
int sas_change_queue_type(struct scsi_device *scsi_dev, int qt)
{
struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
if (dev_is_sata(dev))
return -EINVAL;
if (!scsi_dev->tagged_supported)
return 0;
scsi_deactivate_tcq(scsi_dev, 1);
scsi_set_tag_type(scsi_dev, qt);
scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth);
return qt;
}
int sas_bios_param(struct scsi_device *scsi_dev,
struct block_device *bdev,
sector_t capacity, int *hsc)
{
hsc[0] = 255;
hsc[1] = 63;
sector_div(capacity, 255*63);
hsc[2] = capacity;
return 0;
}
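/*
 * Worked example (illustrative, not from the original source): with the
 * fixed 255-head/63-sector geometry above, a 1 TB disk (1953125000
 * 512-byte sectors) gets
 *
 *	cylinders = 1953125000 / (255 * 63) = 121576
 *
 * so hsc[] ends up as { 255, 63, 121576 }.
 */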
/* ---------- Task Collector Thread implementation ---------- */
static void sas_queue(struct sas_ha_struct *sas_ha)
{
struct scsi_core *core = &sas_ha->core;
unsigned long flags;
LIST_HEAD(q);
int can_queue;
int res;
struct sas_internal *i = to_sas_internal(core->shost->transportt);
mutex_lock(&core->task_queue_flush);
spin_lock_irqsave(&core->task_queue_lock, flags);
while (!kthread_should_stop() &&
!list_empty(&core->task_queue) &&
!test_bit(SAS_HA_FROZEN, &sas_ha->state)) {
can_queue = sas_ha->lldd_queue_size - core->task_queue_size;
if (can_queue >= 0) {
can_queue = core->task_queue_size;
list_splice_init(&core->task_queue, &q);
} else {
struct list_head *a, *n;
can_queue = sas_ha->lldd_queue_size;
list_for_each_safe(a, n, &core->task_queue) {
list_move_tail(a, &q);
if (--can_queue == 0)
break;
}
can_queue = sas_ha->lldd_queue_size;
}
core->task_queue_size -= can_queue;
spin_unlock_irqrestore(&core->task_queue_lock, flags);
{
struct sas_task *task = list_entry(q.next,
struct sas_task,
list);
list_del_init(&q);
res = i->dft->lldd_execute_task(task, can_queue,
GFP_KERNEL);
if (unlikely(res))
__list_add(&q, task->list.prev, &task->list);
}
spin_lock_irqsave(&core->task_queue_lock, flags);
if (res) {
list_splice_init(&q, &core->task_queue); /*at head*/
core->task_queue_size += can_queue;
}
}
spin_unlock_irqrestore(&core->task_queue_lock, flags);
mutex_unlock(&core->task_queue_flush);
}
/**
* sas_queue_thread -- The Task Collector thread
* @_sas_ha: pointer to struct sas_ha
*/
static int sas_queue_thread(void *_sas_ha)
{
struct sas_ha_struct *sas_ha = _sas_ha;
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
schedule();
sas_queue(sas_ha);
if (kthread_should_stop())
break;
}
return 0;
}
int sas_init_queue(struct sas_ha_struct *sas_ha)
{
struct scsi_core *core = &sas_ha->core;
spin_lock_init(&core->task_queue_lock);
mutex_init(&core->task_queue_flush);
core->task_queue_size = 0;
INIT_LIST_HEAD(&core->task_queue);
core->queue_thread = kthread_run(sas_queue_thread, sas_ha,
"sas_queue_%d", core->shost->host_no);
if (IS_ERR(core->queue_thread))
return PTR_ERR(core->queue_thread);
return 0;
}
void sas_shutdown_queue(struct sas_ha_struct *sas_ha)
{
unsigned long flags;
struct scsi_core *core = &sas_ha->core;
struct sas_task *task, *n;
kthread_stop(core->queue_thread);
if (!list_empty(&core->task_queue))
SAS_DPRINTK("HA: %llx: scsi core task queue is NOT empty!?\n",
SAS_ADDR(sas_ha->sas_addr));
spin_lock_irqsave(&core->task_queue_lock, flags);
list_for_each_entry_safe(task, n, &core->task_queue, list) {
struct scsi_cmnd *cmd = task->uldd_task;
list_del_init(&task->list);
ASSIGN_SAS_TASK(cmd, NULL);
sas_free_task(task);
cmd->result = DID_ABORT << 16;
cmd->scsi_done(cmd);
}
spin_unlock_irqrestore(&core->task_queue_lock, flags);
}
/*
* Tell an upper layer that it needs to initiate an abort for a given task.
* This should only ever be called by an LLDD.
*/
void sas_task_abort(struct sas_task *task)
{
struct scsi_cmnd *sc = task->uldd_task;
/* Escape for libsas internal commands */
if (!sc) {
if (!del_timer(&task->timer))
return;
task->timer.function(task->timer.data);
return;
}
if (dev_is_sata(task->dev)) {
sas_ata_task_abort(task);
} else {
struct request_queue *q = sc->device->request_queue;
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
blk_abort_request(sc->request);
spin_unlock_irqrestore(q->queue_lock, flags);
scsi_schedule_eh(sc->device->host);
}
}
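/*
 * Minimal usage sketch (illustrative only, not part of the original
 * file): an LLDD that detects a hardware-level timeout on a task it
 * owns hands the task back to libsas like this; for non-SATA devices
 * libsas then routes it into blk_abort_request() and the SCSI EH as
 * shown above.  struct my_hw_task is a hypothetical driver-private type.
 */
#if 0
static void my_lldd_handle_hw_timeout(struct my_hw_task *hw_task)
{
	struct sas_task *task = hw_task->sas_task;

	sas_task_abort(task);
}
#endif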
void sas_target_destroy(struct scsi_target *starget)
{
struct domain_device *found_dev = starget->hostdata;
if (!found_dev)
return;
starget->hostdata = NULL;
sas_put_device(found_dev);
}
static void sas_parse_addr(u8 *sas_addr, const char *p)
{
int i;
for (i = 0; i < SAS_ADDR_SIZE; i++) {
u8 h, l;
if (!*p)
break;
h = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
p++;
l = isdigit(*p) ? *p-'0' : toupper(*p)-'A'+10;
p++;
sas_addr[i] = (h<<4) | l;
}
}
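/*
 * Example (illustrative): sas_parse_addr() consumes two hex digits per
 * output byte, so the 16-character firmware string "5000C50012345678"
 * parses into
 *
 *	sas_addr[] = { 0x50, 0x00, 0xC5, 0x00, 0x12, 0x34, 0x56, 0x78 }
 *
 * Upper- and lower-case digits are both accepted, but note that
 * non-hex characters are not rejected.
 */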
#define SAS_STRING_ADDR_SIZE 16
int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
{
int res;
const struct firmware *fw;
res = request_firmware(&fw, "sas_addr", &shost->shost_gendev);
if (res)
return res;
if (fw->size < SAS_STRING_ADDR_SIZE) {
res = -ENODEV;
goto out;
}
sas_parse_addr(addr, fw->data);
out:
release_firmware(fw);
return res;
}
EXPORT_SYMBOL_GPL(sas_request_addr);
EXPORT_SYMBOL_GPL(sas_queuecommand);
EXPORT_SYMBOL_GPL(sas_target_alloc);
EXPORT_SYMBOL_GPL(sas_slave_configure);
EXPORT_SYMBOL_GPL(sas_change_queue_depth);
EXPORT_SYMBOL_GPL(sas_change_queue_type);
EXPORT_SYMBOL_GPL(sas_bios_param);
EXPORT_SYMBOL_GPL(sas_task_abort);
EXPORT_SYMBOL_GPL(sas_phy_reset);
EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
EXPORT_SYMBOL_GPL(sas_eh_bus_reset_handler);
EXPORT_SYMBOL_GPL(sas_target_destroy);
EXPORT_SYMBOL_GPL(sas_ioctl);
|
gpl-2.0
|
Pillar1989/BBG_linux-3.8
|
drivers/crypto/ux500/cryp/cryp_irq.c
|
5114
|
1273
|
/**
* Copyright (C) ST-Ericsson SA 2010
* Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson.
* Author: Jonas Linde <jonas.linde@stericsson.com> for ST-Ericsson.
* Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson.
* Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
* Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
* License terms: GNU General Public License (GPL) version 2.
*/
#include <linux/kernel.h>
#include <linux/bitmap.h>
#include <linux/device.h>
#include "cryp.h"
#include "cryp_p.h"
#include "cryp_irq.h"
#include "cryp_irqp.h"
void cryp_enable_irq_src(struct cryp_device_data *device_data, u32 irq_src)
{
u32 i;
dev_dbg(device_data->dev, "[%s]", __func__);
i = readl_relaxed(&device_data->base->imsc);
i = i | irq_src;
writel_relaxed(i, &device_data->base->imsc);
}
void cryp_disable_irq_src(struct cryp_device_data *device_data, u32 irq_src)
{
u32 i;
dev_dbg(device_data->dev, "[%s]", __func__);
i = readl_relaxed(&device_data->base->imsc);
i = i & ~irq_src;
writel_relaxed(i, &device_data->base->imsc);
}
bool cryp_pending_irq_src(struct cryp_device_data *device_data, u32 irq_src)
{
return (readl_relaxed(&device_data->base->mis) & irq_src) > 0;
}
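/*
 * Usage sketch (illustrative, not part of the original driver): an
 * interrupt handler would typically poll the masked status with
 * cryp_pending_irq_src() and then mask the source it is about to
 * service.  CRYP_IRQ_SRC_INPUT_FIFO is assumed here as an example
 * mask bit.
 */
#if 0
static irqreturn_t my_cryp_interrupt_handler(int irq, void *param)
{
	struct cryp_device_data *device_data = param;

	if (cryp_pending_irq_src(device_data, CRYP_IRQ_SRC_INPUT_FIFO)) {
		/* stop further FIFO interrupts while the FIFO is refilled */
		cryp_disable_irq_src(device_data, CRYP_IRQ_SRC_INPUT_FIFO);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}
#endif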
|
gpl-2.0
|
spica234/HP-Krnl-2.6.32.9
|
mm/slab.c
|
251
|
119563
|
/*
* linux/mm/slab.c
* Written by Mark Hemment, 1996/97.
* (markhe@nextd.demon.co.uk)
*
* kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
*
* Major cleanup, different bufctl logic, per-cpu arrays
* (c) 2000 Manfred Spraul
*
* Cleanup, make the head arrays unconditional, preparation for NUMA
* (c) 2002 Manfred Spraul
*
* An implementation of the Slab Allocator as described in outline in;
* UNIX Internals: The New Frontiers by Uresh Vahalia
* Pub: Prentice Hall ISBN 0-13-101908-2
* or with a little more detail in;
* The Slab Allocator: An Object-Caching Kernel Memory Allocator
* Jeff Bonwick (Sun Microsystems).
* Presented at: USENIX Summer 1994 Technical Conference
*
* The memory is organized in caches, one cache for each object type.
* (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
* Each cache consists of many slabs (they are small (usually one
* page long) and always contiguous), and each slab contains multiple
* initialized objects.
*
* This means that your constructor is used only for newly allocated
* slabs and you must pass objects with the same initializations to
* kmem_cache_free.
*
* Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
* normal). If you need a special memory type, then you must create a new
* cache for that memory type.
*
* In order to reduce fragmentation, the slabs are sorted in 3 groups:
* full slabs with 0 free objects
* partial slabs
* empty slabs with no allocated objects
*
* If partial slabs exist, then new allocations come from these slabs;
* otherwise they come from empty slabs, or new slabs are allocated.
*
* kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
* during kmem_cache_destroy(). The caller must prevent concurrent allocs.
*
* Each cache has a short per-cpu head array, most allocs
* and frees go into that array, and if that array overflows, then 1/2
* of the entries in the array are given back into the global cache.
* The head array is strictly LIFO and should improve the cache hit rates.
* On SMP, it additionally reduces the spinlock operations.
*
* The c_cpuarray may not be read with local interrupts enabled -
* it's changed with smp_call_function().
*
* SMP synchronization:
* constructors and destructors are called without any locking.
* Several members in struct kmem_cache and struct slab never change, they
* are accessed without any locking.
* The per-cpu arrays are never accessed from the wrong cpu, no locking,
* and local interrupts are disabled so slab code is preempt-safe.
* The non-constant members are protected with a per-cache irq spinlock.
*
* Many thanks to Mark Hemment, who wrote another per-cpu slab patch
* in 2000 - many ideas in the current implementation are derived from
* his patch.
*
* Further notes from the original documentation:
*
* 11 April '97. Started multi-threading - markhe
* The global cache-chain is protected by the mutex 'cache_chain_mutex'.
* The sem is only needed when accessing/extending the cache-chain, which
* can never happen inside an interrupt (kmem_cache_create(),
* kmem_cache_shrink() and kmem_cache_reap()).
*
* At present, each engine can be growing a cache. This should be blocked.
*
* 15 March 2005. NUMA slab allocator.
* Shai Fultheim <shai@scalex86.org>.
* Shobhit Dayal <shobhit@calsoftinc.com>
* Alok N Kataria <alokk@calsoftinc.com>
* Christoph Lameter <christoph@lameter.com>
*
* Modified the slab allocator to be node aware on NUMA systems.
* Each node has its own list of partial, free and full slabs.
* All object allocations for a node occur from node specific slab lists.
*/
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/kallsyms.h>
#include <linux/cpu.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/kmemtrace.h>
#include <linux/rcupdate.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/nodemask.h>
#include <linux/kmemleak.h>
#include <linux/mempolicy.h>
#include <linux/mutex.h>
#include <linux/fault-inject.h>
#include <linux/rtmutex.h>
#include <linux/reciprocal_div.h>
#include <linux/debugobjects.h>
#include <linux/kmemcheck.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
/*
* DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
* 0 for faster, smaller code (especially in the critical paths).
*
* STATS - 1 to collect stats for /proc/slabinfo.
* 0 for faster, smaller code (especially in the critical paths).
*
* FORCED_DEBUG - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
*/
#ifdef CONFIG_DEBUG_SLAB
#define DEBUG 1
#define STATS 1
#define FORCED_DEBUG 1
#else
#define DEBUG 0
#define STATS 0
#define FORCED_DEBUG 0
#endif
/* Shouldn't this be in a header file somewhere? */
#define BYTES_PER_WORD sizeof(void *)
#define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long))
#ifndef ARCH_KMALLOC_MINALIGN
/*
* Enforce a minimum alignment for the kmalloc caches.
* Usually, the kmalloc caches are cache_line_size() aligned, except when
* DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
* Some archs want to perform DMA into kmalloc caches and need a guaranteed
* alignment larger than the alignment of a 64-bit integer.
* ARCH_KMALLOC_MINALIGN allows that.
* Note that increasing this value may disable some debug features.
*/
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif
#ifndef ARCH_SLAB_MINALIGN
/*
* Enforce a minimum alignment for all caches.
* Intended for archs that get misalignment faults even for BYTES_PER_WORD
* aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
* If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
* some debug features.
*/
#define ARCH_SLAB_MINALIGN 0
#endif
#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif
/* Legal flag mask for kmem_cache_create(). */
#if DEBUG
# define CREATE_MASK (SLAB_RED_ZONE | \
SLAB_POISON | SLAB_HWCACHE_ALIGN | \
SLAB_CACHE_DMA | \
SLAB_STORE_USER | \
SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
#else
# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
SLAB_CACHE_DMA | \
SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
#endif
/*
* kmem_bufctl_t:
*
* Bufctl's are used for linking objs within a slab
* linked offsets.
*
* This implementation relies on "struct page" for locating the cache &
* slab an object belongs to.
* This allows the bufctl structure to be small (one int), but limits
* the number of objects a slab (not a cache) can contain when off-slab
* bufctls are used. The limit is the size of the largest general cache
* that does not use off-slab slabs.
* For 32bit archs with 4 kB pages, this is 56.
* This is not serious, as it is only for large objects, when it is unwise
* to have too many per slab.
* Note: This limit can be raised by introducing a general cache whose size
* is less than 512 (PAGE_SIZE<<3), but greater than 256.
*/
typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END (((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_FREE (((kmem_bufctl_t)(~0U))-1)
#define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2)
#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3)
/*
* struct slab
*
* Manages the objs in a slab. Placed either at the beginning of mem allocated
* for a slab, or allocated from a general cache.
* Slabs are chained into three lists: fully used, partial, fully free slabs.
*/
struct slab {
struct list_head list;
unsigned long colouroff;
void *s_mem; /* including colour offset */
unsigned int inuse; /* num of objs active in slab */
kmem_bufctl_t free;
unsigned short nodeid;
};
/*
* struct slab_rcu
*
* slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
* arrange for kmem_freepages to be called via RCU. This is useful if
* we need to approach a kernel structure obliquely, from its address
* obtained without the usual locking. We can lock the structure to
* stabilize it and check it's still at the given address, only if we
* can be sure that the memory has not been meanwhile reused for some
* other kind of object (which our subsystem's lock might corrupt).
*
* rcu_read_lock before reading the address, then rcu_read_unlock after
* taking the spinlock within the structure expected at that address.
*
* We assume struct slab_rcu can overlay struct slab when destroying.
*/
struct slab_rcu {
struct rcu_head head;
struct kmem_cache *cachep;
void *addr;
};
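/*
 * Sketch of the overlay assumption stated above (not in the original
 * source): slab_destroy() casts a struct slab to struct slab_rcu, so
 * slab_rcu must never outgrow slab.  A compile-time guard for that
 * invariant would look like:
 *
 *	BUILD_BUG_ON(sizeof(struct slab_rcu) > sizeof(struct slab));
 */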
/*
* struct array_cache
*
* Purpose:
* - LIFO ordering, to hand out cache-warm objects from _alloc
* - reduce the number of linked list operations
* - reduce spinlock operations
*
* The limit is stored in the per-cpu structure to reduce the data cache
* footprint.
*
*/
struct array_cache {
unsigned int avail;
unsigned int limit;
unsigned int batchcount;
unsigned int touched;
spinlock_t lock;
void *entry[]; /*
* Must have this definition in here for the proper
* alignment of array_cache. Also simplifies accessing
* the entries.
*/
};
/*
* bootstrap: The caches do not work without cpuarrays anymore, but the
* cpuarrays are allocated from the generic caches...
*/
#define BOOT_CPUCACHE_ENTRIES 1
struct arraycache_init {
struct array_cache cache;
void *entries[BOOT_CPUCACHE_ENTRIES];
};
/*
* The slab lists for all objects.
*/
struct kmem_list3 {
struct list_head slabs_partial; /* partial list first, better asm code */
struct list_head slabs_full;
struct list_head slabs_free;
unsigned long free_objects;
unsigned int free_limit;
unsigned int colour_next; /* Per-node cache coloring */
spinlock_t list_lock;
struct array_cache *shared; /* shared per node */
struct array_cache **alien; /* on other nodes */
unsigned long next_reap; /* updated without locking */
int free_touched; /* updated without locking */
};
/*
* Need this for bootstrapping a per node allocator.
*/
#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
#define CACHE_CACHE 0
#define SIZE_AC MAX_NUMNODES
#define SIZE_L3 (2 * MAX_NUMNODES)
static int drain_freelist(struct kmem_cache *cache,
struct kmem_list3 *l3, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
int node);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);
/*
* This function must be completely optimized away if a constant is passed to
* it. Mostly the same as what is in linux/slab.h except it returns an index.
*/
static __always_inline int index_of(const size_t size)
{
extern void __bad_size(void);
if (__builtin_constant_p(size)) {
int i = 0;
#define CACHE(x) \
if (size <= x) \
return i; \
else \
i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
__bad_size();
} else
__bad_size();
return 0;
}
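/*
 * Example (illustrative): because index_of() demands a compile-time
 * constant, a call such as
 *
 *	int idx = index_of(sizeof(struct arraycache_init));
 *
 * folds to a plain integer at compile time -- exactly how INDEX_AC
 * below is produced.  A non-constant argument instead references the
 * deliberately undefined __bad_size() and breaks the link.
 */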
static int slab_early_init = 1;
#define INDEX_AC index_of(sizeof(struct arraycache_init))
#define INDEX_L3 index_of(sizeof(struct kmem_list3))
static void kmem_list3_init(struct kmem_list3 *parent)
{
INIT_LIST_HEAD(&parent->slabs_full);
INIT_LIST_HEAD(&parent->slabs_partial);
INIT_LIST_HEAD(&parent->slabs_free);
parent->shared = NULL;
parent->alien = NULL;
parent->colour_next = 0;
spin_lock_init(&parent->list_lock);
parent->free_objects = 0;
parent->free_touched = 0;
}
#define MAKE_LIST(cachep, listp, slab, nodeid) \
do { \
INIT_LIST_HEAD(listp); \
list_splice(&(cachep->nodelists[nodeid]->slab), listp); \
} while (0)
#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \
do { \
MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \
MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
} while (0)
#define CFLGS_OFF_SLAB (0x80000000UL)
#define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)
#define BATCHREFILL_LIMIT 16
/*
* Optimization question: fewer reaps mean a lower probability of unnecessary
* cpucache drain/refill cycles.
*
* OTOH the cpuarrays can contain lots of objects,
* which could lock up otherwise freeable slabs.
*/
#define REAPTIMEOUT_CPUC (2*HZ)
#define REAPTIMEOUT_LIST3 (4*HZ)
#if STATS
#define STATS_INC_ACTIVE(x) ((x)->num_active++)
#define STATS_DEC_ACTIVE(x) ((x)->num_active--)
#define STATS_INC_ALLOCED(x) ((x)->num_allocations++)
#define STATS_INC_GROWN(x) ((x)->grown++)
#define STATS_ADD_REAPED(x,y) ((x)->reaped += (y))
#define STATS_SET_HIGH(x) \
do { \
if ((x)->num_active > (x)->high_mark) \
(x)->high_mark = (x)->num_active; \
} while (0)
#define STATS_INC_ERR(x) ((x)->errors++)
#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)
#define STATS_INC_NODEFREES(x) ((x)->node_frees++)
#define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++)
#define STATS_SET_FREEABLE(x, i) \
do { \
if ((x)->max_freeable < i) \
(x)->max_freeable = i; \
} while (0)
#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
#else
#define STATS_INC_ACTIVE(x) do { } while (0)
#define STATS_DEC_ACTIVE(x) do { } while (0)
#define STATS_INC_ALLOCED(x) do { } while (0)
#define STATS_INC_GROWN(x) do { } while (0)
#define STATS_ADD_REAPED(x,y) do { } while (0)
#define STATS_SET_HIGH(x) do { } while (0)
#define STATS_INC_ERR(x) do { } while (0)
#define STATS_INC_NODEALLOCS(x) do { } while (0)
#define STATS_INC_NODEFREES(x) do { } while (0)
#define STATS_INC_ACOVERFLOW(x) do { } while (0)
#define STATS_SET_FREEABLE(x, i) do { } while (0)
#define STATS_INC_ALLOCHIT(x) do { } while (0)
#define STATS_INC_ALLOCMISS(x) do { } while (0)
#define STATS_INC_FREEHIT(x) do { } while (0)
#define STATS_INC_FREEMISS(x) do { } while (0)
#endif
#if DEBUG
/*
* memory layout of objects:
* 0 : objp
* 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
* the end of an object is aligned with the end of the real
* allocation. Catches writes behind the end of the allocation.
* cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
* redzone word.
* cachep->obj_offset: The real object.
* cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
* cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
* [BYTES_PER_WORD long]
*/
static int obj_offset(struct kmem_cache *cachep)
{
return cachep->obj_offset;
}
static int obj_size(struct kmem_cache *cachep)
{
return cachep->obj_size;
}
static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
return (unsigned long long*) (objp + obj_offset(cachep) -
sizeof(unsigned long long));
}
static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
if (cachep->flags & SLAB_STORE_USER)
return (unsigned long long *)(objp + cachep->buffer_size -
sizeof(unsigned long long) -
REDZONE_ALIGN);
return (unsigned long long *) (objp + cachep->buffer_size -
sizeof(unsigned long long));
}
static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
BUG_ON(!(cachep->flags & SLAB_STORE_USER));
return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
}
#else
#define obj_offset(x) 0
#define obj_size(cachep) (cachep->buffer_size)
#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
#endif
#ifdef CONFIG_KMEMTRACE
size_t slab_buffer_size(struct kmem_cache *cachep)
{
return cachep->buffer_size;
}
EXPORT_SYMBOL(slab_buffer_size);
#endif
/*
* Do not go above this order unless 0 objects fit into the slab.
*/
#define BREAK_GFP_ORDER_HI 1
#define BREAK_GFP_ORDER_LO 0
static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
/*
* Functions for storing/retrieving the cachep and or slab from the page
* allocator. These are used to find the slab an obj belongs to. With kfree(),
* these are used to find the cache which an obj belongs to.
*/
static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
{
page->lru.next = (struct list_head *)cache;
}
static inline struct kmem_cache *page_get_cache(struct page *page)
{
page = compound_head(page);
BUG_ON(!PageSlab(page));
return (struct kmem_cache *)page->lru.next;
}
static inline void page_set_slab(struct page *page, struct slab *slab)
{
page->lru.prev = (struct list_head *)slab;
}
static inline struct slab *page_get_slab(struct page *page)
{
BUG_ON(!PageSlab(page));
return (struct slab *)page->lru.prev;
}
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
struct page *page = virt_to_head_page(obj);
return page_get_cache(page);
}
static inline struct slab *virt_to_slab(const void *obj)
{
struct page *page = virt_to_head_page(obj);
return page_get_slab(page);
}
static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
unsigned int idx)
{
return slab->s_mem + cache->buffer_size * idx;
}
/*
* We want to avoid an expensive divide : (offset / cache->buffer_size)
* Using the fact that buffer_size is a constant for a particular cache,
* we can replace (offset / cache->buffer_size) by
* reciprocal_divide(offset, cache->reciprocal_buffer_size)
*/
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
const struct slab *slab, void *obj)
{
u32 offset = (obj - slab->s_mem);
return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
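/*
 * Illustrative equivalence for the optimization above (not part of the
 * original source): for a cache with buffer_size == 256 and
 * reciprocal_buffer_size == reciprocal_value(256), an object at slab
 * offset 0x300 yields the same index either way:
 *
 *	0x300 / 256                                     == 3
 *	reciprocal_divide(0x300, reciprocal_value(256)) == 3
 *
 * but the reciprocal form compiles to a multiply and a shift rather
 * than a divide.
 */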
/*
* These are the default caches for kmalloc. Custom caches can have other sizes.
*/
struct cache_sizes malloc_sizes[] = {
#define CACHE(x) { .cs_size = (x) },
#include <linux/kmalloc_sizes.h>
CACHE(ULONG_MAX)
#undef CACHE
};
EXPORT_SYMBOL(malloc_sizes);
/* Must match cache_sizes above. Out of line to keep cache footprint low. */
struct cache_names {
char *name;
char *name_dma;
};
static struct cache_names __initdata cache_names[] = {
#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
#include <linux/kmalloc_sizes.h>
{NULL,}
#undef CACHE
};
static struct arraycache_init initarray_cache __initdata =
{ {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
static struct arraycache_init initarray_generic =
{ {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
/* internal cache of cache description objs */
static struct kmem_cache cache_cache = {
.batchcount = 1,
.limit = BOOT_CPUCACHE_ENTRIES,
.shared = 1,
.buffer_size = sizeof(struct kmem_cache),
.name = "kmem_cache",
};
#define BAD_ALIEN_MAGIC 0x01020304ul
#ifdef CONFIG_LOCKDEP
/*
* Slab sometimes uses the kmalloc slabs to store the slab headers
* for other slabs "off slab".
* The locking for this is tricky in that it nests within the locks
* of all other slabs in a few places; to deal with this special
* locking we put on-slab caches into a separate lock-class.
*
* We set lock class for alien array caches which are up during init.
* The lock annotation will be lost if all cpus of a node go down and
* then come back up during hotplug.
*/
static struct lock_class_key on_slab_l3_key;
static struct lock_class_key on_slab_alc_key;
static inline void init_lock_keys(void)
{
int q;
struct cache_sizes *s = malloc_sizes;
while (s->cs_size != ULONG_MAX) {
for_each_node(q) {
struct array_cache **alc;
int r;
struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
if (!l3 || OFF_SLAB(s->cs_cachep))
continue;
lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
alc = l3->alien;
/*
* FIXME: This check for BAD_ALIEN_MAGIC
* should go away when common slab code is taught to
* work even without alien caches.
* Currently, non-NUMA code returns BAD_ALIEN_MAGIC
* from alloc_alien_cache().
*/
if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
continue;
for_each_node(r) {
if (alc[r])
lockdep_set_class(&alc[r]->lock,
&on_slab_alc_key);
}
}
s++;
}
}
#else
static inline void init_lock_keys(void)
{
}
#endif
/*
* Guard access to the cache-chain.
*/
static DEFINE_MUTEX(cache_chain_mutex);
static struct list_head cache_chain;
/*
* chicken and egg problem: delay the per-cpu array allocation
* until the general caches are up.
*/
static enum {
NONE,
PARTIAL_AC,
PARTIAL_L3,
EARLY,
FULL
} g_cpucache_up;
/*
* used by boot code to determine if it can use slab based allocator
*/
int slab_is_available(void)
{
return g_cpucache_up >= EARLY;
}
static DEFINE_PER_CPU(struct delayed_work, reap_work);
static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
return cachep->array[smp_processor_id()];
}
static inline struct kmem_cache *__find_general_cachep(size_t size,
gfp_t gfpflags)
{
struct cache_sizes *csizep = malloc_sizes;
#if DEBUG
/* This happens if someone tries to call
* kmem_cache_create(), or __kmalloc(), before
* the generic caches are initialized.
*/
BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
#endif
if (!size)
return ZERO_SIZE_PTR;
while (size > csizep->cs_size)
csizep++;
/*
* Really subtle: The last entry with cs->cs_size==ULONG_MAX
* has cs_{dma,}cachep==NULL. Thus no special case
* for large kmalloc calls is required.
*/
#ifdef CONFIG_ZONE_DMA
if (unlikely(gfpflags & GFP_DMA))
return csizep->cs_dmacachep;
#endif
return csizep->cs_cachep;
}
static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
{
return __find_general_cachep(size, gfpflags);
}
static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{
return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
}
/*
* Calculate the number of objects and left-over bytes for a given buffer size.
*/
static void cache_estimate(unsigned long gfporder, size_t buffer_size,
size_t align, int flags, size_t *left_over,
unsigned int *num)
{
int nr_objs;
size_t mgmt_size;
size_t slab_size = PAGE_SIZE << gfporder;
/*
* The slab management structure can be either off the slab or
* on it. For the latter case, the memory allocated for a
* slab is used for:
*
* - The struct slab
* - One kmem_bufctl_t for each object
* - Padding to respect alignment of @align
* - @buffer_size bytes for each object
*
* If the slab management structure is off the slab, then the
* alignment will already be calculated into the size. Because
* the slabs are all pages aligned, the objects will be at the
* correct alignment when allocated.
*/
if (flags & CFLGS_OFF_SLAB) {
mgmt_size = 0;
nr_objs = slab_size / buffer_size;
if (nr_objs > SLAB_LIMIT)
nr_objs = SLAB_LIMIT;
} else {
/*
* Ignore padding for the initial guess. The padding
* is at most @align-1 bytes, and @buffer_size is at
* least @align. In the worst case, this result will
* be one greater than the number of objects that fit
* into the memory allocation when taking the padding
* into account.
*/
nr_objs = (slab_size - sizeof(struct slab)) /
(buffer_size + sizeof(kmem_bufctl_t));
/*
* This calculated number will be either the right
* amount, or one greater than what we want.
*/
if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
> slab_size)
nr_objs--;
if (nr_objs > SLAB_LIMIT)
nr_objs = SLAB_LIMIT;
mgmt_size = slab_mgmt_size(nr_objs, align);
}
*num = nr_objs;
*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}
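/*
 * Worked example (illustrative, assuming a 32-bit build where
 * sizeof(struct slab) == 28 and sizeof(kmem_bufctl_t) == 4): for an
 * on-slab cache with buffer_size = 1024, align = 32 and gfporder = 0
 * (slab_size = 4096), the initial guess is
 * (4096 - 28) / (1024 + 4) = 3 objects.  slab_mgmt_size(3, 32) =
 * ALIGN(28 + 3 * 4, 32) = 64 and 64 + 3 * 1024 = 3136 <= 4096, so the
 * guess stands: *num = 3 and *left_over = 4096 - 3072 - 64 = 960.
 */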
#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
static void __slab_error(const char *function, struct kmem_cache *cachep,
char *msg)
{
printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
function, cachep->name, msg);
dump_stack();
}
/*
* By default on NUMA we use alien caches to stage the freeing of
* objects allocated from other nodes. This causes massive memory
* inefficiencies when using fake NUMA setup to split memory into a
* large number of small nodes, so it can be disabled on the command
* line
*/
static int use_alien_caches __read_mostly = 1;
static int __init noaliencache_setup(char *s)
{
use_alien_caches = 0;
return 1;
}
__setup("noaliencache", noaliencache_setup);
#ifdef CONFIG_NUMA
/*
* Special reaping functions for NUMA systems called from cache_reap().
* These take care of doing round robin flushing of alien caches (containing
* objects freed on different nodes from which they were allocated) and the
* flushing of remote pcps by calling drain_node_pages.
*/
static DEFINE_PER_CPU(unsigned long, reap_node);
static void init_reap_node(int cpu)
{
int node;
node = next_node(cpu_to_node(cpu), node_online_map);
if (node == MAX_NUMNODES)
node = first_node(node_online_map);
per_cpu(reap_node, cpu) = node;
}
static void next_reap_node(void)
{
int node = __get_cpu_var(reap_node);
node = next_node(node, node_online_map);
if (unlikely(node >= MAX_NUMNODES))
node = first_node(node_online_map);
__get_cpu_var(reap_node) = node;
}
#else
#define init_reap_node(cpu) do { } while (0)
#define next_reap_node(void) do { } while (0)
#endif
/*
* Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz
* via the workqueue/eventd.
* Add the CPU number into the expiration time to minimize the possibility of
* the CPUs getting into lockstep and contending for the global cache chain
* lock.
*/
static void __cpuinit start_cpu_timer(int cpu)
{
struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
/*
* When this gets called from do_initcalls via cpucache_init(),
* init_workqueues() has already run, so keventd will be setup
* at that time.
*/
if (keventd_up() && reap_work->work.func == NULL) {
init_reap_node(cpu);
INIT_DELAYED_WORK(reap_work, cache_reap);
schedule_delayed_work_on(cpu, reap_work,
__round_jiffies_relative(HZ, cpu));
}
}
static struct array_cache *alloc_arraycache(int node, int entries,
int batchcount, gfp_t gfp)
{
int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
struct array_cache *nc = NULL;
nc = kmalloc_node(memsize, gfp, node);
/*
* The array_cache structures contain pointers to free objects.
* However, when such objects are allocated or transferred to another
* cache the pointers are not cleared and they could be counted as
* valid references during a kmemleak scan. Therefore, kmemleak must
* not scan such objects.
*/
kmemleak_no_scan(nc);
if (nc) {
nc->avail = 0;
nc->limit = entries;
nc->batchcount = batchcount;
nc->touched = 0;
spin_lock_init(&nc->lock);
}
return nc;
}
/*
* Transfer objects in one arraycache to another.
* Locking must be handled by the caller.
*
* Return the number of entries transferred.
*/
static int transfer_objects(struct array_cache *to,
struct array_cache *from, unsigned int max)
{
/* Figure out how many entries to transfer */
int nr = min(min(from->avail, max), to->limit - to->avail);
if (!nr)
return 0;
memcpy(to->entry + to->avail, from->entry + from->avail - nr,
		sizeof(void *) * nr);
from->avail -= nr;
to->avail += nr;
to->touched = 1;
return nr;
}
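/*
 * Usage sketch (illustrative): __drain_alien_cache() below uses this
 * exact pattern to push one CPU's alien objects into the remote node's
 * shared array before falling back to free_block(); the caller already
 * holds the remote l3->list_lock, satisfying the locking rule above.
 *
 *	if (rl3->shared)
 *		transfer_objects(rl3->shared, ac, ac->limit);
 */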
#ifndef CONFIG_NUMA
#define drain_alien_cache(cachep, alien) do { } while (0)
#define reap_alien(cachep, l3) do { } while (0)
static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
return (struct array_cache **)BAD_ALIEN_MAGIC;
}
static inline void free_alien_cache(struct array_cache **ac_ptr)
{
}
static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
return 0;
}
static inline void *alternate_node_alloc(struct kmem_cache *cachep,
gfp_t flags)
{
return NULL;
}
static inline void *____cache_alloc_node(struct kmem_cache *cachep,
gfp_t flags, int nodeid)
{
return NULL;
}
#else /* CONFIG_NUMA */
static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
{
struct array_cache **ac_ptr;
int memsize = sizeof(void *) * nr_node_ids;
int i;
if (limit > 1)
limit = 12;
ac_ptr = kmalloc_node(memsize, gfp, node);
if (ac_ptr) {
for_each_node(i) {
if (i == node || !node_online(i)) {
ac_ptr[i] = NULL;
continue;
}
ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
if (!ac_ptr[i]) {
for (i--; i >= 0; i--)
kfree(ac_ptr[i]);
kfree(ac_ptr);
return NULL;
}
}
}
return ac_ptr;
}
static void free_alien_cache(struct array_cache **ac_ptr)
{
int i;
if (!ac_ptr)
return;
for_each_node(i)
kfree(ac_ptr[i]);
kfree(ac_ptr);
}
static void __drain_alien_cache(struct kmem_cache *cachep,
struct array_cache *ac, int node)
{
struct kmem_list3 *rl3 = cachep->nodelists[node];
if (ac->avail) {
spin_lock(&rl3->list_lock);
/*
* Stuff objects into the remote node's shared array first.
* That way we could avoid the overhead of putting the objects
* into the free lists and getting them back later.
*/
if (rl3->shared)
transfer_objects(rl3->shared, ac, ac->limit);
free_block(cachep, ac->entry, ac->avail, node);
ac->avail = 0;
spin_unlock(&rl3->list_lock);
}
}
/*
* Called from cache_reap() to regularly drain alien caches round robin.
*/
static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
{
int node = __get_cpu_var(reap_node);
if (l3->alien) {
struct array_cache *ac = l3->alien[node];
if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
__drain_alien_cache(cachep, ac, node);
spin_unlock_irq(&ac->lock);
}
}
}
static void drain_alien_cache(struct kmem_cache *cachep,
struct array_cache **alien)
{
int i = 0;
struct array_cache *ac;
unsigned long flags;
for_each_online_node(i) {
ac = alien[i];
if (ac) {
spin_lock_irqsave(&ac->lock, flags);
__drain_alien_cache(cachep, ac, i);
spin_unlock_irqrestore(&ac->lock, flags);
}
}
}
static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
{
struct slab *slabp = virt_to_slab(objp);
int nodeid = slabp->nodeid;
struct kmem_list3 *l3;
struct array_cache *alien = NULL;
int node;
node = numa_node_id();
/*
* Make sure we are not freeing an object from another node to the array
* cache on this cpu.
*/
if (likely(slabp->nodeid == node))
return 0;
l3 = cachep->nodelists[node];
STATS_INC_NODEFREES(cachep);
if (l3->alien && l3->alien[nodeid]) {
alien = l3->alien[nodeid];
spin_lock(&alien->lock);
if (unlikely(alien->avail == alien->limit)) {
STATS_INC_ACOVERFLOW(cachep);
__drain_alien_cache(cachep, alien, nodeid);
}
alien->entry[alien->avail++] = objp;
spin_unlock(&alien->lock);
} else {
spin_lock(&(cachep->nodelists[nodeid])->list_lock);
free_block(cachep, &objp, 1, nodeid);
spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
}
return 1;
}
#endif
static void __cpuinit cpuup_canceled(long cpu)
{
struct kmem_cache *cachep;
struct kmem_list3 *l3 = NULL;
int node = cpu_to_node(cpu);
const struct cpumask *mask = cpumask_of_node(node);
list_for_each_entry(cachep, &cache_chain, next) {
struct array_cache *nc;
struct array_cache *shared;
struct array_cache **alien;
/* cpu is dead; no one can alloc from it. */
nc = cachep->array[cpu];
cachep->array[cpu] = NULL;
l3 = cachep->nodelists[node];
if (!l3)
goto free_array_cache;
spin_lock_irq(&l3->list_lock);
/* Free limit for this kmem_list3 */
l3->free_limit -= cachep->batchcount;
if (nc)
free_block(cachep, nc->entry, nc->avail, node);
if (!cpus_empty(*mask)) {
spin_unlock_irq(&l3->list_lock);
goto free_array_cache;
}
shared = l3->shared;
if (shared) {
free_block(cachep, shared->entry,
shared->avail, node);
l3->shared = NULL;
}
alien = l3->alien;
l3->alien = NULL;
spin_unlock_irq(&l3->list_lock);
kfree(shared);
if (alien) {
drain_alien_cache(cachep, alien);
free_alien_cache(alien);
}
free_array_cache:
kfree(nc);
}
/*
* In the previous loop, all the objects were freed to
* the respective cache's slabs; now we can go ahead and
* shrink each nodelist to its limit.
*/
list_for_each_entry(cachep, &cache_chain, next) {
l3 = cachep->nodelists[node];
if (!l3)
continue;
drain_freelist(cachep, l3, l3->free_objects);
}
}
static int __cpuinit cpuup_prepare(long cpu)
{
struct kmem_cache *cachep;
struct kmem_list3 *l3 = NULL;
int node = cpu_to_node(cpu);
const int memsize = sizeof(struct kmem_list3);
/*
* We need to do this right in the beginning since
* alloc_arraycache() allocations are going to use this list.
* kmalloc_node allows us to add the slab to the right
* kmem_list3 and not this cpu's kmem_list3
*/
list_for_each_entry(cachep, &cache_chain, next) {
/*
* Set up the size64 kmemlist for cpu before we can
* begin anything. Make sure some other cpu on this
* node has not already allocated this
*/
if (!cachep->nodelists[node]) {
l3 = kmalloc_node(memsize, GFP_KERNEL, node);
if (!l3)
goto bad;
kmem_list3_init(l3);
l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
((unsigned long)cachep) % REAPTIMEOUT_LIST3;
/*
* The l3s don't come and go as CPUs come and
* go. cache_chain_mutex is sufficient
* protection here.
*/
cachep->nodelists[node] = l3;
}
spin_lock_irq(&cachep->nodelists[node]->list_lock);
cachep->nodelists[node]->free_limit =
(1 + nr_cpus_node(node)) *
cachep->batchcount + cachep->num;
spin_unlock_irq(&cachep->nodelists[node]->list_lock);
}
/*
* Now we can go ahead with allocating the shared arrays and
* array caches
*/
list_for_each_entry(cachep, &cache_chain, next) {
struct array_cache *nc;
struct array_cache *shared = NULL;
struct array_cache **alien = NULL;
nc = alloc_arraycache(node, cachep->limit,
cachep->batchcount, GFP_KERNEL);
if (!nc)
goto bad;
if (cachep->shared) {
shared = alloc_arraycache(node,
cachep->shared * cachep->batchcount,
0xbaadf00d, GFP_KERNEL);
if (!shared) {
kfree(nc);
goto bad;
}
}
if (use_alien_caches) {
alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
if (!alien) {
kfree(shared);
kfree(nc);
goto bad;
}
}
cachep->array[cpu] = nc;
l3 = cachep->nodelists[node];
BUG_ON(!l3);
spin_lock_irq(&l3->list_lock);
if (!l3->shared) {
/*
* We are serialised from CPU_DEAD or
* CPU_UP_CANCELLED by the cpucontrol lock
*/
l3->shared = shared;
shared = NULL;
}
#ifdef CONFIG_NUMA
if (!l3->alien) {
l3->alien = alien;
alien = NULL;
}
#endif
spin_unlock_irq(&l3->list_lock);
kfree(shared);
free_alien_cache(alien);
}
return 0;
bad:
cpuup_canceled(cpu);
return -ENOMEM;
}
static int __cpuinit cpuup_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
int err = 0;
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
mutex_lock(&cache_chain_mutex);
err = cpuup_prepare(cpu);
mutex_unlock(&cache_chain_mutex);
break;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
start_cpu_timer(cpu);
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
/*
* Shutdown cache reaper. Note that the cache_chain_mutex is
* held so that if cache_reap() is invoked it cannot do
* anything expensive but will only modify reap_work
* and reschedule the timer.
*/
cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
/* Now the cache_reaper is guaranteed to be not running. */
per_cpu(reap_work, cpu).work.func = NULL;
break;
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
start_cpu_timer(cpu);
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
/*
* Even if all the cpus of a node are down, we don't free the
* kmem_list3 of any cache. This is to avoid a race between
* cpu_down, and a kmalloc allocation from another cpu for
* memory from the node of the cpu going down. The list3
* structure is usually allocated from kmem_cache_create() and
* gets destroyed at kmem_cache_destroy().
*/
/* fall through */
#endif
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
mutex_lock(&cache_chain_mutex);
cpuup_canceled(cpu);
mutex_unlock(&cache_chain_mutex);
break;
}
return err ? NOTIFY_BAD : NOTIFY_OK;
}
static struct notifier_block __cpuinitdata cpucache_notifier = {
&cpuup_callback, NULL, 0
};
/*
* swap the static kmem_list3 with kmalloced memory
*/
static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
int nodeid)
{
struct kmem_list3 *ptr;
ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_NOWAIT, nodeid);
BUG_ON(!ptr);
memcpy(ptr, list, sizeof(struct kmem_list3));
/*
* Do not assume that spinlocks can be initialized via memcpy:
*/
spin_lock_init(&ptr->list_lock);
MAKE_ALL_LISTS(cachep, ptr, nodeid);
cachep->nodelists[nodeid] = ptr;
}
/*
* For setting up all the kmem_list3s for cache whose buffer_size is same as
* size of kmem_list3.
*/
static void __init set_up_list3s(struct kmem_cache *cachep, int index)
{
int node;
for_each_online_node(node) {
cachep->nodelists[node] = &initkmem_list3[index + node];
cachep->nodelists[node]->next_reap = jiffies +
REAPTIMEOUT_LIST3 +
((unsigned long)cachep) % REAPTIMEOUT_LIST3;
}
}
/*
* Initialisation. Called after the page allocator has been initialised and
* before smp_init().
*/
void __init kmem_cache_init(void)
{
size_t left_over;
struct cache_sizes *sizes;
struct cache_names *names;
int i;
int order;
int node;
if (num_possible_nodes() == 1)
use_alien_caches = 0;
for (i = 0; i < NUM_INIT_LISTS; i++) {
kmem_list3_init(&initkmem_list3[i]);
if (i < MAX_NUMNODES)
cache_cache.nodelists[i] = NULL;
}
set_up_list3s(&cache_cache, CACHE_CACHE);
/*
* Fragmentation resistance on low memory - only use bigger
* page orders on machines with more than 32MB of memory.
*/
if (totalram_pages > (32 << 20) >> PAGE_SHIFT)
slab_break_gfp_order = BREAK_GFP_ORDER_HI;
/* Bootstrap is tricky, because several objects are allocated
* from caches that do not exist yet:
* 1) initialize the cache_cache cache: it contains the struct
* kmem_cache structures of all caches, except cache_cache itself:
* cache_cache is statically allocated.
* Initially an __init data area is used for the head array and the
* kmem_list3 structures, it's replaced with a kmalloc allocated
* array at the end of the bootstrap.
* 2) Create the first kmalloc cache.
* The struct kmem_cache for the new cache is allocated normally.
* An __init data area is used for the head array.
* 3) Create the remaining kmalloc caches, with minimally sized
* head arrays.
* 4) Replace the __init data head arrays for cache_cache and the first
* kmalloc cache with kmalloc allocated arrays.
* 5) Replace the __init data for kmem_list3 for cache_cache and
* the other cache's with kmalloc allocated memory.
* 6) Resize the head arrays of the kmalloc caches to their final sizes.
*/
node = numa_node_id();
/* 1) create the cache_cache */
INIT_LIST_HEAD(&cache_chain);
list_add(&cache_cache.next, &cache_chain);
cache_cache.colour_off = cache_line_size();
cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
/*
* struct kmem_cache size depends on nr_node_ids, which
* can be less than MAX_NUMNODES.
*/
cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) +
nr_node_ids * sizeof(struct kmem_list3 *);
#if DEBUG
cache_cache.obj_size = cache_cache.buffer_size;
#endif
cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
cache_line_size());
cache_cache.reciprocal_buffer_size =
reciprocal_value(cache_cache.buffer_size);
for (order = 0; order < MAX_ORDER; order++) {
cache_estimate(order, cache_cache.buffer_size,
cache_line_size(), 0, &left_over, &cache_cache.num);
if (cache_cache.num)
break;
}
BUG_ON(!cache_cache.num);
cache_cache.gfporder = order;
cache_cache.colour = left_over / cache_cache.colour_off;
cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
sizeof(struct slab), cache_line_size());
/* 2+3) create the kmalloc caches */
sizes = malloc_sizes;
names = cache_names;
/*
* Initialize the caches that provide memory for the array cache and the
* kmem_list3 structures first. Without this, further allocations will
* bug.
*/
sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
sizes[INDEX_AC].cs_size,
ARCH_KMALLOC_MINALIGN,
ARCH_KMALLOC_FLAGS|SLAB_PANIC,
NULL);
if (INDEX_AC != INDEX_L3) {
sizes[INDEX_L3].cs_cachep =
kmem_cache_create(names[INDEX_L3].name,
sizes[INDEX_L3].cs_size,
ARCH_KMALLOC_MINALIGN,
ARCH_KMALLOC_FLAGS|SLAB_PANIC,
NULL);
}
slab_early_init = 0;
while (sizes->cs_size != ULONG_MAX) {
/*
* For performance, all the general caches are L1 aligned.
* This should be particularly beneficial on SMP boxes, as it
* eliminates "false sharing".
* Note: for systems short on memory, removing the alignment will
* allow tighter packing of the smaller caches.
*/
if (!sizes->cs_cachep) {
sizes->cs_cachep = kmem_cache_create(names->name,
sizes->cs_size,
ARCH_KMALLOC_MINALIGN,
ARCH_KMALLOC_FLAGS|SLAB_PANIC,
NULL);
}
#ifdef CONFIG_ZONE_DMA
sizes->cs_dmacachep = kmem_cache_create(
names->name_dma,
sizes->cs_size,
ARCH_KMALLOC_MINALIGN,
ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
SLAB_PANIC,
NULL);
#endif
sizes++;
names++;
}
/* 4) Replace the bootstrap head arrays */
{
struct array_cache *ptr;
ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
memcpy(ptr, cpu_cache_get(&cache_cache),
sizeof(struct arraycache_init));
/*
* Do not assume that spinlocks can be initialized via memcpy:
*/
spin_lock_init(&ptr->lock);
cache_cache.array[smp_processor_id()] = ptr;
ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
!= &initarray_generic.cache);
memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
sizeof(struct arraycache_init));
/*
* Do not assume that spinlocks can be initialized via memcpy:
*/
spin_lock_init(&ptr->lock);
malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
ptr;
}
/* 5) Replace the bootstrap kmem_list3's */
{
int nid;
for_each_online_node(nid) {
init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
init_list(malloc_sizes[INDEX_AC].cs_cachep,
&initkmem_list3[SIZE_AC + nid], nid);
if (INDEX_AC != INDEX_L3) {
init_list(malloc_sizes[INDEX_L3].cs_cachep,
&initkmem_list3[SIZE_L3 + nid], nid);
}
}
}
g_cpucache_up = EARLY;
}
void __init kmem_cache_init_late(void)
{
struct kmem_cache *cachep;
/* 6) resize the head arrays to their final sizes */
mutex_lock(&cache_chain_mutex);
list_for_each_entry(cachep, &cache_chain, next)
if (enable_cpucache(cachep, GFP_NOWAIT))
BUG();
mutex_unlock(&cache_chain_mutex);
/* Done! */
g_cpucache_up = FULL;
/* Annotate slab for lockdep -- annotate the malloc caches */
init_lock_keys();
/*
* Register a cpu startup notifier callback that initializes
* cpu_cache_get for all new cpus
*/
register_cpu_notifier(&cpucache_notifier);
/*
* The reap timers are started later, with a module init call: That part
* of the kernel is not yet operational.
*/
}
static int __init cpucache_init(void)
{
int cpu;
/*
* Register the timers that return unneeded pages to the page allocator
*/
for_each_online_cpu(cpu)
start_cpu_timer(cpu);
return 0;
}
__initcall(cpucache_init);
/*
* Interface to system's page allocator. No need to hold the cache-lock.
*
* If we requested dmaable memory, we will get it. Even if we
* did not request dmaable memory, we might get it, but that
* would be relatively rare and ignorable.
*/
static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
struct page *page;
int nr_pages;
int i;
#ifndef CONFIG_MMU
/*
* Nommu uses slabs for process anonymous memory allocations, and thus
* requires __GFP_COMP to properly refcount higher order allocations
*/
flags |= __GFP_COMP;
#endif
flags |= cachep->gfpflags;
if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
flags |= __GFP_RECLAIMABLE;
page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
if (!page)
return NULL;
nr_pages = (1 << cachep->gfporder);
if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
add_zone_page_state(page_zone(page),
NR_SLAB_RECLAIMABLE, nr_pages);
else
add_zone_page_state(page_zone(page),
NR_SLAB_UNRECLAIMABLE, nr_pages);
for (i = 0; i < nr_pages; i++)
__SetPageSlab(page + i);
if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
if (cachep->ctor)
kmemcheck_mark_uninitialized_pages(page, nr_pages);
else
kmemcheck_mark_unallocated_pages(page, nr_pages);
}
return page_address(page);
}
/*
* Interface to system's page release.
*/
static void kmem_freepages(struct kmem_cache *cachep, void *addr)
{
unsigned long i = (1 << cachep->gfporder);
struct page *page = virt_to_page(addr);
const unsigned long nr_freed = i;
kmemcheck_free_shadow(page, cachep->gfporder);
if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
sub_zone_page_state(page_zone(page),
NR_SLAB_RECLAIMABLE, nr_freed);
else
sub_zone_page_state(page_zone(page),
NR_SLAB_UNRECLAIMABLE, nr_freed);
while (i--) {
BUG_ON(!PageSlab(page));
__ClearPageSlab(page);
page++;
}
if (current->reclaim_state)
current->reclaim_state->reclaimed_slab += nr_freed;
free_pages((unsigned long)addr, cachep->gfporder);
}
static void kmem_rcu_free(struct rcu_head *head)
{
struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
struct kmem_cache *cachep = slab_rcu->cachep;
kmem_freepages(cachep, slab_rcu->addr);
if (OFF_SLAB(cachep))
kmem_cache_free(cachep->slabp_cache, slab_rcu);
}
#if DEBUG
#ifdef CONFIG_DEBUG_PAGEALLOC
static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
unsigned long caller)
{
int size = obj_size(cachep);
addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
if (size < 5 * sizeof(unsigned long))
return;
*addr++ = 0x12345678;
*addr++ = caller;
*addr++ = smp_processor_id();
size -= 3 * sizeof(unsigned long);
{
unsigned long *sptr = &caller;
unsigned long svalue;
while (!kstack_end(sptr)) {
svalue = *sptr++;
if (kernel_text_address(svalue)) {
*addr++ = svalue;
size -= sizeof(unsigned long);
if (size <= sizeof(unsigned long))
break;
}
}
}
*addr++ = 0x87654321;
}
#endif
static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
{
int size = obj_size(cachep);
addr = &((char *)addr)[obj_offset(cachep)];
memset(addr, val, size);
*(unsigned char *)(addr + size - 1) = POISON_END;
}
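/*
 * Example (illustrative): for a 16-byte object poisoned on free, the
 * payload becomes fifteen POISON_FREE bytes (0x6b) terminated by a
 * single POISON_END byte (0xa5):
 *
 *	6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b a5
 *
 * check_poison_obj() below verifies exactly this pattern and hexdumps
 * any 16-byte line that deviates from it.
 */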
static void dump_line(char *data, int offset, int limit)
{
int i;
unsigned char error = 0;
int bad_count = 0;
printk(KERN_ERR "%03x:", offset);
for (i = 0; i < limit; i++) {
if (data[offset + i] != POISON_FREE) {
error = data[offset + i];
bad_count++;
}
printk(" %02x", (unsigned char)data[offset + i]);
}
printk("\n");
if (bad_count == 1) {
error ^= POISON_FREE;
if (!(error & (error - 1))) {
printk(KERN_ERR "Single bit error detected. Probably "
"bad RAM.\n");
#ifdef CONFIG_X86
printk(KERN_ERR "Run memtest86+ or a similar memory "
"test tool.\n");
#else
printk(KERN_ERR "Run a memory test tool.\n");
#endif
}
}
}
#endif
#if DEBUG
static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
{
int i, size;
char *realobj;
if (cachep->flags & SLAB_RED_ZONE) {
printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
*dbg_redzone1(cachep, objp),
*dbg_redzone2(cachep, objp));
}
if (cachep->flags & SLAB_STORE_USER) {
printk(KERN_ERR "Last user: [<%p>]",
*dbg_userword(cachep, objp));
print_symbol("(%s)",
(unsigned long)*dbg_userword(cachep, objp));
printk("\n");
}
realobj = (char *)objp + obj_offset(cachep);
size = obj_size(cachep);
for (i = 0; i < size && lines; i += 16, lines--) {
int limit;
limit = 16;
if (i + limit > size)
limit = size - i;
dump_line(realobj, i, limit);
}
}
static void check_poison_obj(struct kmem_cache *cachep, void *objp)
{
char *realobj;
int size, i;
int lines = 0;
realobj = (char *)objp + obj_offset(cachep);
size = obj_size(cachep);
for (i = 0; i < size; i++) {
char exp = POISON_FREE;
if (i == size - 1)
exp = POISON_END;
if (realobj[i] != exp) {
int limit;
/* Mismatch ! */
/* Print header */
if (lines == 0) {
printk(KERN_ERR
"Slab corruption: %s start=%p, len=%d\n",
cachep->name, realobj, size);
print_objinfo(cachep, objp, 0);
}
/* Hexdump the affected line */
i = (i / 16) * 16;
limit = 16;
if (i + limit > size)
limit = size - i;
dump_line(realobj, i, limit);
i += 16;
lines++;
/* Limit to 5 lines */
if (lines > 5)
break;
}
}
if (lines != 0) {
/* Print some data about the neighboring objects, if they
* exist:
*/
struct slab *slabp = virt_to_slab(objp);
unsigned int objnr;
objnr = obj_to_index(cachep, slabp, objp);
if (objnr) {
objp = index_to_obj(cachep, slabp, objnr - 1);
realobj = (char *)objp + obj_offset(cachep);
printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
realobj, size);
print_objinfo(cachep, objp, 2);
}
if (objnr + 1 < cachep->num) {
objp = index_to_obj(cachep, slabp, objnr + 1);
realobj = (char *)objp + obj_offset(cachep);
printk(KERN_ERR "Next obj: start=%p, len=%d\n",
realobj, size);
print_objinfo(cachep, objp, 2);
}
}
}
#endif
#if DEBUG
static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
{
int i;
for (i = 0; i < cachep->num; i++) {
void *objp = index_to_obj(cachep, slabp, i);
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
if (cachep->buffer_size % PAGE_SIZE == 0 &&
OFF_SLAB(cachep))
kernel_map_pages(virt_to_page(objp),
cachep->buffer_size / PAGE_SIZE, 1);
else
check_poison_obj(cachep, objp);
#else
check_poison_obj(cachep, objp);
#endif
}
if (cachep->flags & SLAB_RED_ZONE) {
if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
slab_error(cachep, "start of a freed object "
"was overwritten");
if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
slab_error(cachep, "end of a freed object "
"was overwritten");
}
}
}
#else
static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
{
}
#endif
/**
* slab_destroy - destroy and release all objects in a slab
* @cachep: cache pointer being destroyed
* @slabp: slab pointer being destroyed
*
* Destroy all the objs in a slab, and release the mem back to the system.
* Before calling, the slab must have been unlinked from the cache. The
* cache-lock is not held/needed.
*/
static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
{
void *addr = slabp->s_mem - slabp->colouroff;
slab_destroy_debugcheck(cachep, slabp);
if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
struct slab_rcu *slab_rcu;
slab_rcu = (struct slab_rcu *)slabp;
slab_rcu->cachep = cachep;
slab_rcu->addr = addr;
call_rcu(&slab_rcu->head, kmem_rcu_free);
} else {
kmem_freepages(cachep, addr);
if (OFF_SLAB(cachep))
kmem_cache_free(cachep->slabp_cache, slabp);
}
}
static void __kmem_cache_destroy(struct kmem_cache *cachep)
{
int i;
struct kmem_list3 *l3;
for_each_online_cpu(i)
kfree(cachep->array[i]);
/* NUMA: free the list3 structures */
for_each_online_node(i) {
l3 = cachep->nodelists[i];
if (l3) {
kfree(l3->shared);
free_alien_cache(l3->alien);
kfree(l3);
}
}
kmem_cache_free(&cache_cache, cachep);
}
/**
* calculate_slab_order - calculate size (page order) of slabs
* @cachep: pointer to the cache that is being created
* @size: size of objects to be created in this cache.
* @align: required alignment for the objects.
* @flags: slab allocation flags
*
* Also calculates the number of objects per slab.
*
* This could be made much more intelligent. For now, try to avoid using
* high order pages for slabs. When the gfp() functions are more friendly
* towards high-order requests, this should be changed.
*/
static size_t calculate_slab_order(struct kmem_cache *cachep,
size_t size, size_t align, unsigned long flags)
{
unsigned long offslab_limit;
size_t left_over = 0;
int gfporder;
for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
unsigned int num;
size_t remainder;
cache_estimate(gfporder, size, align, flags, &remainder, &num);
if (!num)
continue;
if (flags & CFLGS_OFF_SLAB) {
/*
* Max number of objs-per-slab for caches which
* use off-slab slabs. Needed to avoid a possible
* looping condition in cache_grow().
*/
offslab_limit = size - sizeof(struct slab);
offslab_limit /= sizeof(kmem_bufctl_t);
if (num > offslab_limit)
break;
}
/* Found something acceptable - save it away */
cachep->num = num;
cachep->gfporder = gfporder;
left_over = remainder;
/*
* A VFS-reclaimable slab tends to have most allocations
* as GFP_NOFS and we really don't want to have to be allocating
* higher-order pages when we are unable to shrink dcache.
*/
if (flags & SLAB_RECLAIM_ACCOUNT)
break;
/*
* Large number of objects is good, but very large slabs are
* currently bad for the gfp()s.
*/
if (gfporder >= slab_break_gfp_order)
break;
/*
* Acceptable internal fragmentation?
*/
if (left_over * 8 <= (PAGE_SIZE << gfporder))
break;
}
return left_over;
}
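/*
 * Worked example of the search above (illustrative numbers, assuming
 * PAGE_SIZE == 4096 and a 1500-byte object with off-slab management, so
 * the whole slab holds objects): at gfporder 0, num = 2 and left_over =
 * 4096 - 3000 = 1096, and 1096 * 8 = 8768 > 4096, so the loop tries the
 * next order.  At gfporder 1, num = 5 and left_over = 8192 - 7500 = 692,
 * and the loop stops there (692 * 8 = 5536 <= 8192 satisfies the
 * fragmentation test, and gfporder has reached slab_break_gfp_order),
 * leaving cachep->gfporder = 1 and cachep->num = 5.
 */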
static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
{
if (g_cpucache_up == FULL)
return enable_cpucache(cachep, gfp);
if (g_cpucache_up == NONE) {
/*
* Note: the first kmem_cache_create must create the cache
* that's used by kmalloc(24), otherwise the creation of
* further caches will BUG().
*/
cachep->array[smp_processor_id()] = &initarray_generic.cache;
/*
* If the cache that's used by kmalloc(sizeof(kmem_list3)) is
* the first cache, then we need to set up all its list3s,
* otherwise the creation of further caches will BUG().
*/
set_up_list3s(cachep, SIZE_AC);
if (INDEX_AC == INDEX_L3)
g_cpucache_up = PARTIAL_L3;
else
g_cpucache_up = PARTIAL_AC;
} else {
cachep->array[smp_processor_id()] =
kmalloc(sizeof(struct arraycache_init), gfp);
if (g_cpucache_up == PARTIAL_AC) {
set_up_list3s(cachep, SIZE_L3);
g_cpucache_up = PARTIAL_L3;
} else {
int node;
for_each_online_node(node) {
cachep->nodelists[node] =
kmalloc_node(sizeof(struct kmem_list3),
gfp, node);
BUG_ON(!cachep->nodelists[node]);
kmem_list3_init(cachep->nodelists[node]);
}
}
}
cachep->nodelists[numa_node_id()]->next_reap =
jiffies + REAPTIMEOUT_LIST3 +
((unsigned long)cachep) % REAPTIMEOUT_LIST3;
cpu_cache_get(cachep)->avail = 0;
cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
cpu_cache_get(cachep)->batchcount = 1;
cpu_cache_get(cachep)->touched = 0;
cachep->batchcount = 1;
cachep->limit = BOOT_CPUCACHE_ENTRIES;
return 0;
}
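/*
 * Bootstrap progression assumed by the branches above: g_cpucache_up moves
 * NONE -> PARTIAL_AC (the kmalloc cache backing struct arraycache_init
 * exists) -> PARTIAL_L3 (the cache backing struct kmem_list3 exists) ->
 * FULL (set elsewhere once kmalloc is fully usable).  Until then the
 * early branches must hand-feed static arrays and list3s.
 */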
/**
* kmem_cache_create - Create a cache.
* @name: A string which is used in /proc/slabinfo to identify this cache.
* @size: The size of objects to be created in this cache.
* @align: The required alignment for the objects.
* @flags: SLAB flags
* @ctor: A constructor for the objects.
*
* Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
* The @ctor is run when new pages are allocated by the cache.
*
* @name must be valid until the cache is destroyed. This implies that
* the module calling this has to destroy the cache before getting unloaded.
* Note that kmem_cache_name() is not guaranteed to return the same pointer,
* therefore applications must manage it themselves.
*
* The flags are
*
* %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
* to catch references to uninitialised memory.
*
* %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
* for buffer overruns.
*
* %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
* cacheline. This can be beneficial if you're counting cycles as closely
* as davem.
*/
struct kmem_cache *
kmem_cache_create (const char *name, size_t size, size_t align,
unsigned long flags, void (*ctor)(void *))
{
size_t left_over, slab_size, ralign;
struct kmem_cache *cachep = NULL, *pc;
gfp_t gfp;
/*
* Sanity checks... these are all serious usage bugs.
*/
if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
size > KMALLOC_MAX_SIZE) {
printk(KERN_ERR "%s: Early error in slab %s\n", __func__,
name);
BUG();
}
/*
* We use cache_chain_mutex to ensure a consistent view of
* cpu_online_mask as well. Please see cpuup_callback
*/
if (slab_is_available()) {
get_online_cpus();
mutex_lock(&cache_chain_mutex);
}
list_for_each_entry(pc, &cache_chain, next) {
char tmp;
int res;
/*
* This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no one else reuses the vmalloc
* area of the module. Print a warning.
*/
res = probe_kernel_address(pc->name, tmp);
if (res) {
printk(KERN_ERR
"SLAB: cache with size %d has lost its name\n",
pc->buffer_size);
continue;
}
if (!strcmp(pc->name, name)) {
printk(KERN_ERR
"kmem_cache_create: duplicate cache %s\n", name);
dump_stack();
goto oops;
}
}
#if DEBUG
WARN_ON(strchr(name, ' ')); /* It confuses parsers */
#if FORCED_DEBUG
/*
* Enable redzoning and last user accounting, except for caches with
* large objects, if the increased size would increase the object size
* above the next power of two: caches with object sizes just above a
* power of two have a significant amount of internal fragmentation.
*/
	if (size < 4096 || fls(size - 1) == fls(size - 1 + REDZONE_ALIGN +
			2 * sizeof(unsigned long long)))
flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
if (!(flags & SLAB_DESTROY_BY_RCU))
flags |= SLAB_POISON;
#endif
if (flags & SLAB_DESTROY_BY_RCU)
BUG_ON(flags & SLAB_POISON);
#endif
	/*
	 * Always check the flags: a caller might be expecting debug support
	 * which isn't available.
	 */
BUG_ON(flags & ~CREATE_MASK);
/*
* Check that size is in terms of words. This is needed to avoid
* unaligned accesses for some archs when redzoning is used, and makes
* sure any on-slab bufctl's are also correctly aligned.
*/
if (size & (BYTES_PER_WORD - 1)) {
size += (BYTES_PER_WORD - 1);
size &= ~(BYTES_PER_WORD - 1);
}
/* calculate the final buffer alignment: */
/* 1) arch recommendation: can be overridden for debug */
if (flags & SLAB_HWCACHE_ALIGN) {
/*
* Default alignment: as specified by the arch code. Except if
* an object is really small, then squeeze multiple objects into
* one cacheline.
*/
ralign = cache_line_size();
while (size <= ralign / 2)
ralign /= 2;
} else {
ralign = BYTES_PER_WORD;
}
/*
* Redzoning and user store require word alignment or possibly larger.
* Note this will be overridden by architecture or caller mandated
* alignment if either is greater than BYTES_PER_WORD.
*/
if (flags & SLAB_STORE_USER)
ralign = BYTES_PER_WORD;
if (flags & SLAB_RED_ZONE) {
ralign = REDZONE_ALIGN;
/* If redzoning, ensure that the second redzone is suitably
* aligned, by adjusting the object size accordingly. */
size += REDZONE_ALIGN - 1;
size &= ~(REDZONE_ALIGN - 1);
}
/* 2) arch mandated alignment */
if (ralign < ARCH_SLAB_MINALIGN) {
ralign = ARCH_SLAB_MINALIGN;
}
/* 3) caller mandated alignment */
if (ralign < align) {
ralign = align;
}
/* disable debug if necessary */
if (ralign > __alignof__(unsigned long long))
flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
/*
* 4) Store it.
*/
align = ralign;
if (slab_is_available())
gfp = GFP_KERNEL;
else
gfp = GFP_NOWAIT;
/* Get cache's description obj. */
cachep = kmem_cache_zalloc(&cache_cache, gfp);
if (!cachep)
goto oops;
#if DEBUG
cachep->obj_size = size;
/*
* Both debugging options require word-alignment which is calculated
* into align above.
*/
if (flags & SLAB_RED_ZONE) {
/* add space for red zone words */
cachep->obj_offset += sizeof(unsigned long long);
size += 2 * sizeof(unsigned long long);
}
if (flags & SLAB_STORE_USER) {
/* user store requires one word storage behind the end of
* the real object. But if the second red zone needs to be
* aligned to 64 bits, we must allow that much space.
*/
if (flags & SLAB_RED_ZONE)
size += REDZONE_ALIGN;
else
size += BYTES_PER_WORD;
}
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
&& cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
cachep->obj_offset += PAGE_SIZE - size;
size = PAGE_SIZE;
}
#endif
#endif
/*
* Determine if the slab management is 'on' or 'off' slab.
* (bootstrapping cannot cope with offslab caches so don't do
* it too early on.)
*/
if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
/*
* Size is large, assume best to place the slab management obj
* off-slab (should allow better packing of objs).
*/
flags |= CFLGS_OFF_SLAB;
size = ALIGN(size, align);
left_over = calculate_slab_order(cachep, size, align, flags);
if (!cachep->num) {
printk(KERN_ERR
"kmem_cache_create: couldn't create cache %s.\n", name);
kmem_cache_free(&cache_cache, cachep);
cachep = NULL;
goto oops;
}
slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
+ sizeof(struct slab), align);
/*
* If the slab has been placed off-slab, and we have enough space then
* move it on-slab. This is at the expense of any extra colouring.
*/
if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
flags &= ~CFLGS_OFF_SLAB;
left_over -= slab_size;
}
if (flags & CFLGS_OFF_SLAB) {
/* really off slab. No need for manual alignment */
slab_size =
cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
#ifdef CONFIG_PAGE_POISONING
/* If we're going to use the generic kernel_map_pages()
* poisoning, then it's going to smash the contents of
* the redzone and userword anyhow, so switch them off.
*/
if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
#endif
}
cachep->colour_off = cache_line_size();
/* Offset must be a multiple of the alignment. */
if (cachep->colour_off < align)
cachep->colour_off = align;
cachep->colour = left_over / cachep->colour_off;
cachep->slab_size = slab_size;
cachep->flags = flags;
cachep->gfpflags = 0;
if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
cachep->gfpflags |= GFP_DMA;
cachep->buffer_size = size;
cachep->reciprocal_buffer_size = reciprocal_value(size);
if (flags & CFLGS_OFF_SLAB) {
cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
/*
* This is a possibility for one of the malloc_sizes caches.
* But since we go off slab only for object size greater than
* PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
* this should not happen at all.
* But leave a BUG_ON for some lucky dude.
*/
BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
}
cachep->ctor = ctor;
cachep->name = name;
if (setup_cpu_cache(cachep, gfp)) {
__kmem_cache_destroy(cachep);
cachep = NULL;
goto oops;
}
/* cache setup completed, link it into the list */
list_add(&cachep->next, &cache_chain);
oops:
if (!cachep && (flags & SLAB_PANIC))
panic("kmem_cache_create(): failed to create slab `%s'\n",
name);
if (slab_is_available()) {
mutex_unlock(&cache_chain_mutex);
put_online_cpus();
}
return cachep;
}
EXPORT_SYMBOL(kmem_cache_create);
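/*
 * Illustrative usage sketch for kmem_cache_create(): "struct my_widget",
 * its constructor and the module boilerplate are hypothetical names made
 * up for the example.  Guarded by "#if 0" so it is never compiled.
 */
#if 0
struct my_widget {
	struct list_head link;
	int state;
};

static struct kmem_cache *my_widget_cache;

/* Runs when the cache allocates new backing pages, not on every alloc. */
static void my_widget_ctor(void *obj)
{
	struct my_widget *w = obj;

	INIT_LIST_HEAD(&w->link);
	w->state = 0;
}

static int __init my_module_init(void)
{
	my_widget_cache = kmem_cache_create("my_widget",
					    sizeof(struct my_widget), 0,
					    SLAB_HWCACHE_ALIGN,
					    my_widget_ctor);
	if (!my_widget_cache)
		return -ENOMEM;
	return 0;
}
#endif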
#if DEBUG
static void check_irq_off(void)
{
BUG_ON(!irqs_disabled());
}
static void check_irq_on(void)
{
BUG_ON(irqs_disabled());
}
static void check_spinlock_acquired(struct kmem_cache *cachep)
{
#ifdef CONFIG_SMP
check_irq_off();
assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock);
#endif
}
static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
{
#ifdef CONFIG_SMP
check_irq_off();
assert_spin_locked(&cachep->nodelists[node]->list_lock);
#endif
}
#else
#define check_irq_off() do { } while(0)
#define check_irq_on() do { } while(0)
#define check_spinlock_acquired(x) do { } while(0)
#define check_spinlock_acquired_node(x, y) do { } while(0)
#endif
static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
struct array_cache *ac,
int force, int node);
static void do_drain(void *arg)
{
struct kmem_cache *cachep = arg;
struct array_cache *ac;
int node = numa_node_id();
check_irq_off();
ac = cpu_cache_get(cachep);
spin_lock(&cachep->nodelists[node]->list_lock);
free_block(cachep, ac->entry, ac->avail, node);
spin_unlock(&cachep->nodelists[node]->list_lock);
ac->avail = 0;
}
static void drain_cpu_caches(struct kmem_cache *cachep)
{
struct kmem_list3 *l3;
int node;
on_each_cpu(do_drain, cachep, 1);
check_irq_on();
for_each_online_node(node) {
l3 = cachep->nodelists[node];
if (l3 && l3->alien)
drain_alien_cache(cachep, l3->alien);
}
for_each_online_node(node) {
l3 = cachep->nodelists[node];
if (l3)
drain_array(cachep, l3, l3->shared, 1, node);
}
}
/*
* Remove slabs from the list of free slabs.
* Specify the number of slabs to drain in tofree.
*
* Returns the actual number of slabs released.
*/
static int drain_freelist(struct kmem_cache *cache,
struct kmem_list3 *l3, int tofree)
{
struct list_head *p;
int nr_freed;
struct slab *slabp;
nr_freed = 0;
while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
spin_lock_irq(&l3->list_lock);
p = l3->slabs_free.prev;
if (p == &l3->slabs_free) {
spin_unlock_irq(&l3->list_lock);
goto out;
}
slabp = list_entry(p, struct slab, list);
#if DEBUG
BUG_ON(slabp->inuse);
#endif
list_del(&slabp->list);
/*
* Safe to drop the lock. The slab is no longer linked
* to the cache.
*/
l3->free_objects -= cache->num;
spin_unlock_irq(&l3->list_lock);
slab_destroy(cache, slabp);
nr_freed++;
}
out:
return nr_freed;
}
/* Called with cache_chain_mutex held to protect against cpu hotplug */
static int __cache_shrink(struct kmem_cache *cachep)
{
int ret = 0, i = 0;
struct kmem_list3 *l3;
drain_cpu_caches(cachep);
check_irq_on();
for_each_online_node(i) {
l3 = cachep->nodelists[i];
if (!l3)
continue;
drain_freelist(cachep, l3, l3->free_objects);
ret += !list_empty(&l3->slabs_full) ||
!list_empty(&l3->slabs_partial);
}
return (ret ? 1 : 0);
}
/**
* kmem_cache_shrink - Shrink a cache.
* @cachep: The cache to shrink.
*
* Releases as many slabs as possible for a cache.
* To help debugging, a zero exit status indicates all slabs were released.
*/
int kmem_cache_shrink(struct kmem_cache *cachep)
{
int ret;
BUG_ON(!cachep || in_interrupt());
get_online_cpus();
mutex_lock(&cache_chain_mutex);
ret = __cache_shrink(cachep);
mutex_unlock(&cache_chain_mutex);
put_online_cpus();
return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);
/**
* kmem_cache_destroy - delete a cache
* @cachep: the cache to destroy
*
* Remove a &struct kmem_cache object from the slab cache.
*
* It is expected this function will be called by a module when it is
* unloaded. This will remove the cache completely, and avoid a duplicate
* cache being allocated each time a module is loaded and unloaded, if the
* module doesn't have persistent in-kernel storage across loads and unloads.
*
* The cache must be empty before calling this function.
*
 * The caller must guarantee that no one will allocate memory from the cache
* during the kmem_cache_destroy().
*/
void kmem_cache_destroy(struct kmem_cache *cachep)
{
BUG_ON(!cachep || in_interrupt());
/* Find the cache in the chain of caches. */
get_online_cpus();
mutex_lock(&cache_chain_mutex);
/*
* the chain is never empty, cache_cache is never destroyed
*/
list_del(&cachep->next);
if (__cache_shrink(cachep)) {
slab_error(cachep, "Can't free all objects");
list_add(&cachep->next, &cache_chain);
mutex_unlock(&cache_chain_mutex);
put_online_cpus();
return;
}
if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
rcu_barrier();
__kmem_cache_destroy(cachep);
mutex_unlock(&cache_chain_mutex);
put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);
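/*
 * Illustrative counterpart to the hypothetical creation sketch above
 * (never compiled): the owning module must free every object and destroy
 * its cache before it is unloaded.
 */
#if 0
static void __exit my_module_exit(void)
{
	/* Every my_widget must have been kmem_cache_free()d by now. */
	kmem_cache_destroy(my_widget_cache);
}
#endif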
/*
* Get the memory for a slab management obj.
* For a slab cache when the slab descriptor is off-slab, slab descriptors
* always come from malloc_sizes caches. The slab descriptor cannot
* come from the same cache which is getting created because,
* when we are searching for an appropriate cache for these
* descriptors in kmem_cache_create, we search through the malloc_sizes array.
* If we are creating a malloc_sizes cache here it would not be visible to
* kmem_find_general_cachep till the initialization is complete.
* Hence we cannot have slabp_cache same as the original cache.
*/
static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
int colour_off, gfp_t local_flags,
int nodeid)
{
struct slab *slabp;
if (OFF_SLAB(cachep)) {
/* Slab management obj is off-slab. */
		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
					      local_flags, nodeid);
		if (!slabp)
			return NULL;
		/*
		 * If the first object in the slab is leaked (it's allocated
		 * but no one has a reference to it), we want to make sure
		 * kmemleak does not treat the ->s_mem pointer as a reference
		 * to the object. Otherwise we will not report the leak.
		 */
		kmemleak_scan_area(slabp, offsetof(struct slab, list),
				   sizeof(struct list_head), local_flags);
} else {
slabp = objp + colour_off;
colour_off += cachep->slab_size;
}
slabp->inuse = 0;
slabp->colouroff = colour_off;
slabp->s_mem = objp + colour_off;
slabp->nodeid = nodeid;
slabp->free = 0;
return slabp;
}
static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
{
return (kmem_bufctl_t *) (slabp + 1);
}
static void cache_init_objs(struct kmem_cache *cachep,
struct slab *slabp)
{
int i;
for (i = 0; i < cachep->num; i++) {
void *objp = index_to_obj(cachep, slabp, i);
#if DEBUG
/* need to poison the objs? */
if (cachep->flags & SLAB_POISON)
poison_obj(cachep, objp, POISON_FREE);
if (cachep->flags & SLAB_STORE_USER)
*dbg_userword(cachep, objp) = NULL;
if (cachep->flags & SLAB_RED_ZONE) {
*dbg_redzone1(cachep, objp) = RED_INACTIVE;
*dbg_redzone2(cachep, objp) = RED_INACTIVE;
}
/*
* Constructors are not allowed to allocate memory from the same
* cache which they are a constructor for. Otherwise, deadlock.
* They must also be threaded.
*/
if (cachep->ctor && !(cachep->flags & SLAB_POISON))
cachep->ctor(objp + obj_offset(cachep));
if (cachep->flags & SLAB_RED_ZONE) {
if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
slab_error(cachep, "constructor overwrote the"
" end of an object");
if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
slab_error(cachep, "constructor overwrote the"
" start of an object");
}
if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
kernel_map_pages(virt_to_page(objp),
cachep->buffer_size / PAGE_SIZE, 0);
#else
if (cachep->ctor)
cachep->ctor(objp);
#endif
slab_bufctl(slabp)[i] = i + 1;
}
slab_bufctl(slabp)[i - 1] = BUFCTL_END;
}
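/*
 * Worked example of the freelist built above, for a slab with num == 4:
 * slab_bufctl(slabp) ends up as {1, 2, 3, BUFCTL_END} with slabp->free == 0
 * (set in alloc_slabmgmt), i.e. each entry names the index of the next
 * free object and BUFCTL_END terminates the chain.  slab_get_obj() and
 * slab_put_obj() below pop and push indices on this chain.
 */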
static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
{
if (CONFIG_ZONE_DMA_FLAG) {
if (flags & GFP_DMA)
BUG_ON(!(cachep->gfpflags & GFP_DMA));
else
BUG_ON(cachep->gfpflags & GFP_DMA);
}
}
static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
int nodeid)
{
void *objp = index_to_obj(cachep, slabp, slabp->free);
kmem_bufctl_t next;
slabp->inuse++;
next = slab_bufctl(slabp)[slabp->free];
#if DEBUG
slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
WARN_ON(slabp->nodeid != nodeid);
#endif
slabp->free = next;
return objp;
}
static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
void *objp, int nodeid)
{
unsigned int objnr = obj_to_index(cachep, slabp, objp);
#if DEBUG
/* Verify that the slab belongs to the intended node */
WARN_ON(slabp->nodeid != nodeid);
if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
printk(KERN_ERR "slab: double free detected in cache "
"'%s', objp %p\n", cachep->name, objp);
BUG();
}
#endif
slab_bufctl(slabp)[objnr] = slabp->free;
slabp->free = objnr;
slabp->inuse--;
}
/*
* Map pages beginning at addr to the given cache and slab. This is required
 * for the slab allocator to be able to look up the cache and slab of a
* virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging.
*/
static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
void *addr)
{
int nr_pages;
struct page *page;
page = virt_to_page(addr);
nr_pages = 1;
if (likely(!PageCompound(page)))
nr_pages <<= cache->gfporder;
do {
page_set_cache(page, cache);
page_set_slab(page, slab);
page++;
} while (--nr_pages);
}
/*
* Grow (by 1) the number of slabs within a cache. This is called by
* kmem_cache_alloc() when there are no active objs left in a cache.
*/
static int cache_grow(struct kmem_cache *cachep,
gfp_t flags, int nodeid, void *objp)
{
struct slab *slabp;
size_t offset;
gfp_t local_flags;
struct kmem_list3 *l3;
/*
* Be lazy and only check for valid flags here, keeping it out of the
* critical path in kmem_cache_alloc().
*/
BUG_ON(flags & GFP_SLAB_BUG_MASK);
local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
/* Take the l3 list lock to change the colour_next on this node */
check_irq_off();
l3 = cachep->nodelists[nodeid];
spin_lock(&l3->list_lock);
	/* Get colour for the slab, and calculate the next value. */
offset = l3->colour_next;
l3->colour_next++;
if (l3->colour_next >= cachep->colour)
l3->colour_next = 0;
spin_unlock(&l3->list_lock);
offset *= cachep->colour_off;
if (local_flags & __GFP_WAIT)
local_irq_enable();
/*
* The test for missing atomic flag is performed here, rather than
* the more obvious place, simply to reduce the critical path length
* in kmem_cache_alloc(). If a caller is seriously mis-behaving they
* will eventually be caught here (where it matters).
*/
kmem_flagcheck(cachep, flags);
/*
* Get mem for the objs. Attempt to allocate a physical page from
* 'nodeid'.
*/
if (!objp)
objp = kmem_getpages(cachep, local_flags, nodeid);
if (!objp)
goto failed;
/* Get slab management. */
slabp = alloc_slabmgmt(cachep, objp, offset,
local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
if (!slabp)
goto opps1;
slab_map_pages(cachep, slabp, objp);
cache_init_objs(cachep, slabp);
if (local_flags & __GFP_WAIT)
local_irq_disable();
check_irq_off();
spin_lock(&l3->list_lock);
/* Make slab active. */
list_add_tail(&slabp->list, &(l3->slabs_free));
STATS_INC_GROWN(cachep);
l3->free_objects += cachep->num;
spin_unlock(&l3->list_lock);
return 1;
opps1:
kmem_freepages(cachep, objp);
failed:
if (local_flags & __GFP_WAIT)
local_irq_disable();
return 0;
}
#if DEBUG
/*
* Perform extra freeing checks:
* - detect bad pointers.
* - POISON/RED_ZONE checking
*/
static void kfree_debugcheck(const void *objp)
{
if (!virt_addr_valid(objp)) {
printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
(unsigned long)objp);
BUG();
}
}
static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
{
unsigned long long redzone1, redzone2;
redzone1 = *dbg_redzone1(cache, obj);
redzone2 = *dbg_redzone2(cache, obj);
/*
* Redzone is ok.
*/
if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
return;
if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
slab_error(cache, "double free detected");
else
slab_error(cache, "memory outside object was overwritten");
printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
obj, redzone1, redzone2);
}
static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
void *caller)
{
struct page *page;
unsigned int objnr;
struct slab *slabp;
BUG_ON(virt_to_cache(objp) != cachep);
objp -= obj_offset(cachep);
kfree_debugcheck(objp);
page = virt_to_head_page(objp);
slabp = page_get_slab(page);
if (cachep->flags & SLAB_RED_ZONE) {
verify_redzone_free(cachep, objp);
*dbg_redzone1(cachep, objp) = RED_INACTIVE;
*dbg_redzone2(cachep, objp) = RED_INACTIVE;
}
if (cachep->flags & SLAB_STORE_USER)
*dbg_userword(cachep, objp) = caller;
objnr = obj_to_index(cachep, slabp, objp);
BUG_ON(objnr >= cachep->num);
BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
#ifdef CONFIG_DEBUG_SLAB_LEAK
slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
#endif
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
store_stackinfo(cachep, objp, (unsigned long)caller);
kernel_map_pages(virt_to_page(objp),
cachep->buffer_size / PAGE_SIZE, 0);
} else {
poison_obj(cachep, objp, POISON_FREE);
}
#else
poison_obj(cachep, objp, POISON_FREE);
#endif
}
return objp;
}
static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
{
kmem_bufctl_t i;
int entries = 0;
/* Check slab's freelist to see if this obj is there. */
for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
entries++;
if (entries > cachep->num || i >= cachep->num)
goto bad;
}
if (entries != cachep->num - slabp->inuse) {
bad:
printk(KERN_ERR "slab: Internal list corruption detected in "
"cache '%s'(%d), slabp %p(%d). Hexdump:\n",
cachep->name, cachep->num, slabp, slabp->inuse);
for (i = 0;
i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
i++) {
if (i % 16 == 0)
printk("\n%03x:", i);
printk(" %02x", ((unsigned char *)slabp)[i]);
}
printk("\n");
BUG();
}
}
#else
#define kfree_debugcheck(x) do { } while(0)
#define cache_free_debugcheck(x,objp,z) (objp)
#define check_slabp(x,y) do { } while(0)
#endif
static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
{
int batchcount;
struct kmem_list3 *l3;
struct array_cache *ac;
int node;
retry:
check_irq_off();
node = numa_node_id();
ac = cpu_cache_get(cachep);
batchcount = ac->batchcount;
if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
/*
* If there was little recent activity on this cache, then
* perform only a partial refill. Otherwise we could generate
* refill bouncing.
*/
batchcount = BATCHREFILL_LIMIT;
}
l3 = cachep->nodelists[node];
BUG_ON(ac->avail > 0 || !l3);
spin_lock(&l3->list_lock);
/* See if we can refill from the shared array */
if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
goto alloc_done;
while (batchcount > 0) {
struct list_head *entry;
struct slab *slabp;
		/* Get the slab the allocation will come from. */
entry = l3->slabs_partial.next;
if (entry == &l3->slabs_partial) {
l3->free_touched = 1;
entry = l3->slabs_free.next;
if (entry == &l3->slabs_free)
goto must_grow;
}
slabp = list_entry(entry, struct slab, list);
check_slabp(cachep, slabp);
check_spinlock_acquired(cachep);
/*
* The slab was either on partial or free list so
* there must be at least one object available for
* allocation.
*/
BUG_ON(slabp->inuse >= cachep->num);
while (slabp->inuse < cachep->num && batchcount--) {
STATS_INC_ALLOCED(cachep);
STATS_INC_ACTIVE(cachep);
STATS_SET_HIGH(cachep);
ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
node);
}
check_slabp(cachep, slabp);
/* move slabp to correct slabp list: */
list_del(&slabp->list);
if (slabp->free == BUFCTL_END)
list_add(&slabp->list, &l3->slabs_full);
else
list_add(&slabp->list, &l3->slabs_partial);
}
must_grow:
l3->free_objects -= ac->avail;
alloc_done:
spin_unlock(&l3->list_lock);
if (unlikely(!ac->avail)) {
int x;
x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
/* cache_grow can reenable interrupts, then ac could change. */
ac = cpu_cache_get(cachep);
if (!x && ac->avail == 0) /* no objects in sight? abort */
return NULL;
if (!ac->avail) /* objects refilled by interrupt? */
goto retry;
}
ac->touched = 1;
return ac->entry[--ac->avail];
}
static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
gfp_t flags)
{
might_sleep_if(flags & __GFP_WAIT);
#if DEBUG
kmem_flagcheck(cachep, flags);
#endif
}
#if DEBUG
static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
gfp_t flags, void *objp, void *caller)
{
if (!objp)
return objp;
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
kernel_map_pages(virt_to_page(objp),
cachep->buffer_size / PAGE_SIZE, 1);
else
check_poison_obj(cachep, objp);
#else
check_poison_obj(cachep, objp);
#endif
poison_obj(cachep, objp, POISON_INUSE);
}
if (cachep->flags & SLAB_STORE_USER)
*dbg_userword(cachep, objp) = caller;
if (cachep->flags & SLAB_RED_ZONE) {
if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
slab_error(cachep, "double free, or memory outside"
" object was overwritten");
printk(KERN_ERR
"%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
objp, *dbg_redzone1(cachep, objp),
*dbg_redzone2(cachep, objp));
}
*dbg_redzone1(cachep, objp) = RED_ACTIVE;
*dbg_redzone2(cachep, objp) = RED_ACTIVE;
}
#ifdef CONFIG_DEBUG_SLAB_LEAK
{
struct slab *slabp;
unsigned objnr;
slabp = page_get_slab(virt_to_head_page(objp));
objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
}
#endif
objp += obj_offset(cachep);
if (cachep->ctor && cachep->flags & SLAB_POISON)
cachep->ctor(objp);
#if ARCH_SLAB_MINALIGN
	if ((unsigned long)objp & (ARCH_SLAB_MINALIGN - 1)) {
printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
objp, ARCH_SLAB_MINALIGN);
}
#endif
return objp;
}
#else
#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
#endif
static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
{
if (cachep == &cache_cache)
return false;
return should_failslab(obj_size(cachep), flags);
}
static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
void *objp;
struct array_cache *ac;
check_irq_off();
ac = cpu_cache_get(cachep);
if (likely(ac->avail)) {
STATS_INC_ALLOCHIT(cachep);
ac->touched = 1;
objp = ac->entry[--ac->avail];
} else {
STATS_INC_ALLOCMISS(cachep);
objp = cache_alloc_refill(cachep, flags);
}
/*
* To avoid a false negative, if an object that is in one of the
* per-CPU caches is leaked, we need to make sure kmemleak doesn't
* treat the array pointers as a reference to the object.
*/
kmemleak_erase(&ac->entry[ac->avail]);
return objp;
}
#ifdef CONFIG_NUMA
/*
* Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
*
* If we are in_interrupt, then process context, including cpusets and
* mempolicy, may not apply and should not be used for allocation policy.
*/
static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
{
int nid_alloc, nid_here;
if (in_interrupt() || (flags & __GFP_THISNODE))
return NULL;
nid_alloc = nid_here = numa_node_id();
if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
nid_alloc = cpuset_mem_spread_node();
else if (current->mempolicy)
nid_alloc = slab_node(current->mempolicy);
if (nid_alloc != nid_here)
return ____cache_alloc_node(cachep, flags, nid_alloc);
return NULL;
}
/*
* Fallback function if there was no memory available and no objects on a
* certain node and fall back is permitted. First we scan all the
* available nodelists for available objects. If that fails then we
* perform an allocation without specifying a node. This allows the page
* allocator to do its reclaim / fallback magic. We then insert the
* slab into the proper nodelist and then allocate from it.
*/
static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
{
struct zonelist *zonelist;
gfp_t local_flags;
struct zoneref *z;
struct zone *zone;
enum zone_type high_zoneidx = gfp_zone(flags);
void *obj = NULL;
int nid;
if (flags & __GFP_THISNODE)
return NULL;
zonelist = node_zonelist(slab_node(current->mempolicy), flags);
local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
retry:
/*
* Look through allowed nodes for objects available
* from existing per node queues.
*/
for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
nid = zone_to_nid(zone);
if (cpuset_zone_allowed_hardwall(zone, flags) &&
cache->nodelists[nid] &&
cache->nodelists[nid]->free_objects) {
obj = ____cache_alloc_node(cache,
flags | GFP_THISNODE, nid);
if (obj)
break;
}
}
if (!obj) {
/*
* This allocation will be performed within the constraints
* of the current cpuset / memory policy requirements.
* We may trigger various forms of reclaim on the allowed
* set and go into memory reserves if necessary.
*/
if (local_flags & __GFP_WAIT)
local_irq_enable();
kmem_flagcheck(cache, flags);
obj = kmem_getpages(cache, local_flags, numa_node_id());
if (local_flags & __GFP_WAIT)
local_irq_disable();
if (obj) {
/*
* Insert into the appropriate per node queues
*/
nid = page_to_nid(virt_to_page(obj));
if (cache_grow(cache, flags, nid, obj)) {
obj = ____cache_alloc_node(cache,
flags | GFP_THISNODE, nid);
if (!obj)
/*
* Another processor may allocate the
* objects in the slab since we are
* not holding any locks.
*/
goto retry;
} else {
/* cache_grow already freed obj */
obj = NULL;
}
}
}
return obj;
}
/*
 * An interface to enable slab creation on nodeid
*/
static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
int nodeid)
{
struct list_head *entry;
struct slab *slabp;
struct kmem_list3 *l3;
void *obj;
int x;
l3 = cachep->nodelists[nodeid];
BUG_ON(!l3);
retry:
check_irq_off();
spin_lock(&l3->list_lock);
entry = l3->slabs_partial.next;
if (entry == &l3->slabs_partial) {
l3->free_touched = 1;
entry = l3->slabs_free.next;
if (entry == &l3->slabs_free)
goto must_grow;
}
slabp = list_entry(entry, struct slab, list);
check_spinlock_acquired_node(cachep, nodeid);
check_slabp(cachep, slabp);
STATS_INC_NODEALLOCS(cachep);
STATS_INC_ACTIVE(cachep);
STATS_SET_HIGH(cachep);
BUG_ON(slabp->inuse == cachep->num);
obj = slab_get_obj(cachep, slabp, nodeid);
check_slabp(cachep, slabp);
l3->free_objects--;
/* move slabp to correct slabp list: */
list_del(&slabp->list);
if (slabp->free == BUFCTL_END)
list_add(&slabp->list, &l3->slabs_full);
else
list_add(&slabp->list, &l3->slabs_partial);
spin_unlock(&l3->list_lock);
goto done;
must_grow:
spin_unlock(&l3->list_lock);
x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
if (x)
goto retry;
return fallback_alloc(cachep, flags);
done:
return obj;
}
/**
* kmem_cache_alloc_node - Allocate an object on the specified node
* @cachep: The cache to allocate from.
* @flags: See kmalloc().
* @nodeid: node number of the target node.
* @caller: return address of caller, used for debug information
*
* Identical to kmem_cache_alloc but it will allocate memory on the given
* node, which can improve the performance for cpu bound structures.
*
* Fallback to other node is possible if __GFP_THISNODE is not set.
*/
static __always_inline void *
__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
void *caller)
{
unsigned long save_flags;
void *ptr;
flags &= gfp_allowed_mask;
lockdep_trace_alloc(flags);
if (slab_should_failslab(cachep, flags))
return NULL;
cache_alloc_debugcheck_before(cachep, flags);
local_irq_save(save_flags);
if (unlikely(nodeid == -1))
nodeid = numa_node_id();
if (unlikely(!cachep->nodelists[nodeid])) {
/* Node not bootstrapped yet */
ptr = fallback_alloc(cachep, flags);
goto out;
}
if (nodeid == numa_node_id()) {
/*
* Use the locally cached objects if possible.
* However ____cache_alloc does not allow fallback
* to other nodes. It may fail while we still have
* objects on other nodes available.
*/
ptr = ____cache_alloc(cachep, flags);
if (ptr)
goto out;
}
/* ___cache_alloc_node can fall back to other nodes */
ptr = ____cache_alloc_node(cachep, flags, nodeid);
out:
local_irq_restore(save_flags);
ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags,
flags);
if (likely(ptr))
kmemcheck_slab_alloc(cachep, flags, ptr, obj_size(cachep));
if (unlikely((flags & __GFP_ZERO) && ptr))
memset(ptr, 0, obj_size(cachep));
return ptr;
}
static __always_inline void *
__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
{
void *objp;
if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
objp = alternate_node_alloc(cache, flags);
if (objp)
goto out;
}
objp = ____cache_alloc(cache, flags);
/*
* We may just have run out of memory on the local node.
* ____cache_alloc_node() knows how to locate memory on other nodes
*/
if (!objp)
objp = ____cache_alloc_node(cache, flags, numa_node_id());
out:
return objp;
}
#else
static __always_inline void *
__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
return ____cache_alloc(cachep, flags);
}
#endif /* CONFIG_NUMA */
static __always_inline void *
__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
{
unsigned long save_flags;
void *objp;
flags &= gfp_allowed_mask;
lockdep_trace_alloc(flags);
if (slab_should_failslab(cachep, flags))
return NULL;
cache_alloc_debugcheck_before(cachep, flags);
local_irq_save(save_flags);
objp = __do_cache_alloc(cachep, flags);
local_irq_restore(save_flags);
objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
flags);
prefetchw(objp);
if (likely(objp))
kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));
if (unlikely((flags & __GFP_ZERO) && objp))
memset(objp, 0, obj_size(cachep));
return objp;
}
/*
 * Caller needs to acquire the correct kmem_list3's list_lock
*/
static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
int node)
{
int i;
struct kmem_list3 *l3;
for (i = 0; i < nr_objects; i++) {
void *objp = objpp[i];
struct slab *slabp;
slabp = virt_to_slab(objp);
l3 = cachep->nodelists[node];
list_del(&slabp->list);
check_spinlock_acquired_node(cachep, node);
check_slabp(cachep, slabp);
slab_put_obj(cachep, slabp, objp, node);
STATS_DEC_ACTIVE(cachep);
l3->free_objects++;
check_slabp(cachep, slabp);
/* fixup slab chains */
if (slabp->inuse == 0) {
if (l3->free_objects > l3->free_limit) {
l3->free_objects -= cachep->num;
/* No need to drop any previously held
* lock here, even if we have a off-slab slab
* descriptor it is guaranteed to come from
* a different cache, refer to comments before
* alloc_slabmgmt.
*/
slab_destroy(cachep, slabp);
} else {
list_add(&slabp->list, &l3->slabs_free);
}
} else {
/* Unconditionally move a slab to the end of the
* partial list on free - maximum time for the
* other objects to be freed, too.
*/
list_add_tail(&slabp->list, &l3->slabs_partial);
}
}
}
static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
{
int batchcount;
struct kmem_list3 *l3;
int node = numa_node_id();
batchcount = ac->batchcount;
#if DEBUG
BUG_ON(!batchcount || batchcount > ac->avail);
#endif
check_irq_off();
l3 = cachep->nodelists[node];
spin_lock(&l3->list_lock);
if (l3->shared) {
struct array_cache *shared_array = l3->shared;
int max = shared_array->limit - shared_array->avail;
if (max) {
if (batchcount > max)
batchcount = max;
memcpy(&(shared_array->entry[shared_array->avail]),
ac->entry, sizeof(void *) * batchcount);
shared_array->avail += batchcount;
goto free_done;
}
}
free_block(cachep, ac->entry, batchcount, node);
free_done:
#if STATS
{
int i = 0;
struct list_head *p;
p = l3->slabs_free.next;
while (p != &(l3->slabs_free)) {
struct slab *slabp;
slabp = list_entry(p, struct slab, list);
BUG_ON(slabp->inuse);
i++;
p = p->next;
}
STATS_SET_FREEABLE(cachep, i);
}
#endif
spin_unlock(&l3->list_lock);
ac->avail -= batchcount;
memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
}
/*
 * Release an obj back to its cache. If the obj has a constructed state, it
 * must be in this state _before_ it is released. Called with interrupts
 * disabled.
*/
static inline void __cache_free(struct kmem_cache *cachep, void *objp)
{
struct array_cache *ac = cpu_cache_get(cachep);
check_irq_off();
kmemleak_free_recursive(objp, cachep->flags);
objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
kmemcheck_slab_free(cachep, objp, obj_size(cachep));
/*
* Skip calling cache_free_alien() when the platform is not numa.
	 * This will avoid cache misses that happen while accessing slabp
	 * (which is a per-page memory reference) to get nodeid. Instead use a
	 * global variable to skip the call, which is most likely to be
	 * present in the cache.
*/
if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
return;
if (likely(ac->avail < ac->limit)) {
STATS_INC_FREEHIT(cachep);
ac->entry[ac->avail++] = objp;
return;
} else {
STATS_INC_FREEMISS(cachep);
cache_flusharray(cachep, ac);
ac->entry[ac->avail++] = objp;
}
}
/**
* kmem_cache_alloc - Allocate an object
* @cachep: The cache to allocate from.
* @flags: See kmalloc().
*
* Allocate an object from this cache. The flags are only relevant
* if the cache has no available objects.
*/
void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
trace_kmem_cache_alloc(_RET_IP_, ret,
obj_size(cachep), cachep->buffer_size, flags);
return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc);
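/*
 * Illustrative allocation sketch (hypothetical helper reusing the
 * my_widget_cache example above; never compiled).
 */
#if 0
static struct my_widget *my_widget_get(void)
{
	/* GFP_KERNEL may sleep; use GFP_ATOMIC from atomic context. */
	return kmem_cache_alloc(my_widget_cache, GFP_KERNEL);
}
#endif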
#ifdef CONFIG_KMEMTRACE
void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
{
return __cache_alloc(cachep, flags, __builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_alloc_notrace);
#endif
/**
* kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
* @cachep: the cache we're checking against
* @ptr: pointer to validate
*
* This verifies that the untrusted pointer looks sane;
* it is _not_ a guarantee that the pointer is actually
* part of the slab cache in question, but it at least
* validates that the pointer can be dereferenced and
* looks half-way sane.
*
* Currently only used for dentry validation.
*/
int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
{
unsigned long addr = (unsigned long)ptr;
unsigned long min_addr = PAGE_OFFSET;
unsigned long align_mask = BYTES_PER_WORD - 1;
unsigned long size = cachep->buffer_size;
struct page *page;
if (unlikely(addr < min_addr))
goto out;
if (unlikely(addr > (unsigned long)high_memory - size))
goto out;
if (unlikely(addr & align_mask))
goto out;
if (unlikely(!kern_addr_valid(addr)))
goto out;
if (unlikely(!kern_addr_valid(addr + size - 1)))
goto out;
page = virt_to_page(ptr);
if (unlikely(!PageSlab(page)))
goto out;
if (unlikely(page_get_cache(page) != cachep))
goto out;
return 1;
out:
return 0;
}
#ifdef CONFIG_NUMA
void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
void *ret = __cache_alloc_node(cachep, flags, nodeid,
__builtin_return_address(0));
trace_kmem_cache_alloc_node(_RET_IP_, ret,
obj_size(cachep), cachep->buffer_size,
flags, nodeid);
return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
#ifdef CONFIG_KMEMTRACE
void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
gfp_t flags,
int nodeid)
{
return __cache_alloc_node(cachep, flags, nodeid,
__builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
#endif
static __always_inline void *
__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
{
struct kmem_cache *cachep;
void *ret;
cachep = kmem_find_general_cachep(size, flags);
if (unlikely(ZERO_OR_NULL_PTR(cachep)))
return cachep;
ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
trace_kmalloc_node((unsigned long) caller, ret,
size, cachep->buffer_size, flags, node);
return ret;
}
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
return __do_kmalloc_node(size, flags, node,
__builtin_return_address(0));
}
EXPORT_SYMBOL(__kmalloc_node);
void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
int node, unsigned long caller)
{
return __do_kmalloc_node(size, flags, node, (void *)caller);
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);
#else
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
return __do_kmalloc_node(size, flags, node, NULL);
}
EXPORT_SYMBOL(__kmalloc_node);
#endif /* CONFIG_DEBUG_SLAB */
#endif /* CONFIG_NUMA */
/**
* __do_kmalloc - allocate memory
* @size: how many bytes of memory are required.
* @flags: the type of memory to allocate (see kmalloc).
* @caller: function caller for debug tracking of the caller
*/
static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
void *caller)
{
struct kmem_cache *cachep;
void *ret;
	/* If you want to save a few bytes of .text space: replace
	 * __ with kmem_.
* Then kmalloc uses the uninlined functions instead of the inline
* functions.
*/
cachep = __find_general_cachep(size, flags);
if (unlikely(ZERO_OR_NULL_PTR(cachep)))
return cachep;
ret = __cache_alloc(cachep, flags, caller);
trace_kmalloc((unsigned long) caller, ret,
size, cachep->buffer_size, flags);
return ret;
}
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
void *__kmalloc(size_t size, gfp_t flags)
{
return __do_kmalloc(size, flags, __builtin_return_address(0));
}
EXPORT_SYMBOL(__kmalloc);
void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
{
return __do_kmalloc(size, flags, (void *)caller);
}
EXPORT_SYMBOL(__kmalloc_track_caller);
#else
void *__kmalloc(size_t size, gfp_t flags)
{
return __do_kmalloc(size, flags, NULL);
}
EXPORT_SYMBOL(__kmalloc);
#endif
/**
* kmem_cache_free - Deallocate an object
* @cachep: The cache the allocation was from.
* @objp: The previously allocated object.
*
* Free an object which was previously allocated from this
* cache.
*/
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
unsigned long flags;
local_irq_save(flags);
debug_check_no_locks_freed(objp, obj_size(cachep));
if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
debug_check_no_obj_freed(objp, obj_size(cachep));
__cache_free(cachep, objp);
local_irq_restore(flags);
trace_kmem_cache_free(_RET_IP_, objp);
}
EXPORT_SYMBOL(kmem_cache_free);
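/*
 * Illustrative free sketch matching the hypothetical my_widget_get()
 * above (never compiled).
 */
#if 0
static void my_widget_put(struct my_widget *w)
{
	kmem_cache_free(my_widget_cache, w);
}
#endif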
/**
* kfree - free previously allocated memory
* @objp: pointer returned by kmalloc.
*
* If @objp is NULL, no operation is performed.
*
* Don't free memory not originally allocated by kmalloc()
* or you will run into trouble.
*/
void kfree(const void *objp)
{
struct kmem_cache *c;
unsigned long flags;
trace_kfree(_RET_IP_, objp);
if (unlikely(ZERO_OR_NULL_PTR(objp)))
return;
local_irq_save(flags);
kfree_debugcheck(objp);
c = virt_to_cache(objp);
debug_check_no_locks_freed(objp, obj_size(c));
debug_check_no_obj_freed(objp, obj_size(c));
__cache_free(c, (void *)objp);
local_irq_restore(flags);
}
EXPORT_SYMBOL(kfree);
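/*
 * Illustrative kmalloc()/kfree() pairing (hypothetical helper, never
 * compiled): every kmalloc'd pointer is eventually handed back to kfree().
 */
#if 0
static char *my_dup_name(const char *name)
{
	size_t len = strlen(name) + 1;
	char *buf = kmalloc(len, GFP_KERNEL);

	if (buf)
		memcpy(buf, name, len);
	return buf;	/* caller kfree()s; kfree(NULL) is a no-op */
}
#endif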
unsigned int kmem_cache_size(struct kmem_cache *cachep)
{
return obj_size(cachep);
}
EXPORT_SYMBOL(kmem_cache_size);
const char *kmem_cache_name(struct kmem_cache *cachep)
{
return cachep->name;
}
EXPORT_SYMBOL_GPL(kmem_cache_name);
/*
* This initializes kmem_list3 or resizes various caches for all nodes.
*/
static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
{
int node;
struct kmem_list3 *l3;
struct array_cache *new_shared;
struct array_cache **new_alien = NULL;
for_each_online_node(node) {
if (use_alien_caches) {
new_alien = alloc_alien_cache(node, cachep->limit, gfp);
if (!new_alien)
goto fail;
}
new_shared = NULL;
if (cachep->shared) {
new_shared = alloc_arraycache(node,
cachep->shared*cachep->batchcount,
0xbaadf00d, gfp);
if (!new_shared) {
free_alien_cache(new_alien);
goto fail;
}
}
l3 = cachep->nodelists[node];
if (l3) {
struct array_cache *shared = l3->shared;
spin_lock_irq(&l3->list_lock);
if (shared)
free_block(cachep, shared->entry,
shared->avail, node);
l3->shared = new_shared;
if (!l3->alien) {
l3->alien = new_alien;
new_alien = NULL;
}
l3->free_limit = (1 + nr_cpus_node(node)) *
cachep->batchcount + cachep->num;
spin_unlock_irq(&l3->list_lock);
kfree(shared);
free_alien_cache(new_alien);
continue;
}
l3 = kmalloc_node(sizeof(struct kmem_list3), gfp, node);
if (!l3) {
free_alien_cache(new_alien);
kfree(new_shared);
goto fail;
}
kmem_list3_init(l3);
l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
((unsigned long)cachep) % REAPTIMEOUT_LIST3;
l3->shared = new_shared;
l3->alien = new_alien;
l3->free_limit = (1 + nr_cpus_node(node)) *
cachep->batchcount + cachep->num;
cachep->nodelists[node] = l3;
}
return 0;
fail:
if (!cachep->next.next) {
/* Cache is not active yet. Roll back what we did */
node--;
while (node >= 0) {
if (cachep->nodelists[node]) {
l3 = cachep->nodelists[node];
kfree(l3->shared);
free_alien_cache(l3->alien);
kfree(l3);
cachep->nodelists[node] = NULL;
}
node--;
}
}
return -ENOMEM;
}
struct ccupdate_struct {
struct kmem_cache *cachep;
struct array_cache *new[NR_CPUS];
};
static void do_ccupdate_local(void *info)
{
struct ccupdate_struct *new = info;
struct array_cache *old;
check_irq_off();
old = cpu_cache_get(new->cachep);
new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
new->new[smp_processor_id()] = old;
}
/* Always called with the cache_chain_mutex held */
static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
int batchcount, int shared, gfp_t gfp)
{
struct ccupdate_struct *new;
int i;
new = kzalloc(sizeof(*new), gfp);
if (!new)
return -ENOMEM;
for_each_online_cpu(i) {
new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
batchcount, gfp);
if (!new->new[i]) {
for (i--; i >= 0; i--)
kfree(new->new[i]);
kfree(new);
return -ENOMEM;
}
}
new->cachep = cachep;
on_each_cpu(do_ccupdate_local, (void *)new, 1);
check_irq_on();
cachep->batchcount = batchcount;
cachep->limit = limit;
cachep->shared = shared;
for_each_online_cpu(i) {
struct array_cache *ccold = new->new[i];
if (!ccold)
continue;
spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
kfree(ccold);
}
kfree(new);
return alloc_kmemlist(cachep, gfp);
}
/* Called with cache_chain_mutex held always */
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
{
int err;
int limit, shared;
/*
* The head array serves three purposes:
* - create a LIFO ordering, i.e. return objects that are cache-warm
* - reduce the number of spinlock operations.
* - reduce the number of linked list operations on the slab and
* bufctl chains: array operations are cheaper.
	 * The numbers are guessed; we should auto-tune as described by
* Bonwick.
*/
if (cachep->buffer_size > 131072)
limit = 1;
else if (cachep->buffer_size > PAGE_SIZE)
limit = 8;
else if (cachep->buffer_size > 1024)
limit = 24;
else if (cachep->buffer_size > 256)
limit = 54;
else
limit = 120;
/*
* CPU bound tasks (e.g. network routing) can exhibit cpu bound
* allocation behaviour: Most allocs on one cpu, most free operations
* on another cpu. For these cases, an efficient object passing between
* cpus is necessary. This is provided by a shared array. The array
* replaces Bonwick's magazine layer.
* On uniprocessor, it's functionally equivalent (but less efficient)
* to a larger limit. Thus disabled by default.
*/
shared = 0;
if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1)
shared = 8;
#if DEBUG
/*
	 * With debugging enabled, a large batchcount leads to excessively long
	 * periods with local interrupts disabled. Limit the batchcount.
*/
if (limit > 32)
limit = 32;
#endif
err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared, gfp);
if (err)
printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
cachep->name, -err);
return err;
}
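/*
 * Worked example of the tuning above: a 512-byte cache gets limit = 54
 * (512 > 256), batchcount = (54 + 1) / 2 = 27 and, on SMP with objects no
 * larger than a page, shared = 8 (illustrative numbers for a non-DEBUG
 * build; DEBUG caps limit at 32).
 */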
/*
 * Drain an array if it contains any elements, taking the l3 lock only if
 * necessary. Note that the l3 listlock also protects the array_cache
 * if drain_array() is used on the shared array.
 */
static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
			struct array_cache *ac, int force, int node)
{
int tofree;
if (!ac || !ac->avail)
return;
if (ac->touched && !force) {
ac->touched = 0;
} else {
spin_lock_irq(&l3->list_lock);
if (ac->avail) {
tofree = force ? ac->avail : (ac->limit + 4) / 5;
if (tofree > ac->avail)
tofree = (ac->avail + 1) / 2;
free_block(cachep, ac->entry, tofree, node);
ac->avail -= tofree;
memmove(ac->entry, &(ac->entry[tofree]),
sizeof(void *) * ac->avail);
}
spin_unlock_irq(&l3->list_lock);
}
}
/**
* cache_reap - Reclaim memory from caches.
* @w: work descriptor
*
* Called from workqueue/eventd every few seconds.
* Purpose:
* - clear the per-cpu caches for this CPU.
* - return freeable pages to the main free memory pool.
*
* If we cannot acquire the cache chain mutex then just give up - we'll try
* again on the next iteration.
*/
static void cache_reap(struct work_struct *w)
{
struct kmem_cache *searchp;
struct kmem_list3 *l3;
int node = numa_node_id();
struct delayed_work *work = to_delayed_work(w);
if (!mutex_trylock(&cache_chain_mutex))
/* Give up. Setup the next iteration. */
goto out;
list_for_each_entry(searchp, &cache_chain, next) {
check_irq_on();
/*
* We only take the l3 lock if absolutely necessary and we
* have established with reasonable certainty that
* we can do some work if the lock was obtained.
*/
l3 = searchp->nodelists[node];
reap_alien(searchp, l3);
drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
/*
* These are racy checks but it does not matter
* if we skip one check or scan twice.
*/
if (time_after(l3->next_reap, jiffies))
goto next;
l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
drain_array(searchp, l3, l3->shared, 0, node);
if (l3->free_touched)
l3->free_touched = 0;
else {
int freed;
freed = drain_freelist(searchp, l3, (l3->free_limit +
5 * searchp->num - 1) / (5 * searchp->num));
STATS_ADD_REAPED(searchp, freed);
}
next:
cond_resched();
}
check_irq_on();
mutex_unlock(&cache_chain_mutex);
next_reap_node();
out:
/* Set up the next iteration */
schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
}
#ifdef CONFIG_SLABINFO
static void print_slabinfo_header(struct seq_file *m)
{
/*
* Output format version, so at least we can change it
* without _too_ many complaints.
*/
#if STATS
seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
seq_puts(m, "slabinfo - version: 2.1\n");
#endif
seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
"<objperslab> <pagesperslab>");
seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#if STATS
seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
"<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
seq_putc(m, '\n');
}
static void *s_start(struct seq_file *m, loff_t *pos)
{
loff_t n = *pos;
mutex_lock(&cache_chain_mutex);
if (!n)
print_slabinfo_header(m);
return seq_list_start(&cache_chain, *pos);
}
static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
return seq_list_next(p, &cache_chain, pos);
}
static void s_stop(struct seq_file *m, void *p)
{
mutex_unlock(&cache_chain_mutex);
}
static int s_show(struct seq_file *m, void *p)
{
struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
struct slab *slabp;
unsigned long active_objs;
unsigned long num_objs;
unsigned long active_slabs = 0;
unsigned long num_slabs, free_objects = 0, shared_avail = 0;
const char *name;
char *error = NULL;
int node;
struct kmem_list3 *l3;
active_objs = 0;
num_slabs = 0;
for_each_online_node(node) {
l3 = cachep->nodelists[node];
if (!l3)
continue;
check_irq_on();
spin_lock_irq(&l3->list_lock);
list_for_each_entry(slabp, &l3->slabs_full, list) {
if (slabp->inuse != cachep->num && !error)
error = "slabs_full accounting error";
active_objs += cachep->num;
active_slabs++;
}
list_for_each_entry(slabp, &l3->slabs_partial, list) {
if (slabp->inuse == cachep->num && !error)
error = "slabs_partial inuse accounting error";
if (!slabp->inuse && !error)
error = "slabs_partial/inuse accounting error";
active_objs += slabp->inuse;
active_slabs++;
}
list_for_each_entry(slabp, &l3->slabs_free, list) {
if (slabp->inuse && !error)
error = "slabs_free/inuse accounting error";
num_slabs++;
}
free_objects += l3->free_objects;
if (l3->shared)
shared_avail += l3->shared->avail;
spin_unlock_irq(&l3->list_lock);
}
num_slabs += active_slabs;
num_objs = num_slabs * cachep->num;
if (num_objs - active_objs != free_objects && !error)
error = "free_objects accounting error";
name = cachep->name;
if (error)
printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
name, active_objs, num_objs, cachep->buffer_size,
cachep->num, (1 << cachep->gfporder));
seq_printf(m, " : tunables %4u %4u %4u",
cachep->limit, cachep->batchcount, cachep->shared);
seq_printf(m, " : slabdata %6lu %6lu %6lu",
active_slabs, num_slabs, shared_avail);
#if STATS
{ /* list3 stats */
unsigned long high = cachep->high_mark;
unsigned long allocs = cachep->num_allocations;
unsigned long grown = cachep->grown;
unsigned long reaped = cachep->reaped;
unsigned long errors = cachep->errors;
unsigned long max_freeable = cachep->max_freeable;
unsigned long node_allocs = cachep->node_allocs;
unsigned long node_frees = cachep->node_frees;
unsigned long overflows = cachep->node_overflow;
seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
%4lu %4lu %4lu %4lu %4lu", allocs, high, grown,
reaped, errors, max_freeable, node_allocs,
node_frees, overflows);
}
/* cpu stats */
{
unsigned long allochit = atomic_read(&cachep->allochit);
unsigned long allocmiss = atomic_read(&cachep->allocmiss);
unsigned long freehit = atomic_read(&cachep->freehit);
unsigned long freemiss = atomic_read(&cachep->freemiss);
seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
allochit, allocmiss, freehit, freemiss);
}
#endif
seq_putc(m, '\n');
return 0;
}
/*
* slabinfo_op - iterator that generates /proc/slabinfo
*
* Output layout:
* cache-name
* num-active-objs
* total-objs
* object size
* num-active-slabs
* total-slabs
* num-pages-per-slab
* + further values on SMP and with statistics enabled
*/
static const struct seq_operations slabinfo_op = {
.start = s_start,
.next = s_next,
.stop = s_stop,
.show = s_show,
};
#define MAX_SLABINFO_WRITE 128
/**
* slabinfo_write - Tuning for the slab allocator
* @file: unused
* @buffer: user buffer
* @count: data length
* @ppos: unused
*/
ssize_t slabinfo_write(struct file *file, const char __user * buffer,
size_t count, loff_t *ppos)
{
char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
int limit, batchcount, shared, res;
struct kmem_cache *cachep;
if (count > MAX_SLABINFO_WRITE)
return -EINVAL;
if (copy_from_user(&kbuf, buffer, count))
return -EFAULT;
kbuf[MAX_SLABINFO_WRITE] = '\0';
tmp = strchr(kbuf, ' ');
if (!tmp)
return -EINVAL;
*tmp = '\0';
tmp++;
if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
return -EINVAL;
/* Find the cache in the chain of caches. */
mutex_lock(&cache_chain_mutex);
res = -EINVAL;
list_for_each_entry(cachep, &cache_chain, next) {
if (!strcmp(cachep->name, kbuf)) {
if (limit < 1 || batchcount < 1 ||
batchcount > limit || shared < 0) {
res = 0;
} else {
res = do_tune_cpucache(cachep, limit,
batchcount, shared,
GFP_KERNEL);
}
break;
}
}
mutex_unlock(&cache_chain_mutex);
if (res >= 0)
res = count;
return res;
}
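/*
 * A write must look like "<cachename> <limit> <batchcount> <shared>",
 * e.g. (illustrative values only):
 *
 *	echo "dentry 120 60 8" > /proc/slabinfo
 */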
static int slabinfo_open(struct inode *inode, struct file *file)
{
return seq_open(file, &slabinfo_op);
}
static const struct file_operations proc_slabinfo_operations = {
.open = slabinfo_open,
.read = seq_read,
.write = slabinfo_write,
.llseek = seq_lseek,
.release = seq_release,
};
#ifdef CONFIG_DEBUG_SLAB_LEAK
static void *leaks_start(struct seq_file *m, loff_t *pos)
{
mutex_lock(&cache_chain_mutex);
return seq_list_start(&cache_chain, *pos);
}
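/*
 * The leak table passed around below is a flat array of unsigned
 * longs: n[0] is the capacity in entries, n[1] the number of entries
 * in use, and from n[2] on come (caller address, hit count) pairs
 * sorted by address. add_caller() binary-searches the pairs and
 * either bumps an existing count or inserts a new pair; it returns 0
 * once the table is full.
 */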
static inline int add_caller(unsigned long *n, unsigned long v)
{
unsigned long *p;
int l;
if (!v)
return 1;
l = n[1];
p = n + 2;
while (l) {
int i = l/2;
unsigned long *q = p + 2 * i;
if (*q == v) {
q[1]++;
return 1;
}
if (*q > v) {
l = i;
} else {
p = q + 2;
l -= i + 1;
}
}
if (++n[1] == n[0])
return 0;
memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
p[0] = v;
p[1] = 1;
return 1;
}
static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
{
void *p;
int i;
if (n[0] == n[1])
return;
for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
continue;
if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
return;
}
}
static void show_symbol(struct seq_file *m, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
unsigned long offset, size;
char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
if (modname[0])
seq_printf(m, " [%s]", modname);
return;
}
#endif
seq_printf(m, "%p", (void *)address);
}
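/*
 * If the table overflowed (n[0] == n[1]), leaks_show() drops the
 * chain mutex, allocates a table of twice the capacity and sets
 * m->count = m->size, which makes the seq_file core discard this
 * pass and call ->show() again for the same cache with the larger
 * buffer.
 */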
static int leaks_show(struct seq_file *m, void *p)
{
struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
struct slab *slabp;
struct kmem_list3 *l3;
const char *name;
unsigned long *n = m->private;
int node;
int i;
if (!(cachep->flags & SLAB_STORE_USER))
return 0;
if (!(cachep->flags & SLAB_RED_ZONE))
return 0;
/* OK, we can do it */
n[1] = 0;
for_each_online_node(node) {
l3 = cachep->nodelists[node];
if (!l3)
continue;
check_irq_on();
spin_lock_irq(&l3->list_lock);
list_for_each_entry(slabp, &l3->slabs_full, list)
handle_slab(n, cachep, slabp);
list_for_each_entry(slabp, &l3->slabs_partial, list)
handle_slab(n, cachep, slabp);
spin_unlock_irq(&l3->list_lock);
}
name = cachep->name;
if (n[0] == n[1]) {
/* Increase the buffer size */
mutex_unlock(&cache_chain_mutex);
m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
if (!m->private) {
/* Too bad, we are really out */
m->private = n;
mutex_lock(&cache_chain_mutex);
return -ENOMEM;
}
*(unsigned long *)m->private = n[0] * 2;
kfree(n);
mutex_lock(&cache_chain_mutex);
/* Now make sure this entry will be retried */
m->count = m->size;
return 0;
}
for (i = 0; i < n[1]; i++) {
seq_printf(m, "%s: %lu ", name, n[2*i+3]);
show_symbol(m, n[2*i+2]);
seq_putc(m, '\n');
}
return 0;
}
static const struct seq_operations slabstats_op = {
.start = leaks_start,
.next = s_next,
.stop = s_stop,
.show = leaks_show,
};
static int slabstats_open(struct inode *inode, struct file *file)
{
unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
int ret = -ENOMEM;
if (n) {
ret = seq_open(file, &slabstats_op);
if (!ret) {
struct seq_file *m = file->private_data;
*n = PAGE_SIZE / (2 * sizeof(unsigned long));
m->private = n;
n = NULL;
}
kfree(n);
}
return ret;
}
static const struct file_operations proc_slabstats_operations = {
.open = slabstats_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_private,
};
#endif
static int __init slab_proc_init(void)
{
proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
#ifdef CONFIG_DEBUG_SLAB_LEAK
proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
#endif
return 0;
}
module_init(slab_proc_init);
#endif
/**
* ksize - get the actual amount of memory allocated for a given object
* @objp: Pointer to the object
*
* kmalloc may internally round up allocations and return more memory
* than requested. ksize() can be used to determine the actual amount of
* memory allocated. The caller may use this additional memory, even though
* a smaller amount of memory was initially specified with the kmalloc call.
* The caller must guarantee that objp points to a valid object previously
* allocated with either kmalloc() or kmem_cache_alloc(). The object
* must not be freed during the duration of the call.
*/
size_t ksize(const void *objp)
{
BUG_ON(!objp);
if (unlikely(objp == ZERO_SIZE_PTR))
return 0;
return obj_size(virt_to_cache(objp));
}
EXPORT_SYMBOL(ksize);
|
gpl-2.0
|
weizhenwei/fastsocket
|
kernel/arch/mips/mti-malta/malta-init.c
|
507
|
9997
|
/*
* Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc.
* All rights reserved.
* Authors: Carsten Langgaard <carstenl@mips.com>
* Maciej W. Rozycki <macro@mips.com>
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* PROM library initialisation code.
*/
#include <linux/init.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <asm/bootinfo.h>
#include <asm/gt64120.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
#include <asm/gcmpregs.h>
#include <asm/mips-boards/prom.h>
#include <asm/mips-boards/generic.h>
#include <asm/mips-boards/bonito64.h>
#include <asm/mips-boards/msc01_pci.h>
#include <asm/mips-boards/malta.h>
int prom_argc;
int *_prom_argv, *_prom_envp;
/*
* YAMON (32-bit PROM) passes arguments and the environment as 32-bit
* pointers. This macro takes care of sign extension when running in
* 64-bit mode.
*/
#define prom_envp(index) ((char *)(long)_prom_envp[(index)])
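/*
 * The (long) cast sign-extends the 32-bit value: e.g. a YAMON pointer
 * of 0x9fc01234 (illustrative) becomes 0xffffffff9fc01234 on a 64-bit
 * kernel, which refers to the same physical address through the
 * compatibility segment.
 */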
int init_debug;
static int mips_revision_corid;
int mips_revision_sconid;
/* Bonito64 system controller register base. */
unsigned long _pcictrl_bonito;
unsigned long _pcictrl_bonito_pcicfg;
/* GT64120 system controller register base */
unsigned long _pcictrl_gt64120;
/* MIPS System controller register base */
unsigned long _pcictrl_msc;
char *prom_getenv(char *envname)
{
/*
* Return a pointer to the given environment variable.
* In 64-bit mode we use 64-bit pointers, but all pointers in the
* PROM structures are only 32-bit, so the prom_envp() macro above
* has to sign-extend them.
*/
int i, index=0;
i = strlen(envname);
while (prom_envp(index)) {
if (strncmp(envname, prom_envp(index), i) == 0) {
return(prom_envp(index+1));
}
index += 2;
}
return NULL;
}
static inline unsigned char str2hexnum(unsigned char c)
{
if (c >= '0' && c <= '9')
return c - '0';
if (c >= 'a' && c <= 'f')
return c - 'a' + 10;
return 0; /* not a hex digit */
}
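/*
 * Example: str2eaddr(ea, "00:03:ba:2c:1f:9d") (illustrative address)
 * fills ea[] with { 0x00, 0x03, 0xba, 0x2c, 0x1f, 0x9d }; '.' and ':'
 * are both accepted as separators.
 */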
static inline void str2eaddr(unsigned char *ea, unsigned char *str)
{
int i;
for (i = 0; i < 6; i++) {
unsigned char num;
if ((*str == '.') || (*str == ':'))
str++;
num = str2hexnum(*str++) << 4;
num |= (str2hexnum(*str++));
ea[i] = num;
}
}
int get_ethernet_addr(char *ethernet_addr)
{
char *ethaddr_str;
ethaddr_str = prom_getenv("ethaddr");
if (!ethaddr_str) {
printk("ethaddr not set in boot prom\n");
return -1;
}
str2eaddr(ethernet_addr, ethaddr_str);
if (init_debug > 1) {
int i;
printk("get_ethernet_addr: ");
for (i=0; i<5; i++)
printk("%02x:", (unsigned char)*(ethernet_addr+i));
printk("%02x\n", *(ethernet_addr+i));
}
return 0;
}
#ifdef CONFIG_SERIAL_8250_CONSOLE
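/*
 * YAMON's modetty0 variable is parsed as "<baud>,<parity>,<bits>,..."
 * with a trailing 'h...' field selecting hardware flow control
 * (e.g. "38400,n,8,hw" - illustrative). Unset or unusable fields
 * fall back to 38400n8r below.
 */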
static void __init console_config(void)
{
char console_string[40];
int baud = 0;
char parity = '\0', bits = '\0', flow = '\0';
char *s;
if ((strstr(prom_getcmdline(), "console=")) == NULL) {
s = prom_getenv("modetty0");
if (s) {
while (*s >= '0' && *s <= '9')
baud = baud*10 + *s++ - '0';
if (*s == ',') s++;
if (*s) parity = *s++;
if (*s == ',') s++;
if (*s) bits = *s++;
if (*s == ',') s++;
if (*s == 'h') flow = 'r';
}
if (baud == 0)
baud = 38400;
if (parity != 'n' && parity != 'o' && parity != 'e')
parity = 'n';
if (bits != '7' && bits != '8')
bits = '8';
if (flow == '\0')
flow = 'r';
sprintf(console_string, " console=ttyS0,%d%c%c%c", baud, parity, bits, flow);
strcat(prom_getcmdline(), console_string);
pr_info("Config serial console:%s\n", console_string);
}
}
#endif
static void __init mips_nmi_setup(void)
{
void *base;
extern char except_vec_nmi;
base = cpu_has_veic ?
(void *)(CAC_BASE + 0xa80) :
(void *)(CAC_BASE + 0x380);
memcpy(base, &except_vec_nmi, 0x80);
flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
}
static void __init mips_ejtag_setup(void)
{
void *base;
extern char except_vec_ejtag_debug;
base = cpu_has_veic ?
(void *)(CAC_BASE + 0xa00) :
(void *)(CAC_BASE + 0x300);
memcpy(base, &except_vec_ejtag_debug, 0x80);
flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
}
extern struct plat_smp_ops msmtc_smp_ops;
void __init prom_init(void)
{
int result;
prom_argc = fw_arg0;
_prom_argv = (int *) fw_arg1;
_prom_envp = (int *) fw_arg2;
mips_display_message("LINUX");
/*
* early setup of _pcictrl_bonito so that we can determine
* the system controller on a CORE_EMUL board
*/
_pcictrl_bonito = (unsigned long)ioremap(BONITO_REG_BASE, BONITO_REG_SIZE);
mips_revision_corid = MIPS_REVISION_CORID;
if (mips_revision_corid == MIPS_REVISION_CORID_CORE_EMUL) {
if (BONITO_PCIDID == 0x0001df53 ||
BONITO_PCIDID == 0x0003df53)
mips_revision_corid = MIPS_REVISION_CORID_CORE_EMUL_BON;
else
mips_revision_corid = MIPS_REVISION_CORID_CORE_EMUL_MSC;
}
mips_revision_sconid = MIPS_REVISION_SCONID;
if (mips_revision_sconid == MIPS_REVISION_SCON_OTHER) {
switch (mips_revision_corid) {
case MIPS_REVISION_CORID_QED_RM5261:
case MIPS_REVISION_CORID_CORE_LV:
case MIPS_REVISION_CORID_CORE_FPGA:
case MIPS_REVISION_CORID_CORE_FPGAR2:
mips_revision_sconid = MIPS_REVISION_SCON_GT64120;
break;
case MIPS_REVISION_CORID_CORE_EMUL_BON:
case MIPS_REVISION_CORID_BONITO64:
case MIPS_REVISION_CORID_CORE_20K:
mips_revision_sconid = MIPS_REVISION_SCON_BONITO;
break;
case MIPS_REVISION_CORID_CORE_MSC:
case MIPS_REVISION_CORID_CORE_FPGA2:
case MIPS_REVISION_CORID_CORE_24K:
/*
* SOCit/ROCit support is essentially identical
* but make an attempt to distinguish them
*/
mips_revision_sconid = MIPS_REVISION_SCON_SOCIT;
break;
case MIPS_REVISION_CORID_CORE_FPGA3:
case MIPS_REVISION_CORID_CORE_FPGA4:
case MIPS_REVISION_CORID_CORE_FPGA5:
case MIPS_REVISION_CORID_CORE_EMUL_MSC:
default:
/* See above */
mips_revision_sconid = MIPS_REVISION_SCON_ROCIT;
break;
}
}
switch (mips_revision_sconid) {
u32 start, map, mask, data;
case MIPS_REVISION_SCON_GT64120:
/*
* Set up the North bridge to do Master byte-lane swapping
* when running big-endian.
*/
_pcictrl_gt64120 = (unsigned long)ioremap(MIPS_GT_BASE, 0x2000);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
GT_WRITE(GT_PCI0_CMD_OFS, GT_PCI0_CMD_MBYTESWAP_BIT |
GT_PCI0_CMD_SBYTESWAP_BIT);
#else
GT_WRITE(GT_PCI0_CMD_OFS, 0);
#endif
/* Fix up PCI I/O mapping if necessary (for Atlas). */
start = GT_READ(GT_PCI0IOLD_OFS);
map = GT_READ(GT_PCI0IOREMAP_OFS);
if ((start & map) != 0) {
map &= ~start;
GT_WRITE(GT_PCI0IOREMAP_OFS, map);
}
set_io_port_base(MALTA_GT_PORT_BASE);
break;
case MIPS_REVISION_SCON_BONITO:
_pcictrl_bonito_pcicfg = (unsigned long)ioremap(BONITO_PCICFG_BASE, BONITO_PCICFG_SIZE);
/*
* Disable Bonito IOBC.
*/
BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG &
~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED |
BONITO_PCIMEMBASECFG_MEMBASE1_CACHED);
/*
* Set up the North bridge to do Master byte-lane swapping
* when running big-endian.
*/
#ifdef CONFIG_CPU_LITTLE_ENDIAN
BONITO_BONGENCFG = BONITO_BONGENCFG &
~(BONITO_BONGENCFG_MSTRBYTESWAP |
BONITO_BONGENCFG_BYTESWAP);
#else
BONITO_BONGENCFG = BONITO_BONGENCFG |
BONITO_BONGENCFG_MSTRBYTESWAP |
BONITO_BONGENCFG_BYTESWAP;
#endif
set_io_port_base(MALTA_BONITO_PORT_BASE);
break;
case MIPS_REVISION_SCON_SOCIT:
case MIPS_REVISION_SCON_ROCIT:
_pcictrl_msc = (unsigned long)ioremap(MIPS_MSC01_PCI_REG_BASE, 0x2000);
mips_pci_controller:
mb();
MSC_READ(MSC01_PCI_CFG, data);
MSC_WRITE(MSC01_PCI_CFG, data & ~MSC01_PCI_CFG_EN_BIT);
wmb();
/* Fix up lane swapping. */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
MSC_WRITE(MSC01_PCI_SWAP, MSC01_PCI_SWAP_NOSWAP);
#else
MSC_WRITE(MSC01_PCI_SWAP,
MSC01_PCI_SWAP_BYTESWAP << MSC01_PCI_SWAP_IO_SHF |
MSC01_PCI_SWAP_BYTESWAP << MSC01_PCI_SWAP_MEM_SHF |
MSC01_PCI_SWAP_BYTESWAP << MSC01_PCI_SWAP_BAR0_SHF);
#endif
/* Fix up target memory mapping. */
MSC_READ(MSC01_PCI_BAR0, mask);
MSC_WRITE(MSC01_PCI_P2SCMSKL, mask & MSC01_PCI_BAR0_SIZE_MSK);
/* Don't handle target retries indefinitely. */
if ((data & MSC01_PCI_CFG_MAXRTRY_MSK) ==
MSC01_PCI_CFG_MAXRTRY_MSK)
data = (data & ~(MSC01_PCI_CFG_MAXRTRY_MSK <<
MSC01_PCI_CFG_MAXRTRY_SHF)) |
((MSC01_PCI_CFG_MAXRTRY_MSK - 1) <<
MSC01_PCI_CFG_MAXRTRY_SHF);
wmb();
MSC_WRITE(MSC01_PCI_CFG, data);
mb();
set_io_port_base(MALTA_MSC_PORT_BASE);
break;
case MIPS_REVISION_SCON_SOCITSC:
case MIPS_REVISION_SCON_SOCITSCP:
_pcictrl_msc = (unsigned long)ioremap(MIPS_SOCITSC_PCI_REG_BASE, 0x2000);
goto mips_pci_controller;
default:
/* Unknown system controller */
mips_display_message("SC Error");
while (1); /* We die here... */
}
board_nmi_handler_setup = mips_nmi_setup;
board_ejtag_handler_setup = mips_ejtag_setup;
pr_info("\nLINUX started...\n");
prom_init_cmdline();
prom_meminit();
#ifdef CONFIG_SERIAL_8250_CONSOLE
console_config();
#endif
/* Early detection of CMP support */
result = gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ);
#ifdef CONFIG_MIPS_CMP
if (result)
register_smp_ops(&cmp_smp_ops);
#endif
#ifdef CONFIG_MIPS_MT_SMP
#ifdef CONFIG_MIPS_CMP
if (!result)
register_smp_ops(&vsmp_smp_ops);
#else
register_smp_ops(&vsmp_smp_ops);
#endif
#endif
#ifdef CONFIG_MIPS_MT_SMTC
register_smp_ops(&msmtc_smp_ops);
#endif
}
|
gpl-2.0
|
kerneldevs/RM-35-KERNEL-UNIVA
|
drivers/net/wireless/ath/ar9170/main.c
|
763
|
53493
|
/*
* Atheros AR9170 driver
*
* mac80211 interaction code
*
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
* Copyright 2009, Christian Lamparter <chunkeey@web.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, see
* http://www.gnu.org/licenses/.
*
* This file incorporates work covered by the following copyright and
* permission notice:
* Copyright (c) 2007-2008 Atheros Communications, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "ar9170.h"
#include "hw.h"
#include "cmd.h"
static int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
#define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
.bitrate = (_bitrate), \
.flags = (_flags), \
.hw_value = (_hw_rate) | (_txpidx) << 4, \
}
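/*
 * The low nibble of hw_value carries the hardware rate code and bits
 * 4-5 the tx-power index; ar9170_tx_prepare_phy() unpacks them again
 * via (hw_value & 0xF) and ((hw_value & 0x30) >> 4).
 */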
static struct ieee80211_rate __ar9170_ratetable[] = {
RATE(10, 0, 0, 0),
RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
RATE(60, 0xb, 0, 0),
RATE(90, 0xf, 0, 0),
RATE(120, 0xa, 0, 0),
RATE(180, 0xe, 0, 0),
RATE(240, 0x9, 0, 0),
RATE(360, 0xd, 1, 0),
RATE(480, 0x8, 2, 0),
RATE(540, 0xc, 3, 0),
};
#undef RATE
#define ar9170_g_ratetable (__ar9170_ratetable + 0)
#define ar9170_g_ratetable_size 12
#define ar9170_a_ratetable (__ar9170_ratetable + 4)
#define ar9170_a_ratetable_size 8
/*
* NB: The hw_value is used as an index into the ar9170_phy_freq_params
* array in phy.c so that we don't have to do frequency lookups!
*/
#define CHAN(_freq, _idx) { \
.center_freq = (_freq), \
.hw_value = (_idx), \
.max_power = 18, /* XXX */ \
}
static struct ieee80211_channel ar9170_2ghz_chantable[] = {
CHAN(2412, 0),
CHAN(2417, 1),
CHAN(2422, 2),
CHAN(2427, 3),
CHAN(2432, 4),
CHAN(2437, 5),
CHAN(2442, 6),
CHAN(2447, 7),
CHAN(2452, 8),
CHAN(2457, 9),
CHAN(2462, 10),
CHAN(2467, 11),
CHAN(2472, 12),
CHAN(2484, 13),
};
static struct ieee80211_channel ar9170_5ghz_chantable[] = {
CHAN(4920, 14),
CHAN(4940, 15),
CHAN(4960, 16),
CHAN(4980, 17),
CHAN(5040, 18),
CHAN(5060, 19),
CHAN(5080, 20),
CHAN(5180, 21),
CHAN(5200, 22),
CHAN(5220, 23),
CHAN(5240, 24),
CHAN(5260, 25),
CHAN(5280, 26),
CHAN(5300, 27),
CHAN(5320, 28),
CHAN(5500, 29),
CHAN(5520, 30),
CHAN(5540, 31),
CHAN(5560, 32),
CHAN(5580, 33),
CHAN(5600, 34),
CHAN(5620, 35),
CHAN(5640, 36),
CHAN(5660, 37),
CHAN(5680, 38),
CHAN(5700, 39),
CHAN(5745, 40),
CHAN(5765, 41),
CHAN(5785, 42),
CHAN(5805, 43),
CHAN(5825, 44),
CHAN(5170, 45),
CHAN(5190, 46),
CHAN(5210, 47),
CHAN(5230, 48),
};
#undef CHAN
#define AR9170_HT_CAP \
{ \
.ht_supported = true, \
.cap = IEEE80211_HT_CAP_MAX_AMSDU | \
IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
IEEE80211_HT_CAP_SGI_40 | \
IEEE80211_HT_CAP_GRN_FLD | \
IEEE80211_HT_CAP_DSSSCCK40 | \
IEEE80211_HT_CAP_SM_PS, \
.ampdu_factor = 3, \
.ampdu_density = 6, \
.mcs = { \
.rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, }, \
.rx_highest = cpu_to_le16(300), \
.tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
}, \
}
static struct ieee80211_supported_band ar9170_band_2GHz = {
.channels = ar9170_2ghz_chantable,
.n_channels = ARRAY_SIZE(ar9170_2ghz_chantable),
.bitrates = ar9170_g_ratetable,
.n_bitrates = ar9170_g_ratetable_size,
.ht_cap = AR9170_HT_CAP,
};
static struct ieee80211_supported_band ar9170_band_5GHz = {
.channels = ar9170_5ghz_chantable,
.n_channels = ARRAY_SIZE(ar9170_5ghz_chantable),
.bitrates = ar9170_a_ratetable,
.n_bitrates = ar9170_a_ratetable_size,
.ht_cap = AR9170_HT_CAP,
};
static void ar9170_tx(struct ar9170 *ar);
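/*
 * The IEEE 802.11 sequence control field carries the fragment number
 * in bits 0-3 and the sequence number in bits 4-15, so shifting right
 * by four leaves just the sequence number.
 */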
static inline u16 ar9170_get_seq_h(struct ieee80211_hdr *hdr)
{
return le16_to_cpu(hdr->seq_ctrl) >> 4;
}
static inline u16 ar9170_get_seq(struct sk_buff *skb)
{
struct ar9170_tx_control *txc = (void *) skb->data;
return ar9170_get_seq_h((void *) txc->frame_data);
}
#ifdef AR9170_QUEUE_DEBUG
static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
{
struct ar9170_tx_control *txc = (void *) skb->data;
struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
struct ieee80211_hdr *hdr = (void *) txc->frame_data;
printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] s:%d "
"mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n",
wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb),
ieee80211_get_DA(hdr), ar9170_get_seq_h(hdr),
le16_to_cpu(txc->mac_control), le32_to_cpu(txc->phy_control),
jiffies_to_msecs(arinfo->timeout - jiffies));
}
static void __ar9170_dump_txqueue(struct ar9170 *ar,
struct sk_buff_head *queue)
{
struct sk_buff *skb;
int i = 0;
printk(KERN_DEBUG "---[ cut here ]---\n");
printk(KERN_DEBUG "%s: %d entries in queue.\n",
wiphy_name(ar->hw->wiphy), skb_queue_len(queue));
skb_queue_walk(queue, skb) {
printk(KERN_DEBUG "index:%d =>\n", i++);
ar9170_print_txheader(ar, skb);
}
if (i != skb_queue_len(queue))
printk(KERN_DEBUG "WARNING: queue frame counter "
"mismatch %d != %d\n", skb_queue_len(queue), i);
printk(KERN_DEBUG "---[ end ]---\n");
}
#endif /* AR9170_QUEUE_DEBUG */
#ifdef AR9170_QUEUE_DEBUG
static void ar9170_dump_txqueue(struct ar9170 *ar,
struct sk_buff_head *queue)
{
unsigned long flags;
spin_lock_irqsave(&queue->lock, flags);
__ar9170_dump_txqueue(ar, queue);
spin_unlock_irqrestore(&queue->lock, flags);
}
#endif /* AR9170_QUEUE_DEBUG */
#ifdef AR9170_QUEUE_STOP_DEBUG
static void __ar9170_dump_txstats(struct ar9170 *ar)
{
int i;
printk(KERN_DEBUG "%s: QoS queue stats\n",
wiphy_name(ar->hw->wiphy));
for (i = 0; i < __AR9170_NUM_TXQ; i++)
printk(KERN_DEBUG "%s: queue:%d limit:%d len:%d waitack:%d "
" stopped:%d\n", wiphy_name(ar->hw->wiphy), i,
ar->tx_stats[i].limit, ar->tx_stats[i].len,
skb_queue_len(&ar->tx_status[i]),
ieee80211_queue_stopped(ar->hw, i));
}
#endif /* AR9170_QUEUE_STOP_DEBUG */
/* caller must guarantee exclusive access for _bin_ queue. */
static void ar9170_recycle_expired(struct ar9170 *ar,
struct sk_buff_head *queue,
struct sk_buff_head *bin)
{
struct sk_buff *skb, *old = NULL;
unsigned long flags;
spin_lock_irqsave(&queue->lock, flags);
while ((skb = skb_peek(queue))) {
struct ieee80211_tx_info *txinfo;
struct ar9170_tx_info *arinfo;
txinfo = IEEE80211_SKB_CB(skb);
arinfo = (void *) txinfo->rate_driver_data;
if (time_is_before_jiffies(arinfo->timeout)) {
#ifdef AR9170_QUEUE_DEBUG
printk(KERN_DEBUG "%s: [%ld > %ld] frame expired => "
"recycle\n", wiphy_name(ar->hw->wiphy),
jiffies, arinfo->timeout);
ar9170_print_txheader(ar, skb);
#endif /* AR9170_QUEUE_DEBUG */
__skb_unlink(skb, queue);
__skb_queue_tail(bin, skb);
} else {
break;
}
if (unlikely(old == skb)) {
/* bail out - queue is shot. */
WARN_ON(1);
break;
}
old = skb;
}
spin_unlock_irqrestore(&queue->lock, flags);
}
static void ar9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
u16 tx_status)
{
struct ieee80211_tx_info *txinfo;
unsigned int retries = 0;
txinfo = IEEE80211_SKB_CB(skb);
ieee80211_tx_info_clear_status(txinfo);
switch (tx_status) {
case AR9170_TX_STATUS_RETRY:
retries = 2;
/* fall through */
case AR9170_TX_STATUS_COMPLETE:
txinfo->flags |= IEEE80211_TX_STAT_ACK;
break;
case AR9170_TX_STATUS_FAILED:
retries = ar->hw->conf.long_frame_max_tx_count;
break;
default:
printk(KERN_ERR "%s: invalid tx_status response (%x).\n",
wiphy_name(ar->hw->wiphy), tx_status);
break;
}
txinfo->status.rates[0].count = retries + 1;
skb_pull(skb, sizeof(struct ar9170_tx_control));
ieee80211_tx_status_irqsafe(ar->hw, skb);
}
void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ar9170_tx_info *arinfo = (void *) info->rate_driver_data;
unsigned int queue = skb_get_queue_mapping(skb);
unsigned long flags;
spin_lock_irqsave(&ar->tx_stats_lock, flags);
ar->tx_stats[queue].len--;
if (ar->tx_stats[queue].len < AR9170_NUM_TX_LIMIT_SOFT) {
#ifdef AR9170_QUEUE_STOP_DEBUG
printk(KERN_DEBUG "%s: wake queue %d\n",
wiphy_name(ar->hw->wiphy), queue);
__ar9170_dump_txstats(ar);
#endif /* AR9170_QUEUE_STOP_DEBUG */
ieee80211_wake_queue(ar->hw, queue);
}
spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED);
} else {
arinfo->timeout = jiffies +
msecs_to_jiffies(AR9170_TX_TIMEOUT);
skb_queue_tail(&ar->tx_status[queue], skb);
}
if (!ar->tx_stats[queue].len &&
!skb_queue_empty(&ar->tx_pending[queue])) {
ar9170_tx(ar);
}
}
static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar,
const u8 *mac,
struct sk_buff_head *queue,
const u32 rate)
{
unsigned long flags;
struct sk_buff *skb;
/*
* Unfortunately, the firmware does not tell us to which (queued)
* frame this transmission status report belongs.
*
* So we have to make risky guesses - with the scarce information
* the firmware provided (-> destination MAC, and phy_control) -
* and hope that we picked the right one...
*/
spin_lock_irqsave(&queue->lock, flags);
skb_queue_walk(queue, skb) {
struct ar9170_tx_control *txc = (void *) skb->data;
struct ieee80211_hdr *hdr = (void *) txc->frame_data;
u32 r;
if (mac && compare_ether_addr(ieee80211_get_DA(hdr), mac)) {
#ifdef AR9170_QUEUE_DEBUG
printk(KERN_DEBUG "%s: skip frame => DA %pM != %pM\n",
wiphy_name(ar->hw->wiphy), mac,
ieee80211_get_DA(hdr));
ar9170_print_txheader(ar, skb);
#endif /* AR9170_QUEUE_DEBUG */
continue;
}
r = (le32_to_cpu(txc->phy_control) & AR9170_TX_PHY_MCS_MASK) >>
AR9170_TX_PHY_MCS_SHIFT;
if ((rate != AR9170_TX_INVALID_RATE) && (r != rate)) {
#ifdef AR9170_QUEUE_DEBUG
printk(KERN_DEBUG "%s: skip frame => rate %d != %d\n",
wiphy_name(ar->hw->wiphy), rate, r);
ar9170_print_txheader(ar, skb);
#endif /* AR9170_QUEUE_DEBUG */
continue;
}
__skb_unlink(skb, queue);
spin_unlock_irqrestore(&queue->lock, flags);
return skb;
}
#ifdef AR9170_QUEUE_DEBUG
printk(KERN_ERR "%s: ESS:[%pM] does not have any "
"outstanding frames in queue.\n",
wiphy_name(ar->hw->wiphy), mac);
__ar9170_dump_txqueue(ar, queue);
#endif /* AR9170_QUEUE_DEBUG */
spin_unlock_irqrestore(&queue->lock, flags);
return NULL;
}
/*
* This worker tries to keep and maintain the tx_status queues,
* so we can guarantee that incoming tx_status reports are
* actually for a pending frame.
*/
static void ar9170_tx_janitor(struct work_struct *work)
{
struct ar9170 *ar = container_of(work, struct ar9170,
tx_janitor.work);
struct sk_buff_head waste;
unsigned int i;
bool resched = false;
if (unlikely(!IS_STARTED(ar)))
return ;
skb_queue_head_init(&waste);
for (i = 0; i < __AR9170_NUM_TXQ; i++) {
#ifdef AR9170_QUEUE_DEBUG
printk(KERN_DEBUG "%s: garbage collector scans queue:%d\n",
wiphy_name(ar->hw->wiphy), i);
ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
ar9170_dump_txqueue(ar, &ar->tx_status[i]);
#endif /* AR9170_QUEUE_DEBUG */
ar9170_recycle_expired(ar, &ar->tx_status[i], &waste);
ar9170_recycle_expired(ar, &ar->tx_pending[i], &waste);
skb_queue_purge(&waste);
if (!skb_queue_empty(&ar->tx_status[i]) ||
!skb_queue_empty(&ar->tx_pending[i]))
resched = true;
}
if (!resched)
return;
ieee80211_queue_delayed_work(ar->hw,
&ar->tx_janitor,
msecs_to_jiffies(AR9170_JANITOR_DELAY));
}
void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
{
struct ar9170_cmd_response *cmd = (void *) buf;
if ((cmd->type & 0xc0) != 0xc0) {
ar->callback_cmd(ar, len, buf);
return;
}
/* hardware event handlers */
switch (cmd->type) {
case 0xc1: {
/*
* TX status notification:
* bytes: 0c c1 XX YY M1 M2 M3 M4 M5 M6 R4 R3 R2 R1 S2 S1
*
* XX always 81
* YY always 00
* M1-M6 is the MAC address
* R1-R4 is the transmit rate
* S1-S2 is the transmit status
*/
struct sk_buff *skb;
u32 phy = le32_to_cpu(cmd->tx_status.rate);
u32 q = (phy & AR9170_TX_PHY_QOS_MASK) >>
AR9170_TX_PHY_QOS_SHIFT;
#ifdef AR9170_QUEUE_DEBUG
printk(KERN_DEBUG "%s: recv tx_status for %pM, p:%08x, q:%d\n",
wiphy_name(ar->hw->wiphy), cmd->tx_status.dst, phy, q);
#endif /* AR9170_QUEUE_DEBUG */
skb = ar9170_get_queued_skb(ar, cmd->tx_status.dst,
&ar->tx_status[q],
AR9170_TX_INVALID_RATE);
if (unlikely(!skb))
return ;
ar9170_tx_status(ar, skb, le16_to_cpu(cmd->tx_status.status));
break;
}
case 0xc0:
/*
* pre-TBTT event
*/
if (ar->vif && ar->vif->type == NL80211_IFTYPE_AP)
ieee80211_queue_work(ar->hw, &ar->beacon_work);
break;
case 0xc2:
/*
* (IBSS) beacon send notification
* bytes: 04 c2 XX YY B4 B3 B2 B1
*
* XX always 80
* YY always 00
* B1-B4 "should" be the number of send out beacons.
*/
break;
case 0xc3:
/* End of Atim Window */
break;
case 0xc4:
/* BlockACK bitmap */
break;
case 0xc5:
/* BlockACK events */
break;
case 0xc6:
/* Watchdog Interrupt */
break;
case 0xc9:
/* retransmission issue / SIFS/EIFS collision ?! */
break;
/* firmware debug */
case 0xca:
printk(KERN_DEBUG "ar9170 FW: %.*s\n", len - 4,
(char *)buf + 4);
break;
case 0xcb:
len -= 4;
switch (len) {
case 1:
printk(KERN_DEBUG "ar9170 FW: u8: %#.2x\n",
*((char *)buf + 4));
break;
case 2:
printk(KERN_DEBUG "ar9170 FW: u8: %#.4x\n",
le16_to_cpup((__le16 *)((char *)buf + 4)));
break;
case 4:
printk(KERN_DEBUG "ar9170 FW: u8: %#.8x\n",
le32_to_cpup((__le32 *)((char *)buf + 4)));
break;
case 8:
printk(KERN_DEBUG "ar9170 FW: u8: %#.16lx\n",
(unsigned long)le64_to_cpup(
(__le64 *)((char *)buf + 4)));
break;
}
break;
case 0xcc:
print_hex_dump_bytes("ar9170 FW:", DUMP_PREFIX_NONE,
(char *)buf + 4, len - 4);
break;
default:
printk(KERN_INFO "received unhandled event %x\n", cmd->type);
print_hex_dump_bytes("dump:", DUMP_PREFIX_NONE, buf, len);
break;
}
}
static void ar9170_rx_reset_rx_mpdu(struct ar9170 *ar)
{
memset(&ar->rx_mpdu.plcp, 0, sizeof(struct ar9170_rx_head));
ar->rx_mpdu.has_plcp = false;
}
int ar9170_nag_limiter(struct ar9170 *ar)
{
bool print_message;
/*
* we expect all sorts of errors in promiscuous mode.
* don't bother with it, it's OK!
*/
if (ar->sniffer_enabled)
return false;
/*
* only go for frequent errors! The hardware tends to
* do some stupid thing once in a while under load, in
* noisy environments or just for fun!
*/
if (time_before(jiffies, ar->bad_hw_nagger) && net_ratelimit())
print_message = true;
else
print_message = false;
/* reset threshold for "once in a while" */
ar->bad_hw_nagger = jiffies + HZ / 4;
return print_message;
}
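/*
 * The rate decoding below follows the over-the-air PLCP header: for
 * CCK, plcp[0] is the SIGNAL field in units of 100 kbit/s (0x0a = 1M,
 * 0x14 = 2M, 0x37 = 5.5M, 0x6e = 11M); for OFDM, the low nibble of
 * plcp[0] is the RATE field, which maps onto the legacy entries of
 * __ar9170_ratetable above.
 */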
static int ar9170_rx_mac_status(struct ar9170 *ar,
struct ar9170_rx_head *head,
struct ar9170_rx_macstatus *mac,
struct ieee80211_rx_status *status)
{
u8 error, decrypt;
BUILD_BUG_ON(sizeof(struct ar9170_rx_head) != 12);
BUILD_BUG_ON(sizeof(struct ar9170_rx_macstatus) != 4);
error = mac->error;
if (error & AR9170_RX_ERROR_MMIC) {
status->flag |= RX_FLAG_MMIC_ERROR;
error &= ~AR9170_RX_ERROR_MMIC;
}
if (error & AR9170_RX_ERROR_PLCP) {
status->flag |= RX_FLAG_FAILED_PLCP_CRC;
error &= ~AR9170_RX_ERROR_PLCP;
if (!(ar->filter_state & FIF_PLCPFAIL))
return -EINVAL;
}
if (error & AR9170_RX_ERROR_FCS) {
status->flag |= RX_FLAG_FAILED_FCS_CRC;
error &= ~AR9170_RX_ERROR_FCS;
if (!(ar->filter_state & FIF_FCSFAIL))
return -EINVAL;
}
decrypt = ar9170_get_decrypt_type(mac);
if (!(decrypt & AR9170_RX_ENC_SOFTWARE) &&
decrypt != AR9170_ENC_ALG_NONE)
status->flag |= RX_FLAG_DECRYPTED;
/* ignore wrong RA errors */
error &= ~AR9170_RX_ERROR_WRONG_RA;
if (error & AR9170_RX_ERROR_DECRYPT) {
error &= ~AR9170_RX_ERROR_DECRYPT;
/*
* Rx decryption is done in place,
* the original data is lost anyway.
*/
return -EINVAL;
}
/* drop any other error frames */
if (unlikely(error)) {
/* TODO: update netdevice's RX dropped/errors statistics */
if (ar9170_nag_limiter(ar))
printk(KERN_DEBUG "%s: received frame with "
"suspicious error code (%#x).\n",
wiphy_name(ar->hw->wiphy), error);
return -EINVAL;
}
status->band = ar->channel->band;
status->freq = ar->channel->center_freq;
switch (mac->status & AR9170_RX_STATUS_MODULATION_MASK) {
case AR9170_RX_STATUS_MODULATION_CCK:
if (mac->status & AR9170_RX_STATUS_SHORT_PREAMBLE)
status->flag |= RX_FLAG_SHORTPRE;
switch (head->plcp[0]) {
case 0x0a:
status->rate_idx = 0;
break;
case 0x14:
status->rate_idx = 1;
break;
case 0x37:
status->rate_idx = 2;
break;
case 0x6e:
status->rate_idx = 3;
break;
default:
if (ar9170_nag_limiter(ar))
printk(KERN_ERR "%s: invalid plcp cck rate "
"(%x).\n", wiphy_name(ar->hw->wiphy),
head->plcp[0]);
return -EINVAL;
}
break;
case AR9170_RX_STATUS_MODULATION_DUPOFDM:
case AR9170_RX_STATUS_MODULATION_OFDM:
switch (head->plcp[0] & 0xf) {
case 0xb:
status->rate_idx = 0;
break;
case 0xf:
status->rate_idx = 1;
break;
case 0xa:
status->rate_idx = 2;
break;
case 0xe:
status->rate_idx = 3;
break;
case 0x9:
status->rate_idx = 4;
break;
case 0xd:
status->rate_idx = 5;
break;
case 0x8:
status->rate_idx = 6;
break;
case 0xc:
status->rate_idx = 7;
break;
default:
if (ar9170_nag_limiter(ar))
printk(KERN_ERR "%s: invalid plcp ofdm rate "
"(%x).\n", wiphy_name(ar->hw->wiphy),
head->plcp[0]);
return -EINVAL;
}
if (status->band == IEEE80211_BAND_2GHZ)
status->rate_idx += 4;
break;
case AR9170_RX_STATUS_MODULATION_HT:
if (head->plcp[3] & 0x80)
status->flag |= RX_FLAG_40MHZ;
if (head->plcp[6] & 0x80)
status->flag |= RX_FLAG_SHORT_GI;
status->rate_idx = clamp(head->plcp[6] & 0x7f, 0, 75);
status->flag |= RX_FLAG_HT;
break;
default:
if (ar9170_nag_limiter(ar))
printk(KERN_ERR "%s: invalid modulation\n",
wiphy_name(ar->hw->wiphy));
return -EINVAL;
}
return 0;
}
static void ar9170_rx_phy_status(struct ar9170 *ar,
struct ar9170_rx_phystatus *phy,
struct ieee80211_rx_status *status)
{
int i;
BUILD_BUG_ON(sizeof(struct ar9170_rx_phystatus) != 20);
for (i = 0; i < 3; i++)
if (phy->rssi[i] != 0x80)
status->antenna |= BIT(i);
/* post-process RSSI */
for (i = 0; i < 7; i++)
if (phy->rssi[i] & 0x80)
phy->rssi[i] = ((phy->rssi[i] & 0x7f) + 1) & 0x7f;
/* TODO: we could do something with phy_errors */
status->signal = ar->noise[0] + phy->rssi_combined;
}
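/*
 * ar9170_rx_copy_data() sizes the headroom so that the payload behind
 * the variable-length 802.11 header ends up properly aligned: QoS
 * control, an A-MSDU subframe header and a fourth address each shift
 * the payload alignment by two bytes, so the parity of the summed
 * NET_IP_ALIGN offsets is added on top of a fixed 32-byte reserve.
 */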
static struct sk_buff *ar9170_rx_copy_data(u8 *buf, int len)
{
struct sk_buff *skb;
int reserved = 0;
struct ieee80211_hdr *hdr = (void *) buf;
if (ieee80211_is_data_qos(hdr->frame_control)) {
u8 *qc = ieee80211_get_qos_ctl(hdr);
reserved += NET_IP_ALIGN;
if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
reserved += NET_IP_ALIGN;
}
if (ieee80211_has_a4(hdr->frame_control))
reserved += NET_IP_ALIGN;
reserved = 32 + (reserved & NET_IP_ALIGN);
skb = dev_alloc_skb(len + reserved);
if (likely(skb)) {
skb_reserve(skb, reserved);
memcpy(skb_put(skb, len), buf, len);
}
return skb;
}
/*
* If the frame alignment is right (or the kernel has
* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS), and there
* is only a single MPDU in the USB frame, then we could
* submit to mac80211 the SKB directly. However, since
* there may be multiple packets in one SKB in stream
* mode, and we need to observe the proper ordering,
* this is non-trivial.
*/
static void ar9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
{
struct ar9170_rx_head *head;
struct ar9170_rx_macstatus *mac;
struct ar9170_rx_phystatus *phy = NULL;
struct ieee80211_rx_status status;
struct sk_buff *skb;
int mpdu_len;
if (unlikely(!IS_STARTED(ar) || len < (sizeof(*mac))))
return ;
/* Received MPDU */
mpdu_len = len - sizeof(*mac);
mac = (void *)(buf + mpdu_len);
if (unlikely(mac->error & AR9170_RX_ERROR_FATAL)) {
/* this frame is too damaged and can't be used - drop it */
return ;
}
switch (mac->status & AR9170_RX_STATUS_MPDU_MASK) {
case AR9170_RX_STATUS_MPDU_FIRST:
/* first mpdu packet has the plcp header */
if (likely(mpdu_len >= sizeof(struct ar9170_rx_head))) {
head = (void *) buf;
memcpy(&ar->rx_mpdu.plcp, (void *) buf,
sizeof(struct ar9170_rx_head));
mpdu_len -= sizeof(struct ar9170_rx_head);
buf += sizeof(struct ar9170_rx_head);
ar->rx_mpdu.has_plcp = true;
} else {
if (ar9170_nag_limiter(ar))
printk(KERN_ERR "%s: plcp info is clipped.\n",
wiphy_name(ar->hw->wiphy));
return ;
}
break;
case AR9170_RX_STATUS_MPDU_LAST:
/* the last mpdu has an extra tail with phy status information */
if (likely(mpdu_len >= sizeof(struct ar9170_rx_phystatus))) {
mpdu_len -= sizeof(struct ar9170_rx_phystatus);
phy = (void *)(buf + mpdu_len);
} else {
if (ar9170_nag_limiter(ar))
printk(KERN_ERR "%s: frame tail is clipped.\n",
wiphy_name(ar->hw->wiphy));
return ;
}
/* fall through */
case AR9170_RX_STATUS_MPDU_MIDDLE:
/* middle mpdus are just data */
if (unlikely(!ar->rx_mpdu.has_plcp)) {
if (!ar9170_nag_limiter(ar))
return ;
printk(KERN_ERR "%s: rx stream did not start "
"with a first_mpdu frame tag.\n",
wiphy_name(ar->hw->wiphy));
return ;
}
head = &ar->rx_mpdu.plcp;
break;
case AR9170_RX_STATUS_MPDU_SINGLE:
/* single mpdu - has plcp (head) and phy status (tail) */
head = (void *) buf;
mpdu_len -= sizeof(struct ar9170_rx_head);
mpdu_len -= sizeof(struct ar9170_rx_phystatus);
buf += sizeof(struct ar9170_rx_head);
phy = (void *)(buf + mpdu_len);
break;
default:
BUG_ON(1);
break;
}
if (unlikely(mpdu_len < FCS_LEN))
return ;
memset(&status, 0, sizeof(status));
if (unlikely(ar9170_rx_mac_status(ar, head, mac, &status)))
return ;
if (phy)
ar9170_rx_phy_status(ar, phy, &status);
skb = ar9170_rx_copy_data(buf, mpdu_len);
if (likely(skb)) {
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
ieee80211_rx_irqsafe(ar->hw, skb);
}
}
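/*
 * The rx stream consists of chunks, each prefixed by a 4-byte
 * descriptor: a little-endian 16-bit payload length followed by the
 * tag bytes 0x00 0x4e; payloads are padded to 4-byte boundaries.
 * ar9170_rx() walks these chunks and dispatches command responses
 * (marked by six leading 0xffff words) and MPDUs; clipped chunks are
 * parked in ar->rx_failover until the firmware resends the rest.
 */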
void ar9170_rx(struct ar9170 *ar, struct sk_buff *skb)
{
unsigned int i, tlen, resplen, wlen = 0, clen = 0;
u8 *tbuf, *respbuf;
tbuf = skb->data;
tlen = skb->len;
while (tlen >= 4) {
clen = tbuf[1] << 8 | tbuf[0];
wlen = ALIGN(clen, 4);
/* check if this stream chunk has a valid tag. */
if (tbuf[2] != 0 || tbuf[3] != 0x4e) {
/*
* TODO: handle the highly unlikely event that the
* corrupted stream has the TAG at the right position.
*/
/* check if the frame can be repaired. */
if (!ar->rx_failover_missing) {
/* this is no "short read". */
if (ar9170_nag_limiter(ar)) {
printk(KERN_ERR "%s: missing tag!\n",
wiphy_name(ar->hw->wiphy));
goto err_telluser;
} else
goto err_silent;
}
if (ar->rx_failover_missing > tlen) {
if (ar9170_nag_limiter(ar)) {
printk(KERN_ERR "%s: possible multi "
"stream corruption!\n",
wiphy_name(ar->hw->wiphy));
goto err_telluser;
} else
goto err_silent;
}
memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen);
ar->rx_failover_missing -= tlen;
if (ar->rx_failover_missing <= 0) {
/*
* nested ar9170_rx call!
* termination is guaranteed, even when the
* combined frame also has an element with
* a bad tag.
*/
ar->rx_failover_missing = 0;
ar9170_rx(ar, ar->rx_failover);
skb_reset_tail_pointer(ar->rx_failover);
skb_trim(ar->rx_failover, 0);
}
return ;
}
/* check if stream is clipped */
if (wlen > tlen - 4) {
if (ar->rx_failover_missing) {
/* TODO: handle double stream corruption. */
if (ar9170_nag_limiter(ar)) {
printk(KERN_ERR "%s: double rx stream "
"corruption!\n",
wiphy_name(ar->hw->wiphy));
goto err_telluser;
} else
goto err_silent;
}
/*
* Save the incomplete data set. The firmware will resend
* the missing bits when the rx descriptor comes round
* again.
*/
memcpy(skb_put(ar->rx_failover, tlen), tbuf, tlen);
ar->rx_failover_missing = clen - tlen;
return ;
}
resplen = clen;
respbuf = tbuf + 4;
tbuf += wlen + 4;
tlen -= wlen + 4;
i = 0;
/* weird thing, but this is the same in the original driver */
while (resplen > 2 && i < 12 &&
respbuf[0] == 0xff && respbuf[1] == 0xff) {
i += 2;
resplen -= 2;
respbuf += 2;
}
if (resplen < 4)
continue;
/* found the 6 * 0xffff marker? */
if (i == 12)
ar9170_handle_command_response(ar, respbuf, resplen);
else
ar9170_handle_mpdu(ar, respbuf, clen);
}
if (tlen) {
if (net_ratelimit())
printk(KERN_ERR "%s: %d bytes of unprocessed "
"data left in rx stream!\n",
wiphy_name(ar->hw->wiphy), tlen);
goto err_telluser;
}
return ;
err_telluser:
printk(KERN_ERR "%s: damaged RX stream data [want:%d, "
"data:%d, rx:%d, pending:%d ]\n",
wiphy_name(ar->hw->wiphy), clen, wlen, tlen,
ar->rx_failover_missing);
if (ar->rx_failover_missing)
print_hex_dump_bytes("rxbuf:", DUMP_PREFIX_OFFSET,
ar->rx_failover->data,
ar->rx_failover->len);
print_hex_dump_bytes("stream:", DUMP_PREFIX_OFFSET,
skb->data, skb->len);
printk(KERN_ERR "%s: please check your hardware and cables, if "
"you see this message frequently.\n",
wiphy_name(ar->hw->wiphy));
err_silent:
if (ar->rx_failover_missing) {
skb_reset_tail_pointer(ar->rx_failover);
skb_trim(ar->rx_failover, 0);
ar->rx_failover_missing = 0;
}
}
#define AR9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop) \
do { \
queue.aifs = ai_fs; \
queue.cw_min = cwmin; \
queue.cw_max = cwmax; \
queue.txop = _txop; \
} while (0)
static int ar9170_op_start(struct ieee80211_hw *hw)
{
struct ar9170 *ar = hw->priv;
int err, i;
mutex_lock(&ar->mutex);
/* reinitialize queues statistics */
memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
for (i = 0; i < __AR9170_NUM_TXQ; i++)
ar->tx_stats[i].limit = AR9170_TXQ_DEPTH;
/* reset QoS defaults */
AR9170_FILL_QUEUE(ar->edcf[0], 3, 15, 1023, 0); /* BEST EFFORT*/
AR9170_FILL_QUEUE(ar->edcf[1], 7, 15, 1023, 0); /* BACKGROUND */
AR9170_FILL_QUEUE(ar->edcf[2], 2, 7, 15, 94); /* VIDEO */
AR9170_FILL_QUEUE(ar->edcf[3], 2, 3, 7, 47); /* VOICE */
AR9170_FILL_QUEUE(ar->edcf[4], 2, 3, 7, 0); /* SPECIAL */
/* set sane AMPDU defaults */
ar->global_ampdu_density = 6;
ar->global_ampdu_factor = 3;
ar->bad_hw_nagger = jiffies;
err = ar->open(ar);
if (err)
goto out;
err = ar9170_init_mac(ar);
if (err)
goto out;
err = ar9170_set_qos(ar);
if (err)
goto out;
err = ar9170_init_phy(ar, IEEE80211_BAND_2GHZ);
if (err)
goto out;
err = ar9170_init_rf(ar);
if (err)
goto out;
/* start DMA */
err = ar9170_write_reg(ar, 0x1c3d30, 0x100);
if (err)
goto out;
ar->state = AR9170_STARTED;
out:
mutex_unlock(&ar->mutex);
return err;
}
static void ar9170_op_stop(struct ieee80211_hw *hw)
{
struct ar9170 *ar = hw->priv;
unsigned int i;
if (IS_STARTED(ar))
ar->state = AR9170_IDLE;
cancel_delayed_work_sync(&ar->tx_janitor);
#ifdef CONFIG_AR9170_LEDS
cancel_delayed_work_sync(&ar->led_work);
#endif
cancel_work_sync(&ar->beacon_work);
mutex_lock(&ar->mutex);
if (IS_ACCEPTING_CMD(ar)) {
ar9170_set_leds_state(ar, 0);
/* stop DMA */
ar9170_write_reg(ar, 0x1c3d30, 0);
ar->stop(ar);
}
for (i = 0; i < __AR9170_NUM_TXQ; i++) {
skb_queue_purge(&ar->tx_pending[i]);
skb_queue_purge(&ar->tx_status[i]);
}
mutex_unlock(&ar->mutex);
}
static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
struct ar9170_tx_control *txc;
struct ieee80211_tx_info *info;
struct ieee80211_tx_rate *txrate;
struct ar9170_tx_info *arinfo;
unsigned int queue = skb_get_queue_mapping(skb);
u16 keytype = 0;
u16 len, icv = 0;
BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
hdr = (void *)skb->data;
info = IEEE80211_SKB_CB(skb);
len = skb->len;
txc = (void *)skb_push(skb, sizeof(*txc));
if (info->control.hw_key) {
icv = info->control.hw_key->icv_len;
switch (info->control.hw_key->alg) {
case ALG_WEP:
keytype = AR9170_TX_MAC_ENCR_RC4;
break;
case ALG_TKIP:
keytype = AR9170_TX_MAC_ENCR_RC4;
break;
case ALG_CCMP:
keytype = AR9170_TX_MAC_ENCR_AES;
break;
default:
WARN_ON(1);
goto err_out;
}
}
/* Length */
txc->length = cpu_to_le16(len + icv + 4);
txc->mac_control = cpu_to_le16(AR9170_TX_MAC_HW_DURATION |
AR9170_TX_MAC_BACKOFF);
txc->mac_control |= cpu_to_le16(ar9170_qos_hwmap[queue] <<
AR9170_TX_MAC_QOS_SHIFT);
txc->mac_control |= cpu_to_le16(keytype);
txc->phy_control = cpu_to_le32(0);
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);
txrate = &info->control.rates[0];
if (txrate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
else if (txrate->flags & IEEE80211_TX_RC_USE_RTS_CTS)
txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
arinfo = (void *)info->rate_driver_data;
arinfo->timeout = jiffies + msecs_to_jiffies(AR9170_QUEUE_TIMEOUT);
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
(is_valid_ether_addr(ieee80211_get_DA(hdr)))) {
/*
* WARNING:
* Putting the QoS queue bits into an unexplored territory is
* certainly not elegant.
*
* In my defense: This idea provides a reasonable way to
* smuggle valuable information to the tx_status callback.
* Also, the idea behind this bit-abuse came straight from
* the original driver code.
*/
txc->phy_control |=
cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
}
return 0;
err_out:
skb_pull(skb, sizeof(*txc));
return -EINVAL;
}
static void ar9170_tx_prepare_phy(struct ar9170 *ar, struct sk_buff *skb)
{
struct ar9170_tx_control *txc;
struct ieee80211_tx_info *info;
struct ieee80211_rate *rate = NULL;
struct ieee80211_tx_rate *txrate;
u32 power, chains;
txc = (void *) skb->data;
info = IEEE80211_SKB_CB(skb);
txrate = &info->control.rates[0];
if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE);
if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ);
/* this works because 40 MHz is 2 and dup is 3 */
if (txrate->flags & IEEE80211_TX_RC_DUP_DATA)
txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ_DUP);
if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_SHORT_GI);
if (txrate->flags & IEEE80211_TX_RC_MCS) {
u32 r = txrate->idx;
u8 *txpower;
/* heavy clip control */
txc->phy_control |= cpu_to_le32((r & 0x7) << 7);
r <<= AR9170_TX_PHY_MCS_SHIFT;
BUG_ON(r & ~AR9170_TX_PHY_MCS_MASK);
txc->phy_control |= cpu_to_le32(r & AR9170_TX_PHY_MCS_MASK);
txc->phy_control |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);
if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
if (info->band == IEEE80211_BAND_5GHZ)
txpower = ar->power_5G_ht40;
else
txpower = ar->power_2G_ht40;
} else {
if (info->band == IEEE80211_BAND_5GHZ)
txpower = ar->power_5G_ht20;
else
txpower = ar->power_2G_ht20;
}
power = txpower[(txrate->idx) & 7];
} else {
u8 *txpower;
u32 mod;
u32 phyrate;
u8 idx = txrate->idx;
if (info->band != IEEE80211_BAND_2GHZ) {
idx += 4;
txpower = ar->power_5G_leg;
mod = AR9170_TX_PHY_MOD_OFDM;
} else {
if (idx < 4) {
txpower = ar->power_2G_cck;
mod = AR9170_TX_PHY_MOD_CCK;
} else {
mod = AR9170_TX_PHY_MOD_OFDM;
txpower = ar->power_2G_ofdm;
}
}
rate = &__ar9170_ratetable[idx];
phyrate = rate->hw_value & 0xF;
power = txpower[(rate->hw_value & 0x30) >> 4];
phyrate <<= AR9170_TX_PHY_MCS_SHIFT;
txc->phy_control |= cpu_to_le32(mod);
txc->phy_control |= cpu_to_le32(phyrate);
}
power <<= AR9170_TX_PHY_TX_PWR_SHIFT;
power &= AR9170_TX_PHY_TX_PWR_MASK;
txc->phy_control |= cpu_to_le32(power);
/* set TX chains */
if (ar->eeprom.tx_mask == 1) {
chains = AR9170_TX_PHY_TXCHAIN_1;
} else {
chains = AR9170_TX_PHY_TXCHAIN_2;
/* >= 36M legacy OFDM - use only one chain */
if (rate && rate->bitrate >= 360)
chains = AR9170_TX_PHY_TXCHAIN_1;
}
txc->phy_control |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_SHIFT);
}
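/*
 * ar9170_tx() moves frames from the per-AC tx_pending queues towards
 * the hardware: each queue gets at most its remaining soft limit, the
 * total is capped at AR9170_TX_MAX_PENDING frames per round, a
 * mac80211 queue is stopped once its limit fills up, and the tx
 * janitor is scheduled whenever frames are actually put in flight.
 */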
static void ar9170_tx(struct ar9170 *ar)
{
struct sk_buff *skb;
unsigned long flags;
struct ieee80211_tx_info *info;
struct ar9170_tx_info *arinfo;
unsigned int i, frames, frames_failed, remaining_space;
int err;
bool schedule_garbagecollector = false;
BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
if (unlikely(!IS_STARTED(ar)))
return ;
remaining_space = AR9170_TX_MAX_PENDING;
for (i = 0; i < __AR9170_NUM_TXQ; i++) {
spin_lock_irqsave(&ar->tx_stats_lock, flags);
frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len,
skb_queue_len(&ar->tx_pending[i]));
if (remaining_space < frames) {
#ifdef AR9170_QUEUE_DEBUG
printk(KERN_DEBUG "%s: tx quota reached queue:%d, "
"remaining slots:%d, needed:%d\n",
wiphy_name(ar->hw->wiphy), i, remaining_space,
frames);
#endif /* AR9170_QUEUE_DEBUG */
frames = remaining_space;
}
ar->tx_stats[i].len += frames;
ar->tx_stats[i].count += frames;
if (ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
#ifdef AR9170_QUEUE_DEBUG
printk(KERN_DEBUG "%s: queue %d full\n",
wiphy_name(ar->hw->wiphy), i);
printk(KERN_DEBUG "%s: stuck frames: ===>\n",
wiphy_name(ar->hw->wiphy));
ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
ar9170_dump_txqueue(ar, &ar->tx_status[i]);
#endif /* AR9170_QUEUE_DEBUG */
#ifdef AR9170_QUEUE_STOP_DEBUG
printk(KERN_DEBUG "%s: stop queue %d\n",
wiphy_name(ar->hw->wiphy), i);
__ar9170_dump_txstats(ar);
#endif /* AR9170_QUEUE_STOP_DEBUG */
ieee80211_stop_queue(ar->hw, i);
}
spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
if (!frames)
continue;
frames_failed = 0;
while (frames) {
skb = skb_dequeue(&ar->tx_pending[i]);
if (unlikely(!skb)) {
frames_failed += frames;
frames = 0;
break;
}
info = IEEE80211_SKB_CB(skb);
arinfo = (void *) info->rate_driver_data;
/* TODO: cancel stuck frames */
arinfo->timeout = jiffies +
msecs_to_jiffies(AR9170_TX_TIMEOUT);
#ifdef AR9170_QUEUE_DEBUG
printk(KERN_DEBUG "%s: send frame q:%d =>\n",
wiphy_name(ar->hw->wiphy), i);
ar9170_print_txheader(ar, skb);
#endif /* AR9170_QUEUE_DEBUG */
err = ar->tx(ar, skb);
if (unlikely(err)) {
frames_failed++;
dev_kfree_skb_any(skb);
} else {
remaining_space--;
schedule_garbagecollector = true;
}
frames--;
}
#ifdef AR9170_QUEUE_DEBUG
printk(KERN_DEBUG "%s: ar9170_tx report for queue %d\n",
wiphy_name(ar->hw->wiphy), i);
printk(KERN_DEBUG "%s: unprocessed pending frames left:\n",
wiphy_name(ar->hw->wiphy));
ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
#endif /* AR9170_QUEUE_DEBUG */
if (unlikely(frames_failed)) {
#ifdef AR9170_QUEUE_DEBUG
printk(KERN_DEBUG "%s: frames failed %d =>\n",
wiphy_name(ar->hw->wiphy), frames_failed);
#endif /* AR9170_QUEUE_DEBUG */
spin_lock_irqsave(&ar->tx_stats_lock, flags);
ar->tx_stats[i].len -= frames_failed;
ar->tx_stats[i].count -= frames_failed;
#ifdef AR9170_QUEUE_STOP_DEBUG
printk(KERN_DEBUG "%s: wake queue %d\n",
wiphy_name(ar->hw->wiphy), i);
__ar9170_dump_txstats(ar);
#endif /* AR9170_QUEUE_STOP_DEBUG */
ieee80211_wake_queue(ar->hw, i);
spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
}
}
if (!schedule_garbagecollector)
return;
ieee80211_queue_delayed_work(ar->hw,
&ar->tx_janitor,
msecs_to_jiffies(AR9170_JANITOR_DELAY));
}
int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct ar9170 *ar = hw->priv;
struct ieee80211_tx_info *info;
unsigned int queue;
if (unlikely(!IS_STARTED(ar)))
goto err_free;
if (unlikely(ar9170_tx_prepare(ar, skb)))
goto err_free;
queue = skb_get_queue_mapping(skb);
info = IEEE80211_SKB_CB(skb);
ar9170_tx_prepare_phy(ar, skb);
skb_queue_tail(&ar->tx_pending[queue], skb);
ar9170_tx(ar);
return NETDEV_TX_OK;
err_free:
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
static int ar9170_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ar9170 *ar = hw->priv;
struct ath_common *common = &ar->common;
int err = 0;
mutex_lock(&ar->mutex);
if (ar->vif) {
err = -EBUSY;
goto unlock;
}
ar->vif = vif;
memcpy(common->macaddr, vif->addr, ETH_ALEN);
if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) {
ar->rx_software_decryption = true;
ar->disable_offload = true;
}
ar->cur_filter = 0;
err = ar9170_update_frame_filter(ar, AR9170_MAC_REG_FTF_DEFAULTS);
if (err)
goto unlock;
err = ar9170_set_operating_mode(ar);
unlock:
mutex_unlock(&ar->mutex);
return err;
}
static void ar9170_op_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ar9170 *ar = hw->priv;
mutex_lock(&ar->mutex);
ar->vif = NULL;
ar9170_update_frame_filter(ar, 0);
ar9170_set_beacon_timers(ar);
dev_kfree_skb(ar->beacon);
ar->beacon = NULL;
ar->sniffer_enabled = false;
ar->rx_software_decryption = false;
ar9170_set_operating_mode(ar);
mutex_unlock(&ar->mutex);
}
static int ar9170_op_config(struct ieee80211_hw *hw, u32 changed)
{
struct ar9170 *ar = hw->priv;
int err = 0;
mutex_lock(&ar->mutex);
if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
/* TODO */
err = 0;
}
if (changed & IEEE80211_CONF_CHANGE_PS) {
/* TODO */
err = 0;
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
/* TODO */
err = 0;
}
if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
/*
* is it long_frame_max_tx_count or short_frame_max_tx_count?
*/
err = ar9170_set_hwretry_limit(ar,
ar->hw->conf.long_frame_max_tx_count);
if (err)
goto out;
}
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
/* adjust slot time for 5 GHz */
err = ar9170_set_slot_time(ar);
if (err)
goto out;
err = ar9170_set_dyn_sifs_ack(ar);
if (err)
goto out;
err = ar9170_set_channel(ar, hw->conf.channel,
AR9170_RFI_NONE,
nl80211_to_ar9170(hw->conf.channel_type));
if (err)
goto out;
}
out:
mutex_unlock(&ar->mutex);
return err;
}
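/*
 * The 64-bit multicast hash uses the top six bits of the last address
 * octet as the bit index; the broadcast address (last octet 0xff)
 * always lands on bit 63, which is set unconditionally.
 */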
static u64 ar9170_op_prepare_multicast(struct ieee80211_hw *hw,
struct netdev_hw_addr_list *mc_list)
{
u64 mchash;
struct netdev_hw_addr *ha;
/* always get broadcast frames */
mchash = 1ULL << (0xff >> 2);
netdev_hw_addr_list_for_each(ha, mc_list)
mchash |= 1ULL << (ha->addr[5] >> 2);
return mchash;
}
static void ar9170_op_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *new_flags,
u64 multicast)
{
struct ar9170 *ar = hw->priv;
if (unlikely(!IS_ACCEPTING_CMD(ar)))
return ;
mutex_lock(&ar->mutex);
/* mask supported flags */
*new_flags &= FIF_ALLMULTI | FIF_CONTROL | FIF_BCN_PRBRESP_PROMISC |
FIF_PROMISC_IN_BSS | FIF_FCSFAIL | FIF_PLCPFAIL;
ar->filter_state = *new_flags;
/*
* We can support more by setting the sniffer bit and
* then checking the error flags later.
*/
if (changed_flags & FIF_ALLMULTI && *new_flags & FIF_ALLMULTI)
multicast = ~0ULL;
if (multicast != ar->cur_mc_hash)
ar9170_update_multicast(ar, multicast);
if (changed_flags & FIF_CONTROL) {
u32 filter = AR9170_MAC_REG_FTF_PSPOLL |
AR9170_MAC_REG_FTF_RTS |
AR9170_MAC_REG_FTF_CTS |
AR9170_MAC_REG_FTF_ACK |
AR9170_MAC_REG_FTF_CFE |
AR9170_MAC_REG_FTF_CFE_ACK;
if (*new_flags & FIF_CONTROL)
filter |= ar->cur_filter;
else
filter &= (~ar->cur_filter);
ar9170_update_frame_filter(ar, filter);
}
if (changed_flags & FIF_PROMISC_IN_BSS) {
ar->sniffer_enabled = ((*new_flags) & FIF_PROMISC_IN_BSS) != 0;
ar9170_set_operating_mode(ar);
}
mutex_unlock(&ar->mutex);
}
static void ar9170_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
struct ar9170 *ar = hw->priv;
struct ath_common *common = &ar->common;
int err = 0;
mutex_lock(&ar->mutex);
if (changed & BSS_CHANGED_BSSID) {
memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
err = ar9170_set_operating_mode(ar);
if (err)
goto out;
}
if (changed & BSS_CHANGED_BEACON_ENABLED)
ar->enable_beacon = bss_conf->enable_beacon;
if (changed & BSS_CHANGED_BEACON) {
err = ar9170_update_beacon(ar);
if (err)
goto out;
}
if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_INT)) {
err = ar9170_set_beacon_timers(ar);
if (err)
goto out;
}
if (changed & BSS_CHANGED_ASSOC) {
#ifndef CONFIG_AR9170_LEDS
/* enable assoc LED. */
err = ar9170_set_leds_state(ar, bss_conf->assoc ? 2 : 0);
#endif /* CONFIG_AR9170_LEDS */
}
if (changed & BSS_CHANGED_HT) {
/* TODO */
err = 0;
}
if (changed & BSS_CHANGED_ERP_SLOT) {
err = ar9170_set_slot_time(ar);
if (err)
goto out;
}
if (changed & BSS_CHANGED_BASIC_RATES) {
err = ar9170_set_basic_rates(ar);
if (err)
goto out;
}
out:
mutex_unlock(&ar->mutex);
}
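/*
 * The 64-bit TSF must be read as two 32-bit registers. Reading the
 * high word before and after the low word (TSF_H, TSF_L, TSF_H) and
 * retrying until both high reads agree guards against the low word
 * wrapping between the accesses.
 */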
static u64 ar9170_op_get_tsf(struct ieee80211_hw *hw)
{
struct ar9170 *ar = hw->priv;
int err;
u64 tsf;
#define NR 3
static const u32 addr[NR] = { AR9170_MAC_REG_TSF_H,
AR9170_MAC_REG_TSF_L,
AR9170_MAC_REG_TSF_H };
u32 val[NR];
int loops = 0;
mutex_lock(&ar->mutex);
while (loops++ < 10) {
err = ar9170_read_mreg(ar, NR, addr, val);
if (err || val[0] == val[2])
break;
}
mutex_unlock(&ar->mutex);
if (WARN_ON(err))
return 0;
tsf = val[0];
tsf = (tsf << 32) | val[1];
return tsf;
#undef NR
}
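/*
 * Hardware key slots 0-63 are handed out to pairwise keys and tracked
 * in the ar->usedkeys bitmap (mirrored into the ROLL_CALL registers
 * below); group keys use the fixed slots 64 + keyidx. TKIP needs a
 * second upload for the MIC key.
 */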
static int ar9170_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
struct ar9170 *ar = hw->priv;
int err = 0, i;
u8 ktype;
if ((!ar->vif) || (ar->disable_offload))
return -EOPNOTSUPP;
switch (key->alg) {
case ALG_WEP:
if (key->keylen == WLAN_KEY_LEN_WEP40)
ktype = AR9170_ENC_ALG_WEP64;
else
ktype = AR9170_ENC_ALG_WEP128;
break;
case ALG_TKIP:
ktype = AR9170_ENC_ALG_TKIP;
break;
case ALG_CCMP:
ktype = AR9170_ENC_ALG_AESCCMP;
break;
default:
return -EOPNOTSUPP;
}
mutex_lock(&ar->mutex);
if (cmd == SET_KEY) {
if (unlikely(!IS_STARTED(ar))) {
err = -EOPNOTSUPP;
goto out;
}
/* group keys need all-zeroes address */
if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
sta = NULL;
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
for (i = 0; i < 64; i++)
if (!(ar->usedkeys & BIT(i)))
break;
if (i == 64) {
ar->rx_software_decryption = true;
ar9170_set_operating_mode(ar);
err = -ENOSPC;
goto out;
}
} else {
i = 64 + key->keyidx;
}
key->hw_key_idx = i;
err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL, ktype, 0,
key->key, min_t(u8, 16, key->keylen));
if (err)
goto out;
if (key->alg == ALG_TKIP) {
err = ar9170_upload_key(ar, i, sta ? sta->addr : NULL,
ktype, 1, key->key + 16, 16);
if (err)
goto out;
/*
* the hardware is not capable of generating the MMIC
* for fragmented frames!
*/
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
}
if (i < 64)
ar->usedkeys |= BIT(i);
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
} else {
if (unlikely(!IS_STARTED(ar))) {
/* The device is gone... together with the key ;-) */
err = 0;
goto out;
}
err = ar9170_disable_key(ar, key->hw_key_idx);
if (err)
goto out;
if (key->hw_key_idx < 64) {
ar->usedkeys &= ~BIT(key->hw_key_idx);
} else {
err = ar9170_upload_key(ar, key->hw_key_idx, NULL,
AR9170_ENC_ALG_NONE, 0,
NULL, 0);
if (err)
goto out;
if (key->alg == ALG_TKIP) {
err = ar9170_upload_key(ar, key->hw_key_idx,
NULL,
AR9170_ENC_ALG_NONE, 1,
NULL, 0);
if (err)
goto out;
}
}
}
ar9170_regwrite_begin(ar);
ar9170_regwrite(AR9170_MAC_REG_ROLL_CALL_TBL_L, ar->usedkeys);
ar9170_regwrite(AR9170_MAC_REG_ROLL_CALL_TBL_H, ar->usedkeys >> 32);
ar9170_regwrite_finish();
err = ar9170_regwrite_result();
out:
mutex_unlock(&ar->mutex);
return err;
}
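/*
 * Illustrative note: ar9170_set_key() manages 64 pairwise key slots in the
 * ar->usedkeys bitmap, while group keys live at the fixed slots
 * 64 + keyidx.  Allocating a pairwise slot is a first-zero-bit scan; when
 * all slots are taken the driver falls back to software decryption.
 * Sketch only:
 */
#if 0
static int example_alloc_key_slot(u64 usedkeys)
{
	int i;

	for (i = 0; i < 64; i++)
		if (!(usedkeys & BIT(i)))
			return i;	/* first free pairwise slot */

	return -ENOSPC;			/* no slot: use software crypto */
}
#endif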
static int ar9170_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
struct ar9170 *ar = hw->priv;
u32 val;
int err;
mutex_lock(&ar->mutex);
err = ar9170_read_reg(ar, AR9170_MAC_REG_TX_RETRY, &val);
if (!err)
ar->stats.dot11ACKFailureCount += val;
memcpy(stats, &ar->stats, sizeof(*stats));
mutex_unlock(&ar->mutex);
return 0;
}
static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
const struct ieee80211_tx_queue_params *param)
{
struct ar9170 *ar = hw->priv;
int ret;
mutex_lock(&ar->mutex);
if (queue < __AR9170_NUM_TXQ) {
memcpy(&ar->edcf[ar9170_qos_hwmap[queue]],
param, sizeof(*param));
ret = ar9170_set_qos(ar);
} else {
ret = -EINVAL;
}
mutex_unlock(&ar->mutex);
return ret;
}
static int ar9170_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
switch (action) {
case IEEE80211_AMPDU_RX_START:
case IEEE80211_AMPDU_RX_STOP:
/* Handled by firmware */
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static const struct ieee80211_ops ar9170_ops = {
.start = ar9170_op_start,
.stop = ar9170_op_stop,
.tx = ar9170_op_tx,
.add_interface = ar9170_op_add_interface,
.remove_interface = ar9170_op_remove_interface,
.config = ar9170_op_config,
.prepare_multicast = ar9170_op_prepare_multicast,
.configure_filter = ar9170_op_configure_filter,
.conf_tx = ar9170_conf_tx,
.bss_info_changed = ar9170_op_bss_info_changed,
.get_tsf = ar9170_op_get_tsf,
.set_key = ar9170_set_key,
.get_stats = ar9170_get_stats,
.ampdu_action = ar9170_ampdu_action,
};
void *ar9170_alloc(size_t priv_size)
{
struct ieee80211_hw *hw;
struct ar9170 *ar;
struct sk_buff *skb;
int i;
/*
* this buffer is used for rx stream reconstruction.
* Under heavy load this device (or the transport layer?)
* tends to split the streams into separate rx descriptors.
*/
skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
if (!skb)
goto err_nomem;
hw = ieee80211_alloc_hw(priv_size, &ar9170_ops);
if (!hw)
goto err_nomem;
ar = hw->priv;
ar->hw = hw;
ar->rx_failover = skb;
mutex_init(&ar->mutex);
spin_lock_init(&ar->cmdlock);
spin_lock_init(&ar->tx_stats_lock);
for (i = 0; i < __AR9170_NUM_TXQ; i++) {
skb_queue_head_init(&ar->tx_status[i]);
skb_queue_head_init(&ar->tx_pending[i]);
}
ar9170_rx_reset_rx_mpdu(ar);
INIT_WORK(&ar->beacon_work, ar9170_new_beacon);
INIT_DELAYED_WORK(&ar->tx_janitor, ar9170_tx_janitor);
/* all hw supports 2.4 GHz, so set channel to 1 by default */
ar->channel = &ar9170_2ghz_chantable[0];
/* first part of wiphy init */
ar->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_WDS) |
BIT(NL80211_IFTYPE_ADHOC);
ar->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
IEEE80211_HW_SIGNAL_DBM;
ar->hw->queues = __AR9170_NUM_TXQ;
ar->hw->extra_tx_headroom = 8;
ar->hw->max_rates = 1;
ar->hw->max_rate_tries = 3;
for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
return ar;
err_nomem:
kfree_skb(skb);
return ERR_PTR(-ENOMEM);
}
static int ar9170_read_eeprom(struct ar9170 *ar)
{
#define RW 8 /* number of words to read at once */
#define RB (sizeof(u32) * RW)
struct ath_regulatory *regulatory = &ar->common.regulatory;
u8 *eeprom = (void *)&ar->eeprom;
u8 *addr = ar->eeprom.mac_address;
__le32 offsets[RW];
unsigned int rx_streams, tx_streams, tx_params = 0;
int i, j, err, bands = 0;
BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
BUILD_BUG_ON(RB > AR9170_MAX_CMD_LEN - 4);
#ifndef __CHECKER__
/* we don't want to handle a trailing remainder */
BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
#endif
for (i = 0; i < sizeof(ar->eeprom)/RB; i++) {
for (j = 0; j < RW; j++)
offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
RB * i + 4 * j);
err = ar->exec_cmd(ar, AR9170_CMD_RREG,
RB, (u8 *) &offsets,
RB, eeprom + RB * i);
if (err)
return err;
}
#undef RW
#undef RB
if (ar->eeprom.length == cpu_to_le16(0xFFFF))
return -ENODATA;
if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar9170_band_2GHz;
bands++;
}
if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &ar9170_band_5GHz;
bands++;
}
rx_streams = hweight8(ar->eeprom.rx_mask);
tx_streams = hweight8(ar->eeprom.tx_mask);
if (rx_streams != tx_streams)
tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;
if (tx_streams >= 1 && tx_streams <= IEEE80211_HT_MCS_TX_MAX_STREAMS)
tx_params |= (tx_streams - 1) <<
IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
ar9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
ar9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
/*
* Measured: a band switch takes roughly 135 ms and a
* frequency switch about 80 ms.
*
* FIXME: measure these values again once EEPROM settings
* are used, as they will influence them!
*/
if (bands == 2)
ar->hw->channel_change_time = 135 * 1000;
else
ar->hw->channel_change_time = 80 * 1000;
regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
regulatory->current_rd_ext = le16_to_cpu(ar->eeprom.reg_domain[1]);
/* second part of wiphy init */
SET_IEEE80211_PERM_ADDR(ar->hw, addr);
return bands ? 0 : -EINVAL;
}
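/*
 * Illustrative sketch: ar9170_read_eeprom() fetches the EEPROM RW (8)
 * 32-bit words per firmware command.  The payload for chunk i is the list
 * of absolute little-endian word addresses, so chunk i covers the byte
 * range [RB * i, RB * i + RB).  Helper name is hypothetical:
 */
#if 0
static void example_fill_offsets(__le32 offsets[8], unsigned int chunk)
{
	unsigned int j;

	for (j = 0; j < 8; j++)
		offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
					 32 * chunk + 4 * j);
}
#endif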
static int ar9170_reg_notifier(struct wiphy *wiphy,
struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ar9170 *ar = hw->priv;
return ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
}
int ar9170_register(struct ar9170 *ar, struct device *pdev)
{
struct ath_regulatory *regulatory = &ar->common.regulatory;
int err;
/* try to read EEPROM, init MAC addr */
err = ar9170_read_eeprom(ar);
if (err)
goto err_out;
err = ath_regd_init(regulatory, ar->hw->wiphy,
ar9170_reg_notifier);
if (err)
goto err_out;
err = ieee80211_register_hw(ar->hw);
if (err)
goto err_out;
if (!ath_is_world_regd(regulatory))
regulatory_hint(ar->hw->wiphy, regulatory->alpha2);
err = ar9170_init_leds(ar);
if (err)
goto err_unreg;
#ifdef CONFIG_AR9170_LEDS
err = ar9170_register_leds(ar);
if (err)
goto err_unreg;
#endif /* CONFIG_AR9170_LEDS */
dev_info(pdev, "Atheros AR9170 is registered as '%s'\n",
wiphy_name(ar->hw->wiphy));
ar->registered = true;
return 0;
err_unreg:
ieee80211_unregister_hw(ar->hw);
err_out:
return err;
}
void ar9170_unregister(struct ar9170 *ar)
{
if (ar->registered) {
#ifdef CONFIG_AR9170_LEDS
ar9170_unregister_leds(ar);
#endif /* CONFIG_AR9170_LEDS */
ieee80211_unregister_hw(ar->hw);
}
kfree_skb(ar->rx_failover);
mutex_destroy(&ar->mutex);
}
|
gpl-2.0
|
froggy666uk/kernel-msm
|
fs/ecryptfs/inode.c
|
763
|
34644
|
/**
* eCryptfs: Linux filesystem encryption layer
*
* Copyright (C) 1997-2004 Erez Zadok
* Copyright (C) 2001-2004 Stony Brook University
* Copyright (C) 2004-2007 International Business Machines Corp.
* Author(s): Michael A. Halcrow <mahalcro@us.ibm.com>
* Michael C. Thompsion <mcthomps@us.ibm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
* 02111-1307, USA.
*/
#include <linux/file.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/crypto.h>
#include <linux/fs_stack.h>
#include <linux/slab.h>
#include <linux/xattr.h>
#include <asm/unaligned.h>
#include "ecryptfs_kernel.h"
static struct dentry *lock_parent(struct dentry *dentry)
{
struct dentry *dir;
dir = dget_parent(dentry);
mutex_lock_nested(&(dir->d_inode->i_mutex), I_MUTEX_PARENT);
return dir;
}
static void unlock_dir(struct dentry *dir)
{
mutex_unlock(&dir->d_inode->i_mutex);
dput(dir);
}
static int ecryptfs_inode_test(struct inode *inode, void *lower_inode)
{
if (ecryptfs_inode_to_lower(inode) == (struct inode *)lower_inode)
return 1;
return 0;
}
static int ecryptfs_inode_set(struct inode *inode, void *opaque)
{
struct inode *lower_inode = opaque;
ecryptfs_set_inode_lower(inode, lower_inode);
fsstack_copy_attr_all(inode, lower_inode);
/* i_size will be overwritten for encrypted regular files */
fsstack_copy_inode_size(inode, lower_inode);
inode->i_ino = lower_inode->i_ino;
inode->i_version++;
inode->i_mapping->a_ops = &ecryptfs_aops;
inode->i_mapping->backing_dev_info = inode->i_sb->s_bdi;
if (S_ISLNK(inode->i_mode))
inode->i_op = &ecryptfs_symlink_iops;
else if (S_ISDIR(inode->i_mode))
inode->i_op = &ecryptfs_dir_iops;
else
inode->i_op = &ecryptfs_main_iops;
if (S_ISDIR(inode->i_mode))
inode->i_fop = &ecryptfs_dir_fops;
else if (special_file(inode->i_mode))
init_special_inode(inode, inode->i_mode, inode->i_rdev);
else
inode->i_fop = &ecryptfs_main_fops;
return 0;
}
static struct inode *__ecryptfs_get_inode(struct inode *lower_inode,
struct super_block *sb)
{
struct inode *inode;
if (lower_inode->i_sb != ecryptfs_superblock_to_lower(sb))
return ERR_PTR(-EXDEV);
if (!igrab(lower_inode))
return ERR_PTR(-ESTALE);
inode = iget5_locked(sb, (unsigned long)lower_inode,
ecryptfs_inode_test, ecryptfs_inode_set,
lower_inode);
if (!inode) {
iput(lower_inode);
return ERR_PTR(-EACCES);
}
if (!(inode->i_state & I_NEW))
iput(lower_inode);
return inode;
}
struct inode *ecryptfs_get_inode(struct inode *lower_inode,
struct super_block *sb)
{
struct inode *inode = __ecryptfs_get_inode(lower_inode, sb);
if (!IS_ERR(inode) && (inode->i_state & I_NEW))
unlock_new_inode(inode);
return inode;
}
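/*
 * Illustrative note: __ecryptfs_get_inode() uses the lower inode pointer
 * as the iget5_locked() lookup key.  ecryptfs_inode_test() matches an
 * existing upper inode against its lower inode; ecryptfs_inode_set()
 * initializes a freshly allocated one, which comes back with I_NEW set.
 * The canonical usage pattern looks like this (sketch only, test_fn and
 * set_fn are placeholders):
 */
#if 0
	inode = iget5_locked(sb, (unsigned long)key, test_fn, set_fn, key);
	if (inode && (inode->i_state & I_NEW)) {
		/* ...finish initializing the new inode... */
		unlock_new_inode(inode);
	}
#endif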
/**
* ecryptfs_interpose
* @lower_dentry: Existing dentry in the lower filesystem
* @dentry: ecryptfs' dentry
* @sb: ecryptfs's super_block
*
* Interposes upper and lower dentries.
*
* Returns zero on success; non-zero otherwise
*/
static int ecryptfs_interpose(struct dentry *lower_dentry,
struct dentry *dentry, struct super_block *sb)
{
struct inode *inode = ecryptfs_get_inode(lower_dentry->d_inode, sb);
if (IS_ERR(inode))
return PTR_ERR(inode);
d_instantiate(dentry, inode);
return 0;
}
static int ecryptfs_do_unlink(struct inode *dir, struct dentry *dentry,
struct inode *inode)
{
struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir);
struct dentry *lower_dir_dentry;
int rc;
dget(lower_dentry);
lower_dir_dentry = lock_parent(lower_dentry);
rc = vfs_unlink(lower_dir_inode, lower_dentry);
if (rc) {
printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc);
goto out_unlock;
}
fsstack_copy_attr_times(dir, lower_dir_inode);
set_nlink(inode, ecryptfs_inode_to_lower(inode)->i_nlink);
inode->i_ctime = dir->i_ctime;
d_drop(dentry);
out_unlock:
unlock_dir(lower_dir_dentry);
dput(lower_dentry);
return rc;
}
/**
* ecryptfs_do_create
* @directory_inode: inode of the new file's dentry's parent in ecryptfs
* @ecryptfs_dentry: New file's dentry in ecryptfs
* @mode: The mode of the new file
* @nd: nameidata of ecryptfs' parent's dentry & vfsmount
*
* Creates the underlying file and the eCryptfs inode which will link to
* it. It will also update the eCryptfs directory inode to mimic the
* stat of the lower directory inode.
*
* Returns the new eCryptfs inode on success; an ERR_PTR on error condition
*/
static struct inode *
ecryptfs_do_create(struct inode *directory_inode,
struct dentry *ecryptfs_dentry, umode_t mode)
{
int rc;
struct dentry *lower_dentry;
struct dentry *lower_dir_dentry;
struct inode *inode;
lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
lower_dir_dentry = lock_parent(lower_dentry);
if (IS_ERR(lower_dir_dentry)) {
ecryptfs_printk(KERN_ERR, "Error locking directory of "
"dentry\n");
inode = ERR_CAST(lower_dir_dentry);
goto out;
}
rc = vfs_create(lower_dir_dentry->d_inode, lower_dentry, mode, NULL);
if (rc) {
printk(KERN_ERR "%s: Failure to create dentry in lower fs; "
"rc = [%d]\n", __func__, rc);
inode = ERR_PTR(rc);
goto out_lock;
}
inode = __ecryptfs_get_inode(lower_dentry->d_inode,
directory_inode->i_sb);
if (IS_ERR(inode)) {
vfs_unlink(lower_dir_dentry->d_inode, lower_dentry);
goto out_lock;
}
fsstack_copy_attr_times(directory_inode, lower_dir_dentry->d_inode);
fsstack_copy_inode_size(directory_inode, lower_dir_dentry->d_inode);
out_lock:
unlock_dir(lower_dir_dentry);
out:
return inode;
}
/**
* ecryptfs_initialize_file
*
* Cause the file to be changed from a basic empty file to an ecryptfs
* file with a header and first data page.
*
* Returns zero on success
*/
int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry,
struct inode *ecryptfs_inode)
{
struct ecryptfs_crypt_stat *crypt_stat =
&ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
int rc = 0;
if (S_ISDIR(ecryptfs_inode->i_mode)) {
ecryptfs_printk(KERN_DEBUG, "This is a directory\n");
crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
goto out;
}
ecryptfs_printk(KERN_DEBUG, "Initializing crypto context\n");
rc = ecryptfs_new_file_context(ecryptfs_inode);
if (rc) {
ecryptfs_printk(KERN_ERR, "Error creating new file "
"context; rc = [%d]\n", rc);
goto out;
}
rc = ecryptfs_get_lower_file(ecryptfs_dentry, ecryptfs_inode);
if (rc) {
printk(KERN_ERR "%s: Error attempting to initialize "
"the lower file for the dentry with name "
"[%s]; rc = [%d]\n", __func__,
ecryptfs_dentry->d_name.name, rc);
goto out;
}
rc = ecryptfs_write_metadata(ecryptfs_dentry, ecryptfs_inode);
if (rc)
printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc);
ecryptfs_put_lower_file(ecryptfs_inode);
out:
return rc;
}
/**
* ecryptfs_create
* @dir: The inode of the directory in which to create the file.
* @dentry: The eCryptfs dentry
* @mode: The mode of the new file.
* @nd: nameidata
*
* Creates a new file.
*
* Returns zero on success; non-zero on error condition
*/
static int
ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry,
umode_t mode, struct nameidata *nd)
{
struct inode *ecryptfs_inode;
int rc;
ecryptfs_inode = ecryptfs_do_create(directory_inode, ecryptfs_dentry,
mode);
if (unlikely(IS_ERR(ecryptfs_inode))) {
ecryptfs_printk(KERN_WARNING, "Failed to create file in "
"lower filesystem\n");
rc = PTR_ERR(ecryptfs_inode);
goto out;
}
/* At this point, a file exists on "disk"; we need to make sure
* that this on-disk file is prepared to be an eCryptfs file */
rc = ecryptfs_initialize_file(ecryptfs_dentry, ecryptfs_inode);
if (rc) {
ecryptfs_do_unlink(directory_inode, ecryptfs_dentry,
ecryptfs_inode);
make_bad_inode(ecryptfs_inode);
unlock_new_inode(ecryptfs_inode);
iput(ecryptfs_inode);
goto out;
}
d_instantiate(ecryptfs_dentry, ecryptfs_inode);
unlock_new_inode(ecryptfs_inode);
out:
return rc;
}
static int ecryptfs_i_size_read(struct dentry *dentry, struct inode *inode)
{
struct ecryptfs_crypt_stat *crypt_stat;
int rc;
rc = ecryptfs_get_lower_file(dentry, inode);
if (rc) {
printk(KERN_ERR "%s: Error attempting to initialize "
"the lower file for the dentry with name "
"[%s]; rc = [%d]\n", __func__,
dentry->d_name.name, rc);
return rc;
}
crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
/* TODO: lock for crypt_stat comparison */
if (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED))
ecryptfs_set_default_sizes(crypt_stat);
rc = ecryptfs_read_and_validate_header_region(inode);
ecryptfs_put_lower_file(inode);
if (rc) {
rc = ecryptfs_read_and_validate_xattr_region(dentry, inode);
if (!rc)
crypt_stat->flags |= ECRYPTFS_METADATA_IN_XATTR;
}
/* Must return 0 to allow non-eCryptfs files to be looked up, too */
return 0;
}
/**
* ecryptfs_lookup_interpose - Dentry interposition for a lookup
*/
static int ecryptfs_lookup_interpose(struct dentry *dentry,
struct dentry *lower_dentry,
struct inode *dir_inode)
{
struct inode *inode, *lower_inode = lower_dentry->d_inode;
struct ecryptfs_dentry_info *dentry_info;
struct vfsmount *lower_mnt;
int rc = 0;
lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(dentry->d_parent));
fsstack_copy_attr_atime(dir_inode, lower_dentry->d_parent->d_inode);
BUG_ON(!lower_dentry->d_count);
dentry_info = kmem_cache_alloc(ecryptfs_dentry_info_cache, GFP_KERNEL);
ecryptfs_set_dentry_private(dentry, dentry_info);
if (!dentry_info) {
printk(KERN_ERR "%s: Out of memory whilst attempting "
"to allocate ecryptfs_dentry_info struct\n",
__func__);
dput(lower_dentry);
mntput(lower_mnt);
d_drop(dentry);
return -ENOMEM;
}
ecryptfs_set_dentry_lower(dentry, lower_dentry);
ecryptfs_set_dentry_lower_mnt(dentry, lower_mnt);
if (!lower_dentry->d_inode) {
/* Add a negative dentry because the name was not found in the lower fs */
d_add(dentry, NULL);
return 0;
}
inode = __ecryptfs_get_inode(lower_inode, dir_inode->i_sb);
if (IS_ERR(inode)) {
printk(KERN_ERR "%s: Error interposing; rc = [%ld]\n",
__func__, PTR_ERR(inode));
return PTR_ERR(inode);
}
if (S_ISREG(inode->i_mode)) {
rc = ecryptfs_i_size_read(dentry, inode);
if (rc) {
make_bad_inode(inode);
return rc;
}
}
if (inode->i_state & I_NEW)
unlock_new_inode(inode);
d_add(dentry, inode);
return rc;
}
/**
* ecryptfs_lookup
* @ecryptfs_dir_inode: The eCryptfs directory inode
* @ecryptfs_dentry: The eCryptfs dentry that we are looking up
* @ecryptfs_nd: nameidata; may be NULL
*
* Find a file on disk. If the file does not exist, then we'll add it to the
* dentry cache and continue on to read it from the disk.
*/
static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
struct dentry *ecryptfs_dentry,
struct nameidata *ecryptfs_nd)
{
char *encrypted_and_encoded_name = NULL;
size_t encrypted_and_encoded_name_size;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL;
struct dentry *lower_dir_dentry, *lower_dentry;
int rc = 0;
if ((ecryptfs_dentry->d_name.len == 1
&& !strcmp(ecryptfs_dentry->d_name.name, "."))
|| (ecryptfs_dentry->d_name.len == 2
&& !strcmp(ecryptfs_dentry->d_name.name, ".."))) {
goto out_d_drop;
}
lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent);
mutex_lock(&lower_dir_dentry->d_inode->i_mutex);
lower_dentry = lookup_one_len(ecryptfs_dentry->d_name.name,
lower_dir_dentry,
ecryptfs_dentry->d_name.len);
mutex_unlock(&lower_dir_dentry->d_inode->i_mutex);
if (IS_ERR(lower_dentry)) {
rc = PTR_ERR(lower_dentry);
ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
"[%d] on lower_dentry = [%s]\n", __func__, rc,
encrypted_and_encoded_name);
goto out_d_drop;
}
if (lower_dentry->d_inode)
goto interpose;
mount_crypt_stat = &ecryptfs_superblock_to_private(
ecryptfs_dentry->d_sb)->mount_crypt_stat;
if (!(mount_crypt_stat
&& (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)))
goto interpose;
dput(lower_dentry);
rc = ecryptfs_encrypt_and_encode_filename(
&encrypted_and_encoded_name, &encrypted_and_encoded_name_size,
NULL, mount_crypt_stat, ecryptfs_dentry->d_name.name,
ecryptfs_dentry->d_name.len);
if (rc) {
printk(KERN_ERR "%s: Error attempting to encrypt and encode "
"filename; rc = [%d]\n", __func__, rc);
goto out_d_drop;
}
mutex_lock(&lower_dir_dentry->d_inode->i_mutex);
lower_dentry = lookup_one_len(encrypted_and_encoded_name,
lower_dir_dentry,
encrypted_and_encoded_name_size);
mutex_unlock(&lower_dir_dentry->d_inode->i_mutex);
if (IS_ERR(lower_dentry)) {
rc = PTR_ERR(lower_dentry);
ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
"[%d] on lower_dentry = [%s]\n", __func__, rc,
encrypted_and_encoded_name);
goto out_d_drop;
}
interpose:
rc = ecryptfs_lookup_interpose(ecryptfs_dentry, lower_dentry,
ecryptfs_dir_inode);
goto out;
out_d_drop:
d_drop(ecryptfs_dentry);
out:
kfree(encrypted_and_encoded_name);
return ERR_PTR(rc);
}
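/*
 * Illustrative note: ecryptfs_lookup() is two-phase when filename
 * encryption is enabled -- the plaintext name is tried first (it matches
 * files created before filename encryption was turned on), and only on a
 * negative result is the encrypted-and-encoded name looked up.  The
 * helpers below are hypothetical shorthand for the calls above:
 */
#if 0
	lower = lookup_one_len(name, lower_dir, len);
	if (!lower->d_inode && filename_crypto_enabled) {
		dput(lower);
		encrypt_and_encode(&enc_name, &enc_len, name, len);
		lower = lookup_one_len(enc_name, lower_dir, enc_len);
	}
#endif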
static int ecryptfs_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry)
{
struct dentry *lower_old_dentry;
struct dentry *lower_new_dentry;
struct dentry *lower_dir_dentry;
u64 file_size_save;
int rc;
file_size_save = i_size_read(old_dentry->d_inode);
lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
dget(lower_old_dentry);
dget(lower_new_dentry);
lower_dir_dentry = lock_parent(lower_new_dentry);
rc = vfs_link(lower_old_dentry, lower_dir_dentry->d_inode,
lower_new_dentry);
if (rc || !lower_new_dentry->d_inode)
goto out_lock;
rc = ecryptfs_interpose(lower_new_dentry, new_dentry, dir->i_sb);
if (rc)
goto out_lock;
fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
fsstack_copy_inode_size(dir, lower_dir_dentry->d_inode);
set_nlink(old_dentry->d_inode,
ecryptfs_inode_to_lower(old_dentry->d_inode)->i_nlink);
i_size_write(new_dentry->d_inode, file_size_save);
out_lock:
unlock_dir(lower_dir_dentry);
dput(lower_new_dentry);
dput(lower_old_dentry);
return rc;
}
static int ecryptfs_unlink(struct inode *dir, struct dentry *dentry)
{
return ecryptfs_do_unlink(dir, dentry, dentry->d_inode);
}
static int ecryptfs_symlink(struct inode *dir, struct dentry *dentry,
const char *symname)
{
int rc;
struct dentry *lower_dentry;
struct dentry *lower_dir_dentry;
char *encoded_symname;
size_t encoded_symlen;
struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL;
lower_dentry = ecryptfs_dentry_to_lower(dentry);
dget(lower_dentry);
lower_dir_dentry = lock_parent(lower_dentry);
mount_crypt_stat = &ecryptfs_superblock_to_private(
dir->i_sb)->mount_crypt_stat;
rc = ecryptfs_encrypt_and_encode_filename(&encoded_symname,
&encoded_symlen,
NULL,
mount_crypt_stat, symname,
strlen(symname));
if (rc)
goto out_lock;
rc = vfs_symlink(lower_dir_dentry->d_inode, lower_dentry,
encoded_symname);
kfree(encoded_symname);
if (rc || !lower_dentry->d_inode)
goto out_lock;
rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb);
if (rc)
goto out_lock;
fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
fsstack_copy_inode_size(dir, lower_dir_dentry->d_inode);
out_lock:
unlock_dir(lower_dir_dentry);
dput(lower_dentry);
if (!dentry->d_inode)
d_drop(dentry);
return rc;
}
static int ecryptfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int rc;
struct dentry *lower_dentry;
struct dentry *lower_dir_dentry;
lower_dentry = ecryptfs_dentry_to_lower(dentry);
lower_dir_dentry = lock_parent(lower_dentry);
rc = vfs_mkdir(lower_dir_dentry->d_inode, lower_dentry, mode);
if (rc || !lower_dentry->d_inode)
goto out;
rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb);
if (rc)
goto out;
fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
fsstack_copy_inode_size(dir, lower_dir_dentry->d_inode);
set_nlink(dir, lower_dir_dentry->d_inode->i_nlink);
out:
unlock_dir(lower_dir_dentry);
if (!dentry->d_inode)
d_drop(dentry);
return rc;
}
static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
{
struct dentry *lower_dentry;
struct dentry *lower_dir_dentry;
int rc;
lower_dentry = ecryptfs_dentry_to_lower(dentry);
dget(dentry);
lower_dir_dentry = lock_parent(lower_dentry);
dget(lower_dentry);
rc = vfs_rmdir(lower_dir_dentry->d_inode, lower_dentry);
dput(lower_dentry);
if (!rc && dentry->d_inode)
clear_nlink(dentry->d_inode);
fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
set_nlink(dir, lower_dir_dentry->d_inode->i_nlink);
unlock_dir(lower_dir_dentry);
if (!rc)
d_drop(dentry);
dput(dentry);
return rc;
}
static int
ecryptfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
int rc;
struct dentry *lower_dentry;
struct dentry *lower_dir_dentry;
lower_dentry = ecryptfs_dentry_to_lower(dentry);
lower_dir_dentry = lock_parent(lower_dentry);
rc = vfs_mknod(lower_dir_dentry->d_inode, lower_dentry, mode, dev);
if (rc || !lower_dentry->d_inode)
goto out;
rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb);
if (rc)
goto out;
fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
fsstack_copy_inode_size(dir, lower_dir_dentry->d_inode);
out:
unlock_dir(lower_dir_dentry);
if (!dentry->d_inode)
d_drop(dentry);
return rc;
}
static int
ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
int rc;
struct dentry *lower_old_dentry;
struct dentry *lower_new_dentry;
struct dentry *lower_old_dir_dentry;
struct dentry *lower_new_dir_dentry;
struct dentry *trap = NULL;
struct inode *target_inode;
lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
dget(lower_old_dentry);
dget(lower_new_dentry);
lower_old_dir_dentry = dget_parent(lower_old_dentry);
lower_new_dir_dentry = dget_parent(lower_new_dentry);
target_inode = new_dentry->d_inode;
trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
/* source should not be ancestor of target */
if (trap == lower_old_dentry) {
rc = -EINVAL;
goto out_lock;
}
/* target should not be ancestor of source */
if (trap == lower_new_dentry) {
rc = -ENOTEMPTY;
goto out_lock;
}
rc = vfs_rename(lower_old_dir_dentry->d_inode, lower_old_dentry,
lower_new_dir_dentry->d_inode, lower_new_dentry);
if (rc)
goto out_lock;
if (target_inode)
fsstack_copy_attr_all(target_inode,
ecryptfs_inode_to_lower(target_inode));
fsstack_copy_attr_all(new_dir, lower_new_dir_dentry->d_inode);
if (new_dir != old_dir)
fsstack_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode);
out_lock:
unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
dput(lower_new_dir_dentry);
dput(lower_old_dir_dentry);
dput(lower_new_dentry);
dput(lower_old_dentry);
return rc;
}
static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
size_t *bufsiz)
{
struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
char *lower_buf;
size_t lower_bufsiz = PATH_MAX;
mm_segment_t old_fs;
int rc;
lower_buf = kmalloc(lower_bufsiz, GFP_KERNEL);
if (!lower_buf) {
rc = -ENOMEM;
goto out;
}
old_fs = get_fs();
set_fs(get_ds());
rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
(char __user *)lower_buf,
lower_bufsiz);
set_fs(old_fs);
if (rc < 0)
goto out;
lower_bufsiz = rc;
rc = ecryptfs_decode_and_decrypt_filename(buf, bufsiz, dentry,
lower_buf, lower_bufsiz);
out:
kfree(lower_buf);
return rc;
}
static int
ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
{
char *kbuf;
size_t kbufsiz, copied;
int rc;
rc = ecryptfs_readlink_lower(dentry, &kbuf, &kbufsiz);
if (rc)
goto out;
copied = min_t(size_t, bufsiz, kbufsiz);
rc = copy_to_user(buf, kbuf, copied) ? -EFAULT : copied;
kfree(kbuf);
fsstack_copy_attr_atime(dentry->d_inode,
ecryptfs_dentry_to_lower(dentry)->d_inode);
out:
return rc;
}
static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
char *buf;
int len = PAGE_SIZE, rc;
mm_segment_t old_fs;
/* Released in ecryptfs_put_link(); only release here on error */
buf = kmalloc(len, GFP_KERNEL);
if (!buf) {
buf = ERR_PTR(-ENOMEM);
goto out;
}
old_fs = get_fs();
set_fs(get_ds());
rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
set_fs(old_fs);
if (rc < 0) {
kfree(buf);
buf = ERR_PTR(rc);
} else
buf[rc] = '\0';
out:
nd_set_link(nd, buf);
return NULL;
}
static void
ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
{
char *buf = nd_get_link(nd);
if (!IS_ERR(buf)) {
/* Free the char* */
kfree(buf);
}
}
/**
* upper_size_to_lower_size
* @crypt_stat: Crypt_stat associated with file
* @upper_size: Size of the upper file
*
* Calculate the required size of the lower file based on the
* specified size of the upper file. This calculation is based on the
* number of headers in the underlying file and the extent size.
*
* Returns Calculated size of the lower file.
*/
static loff_t
upper_size_to_lower_size(struct ecryptfs_crypt_stat *crypt_stat,
loff_t upper_size)
{
loff_t lower_size;
lower_size = ecryptfs_lower_header_size(crypt_stat);
if (upper_size != 0) {
loff_t num_extents;
num_extents = upper_size >> crypt_stat->extent_shift;
if (upper_size & ~crypt_stat->extent_mask)
num_extents++;
lower_size += (num_extents * crypt_stat->extent_size);
}
return lower_size;
}
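/*
 * Worked example (illustrative numbers): with 8 KiB of headers and 4 KiB
 * extents, an upper size of 10000 bytes needs ceil(10000 / 4096) = 3
 * extents, so the lower file is 8192 + 3 * 4096 = 20480 bytes.  The
 * shift-and-mask rounding above is equivalent to:
 */
#if 0
static loff_t example_lower_size(loff_t headers, loff_t extent_size,
				 loff_t upper_size)
{
	loff_t extents = (upper_size + extent_size - 1) / extent_size;

	return headers + extents * extent_size;
}
#endif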
/**
* truncate_upper
* @dentry: The ecryptfs layer dentry
* @ia: Address of the ecryptfs inode's attributes
* @lower_ia: Address of the lower inode's attributes
*
* Function to handle truncations modifying the size of the file. Note
* that the file sizes are interpolated. When expanding, we are simply
* writing strings of 0's out. When truncating, we truncate the upper
* inode and update the lower_ia according to the page index
* interpolations. If ATTR_SIZE is set in lower_ia->ia_valid upon return,
* the caller must use lower_ia in a call to notify_change() to perform
* the truncation of the lower inode.
*
* Returns zero on success; non-zero otherwise
*/
static int truncate_upper(struct dentry *dentry, struct iattr *ia,
struct iattr *lower_ia)
{
int rc = 0;
struct inode *inode = dentry->d_inode;
struct ecryptfs_crypt_stat *crypt_stat;
loff_t i_size = i_size_read(inode);
loff_t lower_size_before_truncate;
loff_t lower_size_after_truncate;
if (unlikely((ia->ia_size == i_size))) {
lower_ia->ia_valid &= ~ATTR_SIZE;
return 0;
}
rc = ecryptfs_get_lower_file(dentry, inode);
if (rc)
return rc;
crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat;
/* Switch on growing or shrinking file */
if (ia->ia_size > i_size) {
char zero[] = { 0x00 };
lower_ia->ia_valid &= ~ATTR_SIZE;
/* Write a single 0 at the last position of the file;
* this triggers code that will fill in 0's throughout
* the intermediate portion between the previous end of
* the file and the new end of the file */
rc = ecryptfs_write(inode, zero,
(ia->ia_size - 1), 1);
} else { /* ia->ia_size < i_size_read(inode) */
/* We're chopping off all the pages down to the page
* in which ia->ia_size is located. Fill in the end of
* that page from (ia->ia_size & ~PAGE_CACHE_MASK) to
* PAGE_CACHE_SIZE with zeros. */
size_t num_zeros = (PAGE_CACHE_SIZE
- (ia->ia_size & ~PAGE_CACHE_MASK));
if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
truncate_setsize(inode, ia->ia_size);
lower_ia->ia_size = ia->ia_size;
lower_ia->ia_valid |= ATTR_SIZE;
goto out;
}
if (num_zeros) {
char *zeros_virt;
zeros_virt = kzalloc(num_zeros, GFP_KERNEL);
if (!zeros_virt) {
rc = -ENOMEM;
goto out;
}
rc = ecryptfs_write(inode, zeros_virt,
ia->ia_size, num_zeros);
kfree(zeros_virt);
if (rc) {
printk(KERN_ERR "Error attempting to zero out "
"the remainder of the end page on "
"reducing truncate; rc = [%d]\n", rc);
goto out;
}
}
truncate_setsize(inode, ia->ia_size);
rc = ecryptfs_write_inode_size_to_metadata(inode);
if (rc) {
printk(KERN_ERR "Problem with "
"ecryptfs_write_inode_size_to_metadata; "
"rc = [%d]\n", rc);
goto out;
}
/* We are reducing the size of the ecryptfs file, and need to
* know if we need to reduce the size of the lower file. */
lower_size_before_truncate =
upper_size_to_lower_size(crypt_stat, i_size);
lower_size_after_truncate =
upper_size_to_lower_size(crypt_stat, ia->ia_size);
if (lower_size_after_truncate < lower_size_before_truncate) {
lower_ia->ia_size = lower_size_after_truncate;
lower_ia->ia_valid |= ATTR_SIZE;
} else
lower_ia->ia_valid &= ~ATTR_SIZE;
}
out:
ecryptfs_put_lower_file(inode);
return rc;
}
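/*
 * Illustrative note: when an encrypted file shrinks into the middle of a
 * page, the bytes from the new EOF to the end of that page are zeroed so
 * stale ciphertext is never exposed.  With 4096-byte pages, truncating to
 * 5000 bytes leaves 5000 % 4096 = 904 bytes in the final page and
 * 4096 - 904 = 3192 bytes to overwrite.  The sketch below mirrors the
 * num_zeros arithmetic above for non-page-aligned sizes:
 */
#if 0
static size_t example_tail_zeros(loff_t new_size, size_t page_size)
{
	size_t used = new_size & (page_size - 1);

	return used ? page_size - used : 0;
}
#endif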
static int ecryptfs_inode_newsize_ok(struct inode *inode, loff_t offset)
{
struct ecryptfs_crypt_stat *crypt_stat;
loff_t lower_oldsize, lower_newsize;
crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
lower_oldsize = upper_size_to_lower_size(crypt_stat,
i_size_read(inode));
lower_newsize = upper_size_to_lower_size(crypt_stat, offset);
if (lower_newsize > lower_oldsize) {
/*
* The eCryptfs inode and the new *lower* size are mixed here
* because we may not have the lower i_mutex held and/or it may
* not be appropriate to call inode_newsize_ok() with inodes
* from other filesystems.
*/
return inode_newsize_ok(inode, lower_newsize);
}
return 0;
}
/**
* ecryptfs_truncate
* @dentry: The ecryptfs layer dentry
* @new_length: The length to expand the file to
*
* Simple function that handles the truncation of an eCryptfs inode and
* its corresponding lower inode.
*
* Returns zero on success; non-zero otherwise
*/
int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
{
struct iattr ia = { .ia_valid = ATTR_SIZE, .ia_size = new_length };
struct iattr lower_ia = { .ia_valid = 0 };
int rc;
rc = ecryptfs_inode_newsize_ok(dentry->d_inode, new_length);
if (rc)
return rc;
rc = truncate_upper(dentry, &ia, &lower_ia);
if (!rc && lower_ia.ia_valid & ATTR_SIZE) {
struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
mutex_lock(&lower_dentry->d_inode->i_mutex);
rc = notify_change(lower_dentry, &lower_ia);
mutex_unlock(&lower_dentry->d_inode->i_mutex);
}
return rc;
}
static int
ecryptfs_permission(struct inode *inode, int mask)
{
return inode_permission(ecryptfs_inode_to_lower(inode), mask);
}
/**
* ecryptfs_setattr
* @dentry: dentry handle to the inode to modify
* @ia: Structure with flags of what to change and values
*
* Updates the metadata of an inode. If the update is to the size
* i.e. truncation, then ecryptfs_truncate will handle the size modification
* of both the ecryptfs inode and the lower inode.
*
* All other metadata changes will be passed right to the lower filesystem,
* and we will just update our inode to look like the lower.
*/
static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
{
int rc = 0;
struct dentry *lower_dentry;
struct iattr lower_ia;
struct inode *inode;
struct inode *lower_inode;
struct ecryptfs_crypt_stat *crypt_stat;
crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat;
if (!(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED))
ecryptfs_init_crypt_stat(crypt_stat);
inode = dentry->d_inode;
lower_inode = ecryptfs_inode_to_lower(inode);
lower_dentry = ecryptfs_dentry_to_lower(dentry);
mutex_lock(&crypt_stat->cs_mutex);
if (S_ISDIR(dentry->d_inode->i_mode))
crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
else if (S_ISREG(dentry->d_inode->i_mode)
&& (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED)
|| !(crypt_stat->flags & ECRYPTFS_KEY_VALID))) {
struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
mount_crypt_stat = &ecryptfs_superblock_to_private(
dentry->d_sb)->mount_crypt_stat;
rc = ecryptfs_get_lower_file(dentry, inode);
if (rc) {
mutex_unlock(&crypt_stat->cs_mutex);
goto out;
}
rc = ecryptfs_read_metadata(dentry);
ecryptfs_put_lower_file(inode);
if (rc) {
if (!(mount_crypt_stat->flags
& ECRYPTFS_PLAINTEXT_PASSTHROUGH_ENABLED)) {
rc = -EIO;
printk(KERN_WARNING "Either the lower file "
"is not in a valid eCryptfs format, "
"or the key could not be retrieved. "
"Plaintext passthrough mode is not "
"enabled; returning -EIO\n");
mutex_unlock(&crypt_stat->cs_mutex);
goto out;
}
rc = 0;
crypt_stat->flags &= ~(ECRYPTFS_I_SIZE_INITIALIZED
| ECRYPTFS_ENCRYPTED);
}
}
mutex_unlock(&crypt_stat->cs_mutex);
rc = inode_change_ok(inode, ia);
if (rc)
goto out;
if (ia->ia_valid & ATTR_SIZE) {
rc = ecryptfs_inode_newsize_ok(inode, ia->ia_size);
if (rc)
goto out;
}
memcpy(&lower_ia, ia, sizeof(lower_ia));
if (ia->ia_valid & ATTR_FILE)
lower_ia.ia_file = ecryptfs_file_to_lower(ia->ia_file);
if (ia->ia_valid & ATTR_SIZE) {
rc = truncate_upper(dentry, ia, &lower_ia);
if (rc < 0)
goto out;
}
/*
* mode change is for clearing setuid/setgid bits. Allow lower fs
* to interpret this in its own way.
*/
if (lower_ia.ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
lower_ia.ia_valid &= ~ATTR_MODE;
mutex_lock(&lower_dentry->d_inode->i_mutex);
rc = notify_change(lower_dentry, &lower_ia);
mutex_unlock(&lower_dentry->d_inode->i_mutex);
out:
fsstack_copy_attr_all(inode, lower_inode);
return rc;
}
int ecryptfs_getattr_link(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
int rc = 0;
mount_crypt_stat = &ecryptfs_superblock_to_private(
dentry->d_sb)->mount_crypt_stat;
generic_fillattr(dentry->d_inode, stat);
if (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES) {
char *target;
size_t targetsiz;
rc = ecryptfs_readlink_lower(dentry, &target, &targetsiz);
if (!rc) {
kfree(target);
stat->size = targetsiz;
}
}
return rc;
}
int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
struct kstat lower_stat;
int rc;
rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry),
ecryptfs_dentry_to_lower(dentry), &lower_stat);
if (!rc) {
fsstack_copy_attr_all(dentry->d_inode,
ecryptfs_inode_to_lower(dentry->d_inode));
generic_fillattr(dentry->d_inode, stat);
stat->blocks = lower_stat.blocks;
}
return rc;
}
int
ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
size_t size, int flags)
{
int rc = 0;
struct dentry *lower_dentry;
lower_dentry = ecryptfs_dentry_to_lower(dentry);
if (!lower_dentry->d_inode->i_op->setxattr) {
rc = -EOPNOTSUPP;
goto out;
}
rc = vfs_setxattr(lower_dentry, name, value, size, flags);
if (!rc)
fsstack_copy_attr_all(dentry->d_inode, lower_dentry->d_inode);
out:
return rc;
}
ssize_t
ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name,
void *value, size_t size)
{
int rc = 0;
if (!lower_dentry->d_inode->i_op->getxattr) {
rc = -EOPNOTSUPP;
goto out;
}
mutex_lock(&lower_dentry->d_inode->i_mutex);
rc = lower_dentry->d_inode->i_op->getxattr(lower_dentry, name, value,
size);
mutex_unlock(&lower_dentry->d_inode->i_mutex);
out:
return rc;
}
static ssize_t
ecryptfs_getxattr(struct dentry *dentry, const char *name, void *value,
size_t size)
{
return ecryptfs_getxattr_lower(ecryptfs_dentry_to_lower(dentry), name,
value, size);
}
static ssize_t
ecryptfs_listxattr(struct dentry *dentry, char *list, size_t size)
{
int rc = 0;
struct dentry *lower_dentry;
lower_dentry = ecryptfs_dentry_to_lower(dentry);
if (!lower_dentry->d_inode->i_op->listxattr) {
rc = -EOPNOTSUPP;
goto out;
}
mutex_lock(&lower_dentry->d_inode->i_mutex);
rc = lower_dentry->d_inode->i_op->listxattr(lower_dentry, list, size);
mutex_unlock(&lower_dentry->d_inode->i_mutex);
out:
return rc;
}
static int ecryptfs_removexattr(struct dentry *dentry, const char *name)
{
int rc = 0;
struct dentry *lower_dentry;
lower_dentry = ecryptfs_dentry_to_lower(dentry);
if (!lower_dentry->d_inode->i_op->removexattr) {
rc = -EOPNOTSUPP;
goto out;
}
mutex_lock(&lower_dentry->d_inode->i_mutex);
rc = lower_dentry->d_inode->i_op->removexattr(lower_dentry, name);
mutex_unlock(&lower_dentry->d_inode->i_mutex);
out:
return rc;
}
const struct inode_operations ecryptfs_symlink_iops = {
.readlink = ecryptfs_readlink,
.follow_link = ecryptfs_follow_link,
.put_link = ecryptfs_put_link,
.permission = ecryptfs_permission,
.setattr = ecryptfs_setattr,
.getattr = ecryptfs_getattr_link,
.setxattr = ecryptfs_setxattr,
.getxattr = ecryptfs_getxattr,
.listxattr = ecryptfs_listxattr,
.removexattr = ecryptfs_removexattr
};
const struct inode_operations ecryptfs_dir_iops = {
.create = ecryptfs_create,
.lookup = ecryptfs_lookup,
.link = ecryptfs_link,
.unlink = ecryptfs_unlink,
.symlink = ecryptfs_symlink,
.mkdir = ecryptfs_mkdir,
.rmdir = ecryptfs_rmdir,
.mknod = ecryptfs_mknod,
.rename = ecryptfs_rename,
.permission = ecryptfs_permission,
.setattr = ecryptfs_setattr,
.setxattr = ecryptfs_setxattr,
.getxattr = ecryptfs_getxattr,
.listxattr = ecryptfs_listxattr,
.removexattr = ecryptfs_removexattr
};
const struct inode_operations ecryptfs_main_iops = {
.permission = ecryptfs_permission,
.setattr = ecryptfs_setattr,
.getattr = ecryptfs_getattr,
.setxattr = ecryptfs_setxattr,
.getxattr = ecryptfs_getxattr,
.listxattr = ecryptfs_listxattr,
.removexattr = ecryptfs_removexattr
};
|
gpl-2.0
|
linuxvom/linux
|
drivers/usb/gadget/function/uvc_v4l2.c
|
1019
|
9480
|
/*
* uvc_v4l2.c -- USB Video Class Gadget driver
*
* Copyright (C) 2009-2010
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include "f_uvc.h"
#include "uvc.h"
#include "uvc_queue.h"
#include "uvc_video.h"
#include "uvc_v4l2.h"
/* --------------------------------------------------------------------------
* Requests handling
*/
static int
uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data)
{
struct usb_composite_dev *cdev = uvc->func.config->cdev;
struct usb_request *req = uvc->control_req;
if (data->length < 0)
return usb_ep_set_halt(cdev->gadget->ep0);
req->length = min_t(unsigned int, uvc->event_length, data->length);
req->zero = data->length < uvc->event_length;
memcpy(req->buf, data->data, req->length);
return usb_ep_queue(cdev->gadget->ep0, req, GFP_KERNEL);
}
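/*
 * Illustrative sketch: the reply is clamped to the length the host asked
 * for (uvc->event_length, the control request's wLength), and req->zero
 * makes the UDC terminate a shorter-than-requested reply with a
 * zero-length packet.  This helper is hypothetical:
 */
#if 0
static void example_prep_reply(struct usb_request *req,
			       unsigned int wlength, unsigned int len)
{
	req->length = min(wlength, len);
	req->zero = len < wlength;	/* short reply: append a ZLP */
}
#endif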
/* --------------------------------------------------------------------------
* V4L2 ioctls
*/
struct uvc_format {
u8 bpp;
u32 fcc;
};
static struct uvc_format uvc_formats[] = {
{ 16, V4L2_PIX_FMT_YUYV },
{ 0, V4L2_PIX_FMT_MJPEG },
};
static int
uvc_v4l2_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct usb_composite_dev *cdev = uvc->func.config->cdev;
strlcpy(cap->driver, "g_uvc", sizeof(cap->driver));
strlcpy(cap->card, cdev->gadget->name, sizeof(cap->card));
strlcpy(cap->bus_info, dev_name(&cdev->gadget->dev),
sizeof(cap->bus_info));
cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
static int
uvc_v4l2_get_format(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_video *video = &uvc->video;
fmt->fmt.pix.pixelformat = video->fcc;
fmt->fmt.pix.width = video->width;
fmt->fmt.pix.height = video->height;
fmt->fmt.pix.field = V4L2_FIELD_NONE;
fmt->fmt.pix.bytesperline = video->bpp * video->width / 8;
fmt->fmt.pix.sizeimage = video->imagesize;
fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
fmt->fmt.pix.priv = 0;
return 0;
}
static int
uvc_v4l2_set_format(struct file *file, void *fh, struct v4l2_format *fmt)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_video *video = &uvc->video;
struct uvc_format *format;
unsigned int imagesize;
unsigned int bpl;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(uvc_formats); ++i) {
format = &uvc_formats[i];
if (format->fcc == fmt->fmt.pix.pixelformat)
break;
}
if (i == ARRAY_SIZE(uvc_formats)) {
printk(KERN_INFO "Unsupported format 0x%08x.\n",
fmt->fmt.pix.pixelformat);
return -EINVAL;
}
bpl = format->bpp * fmt->fmt.pix.width / 8;
imagesize = bpl ? bpl * fmt->fmt.pix.height : fmt->fmt.pix.sizeimage;
video->fcc = format->fcc;
video->bpp = format->bpp;
video->width = fmt->fmt.pix.width;
video->height = fmt->fmt.pix.height;
video->imagesize = imagesize;
fmt->fmt.pix.field = V4L2_FIELD_NONE;
fmt->fmt.pix.bytesperline = bpl;
fmt->fmt.pix.sizeimage = imagesize;
fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
fmt->fmt.pix.priv = 0;
return 0;
}
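/*
 * Illustrative sketch: for a packed format such as YUYV (16 bpp) the
 * image size follows from the geometry; for MJPEG the table entry has
 * bpp = 0, bytesperline collapses to 0, and the compressed size supplied
 * by userspace in sizeimage is kept instead.  Helper is hypothetical:
 */
#if 0
static unsigned int example_imagesize(unsigned int bpp, unsigned int width,
				      unsigned int height,
				      unsigned int user_sizeimage)
{
	unsigned int bpl = bpp * width / 8;

	return bpl ? bpl * height : user_sizeimage;
}
#endif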
static int
uvc_v4l2_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *b)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_video *video = &uvc->video;
if (b->type != video->queue.queue.type)
return -EINVAL;
return uvcg_alloc_buffers(&video->queue, b);
}
static int
uvc_v4l2_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_video *video = &uvc->video;
return uvcg_query_buffer(&video->queue, b);
}
static int
uvc_v4l2_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_video *video = &uvc->video;
int ret;
ret = uvcg_queue_buffer(&video->queue, b);
if (ret < 0)
return ret;
return uvcg_video_pump(video);
}
static int
uvc_v4l2_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_video *video = &uvc->video;
return uvcg_dequeue_buffer(&video->queue, b, file->f_flags & O_NONBLOCK);
}
static int
uvc_v4l2_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_video *video = &uvc->video;
int ret;
if (type != video->queue.queue.type)
return -EINVAL;
/* Enable UVC video. */
ret = uvcg_video_enable(video, 1);
if (ret < 0)
return ret;
/*
* Complete the alternate setting selection setup phase now that
* userspace is ready to provide video frames.
*/
uvc_function_setup_continue(uvc);
uvc->state = UVC_STATE_STREAMING;
return 0;
}
static int
uvc_v4l2_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_video *video = &uvc->video;
if (type != video->queue.queue.type)
return -EINVAL;
return uvcg_video_enable(video, 0);
}
static int
uvc_v4l2_subscribe_event(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
if (sub->type < UVC_EVENT_FIRST || sub->type > UVC_EVENT_LAST)
return -EINVAL;
return v4l2_event_subscribe(fh, sub, 2, NULL);
}
static int
uvc_v4l2_unsubscribe_event(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
return v4l2_event_unsubscribe(fh, sub);
}
static long
uvc_v4l2_ioctl_default(struct file *file, void *fh, bool valid_prio,
unsigned int cmd, void *arg)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
switch (cmd) {
case UVCIOC_SEND_RESPONSE:
return uvc_send_response(uvc, arg);
default:
return -ENOIOCTLCMD;
}
}
const struct v4l2_ioctl_ops uvc_v4l2_ioctl_ops = {
.vidioc_querycap = uvc_v4l2_querycap,
.vidioc_g_fmt_vid_out = uvc_v4l2_get_format,
.vidioc_s_fmt_vid_out = uvc_v4l2_set_format,
.vidioc_reqbufs = uvc_v4l2_reqbufs,
.vidioc_querybuf = uvc_v4l2_querybuf,
.vidioc_qbuf = uvc_v4l2_qbuf,
.vidioc_dqbuf = uvc_v4l2_dqbuf,
.vidioc_streamon = uvc_v4l2_streamon,
.vidioc_streamoff = uvc_v4l2_streamoff,
.vidioc_subscribe_event = uvc_v4l2_subscribe_event,
.vidioc_unsubscribe_event = uvc_v4l2_unsubscribe_event,
.vidioc_default = uvc_v4l2_ioctl_default,
};
/* --------------------------------------------------------------------------
* V4L2
*/
static int
uvc_v4l2_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_file_handle *handle;
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (handle == NULL)
return -ENOMEM;
v4l2_fh_init(&handle->vfh, vdev);
v4l2_fh_add(&handle->vfh);
handle->device = &uvc->video;
file->private_data = &handle->vfh;
uvc_function_connect(uvc);
return 0;
}
static int
uvc_v4l2_release(struct file *file)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
struct uvc_file_handle *handle = to_uvc_file_handle(file->private_data);
struct uvc_video *video = handle->device;
uvc_function_disconnect(uvc);
mutex_lock(&video->mutex);
uvcg_video_enable(video, 0);
uvcg_free_buffers(&video->queue);
mutex_unlock(&video->mutex);
file->private_data = NULL;
v4l2_fh_del(&handle->vfh);
v4l2_fh_exit(&handle->vfh);
kfree(handle);
return 0;
}
static int
uvc_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
return uvcg_queue_mmap(&uvc->video.queue, vma);
}
static unsigned int
uvc_v4l2_poll(struct file *file, poll_table *wait)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
return uvcg_queue_poll(&uvc->video.queue, file, wait);
}
#ifndef CONFIG_MMU
static unsigned long uvcg_v4l2_get_unmapped_area(struct file *file,
unsigned long addr, unsigned long len, unsigned long pgoff,
unsigned long flags)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
return uvcg_queue_get_unmapped_area(&uvc->video.queue, pgoff);
}
#endif
struct v4l2_file_operations uvc_v4l2_fops = {
.owner = THIS_MODULE,
.open = uvc_v4l2_open,
.release = uvc_v4l2_release,
.unlocked_ioctl = video_ioctl2,
.mmap = uvc_v4l2_mmap,
.poll = uvc_v4l2_poll,
#ifndef CONFIG_MMU
.get_unmapped_area = uvcg_v4l2_get_unmapped_area,
#endif
};
|
gpl-2.0
|
shminer/android_kernel_lge_f460
|
drivers/usb/serial/iuu_phoenix.c
|
2043
|
31222
|
/*
* Infinity Unlimited USB Phoenix driver
*
* Copyright (C) 2010 James Courtier-Dutton (James@superbug.co.uk)
* Copyright (C) 2007 Alain Degreffe (eczema@ecze.com)
*
* Original code taken from iuutool (Copyright (C) 2006 Juan Carlos Borrás)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* And tested with help of WB Electronics
*
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/serial.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include "iuu_phoenix.h"
#include <linux/random.h>
#define DRIVER_DESC "Infinity USB Unlimited Phoenix driver"
static const struct usb_device_id id_table[] = {
{USB_DEVICE(IUU_USB_VENDOR_ID, IUU_USB_PRODUCT_ID)},
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
/* turbo parameter */
static int boost = 100;
static int clockmode = 1;
static int cdmode = 1;
static int iuu_cardin;
static int iuu_cardout;
static bool xmas;
static int vcc_default = 5;
static int iuu_create_sysfs_attrs(struct usb_serial_port *port);
static int iuu_remove_sysfs_attrs(struct usb_serial_port *port);
static void read_rxcmd_callback(struct urb *urb);
struct iuu_private {
spinlock_t lock; /* store irq state */
u8 line_status;
int tiostatus; /* store IUART SIGNAL for tiocmget call */
u8 reset; /* if 1 reset is needed */
int poll; /* number of polls */
u8 *writebuf; /* buffer for writing to device */
int writelen; /* number of bytes to write to the device */
u8 *buf; /* used for initialize speed */
u8 len;
int vcc; /* vcc (either 3 or 5 V) */
u32 baud;
u32 boost;
u32 clk;
};
static int iuu_port_probe(struct usb_serial_port *port)
{
struct iuu_private *priv;
int ret;
priv = kzalloc(sizeof(struct iuu_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->buf = kzalloc(256, GFP_KERNEL);
if (!priv->buf) {
kfree(priv);
return -ENOMEM;
}
priv->writebuf = kzalloc(256, GFP_KERNEL);
if (!priv->writebuf) {
kfree(priv->buf);
kfree(priv);
return -ENOMEM;
}
priv->vcc = vcc_default;
spin_lock_init(&priv->lock);
usb_set_serial_port_data(port, priv);
ret = iuu_create_sysfs_attrs(port);
if (ret) {
kfree(priv->writebuf);
kfree(priv->buf);
kfree(priv);
return ret;
}
return 0;
}
static int iuu_port_remove(struct usb_serial_port *port)
{
struct iuu_private *priv = usb_get_serial_port_data(port);
iuu_remove_sysfs_attrs(port);
kfree(priv->writebuf);
kfree(priv->buf);
kfree(priv);
return 0;
}
static int iuu_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct iuu_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
/* FIXME: locking on tiostatus */
dev_dbg(&port->dev, "%s msg : SET = 0x%04x, CLEAR = 0x%04x\n",
__func__, set, clear);
spin_lock_irqsave(&priv->lock, flags);
if ((set & TIOCM_RTS) && !(priv->tiostatus == TIOCM_RTS)) {
dev_dbg(&port->dev, "%s TIOCMSET RESET called !!!\n", __func__);
priv->reset = 1;
}
if (set & TIOCM_RTS)
priv->tiostatus = TIOCM_RTS;
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
/* This is used to provide a carrier-detect mechanism.
* When a card is present, the response is 0x00.
* When no card is present, the reader responds with TIOCM_CD.
* This is known as the CD autodetect mechanism.
*/
static int iuu_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct iuu_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
int rc;
spin_lock_irqsave(&priv->lock, flags);
rc = priv->tiostatus;
spin_unlock_irqrestore(&priv->lock, flags);
return rc;
}
static void iuu_rxcmd(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
int result;
int status = urb->status;
if (status) {
dev_dbg(&port->dev, "%s - status = %d\n", __func__, status);
/* error: stop all further processing */
return;
}
memset(port->write_urb->transfer_buffer, IUU_UART_RX, 1);
usb_fill_bulk_urb(port->write_urb, port->serial->dev,
usb_sndbulkpipe(port->serial->dev,
port->bulk_out_endpointAddress),
port->write_urb->transfer_buffer, 1,
read_rxcmd_callback, port);
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
}
static int iuu_reset(struct usb_serial_port *port, u8 wt)
{
struct iuu_private *priv = usb_get_serial_port_data(port);
int result;
char *buf_ptr = port->write_urb->transfer_buffer;
/* Prepare the reset sequence */
*buf_ptr++ = IUU_RST_SET;
*buf_ptr++ = IUU_DELAY_MS;
*buf_ptr++ = wt;
*buf_ptr = IUU_RST_CLEAR;
/* send the sequence */
usb_fill_bulk_urb(port->write_urb,
port->serial->dev,
usb_sndbulkpipe(port->serial->dev,
port->bulk_out_endpointAddress),
port->write_urb->transfer_buffer, 4, iuu_rxcmd, port);
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
priv->reset = 0;
return result;
}
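/*
 * Illustrative note: the device interprets the bulk-out stream as a tiny
 * command script.  The four bytes built in iuu_reset() mean "assert the
 * reset line, wait wt milliseconds, deassert the reset line".  Helper is
 * hypothetical:
 */
#if 0
static int example_pack_reset(u8 *buf, u8 wait_ms)
{
	buf[0] = IUU_RST_SET;	/* assert reset */
	buf[1] = IUU_DELAY_MS;	/* firmware-side delay opcode */
	buf[2] = wait_ms;	/* delay argument in ms */
	buf[3] = IUU_RST_CLEAR;	/* deassert reset */

	return 4;		/* bytes to submit */
}
#endif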
/* Status Function
* Return value is
* 0x00 = no card
* 0x01 = smartcard
* 0x02 = sim card
*/
static void iuu_update_status_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct iuu_private *priv = usb_get_serial_port_data(port);
u8 *st;
int status = urb->status;
if (status) {
dev_dbg(&port->dev, "%s - status = %d\n", __func__, status);
/* error: stop all further processing */
return;
}
st = urb->transfer_buffer;
dev_dbg(&port->dev, "%s - enter\n", __func__);
if (urb->actual_length == 1) {
switch (st[0]) {
case 0x1:
priv->tiostatus = iuu_cardout;
break;
case 0x0:
priv->tiostatus = iuu_cardin;
break;
default:
priv->tiostatus = iuu_cardin;
}
}
iuu_rxcmd(urb);
}
static void iuu_status_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
int result;
int status = urb->status;
dev_dbg(&port->dev, "%s - status = %d\n", __func__, status);
usb_fill_bulk_urb(port->read_urb, port->serial->dev,
usb_rcvbulkpipe(port->serial->dev,
port->bulk_in_endpointAddress),
port->read_urb->transfer_buffer, 256,
iuu_update_status_callback, port);
result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
}
static int iuu_status(struct usb_serial_port *port)
{
int result;
memset(port->write_urb->transfer_buffer, IUU_GET_STATE_REGISTER, 1);
usb_fill_bulk_urb(port->write_urb, port->serial->dev,
usb_sndbulkpipe(port->serial->dev,
port->bulk_out_endpointAddress),
port->write_urb->transfer_buffer, 1,
iuu_status_callback, port);
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
return result;
}
static int bulk_immediate(struct usb_serial_port *port, u8 *buf, u8 count)
{
int status;
struct usb_serial *serial = port->serial;
int actual = 0;
/* send the data out the bulk port */
status =
usb_bulk_msg(serial->dev,
usb_sndbulkpipe(serial->dev,
port->bulk_out_endpointAddress), buf,
count, &actual, 1000);
if (status != IUU_OPERATION_OK)
dev_dbg(&port->dev, "%s - error = %2x\n", __func__, status);
else
dev_dbg(&port->dev, "%s - write OK !\n", __func__);
return status;
}
static int read_immediate(struct usb_serial_port *port, u8 *buf, u8 count)
{
int status;
struct usb_serial *serial = port->serial;
int actual = 0;
/* send the data out the bulk port */
status =
usb_bulk_msg(serial->dev,
usb_rcvbulkpipe(serial->dev,
port->bulk_in_endpointAddress), buf,
count, &actual, 1000);
if (status != IUU_OPERATION_OK)
dev_dbg(&port->dev, "%s - error = %2x\n", __func__, status);
else
dev_dbg(&port->dev, "%s - read OK !\n", __func__);
return status;
}
static int iuu_led(struct usb_serial_port *port, unsigned int R,
unsigned int G, unsigned int B, u8 f)
{
int status;
u8 *buf;
buf = kmalloc(8, GFP_KERNEL);
if (!buf)
return -ENOMEM;
buf[0] = IUU_SET_LED;
buf[1] = R & 0xFF;
buf[2] = (R >> 8) & 0xFF;
buf[3] = G & 0xFF;
buf[4] = (G >> 8) & 0xFF;
buf[5] = B & 0xFF;
buf[6] = (B >> 8) & 0xFF;
buf[7] = f;
status = bulk_immediate(port, buf, 8);
kfree(buf);
if (status != IUU_OPERATION_OK)
dev_dbg(&port->dev, "%s - led error status = %2x\n", __func__, status);
else
dev_dbg(&port->dev, "%s - led OK !\n", __func__);
return IUU_OPERATION_OK;
}
static void iuu_rgbf_fill_buffer(u8 *buf, u8 r1, u8 r2, u8 g1, u8 g2, u8 b1,
u8 b2, u8 freq)
{
*buf++ = IUU_SET_LED;
*buf++ = r1;
*buf++ = r2;
*buf++ = g1;
*buf++ = g2;
*buf++ = b1;
*buf++ = b2;
*buf = freq;
}
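/* The resulting 8-byte LED frame, as built by iuu_led() above, is
* { IUU_SET_LED, R_lo, R_hi, G_lo, G_hi, B_lo, B_hi, freq }: each
* 16-bit colour intensity is sent low byte first, followed by the
* blink-frequency byte.
*/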
static void iuu_led_activity_on(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
int result;
char *buf_ptr = port->write_urb->transfer_buffer;
if (xmas == 1) {
buf_ptr[0] = IUU_SET_LED;
get_random_bytes(buf_ptr + 1, 6);
buf_ptr[7] = 1;
} else {
/* iuu_rgbf_fill_buffer() writes IUU_SET_LED itself */
iuu_rgbf_fill_buffer(buf_ptr, 255, 255, 0, 0, 0, 0, 255);
}
usb_fill_bulk_urb(port->write_urb, port->serial->dev,
usb_sndbulkpipe(port->serial->dev,
port->bulk_out_endpointAddress),
port->write_urb->transfer_buffer, 8 ,
iuu_rxcmd, port);
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
}
static void iuu_led_activity_off(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
int result;
char *buf_ptr = port->write_urb->transfer_buffer;
if (xmas == 1) {
iuu_rxcmd(urb);
return;
}
/* iuu_rgbf_fill_buffer() writes IUU_SET_LED itself */
iuu_rgbf_fill_buffer(buf_ptr, 0, 0, 255, 255, 0, 0, 255);
usb_fill_bulk_urb(port->write_urb, port->serial->dev,
usb_sndbulkpipe(port->serial->dev,
port->bulk_out_endpointAddress),
port->write_urb->transfer_buffer, 8 ,
iuu_rxcmd, port);
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
}
static int iuu_clk(struct usb_serial_port *port, int dwFrq)
{
int status;
struct iuu_private *priv = usb_get_serial_port_data(port);
int Count = 0;
u8 FrqGenAdr = 0x69;
u8 DIV = 0; /* 8bit */
u8 XDRV = 0; /* 8bit */
u8 PUMP = 0; /* 3bit */
u8 PBmsb = 0; /* 2bit */
u8 PBlsb = 0; /* 8bit */
u8 PO = 0; /* 1bit */
u8 Q = 0; /* 7bit */
/* 24bit = 3bytes */
unsigned int P = 0;
unsigned int P2 = 0;
int frq = (int)dwFrq;
if (frq == 0) {
priv->buf[Count++] = IUU_UART_WRITE_I2C;
priv->buf[Count++] = FrqGenAdr << 1;
priv->buf[Count++] = 0x09;
priv->buf[Count++] = 0x00;
status = bulk_immediate(port, (u8 *) priv->buf, Count);
if (status != 0) {
dev_dbg(&port->dev, "%s - write error\n", __func__);
return status;
}
} else if (frq == 3579000) {
DIV = 100;
P = 1193;
Q = 40;
XDRV = 0;
} else if (frq == 3680000) {
DIV = 105;
P = 161;
Q = 5;
XDRV = 0;
} else if (frq == 6000000) {
DIV = 66;
P = 66;
Q = 2;
XDRV = 0x28;
} else {
unsigned int result = 0;
unsigned int tmp = 0;
unsigned int check;
unsigned int check2;
char found = 0x00;
unsigned int lQ = 2;
unsigned int lP = 2055;
unsigned int lDiv = 4;
for (lQ = 2; lQ <= 47 && !found; lQ++)
for (lP = 2055; lP >= 8 && !found; lP--)
for (lDiv = 4; lDiv <= 127 && !found; lDiv++) {
tmp = (12000000 / lDiv) * (lP / lQ);
if (abs((int)(tmp - frq)) <
abs((int)(frq - result))) {
check2 = (12000000 / lQ);
if (check2 < 250000)
continue;
check = (12000000 / lQ) * lP;
if (check > 400000000)
continue;
if (check < 100000000)
continue;
if (lDiv < 4 || lDiv > 127)
continue;
result = tmp;
P = lP;
DIV = lDiv;
Q = lQ;
if (result == frq)
found = 0x01;
}
}
}
P2 = ((P - PO) / 2) - 4;
PUMP = 0x04;
PBmsb = (P2 >> 8 & 0x03);
PBlsb = P2 & 0xFF;
PO = (P >> 10) & 0x01;
Q = Q - 2;
priv->buf[Count++] = IUU_UART_WRITE_I2C; /* 0x4C */
priv->buf[Count++] = FrqGenAdr << 1;
priv->buf[Count++] = 0x09;
priv->buf[Count++] = 0x20; /* Adr = 0x09 */
priv->buf[Count++] = IUU_UART_WRITE_I2C; /* 0x4C */
priv->buf[Count++] = FrqGenAdr << 1;
priv->buf[Count++] = 0x0C;
priv->buf[Count++] = DIV; /* Adr = 0x0C */
priv->buf[Count++] = IUU_UART_WRITE_I2C; /* 0x4C */
priv->buf[Count++] = FrqGenAdr << 1;
priv->buf[Count++] = 0x12;
priv->buf[Count++] = XDRV; /* Adr = 0x12 */
priv->buf[Count++] = IUU_UART_WRITE_I2C; /* 0x4C */
priv->buf[Count++] = FrqGenAdr << 1;
priv->buf[Count++] = 0x13;
priv->buf[Count++] = 0x6B; /* Adr = 0x13 */
priv->buf[Count++] = IUU_UART_WRITE_I2C; /* 0x4C */
priv->buf[Count++] = FrqGenAdr << 1;
priv->buf[Count++] = 0x40;
priv->buf[Count++] = (0xC0 | ((PUMP & 0x07) << 2)) |
(PBmsb & 0x03); /* Adr = 0x40 */
priv->buf[Count++] = IUU_UART_WRITE_I2C; /* 0x4C */
priv->buf[Count++] = FrqGenAdr << 1;
priv->buf[Count++] = 0x41;
priv->buf[Count++] = PBlsb; /* Adr = 0x41 */
priv->buf[Count++] = IUU_UART_WRITE_I2C; /* 0x4C */
priv->buf[Count++] = FrqGenAdr << 1;
priv->buf[Count++] = 0x42;
priv->buf[Count++] = Q | (((PO & 0x01) << 7)); /* Adr = 0x42 */
priv->buf[Count++] = IUU_UART_WRITE_I2C; /* 0x4C */
priv->buf[Count++] = FrqGenAdr << 1;
priv->buf[Count++] = 0x44;
priv->buf[Count++] = (char)0xFF; /* Adr = 0x44 */
priv->buf[Count++] = IUU_UART_WRITE_I2C; /* 0x4C */
priv->buf[Count++] = FrqGenAdr << 1;
priv->buf[Count++] = 0x45;
priv->buf[Count++] = (char)0xFE; /* Adr = 0x45 */
priv->buf[Count++] = IUU_UART_WRITE_I2C; /* 0x4C */
priv->buf[Count++] = FrqGenAdr << 1;
priv->buf[Count++] = 0x46;
priv->buf[Count++] = 0x7F; /* Adr = 0x46 */
priv->buf[Count++] = IUU_UART_WRITE_I2C; /* 0x4C */
priv->buf[Count++] = FrqGenAdr << 1;
priv->buf[Count++] = 0x47;
priv->buf[Count++] = (char)0x84; /* Adr = 0x47 */
status = bulk_immediate(port, (u8 *) priv->buf, Count);
if (status != IUU_OPERATION_OK)
dev_dbg(&port->dev, "%s - write error\n", __func__);
return status;
}
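/* Sanity check on the preset table in iuu_clk() above, assuming the
* generator output is f_out = 12 MHz * P / (Q * DIV) (the formula the
* search loop approximates with tmp = (12000000 / lDiv) * (lP / lQ),
* noting that lP / lQ there is integer division):
* 3.579 MHz: 12e6 * 1193 / (40 * 100) = 3579000
* 3.680 MHz: 12e6 * 161 / (5 * 105) = 3680000
* 6.000 MHz: 12e6 * 66 / (2 * 66) = 6000000
*/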
static int iuu_uart_flush(struct usb_serial_port *port)
{
struct device *dev = &port->dev;
int i;
int status;
u8 rxcmd = IUU_UART_RX;
struct iuu_private *priv = usb_get_serial_port_data(port);
if (iuu_led(port, 0xF000, 0, 0, 0xFF) < 0)
return -EIO;
for (i = 0; i < 2; i++) {
status = bulk_immediate(port, &rxcmd, 1);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_write error\n", __func__);
return status;
}
status = read_immediate(port, &priv->len, 1);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_read error\n", __func__);
return status;
}
if (priv->len > 0) {
dev_dbg(dev, "%s - uart_flush datalen is : %i\n", __func__, priv->len);
status = read_immediate(port, priv->buf, priv->len);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_read error\n", __func__);
return status;
}
}
}
dev_dbg(dev, "%s - uart_flush_read OK!\n", __func__);
iuu_led(port, 0, 0xF000, 0, 0xFF);
return status;
}
static void read_buf_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
if (status) {
if (status == -EPROTO) {
/* reschedule needed */
}
return;
}
dev_dbg(&port->dev, "%s - %i chars to write\n", __func__, urb->actual_length);
if (data == NULL)
dev_dbg(&port->dev, "%s - data is NULL !!!\n", __func__);
if (urb->actual_length && data) {
tty_insert_flip_string(&port->port, data, urb->actual_length);
tty_flip_buffer_push(&port->port);
}
iuu_led_activity_on(urb);
}
static int iuu_bulk_write(struct usb_serial_port *port)
{
struct iuu_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
int result;
int buf_len;
char *buf_ptr = port->write_urb->transfer_buffer;
spin_lock_irqsave(&priv->lock, flags);
*buf_ptr++ = IUU_UART_ESC;
*buf_ptr++ = IUU_UART_TX;
*buf_ptr++ = priv->writelen;
memcpy(buf_ptr, priv->writebuf, priv->writelen);
buf_len = priv->writelen;
priv->writelen = 0;
spin_unlock_irqrestore(&priv->lock, flags);
dev_dbg(&port->dev, "%s - writing %i chars : %*ph\n", __func__,
buf_len, buf_len, buf_ptr);
usb_fill_bulk_urb(port->write_urb, port->serial->dev,
usb_sndbulkpipe(port->serial->dev,
port->bulk_out_endpointAddress),
port->write_urb->transfer_buffer, buf_len + 3,
iuu_rxcmd, port);
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
usb_serial_port_softint(port);
return result;
}
static int iuu_read_buf(struct usb_serial_port *port, int len)
{
int result;
usb_fill_bulk_urb(port->read_urb, port->serial->dev,
usb_rcvbulkpipe(port->serial->dev,
port->bulk_in_endpointAddress),
port->read_urb->transfer_buffer, len,
read_buf_callback, port);
result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
return result;
}
static void iuu_uart_read_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct iuu_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
int status = urb->status;
int error = 0;
int len = 0;
unsigned char *data = urb->transfer_buffer;
priv->poll++;
if (status) {
dev_dbg(&port->dev, "%s - status = %d\n", __func__, status);
/* error stop all */
return;
}
if (data == NULL)
dev_dbg(&port->dev, "%s - data is NULL !!!\n", __func__);
if (urb->actual_length == 1 && data != NULL)
len = (int) data[0];
if (urb->actual_length > 1) {
dev_dbg(&port->dev, "%s - urb->actual_length = %i\n", __func__,
urb->actual_length);
error = 1;
return;
}
/* if len > 0 call readbuf */
if (len > 0 && error == 0) {
dev_dbg(&port->dev, "%s - call read buf - len to read is %i\n",
__func__, len);
status = iuu_read_buf(port, len);
return;
}
/* need to update status ? */
if (priv->poll > 99) {
status = iuu_status(port);
priv->poll = 0;
return;
}
/* reset waiting ? */
if (priv->reset == 1) {
status = iuu_reset(port, 0xC);
return;
}
/* Writebuf is waiting */
spin_lock_irqsave(&priv->lock, flags);
if (priv->writelen > 0) {
spin_unlock_irqrestore(&priv->lock, flags);
status = iuu_bulk_write(port);
return;
}
spin_unlock_irqrestore(&priv->lock, flags);
/* if nothing to write call again rxcmd */
dev_dbg(&port->dev, "%s - rxcmd recall\n", __func__);
iuu_led_activity_off(urb);
}
static int iuu_uart_write(struct tty_struct *tty, struct usb_serial_port *port,
const u8 *buf, int count)
{
struct iuu_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
if (count > 256)
return -ENOMEM;
spin_lock_irqsave(&priv->lock, flags);
/* clamp so the pending data cannot overflow the 256-byte writebuf */
if (priv->writelen + count > 256)
count = 256 - priv->writelen;
/* fill the buffer */
memcpy(priv->writebuf + priv->writelen, buf, count);
priv->writelen += count;
spin_unlock_irqrestore(&priv->lock, flags);
return count;
}
static void read_rxcmd_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
int result;
int status = urb->status;
if (status) {
/* error stop all */
return;
}
usb_fill_bulk_urb(port->read_urb, port->serial->dev,
usb_rcvbulkpipe(port->serial->dev,
port->bulk_in_endpointAddress),
port->read_urb->transfer_buffer, 256,
iuu_uart_read_callback, port);
result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
dev_dbg(&port->dev, "%s - submit result = %d\n", __func__, result);
}
static int iuu_uart_on(struct usb_serial_port *port)
{
int status;
u8 *buf;
buf = kmalloc(sizeof(u8) * 4, GFP_KERNEL);
if (!buf)
return -ENOMEM;
buf[0] = IUU_UART_ENABLE;
buf[1] = (u8) ((IUU_BAUD_9600 >> 8) & 0x00FF);
buf[2] = (u8) (0x00FF & IUU_BAUD_9600);
buf[3] = (u8) (0x0F0 & IUU_ONE_STOP_BIT) | (0x07 & IUU_PARITY_EVEN);
status = bulk_immediate(port, buf, 4);
if (status != IUU_OPERATION_OK) {
dev_dbg(&port->dev, "%s - uart_on error\n", __func__);
goto uart_enable_failed;
}
/* iuu_reset() the card after iuu_uart_on() */
status = iuu_uart_flush(port);
if (status != IUU_OPERATION_OK)
dev_dbg(&port->dev, "%s - uart_flush error\n", __func__);
uart_enable_failed:
kfree(buf);
return status;
}
/* Disables the IUU UART (a.k.a. the Phoenix interface) */
static int iuu_uart_off(struct usb_serial_port *port)
{
int status;
u8 *buf;
buf = kmalloc(1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
buf[0] = IUU_UART_DISABLE;
status = bulk_immediate(port, buf, 1);
if (status != IUU_OPERATION_OK)
dev_dbg(&port->dev, "%s - uart_off error\n", __func__);
kfree(buf);
return status;
}
static int iuu_uart_baud(struct usb_serial_port *port, u32 baud_base,
u32 *actual, u8 parity)
{
int status;
u32 baud;
u8 *dataout;
u8 DataCount = 0;
u8 T1Frekvens = 0;
u8 T1reload = 0;
unsigned int T1FrekvensHZ = 0;
dev_dbg(&port->dev, "%s - enter baud_base=%d\n", __func__, baud_base);
dataout = kmalloc(sizeof(u8) * 5, GFP_KERNEL);
if (!dataout)
return -ENOMEM;
/*baud = (((priv->clk / 35) * baud_base) / 100000); */
baud = baud_base;
if (baud < 1200 || baud > 230400) {
kfree(dataout);
return IUU_INVALID_PARAMETER;
}
if (baud > 977) {
T1Frekvens = 3;
T1FrekvensHZ = 500000;
}
if (baud > 3906) {
T1Frekvens = 2;
T1FrekvensHZ = 2000000;
}
if (baud > 11718) {
T1Frekvens = 1;
T1FrekvensHZ = 6000000;
}
if (baud > 46875) {
T1Frekvens = 0;
T1FrekvensHZ = 24000000;
}
T1reload = 256 - (u8) (T1FrekvensHZ / (baud * 2));
/* magic number here: ENTER_FIRMWARE_UPDATE; */
dataout[DataCount++] = IUU_UART_ESC;
/* magic number here: CHANGE_BAUD; */
dataout[DataCount++] = IUU_UART_CHANGE;
dataout[DataCount++] = T1Frekvens;
dataout[DataCount++] = T1reload;
*actual = (T1FrekvensHZ / (256 - T1reload)) / 2;
switch (parity & 0x0F) {
case IUU_PARITY_NONE:
dataout[DataCount++] = 0x00;
break;
case IUU_PARITY_EVEN:
dataout[DataCount++] = 0x01;
break;
case IUU_PARITY_ODD:
dataout[DataCount++] = 0x02;
break;
case IUU_PARITY_MARK:
dataout[DataCount++] = 0x03;
break;
case IUU_PARITY_SPACE:
dataout[DataCount++] = 0x04;
break;
default:
kfree(dataout);
return IUU_INVALID_PARAMETER;
}
switch (parity & 0xF0) {
case IUU_ONE_STOP_BIT:
dataout[DataCount - 1] |= IUU_ONE_STOP_BIT;
break;
case IUU_TWO_STOP_BITS:
dataout[DataCount - 1] |= IUU_TWO_STOP_BITS;
break;
default:
kfree(dataout);
return IUU_INVALID_PARAMETER;
}
status = bulk_immediate(port, dataout, DataCount);
if (status != IUU_OPERATION_OK)
dev_dbg(&port->dev, "%s - uart_baud error\n", __func__);
kfree(dataout);
return status;
}
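/* Worked example for the timer arithmetic above: baud = 9600 falls in
* the 3906 < baud <= 11718 range, so T1Frekvens = 2 and T1FrekvensHZ =
* 2000000; then T1reload = 256 - 2000000 / 19200 = 256 - 104 = 152 and
* the reported rate is *actual = (2000000 / 104) / 2 = 9615 baud.
*/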
static void iuu_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
const u32 supported_mask = CMSPAR|PARENB|PARODD;
struct iuu_private *priv = usb_get_serial_port_data(port);
unsigned int cflag = tty->termios.c_cflag;
int status;
u32 actual;
u32 parity;
int csize = CS7;
int baud;
u32 newval = cflag & supported_mask;
/* Just use the ospeed. ispeed should be the same. */
baud = tty->termios.c_ospeed;
dev_dbg(&port->dev, "%s - enter c_ospeed or baud=%d\n", __func__, baud);
/* compute the parity parameter */
parity = 0;
if (cflag & CMSPAR) { /* Using mark space */
if (cflag & PARODD)
parity |= IUU_PARITY_SPACE;
else
parity |= IUU_PARITY_MARK;
} else if (!(cflag & PARENB)) {
parity |= IUU_PARITY_NONE;
csize = CS8;
} else if (cflag & PARODD)
parity |= IUU_PARITY_ODD;
else
parity |= IUU_PARITY_EVEN;
parity |= (cflag & CSTOPB ? IUU_TWO_STOP_BITS : IUU_ONE_STOP_BIT);
/* set it */
status = iuu_uart_baud(port,
baud * priv->boost / 100,
&actual, parity);
/* set the termios value to the real one, so the user knows what has
* changed. We support few fields so it's easiest to copy the old hw
* settings back over and then adjust them
*/
if (old_termios)
tty_termios_copy_hw(&tty->termios, old_termios);
if (status != 0) /* Set failed - return old bits */
return;
/* Re-encode speed, parity and csize */
tty_encode_baud_rate(tty, baud, baud);
tty->termios.c_cflag &= ~(supported_mask|CSIZE);
tty->termios.c_cflag |= newval | csize;
}
static void iuu_close(struct usb_serial_port *port)
{
/* iuu_led (port,255,0,0,0); */
iuu_uart_off(port);
usb_kill_urb(port->write_urb);
usb_kill_urb(port->read_urb);
iuu_led(port, 0, 0, 0xF000, 0xFF);
}
static void iuu_init_termios(struct tty_struct *tty)
{
tty->termios = tty_std_termios;
tty->termios.c_cflag = CLOCAL | CREAD | CS8 | B9600
| TIOCM_CTS | CSTOPB | PARENB;
tty->termios.c_ispeed = 9600;
tty->termios.c_ospeed = 9600;
tty->termios.c_lflag = 0;
tty->termios.c_oflag = 0;
tty->termios.c_iflag = 0;
}
static int iuu_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
struct device *dev = &port->dev;
u8 *buf;
int result;
int baud;
u32 actual;
struct iuu_private *priv = usb_get_serial_port_data(port);
baud = tty->termios.c_ospeed;
tty->termios.c_ispeed = baud;
/* Re-encode speed */
tty_encode_baud_rate(tty, baud, baud);
dev_dbg(dev, "%s - baud %d\n", __func__, baud);
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
buf = kmalloc(10, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
priv->poll = 0;
/* initialize writebuf */
#define FISH(a, b, c, d) do { \
result = usb_control_msg(port->serial->dev, \
usb_rcvctrlpipe(port->serial->dev, 0), \
b, a, c, d, buf, 1, 1000); \
dev_dbg(dev, "0x%x:0x%x:0x%x:0x%x %d - %x\n", a, b, c, d, result, \
buf[0]); } while (0)
#define SOUP(a, b, c, d) do { \
result = usb_control_msg(port->serial->dev, \
usb_sndctrlpipe(port->serial->dev, 0), \
b, a, c, d, NULL, 0, 1000); \
dev_dbg(dev, "0x%x:0x%x:0x%x:0x%x %d\n", a, b, c, d, result); } while (0)
/* This is not UART related but IUU USB driver related or something */
/* like that. Basically no IUU will accept any commands from the USB */
/* host unless it has received the following message */
/* sprintf(buf ,"%c%c%c%c",0x03,0x02,0x02,0x0); */
SOUP(0x03, 0x02, 0x02, 0x0);
kfree(buf);
iuu_led(port, 0xF000, 0xF000, 0, 0xFF);
iuu_uart_on(port);
if (boost < 100)
boost = 100;
priv->boost = boost;
priv->baud = baud;
switch (clockmode) {
case 2: /* 3.680 MHz */
priv->clk = IUU_CLK_3680000;
iuu_clk(port, IUU_CLK_3680000 * boost / 100);
result =
iuu_uart_baud(port, baud * boost / 100, &actual,
IUU_PARITY_EVEN);
break;
case 3: /* 6.00 MHz */
iuu_clk(port, IUU_CLK_6000000 * boost / 100);
priv->clk = IUU_CLK_6000000;
/* Ratio of 6000000 to 3500000 for baud 9600 */
result =
iuu_uart_baud(port, 16457 * boost / 100, &actual,
IUU_PARITY_EVEN);
break;
default: /* 3.579 MHz */
iuu_clk(port, IUU_CLK_3579000 * boost / 100);
priv->clk = IUU_CLK_3579000;
result =
iuu_uart_baud(port, baud * boost / 100, &actual,
IUU_PARITY_EVEN);
}
/* set the cardin cardout signals */
switch (cdmode) {
case 0:
iuu_cardin = 0;
iuu_cardout = 0;
break;
case 1:
iuu_cardin = TIOCM_CD;
iuu_cardout = 0;
break;
case 2:
iuu_cardin = 0;
iuu_cardout = TIOCM_CD;
break;
case 3:
iuu_cardin = TIOCM_DSR;
iuu_cardout = 0;
break;
case 4:
iuu_cardin = 0;
iuu_cardout = TIOCM_DSR;
break;
case 5:
iuu_cardin = TIOCM_CTS;
iuu_cardout = 0;
break;
case 6:
iuu_cardin = 0;
iuu_cardout = TIOCM_CTS;
break;
case 7:
iuu_cardin = TIOCM_RNG;
iuu_cardout = 0;
break;
case 8:
iuu_cardin = 0;
iuu_cardout = TIOCM_RNG;
}
iuu_uart_flush(port);
dev_dbg(dev, "%s - initialization done\n", __func__);
memset(port->write_urb->transfer_buffer, IUU_UART_RX, 1);
usb_fill_bulk_urb(port->write_urb, port->serial->dev,
usb_sndbulkpipe(port->serial->dev,
port->bulk_out_endpointAddress),
port->write_urb->transfer_buffer, 1,
read_rxcmd_callback, port);
result = usb_submit_urb(port->write_urb, GFP_KERNEL);
if (result) {
dev_err(dev, "%s - failed submitting read urb, error %d\n", __func__, result);
iuu_close(port);
} else {
dev_dbg(dev, "%s - rxcmd OK\n", __func__);
}
return result;
}
/* Change the smartcard supply voltage (VCC) */
static int iuu_vcc_set(struct usb_serial_port *port, unsigned int vcc)
{
int status;
u8 *buf;
buf = kmalloc(5, GFP_KERNEL);
if (!buf)
return -ENOMEM;
buf[0] = IUU_SET_VCC;
buf[1] = vcc & 0xFF;
buf[2] = (vcc >> 8) & 0xFF;
buf[3] = (vcc >> 16) & 0xFF;
buf[4] = (vcc >> 24) & 0xFF;
status = bulk_immediate(port, buf, 5);
kfree(buf);
if (status != IUU_OPERATION_OK)
dev_dbg(&port->dev, "%s - vcc error status = %2x\n", __func__, status);
else
dev_dbg(&port->dev, "%s - vcc OK !\n", __func__);
return status;
}
/*
* Sysfs Attributes
*/
static ssize_t show_vcc_mode(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
struct iuu_private *priv = usb_get_serial_port_data(port);
return sprintf(buf, "%d\n", priv->vcc);
}
static ssize_t store_vcc_mode(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
struct iuu_private *priv = usb_get_serial_port_data(port);
unsigned long v;
if (kstrtoul(buf, 10, &v)) {
dev_err(dev, "%s - vcc_mode: %s is not an unsigned long\n",
__func__, buf);
goto fail_store_vcc_mode;
}
dev_dbg(dev, "%s: setting vcc_mode = %ld", __func__, v);
if ((v != 3) && (v != 5)) {
dev_err(dev, "%s - vcc_mode %ld is invalid\n", __func__, v);
} else {
iuu_vcc_set(port, v);
priv->vcc = v;
}
fail_store_vcc_mode:
return count;
}
static DEVICE_ATTR(vcc_mode, S_IRUSR | S_IWUSR, show_vcc_mode,
store_vcc_mode);
static int iuu_create_sysfs_attrs(struct usb_serial_port *port)
{
return device_create_file(&port->dev, &dev_attr_vcc_mode);
}
static int iuu_remove_sysfs_attrs(struct usb_serial_port *port)
{
device_remove_file(&port->dev, &dev_attr_vcc_mode);
return 0;
}
/*
* End Sysfs Attributes
*/
static struct usb_serial_driver iuu_device = {
.driver = {
.owner = THIS_MODULE,
.name = "iuu_phoenix",
},
.id_table = id_table,
.num_ports = 1,
.bulk_in_size = 512,
.bulk_out_size = 512,
.open = iuu_open,
.close = iuu_close,
.write = iuu_uart_write,
.read_bulk_callback = iuu_uart_read_callback,
.tiocmget = iuu_tiocmget,
.tiocmset = iuu_tiocmset,
.set_termios = iuu_set_termios,
.init_termios = iuu_init_termios,
.port_probe = iuu_port_probe,
.port_remove = iuu_port_remove,
};
static struct usb_serial_driver * const serial_drivers[] = {
&iuu_device, NULL
};
module_usb_serial_driver(serial_drivers, id_table);
MODULE_AUTHOR("Alain Degreffe eczema@ecze.com");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
module_param(xmas, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(xmas, "Xmas colors enabled or not");
module_param(boost, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(boost, "Card overclock boost (in percent 100-500)");
module_param(clockmode, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(clockmode, "Card clock mode (1=3.579 MHz, 2=3.680 MHz, "
"3=6 MHz)");
module_param(cdmode, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(cdmode, "Card detect mode (0=none, 1=CD, 2=!CD, 3=DSR, "
"4=!DSR, 5=CTS, 6=!CTS, 7=RING, 8=!RING)");
module_param(vcc_default, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(vcc_default, "Set default VCC (either 3 for 3.3V or 5 "
"for 5V). Defaults to 5.");
|
gpl-2.0
|
kishpatel1998/HeatWave-CP8676_I02
|
arch/arm/mach-msm/scm.c
|
2299
|
7208
|
/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <asm/cacheflush.h>
#include "scm.h"
/* Cache line size for msm8x60 */
#define CACHELINESIZE 32
#define SCM_ENOMEM -5
#define SCM_EOPNOTSUPP -4
#define SCM_EINVAL_ADDR -3
#define SCM_EINVAL_ARG -2
#define SCM_ERROR -1
#define SCM_INTERRUPTED 1
static DEFINE_MUTEX(scm_lock);
/**
* struct scm_command - one SCM command buffer
* @len: total available memory for command and response
* @buf_offset: start of command buffer
* @resp_hdr_offset: start of response buffer
* @id: command to be executed
* @buf: buffer returned from scm_get_command_buffer()
*
* An SCM command is laid out in memory as follows:
*
* ------------------- <--- struct scm_command
* | command header |
* ------------------- <--- scm_get_command_buffer()
* | command buffer |
* ------------------- <--- struct scm_response and
* | response header | scm_command_to_response()
* ------------------- <--- scm_get_response_buffer()
* | response buffer |
* -------------------
*
* There can be arbitrary padding between the headers and buffers so
* you should always use the appropriate scm_get_*_buffer() routines
* to access the buffers in a safe manner.
*/
struct scm_command {
u32 len;
u32 buf_offset;
u32 resp_hdr_offset;
u32 id;
u32 buf[0];
};
/**
* struct scm_response - one SCM response buffer
* @len: total available memory for response
* @buf_offset: start of response data relative to start of scm_response
* @is_complete: indicates if the command has finished processing
*/
struct scm_response {
u32 len;
u32 buf_offset;
u32 is_complete;
};
/**
* alloc_scm_command() - Allocate an SCM command
* @cmd_size: size of the command buffer
* @resp_size: size of the response buffer
*
* Allocate an SCM command, including enough room for the command
* and response headers as well as the command and response buffers.
*
* Returns a valid &scm_command on success or %NULL if the allocation fails.
*/
static struct scm_command *alloc_scm_command(size_t cmd_size, size_t resp_size)
{
struct scm_command *cmd;
size_t len = sizeof(*cmd) + sizeof(struct scm_response) + cmd_size +
resp_size;
cmd = kzalloc(PAGE_ALIGN(len), GFP_KERNEL);
if (cmd) {
cmd->len = len;
cmd->buf_offset = offsetof(struct scm_command, buf);
cmd->resp_hdr_offset = cmd->buf_offset + cmd_size;
}
return cmd;
}
/**
* free_scm_command() - Free an SCM command
* @cmd: command to free
*
* Free an SCM command.
*/
static inline void free_scm_command(struct scm_command *cmd)
{
kfree(cmd);
}
/**
* scm_command_to_response() - Get a pointer to a scm_response
* @cmd: command
*
* Returns a pointer to a response for a command.
*/
static inline struct scm_response *scm_command_to_response(
const struct scm_command *cmd)
{
return (void *)cmd + cmd->resp_hdr_offset;
}
/**
* scm_get_command_buffer() - Get a pointer to a command buffer
* @cmd: command
*
* Returns a pointer to the command buffer of a command.
*/
static inline void *scm_get_command_buffer(const struct scm_command *cmd)
{
return (void *)cmd->buf;
}
/**
* scm_get_response_buffer() - Get a pointer to a response buffer
* @rsp: response
*
* Returns a pointer to a response buffer of a response.
*/
static inline void *scm_get_response_buffer(const struct scm_response *rsp)
{
return (void *)rsp + rsp->buf_offset;
}
static int scm_remap_error(int err)
{
switch (err) {
case SCM_ERROR:
return -EIO;
case SCM_EINVAL_ADDR:
case SCM_EINVAL_ARG:
return -EINVAL;
case SCM_EOPNOTSUPP:
return -EOPNOTSUPP;
case SCM_ENOMEM:
return -ENOMEM;
}
return -EINVAL;
}
static u32 smc(u32 cmd_addr)
{
int context_id;
register u32 r0 asm("r0") = 1;
register u32 r1 asm("r1") = (u32)&context_id;
register u32 r2 asm("r2") = cmd_addr;
do {
asm volatile(
__asmeq("%0", "r0")
__asmeq("%1", "r0")
__asmeq("%2", "r1")
__asmeq("%3", "r2")
#ifdef REQUIRES_SEC
".arch_extension sec\n"
#endif
"smc #0 @ switch to secure world\n"
: "=r" (r0)
: "r" (r0), "r" (r1), "r" (r2)
: "r3");
} while (r0 == SCM_INTERRUPTED);
return r0;
}
static int __scm_call(const struct scm_command *cmd)
{
int ret;
u32 cmd_addr = virt_to_phys(cmd);
/*
* Flush the entire cache here so callers don't have to remember
* to flush the cache when passing physical addresses to the secure
* side in the buffer.
*/
flush_cache_all();
ret = smc(cmd_addr);
if (ret < 0)
ret = scm_remap_error(ret);
return ret;
}
/**
* scm_call() - Send an SCM command
* @svc_id: service identifier
* @cmd_id: command identifier
* @cmd_buf: command buffer
* @cmd_len: length of the command buffer
* @resp_buf: response buffer
* @resp_len: length of the response buffer
*
* Sends a command to the SCM and waits for the command to finish processing.
*/
int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
void *resp_buf, size_t resp_len)
{
int ret;
struct scm_command *cmd;
struct scm_response *rsp;
cmd = alloc_scm_command(cmd_len, resp_len);
if (!cmd)
return -ENOMEM;
cmd->id = (svc_id << 10) | cmd_id;
if (cmd_buf)
memcpy(scm_get_command_buffer(cmd), cmd_buf, cmd_len);
mutex_lock(&scm_lock);
ret = __scm_call(cmd);
mutex_unlock(&scm_lock);
if (ret)
goto out;
rsp = scm_command_to_response(cmd);
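/*
* The secure side writes the response straight to memory, so each pass
* invalidates the cached copy of the response area (mcr p15, 0, rX,
* c7, c6, 1 is a D-cache invalidate by MVA) before is_complete is
* re-read.
*/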
do {
u32 start = (u32)rsp;
u32 end = (u32)scm_get_response_buffer(rsp) + resp_len;
start &= ~(CACHELINESIZE - 1);
while (start < end) {
asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start)
: "memory");
start += CACHELINESIZE;
}
} while (!rsp->is_complete);
if (resp_buf)
memcpy(resp_buf, scm_get_response_buffer(rsp), resp_len);
out:
free_scm_command(cmd);
return ret;
}
EXPORT_SYMBOL(scm_call);
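/*
* Minimal usage sketch for scm_call(). The service/command IDs below
* (0x1/0x1) are placeholders for illustration only; real IDs are
* defined by the secure-world firmware.
*/
static int __maybe_unused scm_call_example(void)
{
u32 arg = 1; /* hypothetical one-word command payload */
u32 resp = 0;
int ret;
ret = scm_call(0x1, 0x1, &arg, sizeof(arg), &resp, sizeof(resp));
if (ret)
return ret; /* transport error or remapped SCM error */
return resp ? -EIO : 0; /* hypothetical: nonzero response = failure */
}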
u32 scm_get_version(void)
{
int context_id;
static u32 version = -1;
register u32 r0 asm("r0");
register u32 r1 asm("r1");
if (version != -1)
return version;
mutex_lock(&scm_lock);
r0 = 0x1 << 8;
r1 = (u32)&context_id;
do {
asm volatile(
__asmeq("%0", "r0")
__asmeq("%1", "r1")
__asmeq("%2", "r0")
__asmeq("%3", "r1")
#ifdef REQUIRES_SEC
".arch_extension sec\n"
#endif
"smc #0 @ switch to secure world\n"
: "=r" (r0), "=r" (r1)
: "r" (r0), "r" (r1)
: "r2", "r3");
} while (r0 == SCM_INTERRUPTED);
version = r1;
mutex_unlock(&scm_lock);
return version;
}
EXPORT_SYMBOL(scm_get_version);
|
gpl-2.0
|
bgamari/linux
|
arch/mips/kernel/mips-mt.c
|
2811
|
7897
|
/*
* General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
* Copyright (C) 2005 Mips Technologies, Inc
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/security.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <linux/atomic.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/mipsmtregs.h>
#include <asm/r4kcache.h>
#include <asm/cacheflush.h>
int vpelimit;
static int __init maxvpes(char *str)
{
get_option(&str, &vpelimit);
return 1;
}
__setup("maxvpes=", maxvpes);
int tclimit;
static int __init maxtcs(char *str)
{
get_option(&str, &tclimit);
return 1;
}
__setup("maxtcs=", maxtcs);
/*
* Dump new MIPS MT state for the core. Does not leave TCs halted.
* Takes an argument which is taken to be a pre-call MVPControl value.
*/
void mips_mt_regdump(unsigned long mvpctl)
{
unsigned long flags;
unsigned long vpflags;
unsigned long mvpconf0;
int nvpe;
int ntc;
int i;
int tc;
unsigned long haltval;
unsigned long tcstatval;
#ifdef CONFIG_MIPS_MT_SMTC
void smtc_soft_dump(void);
#endif /* CONFIG_MIPS_MT_SMTC */
local_irq_save(flags);
vpflags = dvpe();
printk("=== MIPS MT State Dump ===\n");
printk("-- Global State --\n");
printk(" MVPControl Passed: %08lx\n", mvpctl);
printk(" MVPControl Read: %08lx\n", vpflags);
printk(" MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0()));
nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
printk("-- per-VPE State --\n");
for (i = 0; i < nvpe; i++) {
for (tc = 0; tc < ntc; tc++) {
settc(tc);
if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
printk(" VPE %d\n", i);
printk(" VPEControl : %08lx\n",
read_vpe_c0_vpecontrol());
printk(" VPEConf0 : %08lx\n",
read_vpe_c0_vpeconf0());
printk(" VPE%d.Status : %08lx\n",
i, read_vpe_c0_status());
printk(" VPE%d.EPC : %08lx %pS\n",
i, read_vpe_c0_epc(),
(void *) read_vpe_c0_epc());
printk(" VPE%d.Cause : %08lx\n",
i, read_vpe_c0_cause());
printk(" VPE%d.Config7 : %08lx\n",
i, read_vpe_c0_config7());
break; /* Next VPE */
}
}
}
printk("-- per-TC State --\n");
for (tc = 0; tc < ntc; tc++) {
settc(tc);
if (read_tc_c0_tcbind() == read_c0_tcbind()) {
/* Are we dumping ourself? */
haltval = 0; /* Then we're not halted, and mustn't be */
tcstatval = flags; /* And pre-dump TCStatus is flags */
printk(" TC %d (current TC with VPE EPC above)\n", tc);
} else {
haltval = read_tc_c0_tchalt();
write_tc_c0_tchalt(1);
tcstatval = read_tc_c0_tcstatus();
printk(" TC %d\n", tc);
}
printk(" TCStatus : %08lx\n", tcstatval);
printk(" TCBind : %08lx\n", read_tc_c0_tcbind());
printk(" TCRestart : %08lx %pS\n",
read_tc_c0_tcrestart(), (void *) read_tc_c0_tcrestart());
printk(" TCHalt : %08lx\n", haltval);
printk(" TCContext : %08lx\n", read_tc_c0_tccontext());
if (!haltval)
write_tc_c0_tchalt(0);
}
#ifdef CONFIG_MIPS_MT_SMTC
smtc_soft_dump();
#endif /* CONFIG_MIPS_MT_SMTC */
printk("===========================\n");
evpe(vpflags);
local_irq_restore(flags);
}
static int mt_opt_norps;
static int mt_opt_rpsctl = -1;
static int mt_opt_nblsu = -1;
static int mt_opt_forceconfig7;
static int mt_opt_config7 = -1;
static int __init rps_disable(char *s)
{
mt_opt_norps = 1;
return 1;
}
__setup("norps", rps_disable);
static int __init rpsctl_set(char *str)
{
get_option(&str, &mt_opt_rpsctl);
return 1;
}
__setup("rpsctl=", rpsctl_set);
static int __init nblsu_set(char *str)
{
get_option(&str, &mt_opt_nblsu);
return 1;
}
__setup("nblsu=", nblsu_set);
static int __init config7_set(char *str)
{
get_option(&str, &mt_opt_config7);
mt_opt_forceconfig7 = 1;
return 1;
}
__setup("config7=", config7_set);
/* Experimental cache flush control parameters that should go away some day */
int mt_protiflush;
int mt_protdflush;
int mt_n_iflushes = 1;
int mt_n_dflushes = 1;
static int __init set_protiflush(char *s)
{
mt_protiflush = 1;
return 1;
}
__setup("protiflush", set_protiflush);
static int __init set_protdflush(char *s)
{
mt_protdflush = 1;
return 1;
}
__setup("protdflush", set_protdflush);
static int __init niflush(char *s)
{
get_option(&s, &mt_n_iflushes);
return 1;
}
__setup("niflush=", niflush);
static int __init ndflush(char *s)
{
get_option(&s, &mt_n_dflushes);
return 1;
}
__setup("ndflush=", ndflush);
static unsigned int itc_base;
static int __init set_itc_base(char *str)
{
get_option(&str, &itc_base);
return 1;
}
__setup("itcbase=", set_itc_base);
void mips_mt_set_cpuoptions(void)
{
unsigned int oconfig7 = read_c0_config7();
unsigned int nconfig7 = oconfig7;
if (mt_opt_norps) {
printk("\"norps\" option deprecated: use \"rpsctl=\"\n");
}
if (mt_opt_rpsctl >= 0) {
printk("34K return prediction stack override set to %d.\n",
mt_opt_rpsctl);
if (mt_opt_rpsctl)
nconfig7 |= (1 << 2);
else
nconfig7 &= ~(1 << 2);
}
if (mt_opt_nblsu >= 0) {
printk("34K ALU/LSU sync override set to %d.\n", mt_opt_nblsu);
if (mt_opt_nblsu)
nconfig7 |= (1 << 5);
else
nconfig7 &= ~(1 << 5);
}
if (mt_opt_forceconfig7) {
printk("CP0.Config7 forced to 0x%08x.\n", mt_opt_config7);
nconfig7 = mt_opt_config7;
}
if (oconfig7 != nconfig7) {
__asm__ __volatile__("sync");
write_c0_config7(nconfig7);
ehb();
printk("Config7: 0x%08x\n", read_c0_config7());
}
/* Report Cache management debug options */
if (mt_protiflush)
printk("I-cache flushes single-threaded\n");
if (mt_protdflush)
printk("D-cache flushes single-threaded\n");
if (mt_n_iflushes != 1)
printk("I-Cache Flushes Repeated %d times\n", mt_n_iflushes);
if (mt_n_dflushes != 1)
printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes);
if (itc_base != 0) {
/*
* Configure ITC mapping. This code is very
* specific to the 34K core family, which uses
* a special mode bit ("ITC") in the ErrCtl
* register to enable access to ITC control
* registers via cache "tag" operations.
*/
unsigned long ectlval;
unsigned long itcblkgrn;
/* ErrCtl register is known as "ecc" to Linux */
ectlval = read_c0_ecc();
write_c0_ecc(ectlval | (0x1 << 26));
ehb();
#define INDEX_0 (0x80000000)
#define INDEX_8 (0x80000008)
/* Read "cache tag" for Dcache pseudo-index 8 */
cache_op(Index_Load_Tag_D, INDEX_8);
ehb();
itcblkgrn = read_c0_dtaglo();
itcblkgrn &= 0xfffe0000;
/* Set for 128 byte pitch of ITC cells */
itcblkgrn |= 0x00000c00;
/* Stage in Tag register */
write_c0_dtaglo(itcblkgrn);
ehb();
/* Write out to ITU with CACHE op */
cache_op(Index_Store_Tag_D, INDEX_8);
/* Now set base address, and turn ITC on with 0x1 bit */
write_c0_dtaglo((itc_base & 0xfffffc00) | 0x1 );
ehb();
/* Write out to ITU with CACHE op */
cache_op(Index_Store_Tag_D, INDEX_0);
write_c0_ecc(ectlval);
ehb();
printk("Mapped %ld ITC cells starting at 0x%08x\n",
((itcblkgrn & 0x7fe00000) >> 20), itc_base);
}
}
/*
* Function to protect cache flushes from concurrent execution
* depends on MP software model chosen.
*/
void mt_cflush_lockdown(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
void smtc_cflush_lockdown(void);
smtc_cflush_lockdown();
#endif /* CONFIG_MIPS_MT_SMTC */
/* FILL IN VSMP and AP/SP VERSIONS HERE */
}
void mt_cflush_release(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
void smtc_cflush_release(void);
smtc_cflush_release();
#endif /* CONFIG_MIPS_MT_SMTC */
/* FILL IN VSMP and AP/SP VERSIONS HERE */
}
struct class *mt_class;
static int __init mt_init(void)
{
struct class *mtc;
mtc = class_create(THIS_MODULE, "mt");
if (IS_ERR(mtc))
return PTR_ERR(mtc);
mt_class = mtc;
return 0;
}
subsys_initcall(mt_init);
|
gpl-2.0
|
regalstreak/android_kernel_samsung_logan2g
|
drivers/usb/storage/shuttle_usbat.c
|
3067
|
50354
|
/* Driver for SCM Microsystems (a.k.a. Shuttle) USB-ATAPI cable
*
* Current development and maintenance by:
* (c) 2000, 2001 Robert Baruch (autophile@starband.net)
* (c) 2004, 2005 Daniel Drake <dsd@gentoo.org>
*
* Developed with the assistance of:
* (c) 2002 Alan Stern <stern@rowland.org>
*
* Flash support based on earlier work by:
* (c) 2002 Thomas Kreiling <usbdev@sm04.de>
*
* Many originally ATAPI devices were slightly modified to meet the USB
* market by using some kind of translation from ATAPI to USB on the host,
* and the peripheral would translate from USB back to ATAPI.
*
* SCM Microsystems (www.scmmicro.com) makes a device, sold to OEM's only,
* which does the USB-to-ATAPI conversion. By obtaining the data sheet on
* their device under nondisclosure agreement, I have been able to write
* this driver for Linux.
*
* The chip used in the device can also be used for EPP and ISA translation
* as well. This driver is only guaranteed to work with the ATAPI
* translation.
*
* See the Kconfig help text for a list of devices known to be supported by
* this driver.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cdrom.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include "usb.h"
#include "transport.h"
#include "protocol.h"
#include "debug.h"
MODULE_DESCRIPTION("Driver for SCM Microsystems (a.k.a. Shuttle) USB-ATAPI cable");
MODULE_AUTHOR("Daniel Drake <dsd@gentoo.org>, Robert Baruch <autophile@starband.net>");
MODULE_LICENSE("GPL");
/* Supported device types */
#define USBAT_DEV_HP8200 0x01
#define USBAT_DEV_FLASH 0x02
#define USBAT_EPP_PORT 0x10
#define USBAT_EPP_REGISTER 0x30
#define USBAT_ATA 0x40
#define USBAT_ISA 0x50
/* Commands (need to be logically OR'd with an access type */
#define USBAT_CMD_READ_REG 0x00
#define USBAT_CMD_WRITE_REG 0x01
#define USBAT_CMD_READ_BLOCK 0x02
#define USBAT_CMD_WRITE_BLOCK 0x03
#define USBAT_CMD_COND_READ_BLOCK 0x04
#define USBAT_CMD_COND_WRITE_BLOCK 0x05
#define USBAT_CMD_WRITE_REGS 0x07
/* Commands (these don't need an access type) */
#define USBAT_CMD_EXEC_CMD 0x80
#define USBAT_CMD_SET_FEAT 0x81
#define USBAT_CMD_UIO 0x82
/* Methods of accessing UIO register */
#define USBAT_UIO_READ 1
#define USBAT_UIO_WRITE 0
/* Qualifier bits */
#define USBAT_QUAL_FCQ 0x20 /* full compare */
#define USBAT_QUAL_ALQ 0x10 /* auto load subcount */
/* USBAT Flash Media status types */
#define USBAT_FLASH_MEDIA_NONE 0
#define USBAT_FLASH_MEDIA_CF 1
/* USBAT Flash Media change types */
#define USBAT_FLASH_MEDIA_SAME 0
#define USBAT_FLASH_MEDIA_CHANGED 1
/* USBAT ATA registers */
#define USBAT_ATA_DATA 0x10 /* read/write data (R/W) */
#define USBAT_ATA_FEATURES 0x11 /* set features (W) */
#define USBAT_ATA_ERROR 0x11 /* error (R) */
#define USBAT_ATA_SECCNT 0x12 /* sector count (R/W) */
#define USBAT_ATA_SECNUM 0x13 /* sector number (R/W) */
#define USBAT_ATA_LBA_ME 0x14 /* cylinder low (R/W) */
#define USBAT_ATA_LBA_HI 0x15 /* cylinder high (R/W) */
#define USBAT_ATA_DEVICE 0x16 /* head/device selection (R/W) */
#define USBAT_ATA_STATUS 0x17 /* device status (R) */
#define USBAT_ATA_CMD 0x17 /* device command (W) */
#define USBAT_ATA_ALTSTATUS 0x0E /* status (no clear IRQ) (R) */
/* USBAT User I/O Data registers */
#define USBAT_UIO_EPAD 0x80 /* Enable Peripheral Control Signals */
#define USBAT_UIO_CDT 0x40 /* Card Detect (Read Only) */
/* CDT = ACKD & !UI1 & !UI0 */
#define USBAT_UIO_1 0x20 /* I/O 1 */
#define USBAT_UIO_0 0x10 /* I/O 0 */
#define USBAT_UIO_EPP_ATA 0x08 /* 1=EPP mode, 0=ATA mode */
#define USBAT_UIO_UI1 0x04 /* Input 1 */
#define USBAT_UIO_UI0 0x02 /* Input 0 */
#define USBAT_UIO_INTR_ACK 0x01 /* Interrupt (ATA/ISA)/Acknowledge (EPP) */
/* USBAT User I/O Enable registers */
#define USBAT_UIO_DRVRST 0x80 /* Reset Peripheral */
#define USBAT_UIO_ACKD 0x40 /* Enable Card Detect */
#define USBAT_UIO_OE1 0x20 /* I/O 1 set=output/clr=input */
/* If ACKD=1, set OE1 to 1 also. */
#define USBAT_UIO_OE0 0x10 /* I/O 0 set=output/clr=input */
#define USBAT_UIO_ADPRST 0x01 /* Reset SCM chip */
/* USBAT Features */
#define USBAT_FEAT_ETEN 0x80 /* External trigger enable */
#define USBAT_FEAT_U1 0x08
#define USBAT_FEAT_U0 0x04
#define USBAT_FEAT_ET1 0x02
#define USBAT_FEAT_ET2 0x01
struct usbat_info {
int devicetype;
/* Used for Flash readers only */
unsigned long sectors; /* total sector count */
unsigned long ssize; /* sector size in bytes */
unsigned char sense_key;
unsigned long sense_asc; /* additional sense code */
unsigned long sense_ascq; /* additional sense code qualifier */
};
#define short_pack(LSB,MSB) ( ((u16)(LSB)) | ( ((u16)(MSB))<<8 ) )
#define LSB_of(s) ((s)&0xFF)
#define MSB_of(s) ((s)>>8)
static int transferred = 0;
static int usbat_flash_transport(struct scsi_cmnd * srb, struct us_data *us);
static int usbat_hp8200e_transport(struct scsi_cmnd *srb, struct us_data *us);
static int init_usbat_cd(struct us_data *us);
static int init_usbat_flash(struct us_data *us);
/*
* The table of devices
*/
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
vendorName, productName, useProtocol, useTransport, \
initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
struct usb_device_id usbat_usb_ids[] = {
# include "unusual_usbat.h"
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, usbat_usb_ids);
#undef UNUSUAL_DEV
/*
* The flags table
*/
#define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \
vendor_name, product_name, use_protocol, use_transport, \
init_function, Flags) \
{ \
.vendorName = vendor_name, \
.productName = product_name, \
.useProtocol = use_protocol, \
.useTransport = use_transport, \
.initFunction = init_function, \
}
static struct us_unusual_dev usbat_unusual_dev_list[] = {
# include "unusual_usbat.h"
{ } /* Terminating entry */
};
#undef UNUSUAL_DEV
/*
* Convenience function to produce an ATA read/write sectors command
* Use cmd=0x20 for read, cmd=0x30 for write
*/
static void usbat_pack_ata_sector_cmd(unsigned char *buf,
unsigned char thistime,
u32 sector, unsigned char cmd)
{
buf[0] = 0;
buf[1] = thistime;
buf[2] = sector & 0xFF;
buf[3] = (sector >> 8) & 0xFF;
buf[4] = (sector >> 16) & 0xFF;
buf[5] = 0xE0 | ((sector >> 24) & 0x0F);
buf[6] = cmd;
}
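/*
* Example: for sector 0x00123456 with thistime = 8 and cmd = 0x20
* (read), the packed buffer is { 0x00, 0x08, 0x56, 0x34, 0x12, 0xE0,
* 0x20 }: the 28-bit LBA is split across bytes 2-5, with 0xE0
* selecting LBA mode on device 0.
*/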
/*
* Convenience function to get the device type (flash or hp8200)
*/
static int usbat_get_device_type(struct us_data *us)
{
return ((struct usbat_info*)us->extra)->devicetype;
}
/*
* Read a register from the device
*/
static int usbat_read(struct us_data *us,
unsigned char access,
unsigned char reg,
unsigned char *content)
{
return usb_stor_ctrl_transfer(us,
us->recv_ctrl_pipe,
access | USBAT_CMD_READ_REG,
0xC0,
(u16)reg,
0,
content,
1);
}
/*
* Write to a register on the device
*/
static int usbat_write(struct us_data *us,
unsigned char access,
unsigned char reg,
unsigned char content)
{
return usb_stor_ctrl_transfer(us,
us->send_ctrl_pipe,
access | USBAT_CMD_WRITE_REG,
0x40,
short_pack(reg, content),
0,
NULL,
0);
}
/*
* Convenience function to perform a bulk read
*/
static int usbat_bulk_read(struct us_data *us,
void* buf,
unsigned int len,
int use_sg)
{
if (len == 0)
return USB_STOR_XFER_GOOD;
US_DEBUGP("usbat_bulk_read: len = %d\n", len);
return usb_stor_bulk_transfer_sg(us, us->recv_bulk_pipe, buf, len, use_sg, NULL);
}
/*
* Convenience function to perform a bulk write
*/
static int usbat_bulk_write(struct us_data *us,
void* buf,
unsigned int len,
int use_sg)
{
if (len == 0)
return USB_STOR_XFER_GOOD;
US_DEBUGP("usbat_bulk_write: len = %d\n", len);
return usb_stor_bulk_transfer_sg(us, us->send_bulk_pipe, buf, len, use_sg, NULL);
}
/*
* Some USBAT-specific commands can only be executed over a command transport
* This transport allows one (len=8) or two (len=16) vendor-specific commands
* to be executed.
*/
static int usbat_execute_command(struct us_data *us,
unsigned char *commands,
unsigned int len)
{
return usb_stor_ctrl_transfer(us, us->send_ctrl_pipe,
USBAT_CMD_EXEC_CMD, 0x40, 0, 0,
commands, len);
}
/*
* Read the status register
*/
static int usbat_get_status(struct us_data *us, unsigned char *status)
{
int rc;
rc = usbat_read(us, USBAT_ATA, USBAT_ATA_STATUS, status);
US_DEBUGP("usbat_get_status: 0x%02X\n", (unsigned short) (*status));
return rc;
}
/*
* Check the device status
*/
static int usbat_check_status(struct us_data *us)
{
unsigned char *reply = us->iobuf;
int rc;
rc = usbat_get_status(us, reply);
if (rc != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_FAILED;
/* error/check condition (0x51 = DRDY | DSC | ERR is tolerated) */
if (*reply & 0x01 && *reply != 0x51)
return USB_STOR_TRANSPORT_FAILED;
/* device fault */
if (*reply & 0x20)
return USB_STOR_TRANSPORT_FAILED;
return USB_STOR_TRANSPORT_GOOD;
}
/*
* Stores critical information in internal registers in preparation for the execution
* of a conditional usbat_read_blocks or usbat_write_blocks call.
*/
static int usbat_set_shuttle_features(struct us_data *us,
unsigned char external_trigger,
unsigned char epp_control,
unsigned char mask_byte,
unsigned char test_pattern,
unsigned char subcountH,
unsigned char subcountL)
{
unsigned char *command = us->iobuf;
command[0] = 0x40;
command[1] = USBAT_CMD_SET_FEAT;
/*
* The only bit relevant to ATA access is bit 6
* which defines 8 bit data access (set) or 16 bit (unset)
*/
command[2] = epp_control;
/*
* If FCQ is set in the qualifier (defined in R/W cmd), then bits U0, U1,
* ET1 and ET2 define an external event to be checked for on event of a
* _read_blocks or _write_blocks operation. The read/write will not take
* place unless the defined trigger signal is active.
*/
command[3] = external_trigger;
/*
* The resultant byte of the mask operation (see mask_byte) is compared for
* equivalence with this test pattern. If equal, the read/write will take
* place.
*/
command[4] = test_pattern;
/*
* This value is logically ANDed with the status register field specified
* in the read/write command.
*/
command[5] = mask_byte;
/*
* If ALQ is set in the qualifier, this field contains the address of the
* registers where the byte count should be read for transferring the data.
* If ALQ is not set, then this field contains the number of bytes to be
* transferred.
*/
command[6] = subcountL;
command[7] = subcountH;
return usbat_execute_command(us, command, 8);
}
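/*
* Illustrative helper (the values are hypothetical, not taken from a
* bus trace): masking the ATA status register with 0x88 (BSY | DRQ)
* and testing for 0x08 makes a following conditional block transfer
* wait until the device is no longer busy and is requesting data.
*/
static int __maybe_unused usbat_arm_drq_wait(struct us_data *us,
unsigned short len)
{
return usbat_set_shuttle_features(us, 0x00, 0x00, 0x88, 0x08,
MSB_of(len), LSB_of(len));
}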
/*
* Block, waiting for an ATA device to become not busy or to report
* an error condition.
*/
static int usbat_wait_not_busy(struct us_data *us, int minutes)
{
int i;
int result;
unsigned char *status = us->iobuf;
/* Synchronizing cache on a CDR could take a heck of a long time,
* but probably not more than 10 minutes or so. On the other hand,
* doing a full blank on a CDRW at speed 1 will take about 75
* minutes!
*/
for (i=0; i<1200+minutes*60; i++) {
result = usbat_get_status(us, status);
if (result!=USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
if (*status & 0x01) { /* check condition */
result = usbat_read(us, USBAT_ATA, 0x10, status);
return USB_STOR_TRANSPORT_FAILED;
}
if (*status & 0x20) /* device fault */
return USB_STOR_TRANSPORT_FAILED;
if ((*status & 0x80)==0x00) { /* not busy */
US_DEBUGP("Waited not busy for %d steps\n", i);
return USB_STOR_TRANSPORT_GOOD;
}
if (i<500)
msleep(10); /* 5 seconds */
else if (i<700)
msleep(50); /* 10 seconds */
else if (i<1200)
msleep(100); /* 50 seconds */
else
msleep(1000); /* X minutes */
}
US_DEBUGP("Waited not busy for %d minutes, timing out.\n",
minutes);
return USB_STOR_TRANSPORT_FAILED;
}
/*
* Read block data from the data register
*/
static int usbat_read_block(struct us_data *us,
void* buf,
unsigned short len,
int use_sg)
{
int result;
unsigned char *command = us->iobuf;
if (!len)
return USB_STOR_TRANSPORT_GOOD;
command[0] = 0xC0;
command[1] = USBAT_ATA | USBAT_CMD_READ_BLOCK;
command[2] = USBAT_ATA_DATA;
command[3] = 0;
command[4] = 0;
command[5] = 0;
command[6] = LSB_of(len);
command[7] = MSB_of(len);
result = usbat_execute_command(us, command, 8);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
result = usbat_bulk_read(us, buf, len, use_sg);
return (result == USB_STOR_XFER_GOOD ?
USB_STOR_TRANSPORT_GOOD : USB_STOR_TRANSPORT_ERROR);
}
/*
* Write block data via the data register
*/
static int usbat_write_block(struct us_data *us,
unsigned char access,
void* buf,
unsigned short len,
int minutes,
int use_sg)
{
int result;
unsigned char *command = us->iobuf;
if (!len)
return USB_STOR_TRANSPORT_GOOD;
command[0] = 0x40;
command[1] = access | USBAT_CMD_WRITE_BLOCK;
command[2] = USBAT_ATA_DATA;
command[3] = 0;
command[4] = 0;
command[5] = 0;
command[6] = LSB_of(len);
command[7] = MSB_of(len);
result = usbat_execute_command(us, command, 8);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
result = usbat_bulk_write(us, buf, len, use_sg);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
return usbat_wait_not_busy(us, minutes);
}
/*
* Process read and write requests
*/
static int usbat_hp8200e_rw_block_test(struct us_data *us,
unsigned char access,
unsigned char *registers,
unsigned char *data_out,
unsigned short num_registers,
unsigned char data_reg,
unsigned char status_reg,
unsigned char timeout,
unsigned char qualifier,
int direction,
void *buf,
unsigned short len,
int use_sg,
int minutes)
{
int result;
unsigned int pipe = (direction == DMA_FROM_DEVICE) ?
us->recv_bulk_pipe : us->send_bulk_pipe;
unsigned char *command = us->iobuf;
int i, j;
int cmdlen;
unsigned char *data = us->iobuf;
unsigned char *status = us->iobuf;
BUG_ON(num_registers > US_IOBUF_SIZE/2);
for (i=0; i<20; i++) {
/*
* The first time we send the full command, which consists
* of downloading the SCSI command followed by downloading
* the data via a write-and-test. Any other time we only
* send the command to download the data -- the SCSI command
* is still 'active' in some sense in the device.
*
* We're only going to try sending the data 10 times. After
* that, we just return a failure.
*/
if (i==0) {
cmdlen = 16;
/*
* Write to multiple registers
* Not really sure the 0x07, 0x17, 0xfc, 0xe7 is
* necessary here, but that's what came out of the
* trace every single time.
*/
command[0] = 0x40;
command[1] = access | USBAT_CMD_WRITE_REGS;
command[2] = 0x07;
command[3] = 0x17;
command[4] = 0xFC;
command[5] = 0xE7;
command[6] = LSB_of(num_registers*2);
command[7] = MSB_of(num_registers*2);
} else
cmdlen = 8;
/* Conditionally read or write blocks */
command[cmdlen-8] = (direction==DMA_TO_DEVICE ? 0x40 : 0xC0);
command[cmdlen-7] = access |
(direction==DMA_TO_DEVICE ?
USBAT_CMD_COND_WRITE_BLOCK : USBAT_CMD_COND_READ_BLOCK);
command[cmdlen-6] = data_reg;
command[cmdlen-5] = status_reg;
command[cmdlen-4] = timeout;
command[cmdlen-3] = qualifier;
command[cmdlen-2] = LSB_of(len);
command[cmdlen-1] = MSB_of(len);
result = usbat_execute_command(us, command, cmdlen);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
if (i==0) {
for (j=0; j<num_registers; j++) {
data[j<<1] = registers[j];
data[1+(j<<1)] = data_out[j];
}
result = usbat_bulk_write(us, data, num_registers*2, 0);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
}
result = usb_stor_bulk_transfer_sg(us,
pipe, buf, len, use_sg, NULL);
/*
* If we get a stall on the bulk download, we'll retry
* the bulk download -- but not the SCSI command because
* in some sense the SCSI command is still 'active' and
* waiting for the data. Don't ask me why this should be;
* I'm only following what the Windoze driver did.
*
* Note that a stall for the test-and-read/write command means
* that the test failed. In this case we're testing to make
* sure that the device is error-free
* (i.e. bit 0 -- CHK -- of status is 0). The most likely
* hypothesis is that the USBAT chip somehow knows what
* the device will accept, but doesn't give the device any
* data until all data is received. Thus, the device would
* still be waiting for the first byte of data if a stall
* occurs, even if the stall implies that some data was
* transferred.
*/
if (result == USB_STOR_XFER_SHORT ||
result == USB_STOR_XFER_STALLED) {
/*
* If we're reading and we stalled, then clear
* the bulk output pipe only the first time.
*/
if (direction==DMA_FROM_DEVICE && i==0) {
if (usb_stor_clear_halt(us,
us->send_bulk_pipe) < 0)
return USB_STOR_TRANSPORT_ERROR;
}
/*
* Read status: is the device angry, or just busy?
*/
result = usbat_read(us, USBAT_ATA,
direction==DMA_TO_DEVICE ?
USBAT_ATA_STATUS : USBAT_ATA_ALTSTATUS,
status);
if (result!=USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
if (*status & 0x01) /* check condition */
return USB_STOR_TRANSPORT_FAILED;
if (*status & 0x20) /* device fault */
return USB_STOR_TRANSPORT_FAILED;
US_DEBUGP("Redoing %s\n",
direction==DMA_TO_DEVICE ? "write" : "read");
} else if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
else
return usbat_wait_not_busy(us, minutes);
}
US_DEBUGP("Bummer! %s bulk data 20 times failed.\n",
direction==DMA_TO_DEVICE ? "Writing" : "Reading");
return USB_STOR_TRANSPORT_FAILED;
}
/*
* Write to multiple registers:
* Allows us to write specific data to any registers. The data to be written
* gets packed in this sequence: reg0, data0, reg1, data1, ..., regN, dataN
* which gets sent through bulk out.
* Not designed for large transfers of data!
*/
static int usbat_multiple_write(struct us_data *us,
unsigned char *registers,
unsigned char *data_out,
unsigned short num_registers)
{
int i, result;
unsigned char *data = us->iobuf;
unsigned char *command = us->iobuf;
BUG_ON(num_registers > US_IOBUF_SIZE/2);
/* Write to multiple registers, ATA access */
command[0] = 0x40;
command[1] = USBAT_ATA | USBAT_CMD_WRITE_REGS;
/* No relevance */
command[2] = 0;
command[3] = 0;
command[4] = 0;
command[5] = 0;
/* Number of bytes to be transferred (incl. addresses and data) */
command[6] = LSB_of(num_registers*2);
command[7] = MSB_of(num_registers*2);
/* The setup command */
result = usbat_execute_command(us, command, 8);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
/* Create the reg/data, reg/data sequence */
for (i=0; i<num_registers; i++) {
data[i<<1] = registers[i];
data[1+(i<<1)] = data_out[i];
}
/* Send the data */
result = usbat_bulk_write(us, data, num_registers*2, 0);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
if (usbat_get_device_type(us) == USBAT_DEV_HP8200)
return usbat_wait_not_busy(us, 0);
else
return USB_STOR_TRANSPORT_GOOD;
}
/*
* Conditionally read blocks from device:
* Allows us to read blocks from a specific data register, based upon the
* condition that a status register can be successfully masked with a status
* qualifier. If this condition is not initially met, the read will wait
* until a maximum amount of time has elapsed, as specified by timeout.
* The read will start when the condition is met, otherwise the command aborts.
*
* The qualifier defined here is not the value that is masked, it defines
* conditions for the write to take place. The actual masked qualifier (and
* other related details) are defined beforehand with _set_shuttle_features().
*/
static int usbat_read_blocks(struct us_data *us,
void* buffer,
int len,
int use_sg)
{
int result;
unsigned char *command = us->iobuf;
command[0] = 0xC0;
command[1] = USBAT_ATA | USBAT_CMD_COND_READ_BLOCK;
command[2] = USBAT_ATA_DATA;
command[3] = USBAT_ATA_STATUS;
	command[4] = 0xFD; /* Timeout (ms) */
command[5] = USBAT_QUAL_FCQ;
command[6] = LSB_of(len);
command[7] = MSB_of(len);
/* Multiple block read setup command */
result = usbat_execute_command(us, command, 8);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_FAILED;
/* Read the blocks we just asked for */
result = usbat_bulk_read(us, buffer, len, use_sg);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_FAILED;
return USB_STOR_TRANSPORT_GOOD;
}
/*
* Conditionally write blocks to device:
* Allows us to write blocks to a specific data register, based upon the
* condition that a status register can be successfully masked with a status
 * qualifier. If this condition is not initially met, the write waits until
 * a maximum amount of time, specified by timeout, has elapsed. The write
 * starts once the condition is met; otherwise the command aborts.
 *
 * The qualifier defined here is not the value that is masked; it defines
 * the conditions for the write to take place. The actual masked qualifier (and
* other related details) are defined beforehand with _set_shuttle_features().
*/
static int usbat_write_blocks(struct us_data *us,
void* buffer,
int len,
int use_sg)
{
int result;
unsigned char *command = us->iobuf;
command[0] = 0x40;
command[1] = USBAT_ATA | USBAT_CMD_COND_WRITE_BLOCK;
command[2] = USBAT_ATA_DATA;
command[3] = USBAT_ATA_STATUS;
command[4] = 0xFD; /* Timeout (ms) */
command[5] = USBAT_QUAL_FCQ;
command[6] = LSB_of(len);
command[7] = MSB_of(len);
/* Multiple block write setup command */
result = usbat_execute_command(us, command, 8);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_FAILED;
/* Write the data */
result = usbat_bulk_write(us, buffer, len, use_sg);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_FAILED;
return USB_STOR_TRANSPORT_GOOD;
}
/*
* Read the User IO register
*/
static int usbat_read_user_io(struct us_data *us, unsigned char *data_flags)
{
int result;
result = usb_stor_ctrl_transfer(us,
us->recv_ctrl_pipe,
USBAT_CMD_UIO,
0xC0,
0,
0,
data_flags,
USBAT_UIO_READ);
US_DEBUGP("usbat_read_user_io: UIO register reads %02X\n", (unsigned short) (*data_flags));
return result;
}
/*
* Write to the User IO register
*/
static int usbat_write_user_io(struct us_data *us,
unsigned char enable_flags,
unsigned char data_flags)
{
return usb_stor_ctrl_transfer(us,
us->send_ctrl_pipe,
USBAT_CMD_UIO,
0x40,
short_pack(enable_flags, data_flags),
0,
NULL,
USBAT_UIO_WRITE);
}
/*
* Reset the device
* Often needed on media change.
*/
static int usbat_device_reset(struct us_data *us)
{
int rc;
/*
* Reset peripheral, enable peripheral control signals
* (bring reset signal up)
*/
rc = usbat_write_user_io(us,
USBAT_UIO_DRVRST | USBAT_UIO_OE1 | USBAT_UIO_OE0,
USBAT_UIO_EPAD | USBAT_UIO_1);
if (rc != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
/*
* Enable peripheral control signals
* (bring reset signal down)
*/
rc = usbat_write_user_io(us,
USBAT_UIO_OE1 | USBAT_UIO_OE0,
USBAT_UIO_EPAD | USBAT_UIO_1);
if (rc != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
return USB_STOR_TRANSPORT_GOOD;
}
/*
* Enable card detect
*/
static int usbat_device_enable_cdt(struct us_data *us)
{
int rc;
/* Enable peripheral control signals and card detect */
rc = usbat_write_user_io(us,
USBAT_UIO_ACKD | USBAT_UIO_OE1 | USBAT_UIO_OE0,
USBAT_UIO_EPAD | USBAT_UIO_1);
if (rc != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
return USB_STOR_TRANSPORT_GOOD;
}
/*
* Determine if media is present.
*/
static int usbat_flash_check_media_present(unsigned char *uio)
{
if (*uio & USBAT_UIO_UI0) {
US_DEBUGP("usbat_flash_check_media_present: no media detected\n");
return USBAT_FLASH_MEDIA_NONE;
}
return USBAT_FLASH_MEDIA_CF;
}
/*
* Determine if media has changed since last operation
*/
static int usbat_flash_check_media_changed(unsigned char *uio)
{
if (*uio & USBAT_UIO_0) {
US_DEBUGP("usbat_flash_check_media_changed: media change detected\n");
return USBAT_FLASH_MEDIA_CHANGED;
}
return USBAT_FLASH_MEDIA_SAME;
}
/*
* Check for media change / no media and handle the situation appropriately
*/
static int usbat_flash_check_media(struct us_data *us,
struct usbat_info *info)
{
int rc;
unsigned char *uio = us->iobuf;
rc = usbat_read_user_io(us, uio);
if (rc != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
/* Check for media existence */
rc = usbat_flash_check_media_present(uio);
if (rc == USBAT_FLASH_MEDIA_NONE) {
info->sense_key = 0x02;
info->sense_asc = 0x3A;
info->sense_ascq = 0x00;
return USB_STOR_TRANSPORT_FAILED;
}
/* Check for media change */
rc = usbat_flash_check_media_changed(uio);
if (rc == USBAT_FLASH_MEDIA_CHANGED) {
/* Reset and re-enable card detect */
rc = usbat_device_reset(us);
if (rc != USB_STOR_TRANSPORT_GOOD)
return rc;
rc = usbat_device_enable_cdt(us);
if (rc != USB_STOR_TRANSPORT_GOOD)
return rc;
msleep(50);
rc = usbat_read_user_io(us, uio);
if (rc != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
info->sense_key = UNIT_ATTENTION;
info->sense_asc = 0x28;
info->sense_ascq = 0x00;
return USB_STOR_TRANSPORT_FAILED;
}
return USB_STOR_TRANSPORT_GOOD;
}
/*
* Determine whether we are controlling a flash-based reader/writer,
* or a HP8200-based CD drive.
* Sets transport functions as appropriate.
*/
static int usbat_identify_device(struct us_data *us,
struct usbat_info *info)
{
int rc;
unsigned char status;
if (!us || !info)
return USB_STOR_TRANSPORT_ERROR;
rc = usbat_device_reset(us);
if (rc != USB_STOR_TRANSPORT_GOOD)
return rc;
msleep(500);
/*
 * In an attempt to distinguish between HP CDRWs and flash readers, we now
 * execute the IDENTIFY PACKET DEVICE command. On ATA devices (i.e. flash
 * readers), this command should fail with an error. On ATAPI devices (i.e.
 * CD-ROM drives), it should succeed.
*/
rc = usbat_write(us, USBAT_ATA, USBAT_ATA_CMD, 0xA1);
if (rc != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
rc = usbat_get_status(us, &status);
if (rc != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
/* Check for error bit, or if the command 'fell through' */
if (status == 0xA1 || !(status & 0x01)) {
/* Device is HP 8200 */
US_DEBUGP("usbat_identify_device: Detected HP8200 CDRW\n");
info->devicetype = USBAT_DEV_HP8200;
} else {
/* Device is a CompactFlash reader/writer */
US_DEBUGP("usbat_identify_device: Detected Flash reader/writer\n");
info->devicetype = USBAT_DEV_FLASH;
}
return USB_STOR_TRANSPORT_GOOD;
}
/*
* Set the transport function based on the device type
*/
static int usbat_set_transport(struct us_data *us,
struct usbat_info *info,
int devicetype)
{
if (!info->devicetype)
info->devicetype = devicetype;
if (!info->devicetype)
usbat_identify_device(us, info);
switch (info->devicetype) {
default:
return USB_STOR_TRANSPORT_ERROR;
case USBAT_DEV_HP8200:
us->transport = usbat_hp8200e_transport;
break;
case USBAT_DEV_FLASH:
us->transport = usbat_flash_transport;
break;
}
return 0;
}
/*
* Read the media capacity
*/
static int usbat_flash_get_sector_count(struct us_data *us,
struct usbat_info *info)
{
unsigned char registers[3] = {
USBAT_ATA_SECCNT,
USBAT_ATA_DEVICE,
USBAT_ATA_CMD,
};
unsigned char command[3] = { 0x01, 0xA0, 0xEC };
unsigned char *reply;
unsigned char status;
int rc;
if (!us || !info)
return USB_STOR_TRANSPORT_ERROR;
reply = kmalloc(512, GFP_NOIO);
if (!reply)
return USB_STOR_TRANSPORT_ERROR;
/* ATA command : IDENTIFY DEVICE */
rc = usbat_multiple_write(us, registers, command, 3);
if (rc != USB_STOR_XFER_GOOD) {
US_DEBUGP("usbat_flash_get_sector_count: Gah! identify_device failed\n");
rc = USB_STOR_TRANSPORT_ERROR;
goto leave;
}
/* Read device status */
if (usbat_get_status(us, &status) != USB_STOR_XFER_GOOD) {
rc = USB_STOR_TRANSPORT_ERROR;
goto leave;
}
msleep(100);
/* Read the device identification data */
rc = usbat_read_block(us, reply, 512, 0);
if (rc != USB_STOR_TRANSPORT_GOOD)
goto leave;
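	/*
	 * Words 57-58 of the IDENTIFY DEVICE data (bytes 114-117,
	 * little-endian) hold the current capacity in sectors; the
	 * byte shuffle below reassembles that 32-bit value.
	 */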
info->sectors = ((u32)(reply[117]) << 24) |
((u32)(reply[116]) << 16) |
((u32)(reply[115]) << 8) |
((u32)(reply[114]) );
rc = USB_STOR_TRANSPORT_GOOD;
leave:
kfree(reply);
return rc;
}
/*
* Read data from device
*/
static int usbat_flash_read_data(struct us_data *us,
struct usbat_info *info,
u32 sector,
u32 sectors)
{
unsigned char registers[7] = {
USBAT_ATA_FEATURES,
USBAT_ATA_SECCNT,
USBAT_ATA_SECNUM,
USBAT_ATA_LBA_ME,
USBAT_ATA_LBA_HI,
USBAT_ATA_DEVICE,
USBAT_ATA_STATUS,
};
unsigned char command[7];
unsigned char *buffer;
unsigned char thistime;
unsigned int totallen, alloclen;
int len, result;
unsigned int sg_offset = 0;
struct scatterlist *sg = NULL;
result = usbat_flash_check_media(us, info);
if (result != USB_STOR_TRANSPORT_GOOD)
return result;
/*
	 * We're working in LBA mode. According to the ATA spec,
	 * we can support up to 28-bit addressing. I don't know if Jumpshot
	 * supports beyond 24-bit addressing; it's hard to test,
	 * since it requires a > 8 GB CF card.
*/
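	/*
	 * For scale: 28 bits address 2^28 = 268,435,456 sectors, i.e.
	 * 128 GiB at 512 bytes per sector, while 24 bits top out at
	 * 8 GiB -- hence the > 8 GB requirement above.
	 */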
if (sector > 0x0FFFFFFF)
return USB_STOR_TRANSPORT_ERROR;
totallen = sectors * info->ssize;
/*
* Since we don't read more than 64 KB at a time, we have to create
* a bounce buffer and move the data a piece at a time between the
* bounce buffer and the actual transfer buffer.
*/
alloclen = min(totallen, 65536u);
buffer = kmalloc(alloclen, GFP_NOIO);
if (buffer == NULL)
return USB_STOR_TRANSPORT_ERROR;
do {
/*
* loop, never allocate or transfer more than 64k at once
* (min(128k, 255*info->ssize) is the real limit)
*/
len = min(totallen, alloclen);
thistime = (len / info->ssize) & 0xff;
/* ATA command 0x20 (READ SECTORS) */
usbat_pack_ata_sector_cmd(command, thistime, sector, 0x20);
/* Write/execute ATA read command */
result = usbat_multiple_write(us, registers, command, 7);
if (result != USB_STOR_TRANSPORT_GOOD)
goto leave;
/* Read the data we just requested */
result = usbat_read_blocks(us, buffer, len, 0);
if (result != USB_STOR_TRANSPORT_GOOD)
goto leave;
US_DEBUGP("usbat_flash_read_data: %d bytes\n", len);
/* Store the data in the transfer buffer */
usb_stor_access_xfer_buf(buffer, len, us->srb,
&sg, &sg_offset, TO_XFER_BUF);
sector += thistime;
totallen -= len;
} while (totallen > 0);
kfree(buffer);
return USB_STOR_TRANSPORT_GOOD;
leave:
kfree(buffer);
return USB_STOR_TRANSPORT_ERROR;
}
/*
* Write data to device
*/
static int usbat_flash_write_data(struct us_data *us,
struct usbat_info *info,
u32 sector,
u32 sectors)
{
unsigned char registers[7] = {
USBAT_ATA_FEATURES,
USBAT_ATA_SECCNT,
USBAT_ATA_SECNUM,
USBAT_ATA_LBA_ME,
USBAT_ATA_LBA_HI,
USBAT_ATA_DEVICE,
USBAT_ATA_STATUS,
};
unsigned char command[7];
unsigned char *buffer;
unsigned char thistime;
unsigned int totallen, alloclen;
int len, result;
unsigned int sg_offset = 0;
struct scatterlist *sg = NULL;
result = usbat_flash_check_media(us, info);
if (result != USB_STOR_TRANSPORT_GOOD)
return result;
/*
	 * We're working in LBA mode. According to the ATA spec,
	 * we can support up to 28-bit addressing. I don't know if the device
	 * supports beyond 24-bit addressing; it's hard to test,
	 * since it requires > 8 GB media.
*/
if (sector > 0x0FFFFFFF)
return USB_STOR_TRANSPORT_ERROR;
totallen = sectors * info->ssize;
/*
* Since we don't write more than 64 KB at a time, we have to create
* a bounce buffer and move the data a piece at a time between the
* bounce buffer and the actual transfer buffer.
*/
alloclen = min(totallen, 65536u);
buffer = kmalloc(alloclen, GFP_NOIO);
if (buffer == NULL)
return USB_STOR_TRANSPORT_ERROR;
do {
/*
* loop, never allocate or transfer more than 64k at once
* (min(128k, 255*info->ssize) is the real limit)
*/
len = min(totallen, alloclen);
thistime = (len / info->ssize) & 0xff;
/* Get the data from the transfer buffer */
usb_stor_access_xfer_buf(buffer, len, us->srb,
&sg, &sg_offset, FROM_XFER_BUF);
/* ATA command 0x30 (WRITE SECTORS) */
usbat_pack_ata_sector_cmd(command, thistime, sector, 0x30);
/* Write/execute ATA write command */
result = usbat_multiple_write(us, registers, command, 7);
if (result != USB_STOR_TRANSPORT_GOOD)
goto leave;
/* Write the data */
result = usbat_write_blocks(us, buffer, len, 0);
if (result != USB_STOR_TRANSPORT_GOOD)
goto leave;
sector += thistime;
totallen -= len;
} while (totallen > 0);
kfree(buffer);
return result;
leave:
kfree(buffer);
return USB_STOR_TRANSPORT_ERROR;
}
/*
* Squeeze a potentially huge (> 65535 byte) read10 command into
* a little ( <= 65535 byte) ATAPI pipe
*/
static int usbat_hp8200e_handle_read10(struct us_data *us,
unsigned char *registers,
unsigned char *data,
struct scsi_cmnd *srb)
{
int result = USB_STOR_TRANSPORT_GOOD;
unsigned char *buffer;
unsigned int len;
unsigned int sector;
unsigned int sg_offset = 0;
struct scatterlist *sg = NULL;
US_DEBUGP("handle_read10: transfersize %d\n",
srb->transfersize);
if (scsi_bufflen(srb) < 0x10000) {
result = usbat_hp8200e_rw_block_test(us, USBAT_ATA,
registers, data, 19,
USBAT_ATA_DATA, USBAT_ATA_STATUS, 0xFD,
(USBAT_QUAL_FCQ | USBAT_QUAL_ALQ),
DMA_FROM_DEVICE,
scsi_sglist(srb),
scsi_bufflen(srb), scsi_sg_count(srb), 1);
return result;
}
/*
* Since we're requesting more data than we can handle in
* a single read command (max is 64k-1), we will perform
* multiple reads, but each read must be in multiples of
* a sector. Luckily the sector size is in srb->transfersize
* (see linux/drivers/scsi/sr.c).
*/
if (data[7+0] == GPCMD_READ_CD) {
len = short_pack(data[7+9], data[7+8]);
len <<= 16;
len |= data[7+7];
US_DEBUGP("handle_read10: GPCMD_READ_CD: len %d\n", len);
srb->transfersize = scsi_bufflen(srb)/len;
}
if (!srb->transfersize) {
srb->transfersize = 2048; /* A guess */
US_DEBUGP("handle_read10: transfersize 0, forcing %d\n",
srb->transfersize);
}
/*
* Since we only read in one block at a time, we have to create
* a bounce buffer and move the data a piece at a time between the
* bounce buffer and the actual transfer buffer.
*/
len = (65535/srb->transfersize) * srb->transfersize;
US_DEBUGP("Max read is %d bytes\n", len);
len = min(len, scsi_bufflen(srb));
buffer = kmalloc(len, GFP_NOIO);
if (buffer == NULL) /* bloody hell! */
return USB_STOR_TRANSPORT_FAILED;
sector = short_pack(data[7+3], data[7+2]);
sector <<= 16;
sector |= short_pack(data[7+5], data[7+4]);
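	/*
	 * Example (hypothetical CDB): bytes 2..5 of 00 01 23 45 yield
	 * sector 0x00012345 here; the loop below rewrites those CDB bytes
	 * for each partial read.
	 */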
transferred = 0;
while (transferred != scsi_bufflen(srb)) {
if (len > scsi_bufflen(srb) - transferred)
len = scsi_bufflen(srb) - transferred;
data[3] = len&0xFF; /* (cylL) = expected length (L) */
data[4] = (len>>8)&0xFF; /* (cylH) = expected length (H) */
/* Fix up the SCSI command sector and num sectors */
data[7+2] = MSB_of(sector>>16); /* SCSI command sector */
data[7+3] = LSB_of(sector>>16);
data[7+4] = MSB_of(sector&0xFFFF);
data[7+5] = LSB_of(sector&0xFFFF);
if (data[7+0] == GPCMD_READ_CD)
data[7+6] = 0;
data[7+7] = MSB_of(len / srb->transfersize); /* SCSI command */
data[7+8] = LSB_of(len / srb->transfersize); /* num sectors */
result = usbat_hp8200e_rw_block_test(us, USBAT_ATA,
registers, data, 19,
USBAT_ATA_DATA, USBAT_ATA_STATUS, 0xFD,
(USBAT_QUAL_FCQ | USBAT_QUAL_ALQ),
DMA_FROM_DEVICE,
buffer,
len, 0, 1);
if (result != USB_STOR_TRANSPORT_GOOD)
break;
/* Store the data in the transfer buffer */
usb_stor_access_xfer_buf(buffer, len, srb,
&sg, &sg_offset, TO_XFER_BUF);
/* Update the amount transferred and the sector number */
transferred += len;
sector += len / srb->transfersize;
} /* while transferred != scsi_bufflen(srb) */
kfree(buffer);
return result;
}
static int usbat_select_and_test_registers(struct us_data *us)
{
int selector;
unsigned char *status = us->iobuf;
/* try device = master, then device = slave. */
for (selector = 0xA0; selector <= 0xB0; selector += 0x10) {
if (usbat_write(us, USBAT_ATA, USBAT_ATA_DEVICE, selector) !=
USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
if (usbat_read(us, USBAT_ATA, USBAT_ATA_STATUS, status) !=
USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
if (usbat_read(us, USBAT_ATA, USBAT_ATA_DEVICE, status) !=
USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
if (usbat_read(us, USBAT_ATA, USBAT_ATA_LBA_ME, status) !=
USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
if (usbat_read(us, USBAT_ATA, USBAT_ATA_LBA_HI, status) !=
USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
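		/*
		 * 0x55 and 0xAA are alternating-bit test patterns. Note that
		 * only the success of these register accesses is checked
		 * here; the values read back are not compared.
		 */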
if (usbat_write(us, USBAT_ATA, USBAT_ATA_LBA_ME, 0x55) !=
USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
if (usbat_write(us, USBAT_ATA, USBAT_ATA_LBA_HI, 0xAA) !=
USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
if (usbat_read(us, USBAT_ATA, USBAT_ATA_LBA_ME, status) !=
USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
if (usbat_read(us, USBAT_ATA, USBAT_ATA_LBA_ME, status) !=
USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
}
return USB_STOR_TRANSPORT_GOOD;
}
/*
* Initialize the USBAT processor and the storage device
*/
static int init_usbat(struct us_data *us, int devicetype)
{
int rc;
struct usbat_info *info;
unsigned char subcountH = USBAT_ATA_LBA_HI;
unsigned char subcountL = USBAT_ATA_LBA_ME;
unsigned char *status = us->iobuf;
us->extra = kzalloc(sizeof(struct usbat_info), GFP_NOIO);
if (!us->extra) {
US_DEBUGP("init_usbat: Gah! Can't allocate storage for usbat info struct!\n");
return 1;
}
info = (struct usbat_info *) (us->extra);
/* Enable peripheral control signals */
rc = usbat_write_user_io(us,
USBAT_UIO_OE1 | USBAT_UIO_OE0,
USBAT_UIO_EPAD | USBAT_UIO_1);
if (rc != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
US_DEBUGP("INIT 1\n");
msleep(2000);
rc = usbat_read_user_io(us, status);
if (rc != USB_STOR_TRANSPORT_GOOD)
return rc;
US_DEBUGP("INIT 2\n");
rc = usbat_read_user_io(us, status);
if (rc != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
rc = usbat_read_user_io(us, status);
if (rc != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
US_DEBUGP("INIT 3\n");
rc = usbat_select_and_test_registers(us);
if (rc != USB_STOR_TRANSPORT_GOOD)
return rc;
US_DEBUGP("INIT 4\n");
rc = usbat_read_user_io(us, status);
if (rc != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
US_DEBUGP("INIT 5\n");
/* Enable peripheral control signals and card detect */
rc = usbat_device_enable_cdt(us);
if (rc != USB_STOR_TRANSPORT_GOOD)
return rc;
US_DEBUGP("INIT 6\n");
rc = usbat_read_user_io(us, status);
if (rc != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
US_DEBUGP("INIT 7\n");
msleep(1400);
rc = usbat_read_user_io(us, status);
if (rc != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
US_DEBUGP("INIT 8\n");
rc = usbat_select_and_test_registers(us);
if (rc != USB_STOR_TRANSPORT_GOOD)
return rc;
US_DEBUGP("INIT 9\n");
/* At this point, we need to detect which device we are using */
if (usbat_set_transport(us, info, devicetype))
return USB_STOR_TRANSPORT_ERROR;
US_DEBUGP("INIT 10\n");
if (usbat_get_device_type(us) == USBAT_DEV_FLASH) {
subcountH = 0x02;
subcountL = 0x00;
}
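	/*
	 * 0x02/0x00 form a 16-bit subcount of 0x0200 (512), which
	 * presumably matches the 512-byte sector size the flash
	 * transport assumes elsewhere in this driver.
	 */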
rc = usbat_set_shuttle_features(us, (USBAT_FEAT_ETEN | USBAT_FEAT_ET2 | USBAT_FEAT_ET1),
0x00, 0x88, 0x08, subcountH, subcountL);
if (rc != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
US_DEBUGP("INIT 11\n");
return USB_STOR_TRANSPORT_GOOD;
}
/*
* Transport for the HP 8200e
*/
static int usbat_hp8200e_transport(struct scsi_cmnd *srb, struct us_data *us)
{
int result;
unsigned char *status = us->iobuf;
unsigned char registers[32];
unsigned char data[32];
unsigned int len;
int i;
len = scsi_bufflen(srb);
/* Send A0 (ATA PACKET COMMAND).
Note: I guess we're never going to get any of the ATA
commands... just ATA Packet Commands.
*/
registers[0] = USBAT_ATA_FEATURES;
registers[1] = USBAT_ATA_SECCNT;
registers[2] = USBAT_ATA_SECNUM;
registers[3] = USBAT_ATA_LBA_ME;
registers[4] = USBAT_ATA_LBA_HI;
registers[5] = USBAT_ATA_DEVICE;
registers[6] = USBAT_ATA_CMD;
data[0] = 0x00;
data[1] = 0x00;
data[2] = 0x00;
data[3] = len&0xFF; /* (cylL) = expected length (L) */
data[4] = (len>>8)&0xFF; /* (cylH) = expected length (H) */
data[5] = 0xB0; /* (device sel) = slave */
data[6] = 0xA0; /* (command) = ATA PACKET COMMAND */
for (i=7; i<19; i++) {
registers[i] = 0x10;
data[i] = (i-7 >= srb->cmd_len) ? 0 : srb->cmnd[i-7];
}
result = usbat_get_status(us, status);
US_DEBUGP("Status = %02X\n", *status);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
if (srb->cmnd[0] == TEST_UNIT_READY)
transferred = 0;
if (srb->sc_data_direction == DMA_TO_DEVICE) {
result = usbat_hp8200e_rw_block_test(us, USBAT_ATA,
registers, data, 19,
USBAT_ATA_DATA, USBAT_ATA_STATUS, 0xFD,
(USBAT_QUAL_FCQ | USBAT_QUAL_ALQ),
DMA_TO_DEVICE,
scsi_sglist(srb),
len, scsi_sg_count(srb), 10);
if (result == USB_STOR_TRANSPORT_GOOD) {
transferred += len;
US_DEBUGP("Wrote %08X bytes\n", transferred);
}
return result;
} else if (srb->cmnd[0] == READ_10 ||
srb->cmnd[0] == GPCMD_READ_CD) {
return usbat_hp8200e_handle_read10(us, registers, data, srb);
}
if (len > 0xFFFF) {
US_DEBUGP("Error: len = %08X... what do I do now?\n",
len);
return USB_STOR_TRANSPORT_ERROR;
}
result = usbat_multiple_write(us, registers, data, 7);
if (result != USB_STOR_TRANSPORT_GOOD)
return result;
/*
* Write the 12-byte command header.
*
* If the command is BLANK then set the timer for 75 minutes.
* Otherwise set it for 10 minutes.
*
* NOTE: THE 8200 DOCUMENTATION STATES THAT BLANKING A CDRW
* AT SPEED 4 IS UNRELIABLE!!!
*/
result = usbat_write_block(us, USBAT_ATA, srb->cmnd, 12,
srb->cmnd[0] == GPCMD_BLANK ? 75 : 10, 0);
if (result != USB_STOR_TRANSPORT_GOOD)
return result;
/* If there is response data to be read in then do it here. */
if (len != 0 && (srb->sc_data_direction == DMA_FROM_DEVICE)) {
/* How many bytes to read in? Check cylL register */
if (usbat_read(us, USBAT_ATA, USBAT_ATA_LBA_ME, status) !=
USB_STOR_XFER_GOOD) {
return USB_STOR_TRANSPORT_ERROR;
}
if (len > 0xFF) { /* need to read cylH also */
len = *status;
if (usbat_read(us, USBAT_ATA, USBAT_ATA_LBA_HI, status) !=
USB_STOR_XFER_GOOD) {
return USB_STOR_TRANSPORT_ERROR;
}
len += ((unsigned int) *status)<<8;
}
else
len = *status;
result = usbat_read_block(us, scsi_sglist(srb), len,
scsi_sg_count(srb));
}
return result;
}
/*
* Transport for USBAT02-based CompactFlash and similar storage devices
*/
static int usbat_flash_transport(struct scsi_cmnd * srb, struct us_data *us)
{
int rc;
struct usbat_info *info = (struct usbat_info *) (us->extra);
unsigned long block, blocks;
unsigned char *ptr = us->iobuf;
static unsigned char inquiry_response[36] = {
0x00, 0x80, 0x00, 0x01, 0x1F, 0x00, 0x00, 0x00
};
if (srb->cmnd[0] == INQUIRY) {
US_DEBUGP("usbat_flash_transport: INQUIRY. Returning bogus response.\n");
memcpy(ptr, inquiry_response, sizeof(inquiry_response));
fill_inquiry_response(us, ptr, 36);
return USB_STOR_TRANSPORT_GOOD;
}
if (srb->cmnd[0] == READ_CAPACITY) {
rc = usbat_flash_check_media(us, info);
if (rc != USB_STOR_TRANSPORT_GOOD)
return rc;
rc = usbat_flash_get_sector_count(us, info);
if (rc != USB_STOR_TRANSPORT_GOOD)
return rc;
/* hard coded 512 byte sectors as per ATA spec */
info->ssize = 0x200;
US_DEBUGP("usbat_flash_transport: READ_CAPACITY: %ld sectors, %ld bytes per sector\n",
info->sectors, info->ssize);
/*
* build the reply
* note: must return the sector number of the last sector,
* *not* the total number of sectors
*/
((__be32 *) ptr)[0] = cpu_to_be32(info->sectors - 1);
((__be32 *) ptr)[1] = cpu_to_be32(info->ssize);
usb_stor_set_xfer_buf(ptr, 8, srb);
return USB_STOR_TRANSPORT_GOOD;
}
if (srb->cmnd[0] == MODE_SELECT_10) {
US_DEBUGP("usbat_flash_transport: Gah! MODE_SELECT_10.\n");
return USB_STOR_TRANSPORT_ERROR;
}
if (srb->cmnd[0] == READ_10) {
block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5]));
blocks = ((u32)(srb->cmnd[7]) << 8) | ((u32)(srb->cmnd[8]));
US_DEBUGP("usbat_flash_transport: READ_10: read block 0x%04lx count %ld\n", block, blocks);
return usbat_flash_read_data(us, info, block, blocks);
}
if (srb->cmnd[0] == READ_12) {
/*
* I don't think we'll ever see a READ_12 but support it anyway
*/
block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5]));
blocks = ((u32)(srb->cmnd[6]) << 24) | ((u32)(srb->cmnd[7]) << 16) |
((u32)(srb->cmnd[8]) << 8) | ((u32)(srb->cmnd[9]));
US_DEBUGP("usbat_flash_transport: READ_12: read block 0x%04lx count %ld\n", block, blocks);
return usbat_flash_read_data(us, info, block, blocks);
}
if (srb->cmnd[0] == WRITE_10) {
block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5]));
blocks = ((u32)(srb->cmnd[7]) << 8) | ((u32)(srb->cmnd[8]));
US_DEBUGP("usbat_flash_transport: WRITE_10: write block 0x%04lx count %ld\n", block, blocks);
return usbat_flash_write_data(us, info, block, blocks);
}
if (srb->cmnd[0] == WRITE_12) {
/*
* I don't think we'll ever see a WRITE_12 but support it anyway
*/
block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5]));
blocks = ((u32)(srb->cmnd[6]) << 24) | ((u32)(srb->cmnd[7]) << 16) |
((u32)(srb->cmnd[8]) << 8) | ((u32)(srb->cmnd[9]));
US_DEBUGP("usbat_flash_transport: WRITE_12: write block 0x%04lx count %ld\n", block, blocks);
return usbat_flash_write_data(us, info, block, blocks);
}
if (srb->cmnd[0] == TEST_UNIT_READY) {
US_DEBUGP("usbat_flash_transport: TEST_UNIT_READY.\n");
rc = usbat_flash_check_media(us, info);
if (rc != USB_STOR_TRANSPORT_GOOD)
return rc;
return usbat_check_status(us);
}
if (srb->cmnd[0] == REQUEST_SENSE) {
US_DEBUGP("usbat_flash_transport: REQUEST_SENSE.\n");
memset(ptr, 0, 18);
ptr[0] = 0xF0;
ptr[2] = info->sense_key;
ptr[7] = 11;
ptr[12] = info->sense_asc;
ptr[13] = info->sense_ascq;
usb_stor_set_xfer_buf(ptr, 18, srb);
return USB_STOR_TRANSPORT_GOOD;
}
if (srb->cmnd[0] == ALLOW_MEDIUM_REMOVAL) {
/*
* sure. whatever. not like we can stop the user from popping
* the media out of the device (no locking doors, etc)
*/
return USB_STOR_TRANSPORT_GOOD;
}
US_DEBUGP("usbat_flash_transport: Gah! Unknown command: %d (0x%x)\n",
srb->cmnd[0], srb->cmnd[0]);
info->sense_key = 0x05;
info->sense_asc = 0x20;
info->sense_ascq = 0x00;
return USB_STOR_TRANSPORT_FAILED;
}
static int init_usbat_cd(struct us_data *us)
{
return init_usbat(us, USBAT_DEV_HP8200);
}
static int init_usbat_flash(struct us_data *us)
{
return init_usbat(us, USBAT_DEV_FLASH);
}
static int usbat_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct us_data *us;
int result;
result = usb_stor_probe1(&us, intf, id,
(id - usbat_usb_ids) + usbat_unusual_dev_list);
if (result)
return result;
/* The actual transport will be determined later by the
* initialization routine; this is just a placeholder.
*/
us->transport_name = "Shuttle USBAT";
us->transport = usbat_flash_transport;
us->transport_reset = usb_stor_CB_reset;
us->max_lun = 1;
result = usb_stor_probe2(us);
return result;
}
static struct usb_driver usbat_driver = {
.name = "ums-usbat",
.probe = usbat_probe,
.disconnect = usb_stor_disconnect,
.suspend = usb_stor_suspend,
.resume = usb_stor_resume,
.reset_resume = usb_stor_reset_resume,
.pre_reset = usb_stor_pre_reset,
.post_reset = usb_stor_post_reset,
.id_table = usbat_usb_ids,
.soft_unbind = 1,
};
static int __init usbat_init(void)
{
return usb_register(&usbat_driver);
}
static void __exit usbat_exit(void)
{
usb_deregister(&usbat_driver);
}
module_init(usbat_init);
module_exit(usbat_exit);
|
gpl-2.0
|
rqmok/android_kernel_huawei_u8800
|
drivers/media/video/uvc/uvc_isight.c
|
3323
|
3953
|
/*
* uvc_isight.c -- USB Video Class driver - iSight support
*
* Copyright (C) 2006-2007
* Ivan N. Zlatev <contact@i-nz.net>
* Copyright (C) 2008-2009
* Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <linux/usb.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include "uvcvideo.h"
/* Built-in iSight webcams implement most of UVC 1.0, except that they
 * use a different packet format. Instead of sending a header at the
* beginning of each isochronous transfer payload, the webcam sends a
* single header per image (on its own in a packet), followed by
* packets containing data only.
*
* Offset Size (bytes) Description
* ------------------------------------------------------------------
* 0x00 1 Header length
* 0x01 1 Flags (UVC-compliant)
* 0x02 4 Always equal to '11223344'
* 0x06 8 Always equal to 'deadbeefdeadface'
* 0x0e 16 Unknown
*
* The header can be prefixed by an optional, unknown-purpose byte.
*/
static int isight_decode(struct uvc_video_queue *queue, struct uvc_buffer *buf,
const __u8 *data, unsigned int len)
{
static const __u8 hdr[] = {
0x11, 0x22, 0x33, 0x44,
0xde, 0xad, 0xbe, 0xef,
0xde, 0xad, 0xfa, 0xce
};
unsigned int maxlen, nbytes;
__u8 *mem;
int is_header = 0;
if (buf == NULL)
return 0;
if ((len >= 14 && memcmp(&data[2], hdr, 12) == 0) ||
(len >= 15 && memcmp(&data[3], hdr, 12) == 0)) {
uvc_trace(UVC_TRACE_FRAME, "iSight header found\n");
is_header = 1;
}
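	/*
	 * Illustration: a packet whose bytes at offset 2 equal
	 * 11 22 33 44 de ad be ef de ad fa ce is treated as a header;
	 * with the optional prefix byte the same signature starts at
	 * offset 3, which is why both offsets are tested above.
	 */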
/* Synchronize to the input stream by waiting for a header packet. */
if (buf->state != UVC_BUF_STATE_ACTIVE) {
if (!is_header) {
uvc_trace(UVC_TRACE_FRAME, "Dropping packet (out of "
"sync).\n");
return 0;
}
buf->state = UVC_BUF_STATE_ACTIVE;
}
/* Mark the buffer as done if we're at the beginning of a new frame.
*
* Empty buffers (bytesused == 0) don't trigger end of frame detection
* as it doesn't make sense to return an empty buffer.
*/
if (is_header && buf->buf.bytesused != 0) {
buf->state = UVC_BUF_STATE_DONE;
return -EAGAIN;
}
/* Copy the video data to the buffer. Skip header packets, as they
* contain no data.
*/
if (!is_header) {
maxlen = buf->buf.length - buf->buf.bytesused;
mem = queue->mem + buf->buf.m.offset + buf->buf.bytesused;
nbytes = min(len, maxlen);
memcpy(mem, data, nbytes);
buf->buf.bytesused += nbytes;
if (len > maxlen || buf->buf.bytesused == buf->buf.length) {
uvc_trace(UVC_TRACE_FRAME, "Frame complete "
"(overflow).\n");
buf->state = UVC_BUF_STATE_DONE;
}
}
return 0;
}
void uvc_video_decode_isight(struct urb *urb, struct uvc_streaming *stream,
struct uvc_buffer *buf)
{
int ret, i;
for (i = 0; i < urb->number_of_packets; ++i) {
if (urb->iso_frame_desc[i].status < 0) {
uvc_trace(UVC_TRACE_FRAME, "USB isochronous frame "
"lost (%d).\n",
urb->iso_frame_desc[i].status);
}
/* Decode the payload packet.
* uvc_video_decode is entered twice when a frame transition
* has been detected because the end of frame can only be
* reliably detected when the first packet of the new frame
* is processed. The first pass detects the transition and
* closes the previous frame's buffer, the second pass
* processes the data of the first payload of the new frame.
*/
do {
ret = isight_decode(&stream->queue, buf,
urb->transfer_buffer +
urb->iso_frame_desc[i].offset,
urb->iso_frame_desc[i].actual_length);
if (buf == NULL)
break;
if (buf->state == UVC_BUF_STATE_DONE ||
buf->state == UVC_BUF_STATE_ERROR)
buf = uvc_queue_next_buffer(&stream->queue,
buf);
} while (ret == -EAGAIN);
}
}
|
gpl-2.0
|
zarboz/Ville-Z.238
|
drivers/i2c/busses/i2c-highlander.c
|
3579
|
11411
|
/*
* Renesas Solutions Highlander FPGA I2C/SMBus support.
*
* Supported devices: R0P7780LC0011RL, R0P7785LC0011RL
*
* Copyright (C) 2008 Paul Mundt
* Copyright (C) 2008 Renesas Solutions Corp.
* Copyright (C) 2008 Atom Create Engineering Co., Ltd.
*
* This file is subject to the terms and conditions of the GNU General
* Public License version 2. See the file "COPYING" in the main directory
* of this archive for more details.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/slab.h>
#define SMCR 0x00
#define SMCR_START (1 << 0)
#define SMCR_IRIC (1 << 1)
#define SMCR_BBSY (1 << 2)
#define SMCR_ACKE (1 << 3)
#define SMCR_RST (1 << 4)
#define SMCR_IEIC (1 << 6)
#define SMSMADR 0x02
#define SMMR 0x04
#define SMMR_MODE0 (1 << 0)
#define SMMR_MODE1 (1 << 1)
#define SMMR_CAP (1 << 3)
#define SMMR_TMMD (1 << 4)
#define SMMR_SP (1 << 7)
#define SMSADR 0x06
#define SMTRDR 0x46
struct highlander_i2c_dev {
struct device *dev;
void __iomem *base;
struct i2c_adapter adapter;
struct completion cmd_complete;
unsigned long last_read_time;
int irq;
u8 *buf;
size_t buf_len;
};
static int iic_force_poll, iic_force_normal;
static int iic_timeout = 1000, iic_read_delay;
static inline void highlander_i2c_irq_enable(struct highlander_i2c_dev *dev)
{
iowrite16(ioread16(dev->base + SMCR) | SMCR_IEIC, dev->base + SMCR);
}
static inline void highlander_i2c_irq_disable(struct highlander_i2c_dev *dev)
{
iowrite16(ioread16(dev->base + SMCR) & ~SMCR_IEIC, dev->base + SMCR);
}
static inline void highlander_i2c_start(struct highlander_i2c_dev *dev)
{
iowrite16(ioread16(dev->base + SMCR) | SMCR_START, dev->base + SMCR);
}
static inline void highlander_i2c_done(struct highlander_i2c_dev *dev)
{
iowrite16(ioread16(dev->base + SMCR) | SMCR_IRIC, dev->base + SMCR);
}
static void highlander_i2c_setup(struct highlander_i2c_dev *dev)
{
u16 smmr;
smmr = ioread16(dev->base + SMMR);
smmr |= SMMR_TMMD;
if (iic_force_normal)
smmr &= ~SMMR_SP;
else
smmr |= SMMR_SP;
iowrite16(smmr, dev->base + SMMR);
}
static void smbus_write_data(u8 *src, u16 *dst, int len)
{
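	/*
	 * Illustration (hypothetical buffer): src = { 0x12, 0x34, 0x56 },
	 * len = 3 gives dst = { 0x1234, 0x5600 } -- byte pairs are packed
	 * big-endian, and an odd trailing byte lands in the high half of
	 * the last word.
	 */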
for (; len > 1; len -= 2) {
*dst++ = be16_to_cpup((__be16 *)src);
src += 2;
}
if (len)
*dst = *src << 8;
}
static void smbus_read_data(u16 *src, u8 *dst, int len)
{
for (; len > 1; len -= 2) {
*(__be16 *)dst = cpu_to_be16p(src++);
dst += 2;
}
if (len)
*dst = *src >> 8;
}
static void highlander_i2c_command(struct highlander_i2c_dev *dev,
u8 command, int len)
{
unsigned int i;
u16 cmd = (command << 8) | command;
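	/*
	 * Example (hypothetical): command 0xAB with len == 3 writes 0xABAB
	 * to SMSADR + 0 and 0xAB00 to SMSADR + 2; the command byte is
	 * mirrored into both halves of each 16-bit word, with an odd final
	 * byte in the high half only.
	 */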
for (i = 0; i < len; i += 2) {
if (len - i == 1)
cmd = command << 8;
iowrite16(cmd, dev->base + SMSADR + i);
dev_dbg(dev->dev, "command data[%x] 0x%04x\n", i/2, cmd);
}
}
static int highlander_i2c_wait_for_bbsy(struct highlander_i2c_dev *dev)
{
unsigned long timeout;
timeout = jiffies + msecs_to_jiffies(iic_timeout);
while (ioread16(dev->base + SMCR) & SMCR_BBSY) {
if (time_after(jiffies, timeout)) {
dev_warn(dev->dev, "timeout waiting for bus ready\n");
return -ETIMEDOUT;
}
msleep(1);
}
return 0;
}
static int highlander_i2c_reset(struct highlander_i2c_dev *dev)
{
iowrite16(ioread16(dev->base + SMCR) | SMCR_RST, dev->base + SMCR);
return highlander_i2c_wait_for_bbsy(dev);
}
static int highlander_i2c_wait_for_ack(struct highlander_i2c_dev *dev)
{
u16 tmp = ioread16(dev->base + SMCR);
if ((tmp & (SMCR_IRIC | SMCR_ACKE)) == SMCR_ACKE) {
dev_warn(dev->dev, "ack abnormality\n");
return highlander_i2c_reset(dev);
}
return 0;
}
static irqreturn_t highlander_i2c_irq(int irq, void *dev_id)
{
struct highlander_i2c_dev *dev = dev_id;
highlander_i2c_done(dev);
complete(&dev->cmd_complete);
return IRQ_HANDLED;
}
static void highlander_i2c_poll(struct highlander_i2c_dev *dev)
{
unsigned long timeout;
u16 smcr;
timeout = jiffies + msecs_to_jiffies(iic_timeout);
for (;;) {
smcr = ioread16(dev->base + SMCR);
/*
* Don't bother checking ACKE here, this and the reset
* are handled in highlander_i2c_wait_xfer_done() when
* waiting for the ACK.
*/
if (smcr & SMCR_IRIC)
return;
if (time_after(jiffies, timeout))
break;
cpu_relax();
cond_resched();
}
dev_err(dev->dev, "polling timed out\n");
}
static inline int highlander_i2c_wait_xfer_done(struct highlander_i2c_dev *dev)
{
if (dev->irq)
wait_for_completion_timeout(&dev->cmd_complete,
msecs_to_jiffies(iic_timeout));
else
/* busy looping, the IRQ of champions */
highlander_i2c_poll(dev);
return highlander_i2c_wait_for_ack(dev);
}
static int highlander_i2c_read(struct highlander_i2c_dev *dev)
{
int i, cnt;
u16 data[16];
if (highlander_i2c_wait_for_bbsy(dev))
return -EAGAIN;
highlander_i2c_start(dev);
if (highlander_i2c_wait_xfer_done(dev)) {
dev_err(dev->dev, "Arbitration loss\n");
return -EAGAIN;
}
/*
* The R0P7780LC0011RL FPGA needs a significant delay between
	 * data read cycles, otherwise the transceiver gets confused and
* garbage is returned when the read is subsequently aborted.
*
* It is not sufficient to wait for BBSY.
*
* While this generally only applies to the older SH7780-based
* Highlanders, the same issue can be observed on SH7785 ones,
* albeit less frequently. SH7780-based Highlanders may need
* this to be as high as 1000 ms.
*/
if (iic_read_delay && time_before(jiffies, dev->last_read_time +
msecs_to_jiffies(iic_read_delay)))
msleep(jiffies_to_msecs((dev->last_read_time +
msecs_to_jiffies(iic_read_delay)) - jiffies));
cnt = (dev->buf_len + 1) >> 1;
for (i = 0; i < cnt; i++) {
data[i] = ioread16(dev->base + SMTRDR + (i * sizeof(u16)));
dev_dbg(dev->dev, "read data[%x] 0x%04x\n", i, data[i]);
}
smbus_read_data(data, dev->buf, dev->buf_len);
dev->last_read_time = jiffies;
return 0;
}
static int highlander_i2c_write(struct highlander_i2c_dev *dev)
{
int i, cnt;
u16 data[16];
smbus_write_data(dev->buf, data, dev->buf_len);
cnt = (dev->buf_len + 1) >> 1;
for (i = 0; i < cnt; i++) {
iowrite16(data[i], dev->base + SMTRDR + (i * sizeof(u16)));
dev_dbg(dev->dev, "write data[%x] 0x%04x\n", i, data[i]);
}
if (highlander_i2c_wait_for_bbsy(dev))
return -EAGAIN;
highlander_i2c_start(dev);
return highlander_i2c_wait_xfer_done(dev);
}
static int highlander_i2c_smbus_xfer(struct i2c_adapter *adap, u16 addr,
unsigned short flags, char read_write,
u8 command, int size,
union i2c_smbus_data *data)
{
struct highlander_i2c_dev *dev = i2c_get_adapdata(adap);
u16 tmp;
init_completion(&dev->cmd_complete);
dev_dbg(dev->dev, "addr %04x, command %02x, read_write %d, size %d\n",
addr, command, read_write, size);
/*
* Set up the buffer and transfer size
*/
switch (size) {
case I2C_SMBUS_BYTE_DATA:
dev->buf = &data->byte;
dev->buf_len = 1;
break;
case I2C_SMBUS_I2C_BLOCK_DATA:
dev->buf = &data->block[1];
dev->buf_len = data->block[0];
break;
default:
dev_err(dev->dev, "unsupported command %d\n", size);
return -EINVAL;
}
/*
* Encode the mode setting
*/
tmp = ioread16(dev->base + SMMR);
tmp &= ~(SMMR_MODE0 | SMMR_MODE1);
switch (dev->buf_len) {
case 1:
/* default */
break;
case 8:
tmp |= SMMR_MODE0;
break;
case 16:
tmp |= SMMR_MODE1;
break;
case 32:
tmp |= (SMMR_MODE0 | SMMR_MODE1);
break;
default:
dev_err(dev->dev, "unsupported xfer size %d\n", dev->buf_len);
return -EINVAL;
}
iowrite16(tmp, dev->base + SMMR);
/* Ensure we're in a sane state */
highlander_i2c_done(dev);
/* Set slave address */
iowrite16((addr << 1) | read_write, dev->base + SMSMADR);
highlander_i2c_command(dev, command, dev->buf_len);
if (read_write == I2C_SMBUS_READ)
return highlander_i2c_read(dev);
else
return highlander_i2c_write(dev);
}
static u32 highlander_i2c_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_I2C_BLOCK;
}
static const struct i2c_algorithm highlander_i2c_algo = {
.smbus_xfer = highlander_i2c_smbus_xfer,
.functionality = highlander_i2c_func,
};
static int __devinit highlander_i2c_probe(struct platform_device *pdev)
{
struct highlander_i2c_dev *dev;
struct i2c_adapter *adap;
struct resource *res;
int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (unlikely(!res)) {
dev_err(&pdev->dev, "no mem resource\n");
return -ENODEV;
}
dev = kzalloc(sizeof(struct highlander_i2c_dev), GFP_KERNEL);
if (unlikely(!dev))
return -ENOMEM;
dev->base = ioremap_nocache(res->start, resource_size(res));
if (unlikely(!dev->base)) {
ret = -ENXIO;
goto err;
}
dev->dev = &pdev->dev;
platform_set_drvdata(pdev, dev);
dev->irq = platform_get_irq(pdev, 0);
if (iic_force_poll)
dev->irq = 0;
if (dev->irq) {
ret = request_irq(dev->irq, highlander_i2c_irq, IRQF_DISABLED,
pdev->name, dev);
if (unlikely(ret))
goto err_unmap;
highlander_i2c_irq_enable(dev);
} else {
dev_notice(&pdev->dev, "no IRQ, using polling mode\n");
highlander_i2c_irq_disable(dev);
}
dev->last_read_time = jiffies; /* initial read jiffies */
highlander_i2c_setup(dev);
adap = &dev->adapter;
i2c_set_adapdata(adap, dev);
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_HWMON;
strlcpy(adap->name, "HL FPGA I2C adapter", sizeof(adap->name));
adap->algo = &highlander_i2c_algo;
adap->dev.parent = &pdev->dev;
adap->nr = pdev->id;
/*
* Reset the adapter
*/
ret = highlander_i2c_reset(dev);
if (unlikely(ret)) {
dev_err(&pdev->dev, "controller didn't come up\n");
goto err_free_irq;
}
ret = i2c_add_numbered_adapter(adap);
if (unlikely(ret)) {
dev_err(&pdev->dev, "failure adding adapter\n");
goto err_free_irq;
}
return 0;
err_free_irq:
if (dev->irq)
free_irq(dev->irq, dev);
err_unmap:
iounmap(dev->base);
err:
kfree(dev);
platform_set_drvdata(pdev, NULL);
return ret;
}
static int __devexit highlander_i2c_remove(struct platform_device *pdev)
{
struct highlander_i2c_dev *dev = platform_get_drvdata(pdev);
i2c_del_adapter(&dev->adapter);
if (dev->irq)
free_irq(dev->irq, dev);
iounmap(dev->base);
kfree(dev);
platform_set_drvdata(pdev, NULL);
return 0;
}
static struct platform_driver highlander_i2c_driver = {
.driver = {
.name = "i2c-highlander",
.owner = THIS_MODULE,
},
.probe = highlander_i2c_probe,
.remove = __devexit_p(highlander_i2c_remove),
};
static int __init highlander_i2c_init(void)
{
return platform_driver_register(&highlander_i2c_driver);
}
static void __exit highlander_i2c_exit(void)
{
platform_driver_unregister(&highlander_i2c_driver);
}
module_init(highlander_i2c_init);
module_exit(highlander_i2c_exit);
MODULE_AUTHOR("Paul Mundt");
MODULE_DESCRIPTION("Renesas Highlander FPGA I2C/SMBus adapter");
MODULE_LICENSE("GPL v2");
module_param(iic_force_poll, bool, 0);
module_param(iic_force_normal, bool, 0);
module_param(iic_timeout, int, 0);
module_param(iic_read_delay, int, 0);
MODULE_PARM_DESC(iic_force_poll, "Force polling mode");
MODULE_PARM_DESC(iic_force_normal,
"Force normal mode (100 kHz), default is fast mode (400 kHz)");
MODULE_PARM_DESC(iic_timeout, "Set timeout value in msecs (default 1000 ms)");
MODULE_PARM_DESC(iic_read_delay,
"Delay between data read cycles (default 0 ms)");
|
gpl-2.0
|
somcom3x/android_kernel_motorola_msm8226-common
|
tools/perf/util/sort.c
|
4859
|
13317
|
#include "sort.h"
#include "hist.h"
regex_t parent_regex;
const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
const char default_sort_order[] = "comm,dso,symbol";
const char *sort_order = default_sort_order;
int sort__need_collapse = 0;
int sort__has_parent = 0;
int sort__branch_mode = -1; /* -1 means not set */
enum sort_type sort__first_dimension;
char * field_sep;
LIST_HEAD(hist_entry__sort_list);
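/*
 * snprintf() wrapper that keeps the output parseable: any occurrence of
 * the user-chosen field separator within a formatted field is replaced
 * with '.'. For example (hypothetical), with *field_sep == ',' the
 * formatted string "foo,bar" becomes "foo.bar".
 */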
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
int n;
va_list ap;
va_start(ap, fmt);
n = vsnprintf(bf, size, fmt, ap);
if (field_sep && n > 0) {
char *sep = bf;
while (1) {
sep = strchr(sep, *field_sep);
if (sep == NULL)
break;
*sep = '.';
}
}
va_end(ap);
if (n >= (int)size)
return size - 1;
return n;
}
static int64_t cmp_null(void *l, void *r)
{
if (!l && !r)
return 0;
else if (!l)
return -1;
else
return 1;
}
/* --sort pid */
static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
return right->thread->pid - left->thread->pid;
}
static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf,
size_t size, unsigned int width)
{
return repsep_snprintf(bf, size, "%*s:%5d", width,
self->thread->comm ?: "", self->thread->pid);
}
struct sort_entry sort_thread = {
.se_header = "Command: Pid",
.se_cmp = sort__thread_cmp,
.se_snprintf = hist_entry__thread_snprintf,
.se_width_idx = HISTC_THREAD,
};
/* --sort comm */
static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
return right->thread->pid - left->thread->pid;
}
static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
char *comm_l = left->thread->comm;
char *comm_r = right->thread->comm;
if (!comm_l || !comm_r)
return cmp_null(comm_l, comm_r);
return strcmp(comm_l, comm_r);
}
static int hist_entry__comm_snprintf(struct hist_entry *self, char *bf,
size_t size, unsigned int width)
{
return repsep_snprintf(bf, size, "%*s", width, self->thread->comm);
}
static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
struct dso *dso_l = map_l ? map_l->dso : NULL;
struct dso *dso_r = map_r ? map_r->dso : NULL;
const char *dso_name_l, *dso_name_r;
if (!dso_l || !dso_r)
return cmp_null(dso_l, dso_r);
if (verbose) {
dso_name_l = dso_l->long_name;
dso_name_r = dso_r->long_name;
} else {
dso_name_l = dso_l->short_name;
dso_name_r = dso_r->short_name;
}
return strcmp(dso_name_l, dso_name_r);
}
struct sort_entry sort_comm = {
.se_header = "Command",
.se_cmp = sort__comm_cmp,
.se_collapse = sort__comm_collapse,
.se_snprintf = hist_entry__comm_snprintf,
.se_width_idx = HISTC_COMM,
};
/* --sort dso */
static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
return _sort__dso_cmp(left->ms.map, right->ms.map);
}
static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r,
u64 ip_l, u64 ip_r)
{
if (!sym_l || !sym_r)
return cmp_null(sym_l, sym_r);
if (sym_l == sym_r)
return 0;
if (sym_l)
ip_l = sym_l->start;
if (sym_r)
ip_r = sym_r->start;
return (int64_t)(ip_r - ip_l);
}
static int _hist_entry__dso_snprintf(struct map *map, char *bf,
size_t size, unsigned int width)
{
if (map && map->dso) {
const char *dso_name = !verbose ? map->dso->short_name :
map->dso->long_name;
return repsep_snprintf(bf, size, "%-*s", width, dso_name);
}
return repsep_snprintf(bf, size, "%-*s", width, "[unknown]");
}
static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf,
size_t size, unsigned int width)
{
return _hist_entry__dso_snprintf(self->ms.map, bf, size, width);
}
static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
u64 ip, char level, char *bf, size_t size,
unsigned int width __used)
{
size_t ret = 0;
if (verbose) {
char o = map ? dso__symtab_origin(map->dso) : '!';
ret += repsep_snprintf(bf, size, "%-#*llx %c ",
BITS_PER_LONG / 4, ip, o);
}
ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
if (sym)
ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
width - ret,
sym->name);
else {
size_t len = BITS_PER_LONG / 4;
ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
len, ip);
ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
width - ret, "");
}
return ret;
}
struct sort_entry sort_dso = {
.se_header = "Shared Object",
.se_cmp = sort__dso_cmp,
.se_snprintf = hist_entry__dso_snprintf,
.se_width_idx = HISTC_DSO,
};
static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf,
size_t size, unsigned int width __used)
{
return _hist_entry__sym_snprintf(self->ms.map, self->ms.sym, self->ip,
self->level, bf, size, width);
}
/* --sort symbol */
static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
u64 ip_l, ip_r;
if (!left->ms.sym && !right->ms.sym)
return right->level - left->level;
if (!left->ms.sym || !right->ms.sym)
return cmp_null(left->ms.sym, right->ms.sym);
if (left->ms.sym == right->ms.sym)
return 0;
ip_l = left->ms.sym->start;
ip_r = right->ms.sym->start;
return _sort__sym_cmp(left->ms.sym, right->ms.sym, ip_l, ip_r);
}
struct sort_entry sort_sym = {
.se_header = "Symbol",
.se_cmp = sort__sym_cmp,
.se_snprintf = hist_entry__sym_snprintf,
.se_width_idx = HISTC_SYMBOL,
};
/* --sort parent */
static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
struct symbol *sym_l = left->parent;
struct symbol *sym_r = right->parent;
if (!sym_l || !sym_r)
return cmp_null(sym_l, sym_r);
return strcmp(sym_l->name, sym_r->name);
}
static int hist_entry__parent_snprintf(struct hist_entry *self, char *bf,
size_t size, unsigned int width)
{
return repsep_snprintf(bf, size, "%-*s", width,
self->parent ? self->parent->name : "[other]");
}
struct sort_entry sort_parent = {
.se_header = "Parent symbol",
.se_cmp = sort__parent_cmp,
.se_snprintf = hist_entry__parent_snprintf,
.se_width_idx = HISTC_PARENT,
};
/* --sort cpu */
static int64_t
sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
{
return right->cpu - left->cpu;
}
static int hist_entry__cpu_snprintf(struct hist_entry *self, char *bf,
size_t size, unsigned int width)
{
return repsep_snprintf(bf, size, "%-*d", width, self->cpu);
}
struct sort_entry sort_cpu = {
.se_header = "CPU",
.se_cmp = sort__cpu_cmp,
.se_snprintf = hist_entry__cpu_snprintf,
.se_width_idx = HISTC_CPU,
};
static int64_t
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
return _sort__dso_cmp(left->branch_info->from.map,
right->branch_info->from.map);
}
static int hist_entry__dso_from_snprintf(struct hist_entry *self, char *bf,
size_t size, unsigned int width)
{
return _hist_entry__dso_snprintf(self->branch_info->from.map,
bf, size, width);
}
struct sort_entry sort_dso_from = {
.se_header = "Source Shared Object",
.se_cmp = sort__dso_from_cmp,
.se_snprintf = hist_entry__dso_from_snprintf,
.se_width_idx = HISTC_DSO_FROM,
};
static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
return _sort__dso_cmp(left->branch_info->to.map,
right->branch_info->to.map);
}
static int hist_entry__dso_to_snprintf(struct hist_entry *self, char *bf,
size_t size, unsigned int width)
{
return _hist_entry__dso_snprintf(self->branch_info->to.map,
bf, size, width);
}
static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
struct addr_map_symbol *from_l = &left->branch_info->from;
struct addr_map_symbol *from_r = &right->branch_info->from;
if (!from_l->sym && !from_r->sym)
return right->level - left->level;
return _sort__sym_cmp(from_l->sym, from_r->sym, from_l->addr,
from_r->addr);
}
static int64_t
sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
struct addr_map_symbol *to_l = &left->branch_info->to;
struct addr_map_symbol *to_r = &right->branch_info->to;
if (!to_l->sym && !to_r->sym)
return right->level - left->level;
return _sort__sym_cmp(to_l->sym, to_r->sym, to_l->addr, to_r->addr);
}
static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf,
size_t size, unsigned int width __used)
{
struct addr_map_symbol *from = &self->branch_info->from;
return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
self->level, bf, size, width);
}
static int hist_entry__sym_to_snprintf(struct hist_entry *self, char *bf,
size_t size, unsigned int width __used)
{
struct addr_map_symbol *to = &self->branch_info->to;
return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
self->level, bf, size, width);
}
struct sort_entry sort_dso_to = {
.se_header = "Target Shared Object",
.se_cmp = sort__dso_to_cmp,
.se_snprintf = hist_entry__dso_to_snprintf,
.se_width_idx = HISTC_DSO_TO,
};
struct sort_entry sort_sym_from = {
.se_header = "Source Symbol",
.se_cmp = sort__sym_from_cmp,
.se_snprintf = hist_entry__sym_from_snprintf,
.se_width_idx = HISTC_SYMBOL_FROM,
};
struct sort_entry sort_sym_to = {
.se_header = "Target Symbol",
.se_cmp = sort__sym_to_cmp,
.se_snprintf = hist_entry__sym_to_snprintf,
.se_width_idx = HISTC_SYMBOL_TO,
};
static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
const unsigned char mp = left->branch_info->flags.mispred !=
right->branch_info->flags.mispred;
const unsigned char p = left->branch_info->flags.predicted !=
right->branch_info->flags.predicted;
return mp || p;
}
static int hist_entry__mispredict_snprintf(struct hist_entry *self, char *bf,
				    size_t size, unsigned int width)
{
	const char *out = "N/A";
if (self->branch_info->flags.predicted)
out = "N";
else if (self->branch_info->flags.mispred)
out = "Y";
return repsep_snprintf(bf, size, "%-*s", width, out);
}
struct sort_entry sort_mispredict = {
.se_header = "Branch Mispredicted",
.se_cmp = sort__mispredict_cmp,
.se_snprintf = hist_entry__mispredict_snprintf,
.se_width_idx = HISTC_MISPREDICT,
};
struct sort_dimension {
const char *name;
struct sort_entry *entry;
int taken;
};
#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
static struct sort_dimension sort_dimensions[] = {
DIM(SORT_PID, "pid", sort_thread),
DIM(SORT_COMM, "comm", sort_comm),
DIM(SORT_DSO, "dso", sort_dso),
DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
DIM(SORT_SYM, "symbol", sort_sym),
	DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
	DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
DIM(SORT_PARENT, "parent", sort_parent),
DIM(SORT_CPU, "cpu", sort_cpu),
DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
};
int sort_dimension__add(const char *tok)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
struct sort_dimension *sd = &sort_dimensions[i];
if (strncasecmp(tok, sd->name, strlen(tok)))
continue;
if (sd->entry == &sort_parent) {
int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
if (ret) {
char err[BUFSIZ];
regerror(ret, &parent_regex, err, sizeof(err));
pr_err("Invalid regex: %s\n%s", parent_pattern, err);
return -EINVAL;
}
sort__has_parent = 1;
}
if (sd->taken)
return 0;
if (sd->entry->se_collapse)
sort__need_collapse = 1;
if (list_empty(&hist_entry__sort_list)) {
if (!strcmp(sd->name, "pid"))
sort__first_dimension = SORT_PID;
else if (!strcmp(sd->name, "comm"))
sort__first_dimension = SORT_COMM;
else if (!strcmp(sd->name, "dso"))
sort__first_dimension = SORT_DSO;
else if (!strcmp(sd->name, "symbol"))
sort__first_dimension = SORT_SYM;
else if (!strcmp(sd->name, "parent"))
sort__first_dimension = SORT_PARENT;
else if (!strcmp(sd->name, "cpu"))
sort__first_dimension = SORT_CPU;
else if (!strcmp(sd->name, "symbol_from"))
sort__first_dimension = SORT_SYM_FROM;
else if (!strcmp(sd->name, "symbol_to"))
sort__first_dimension = SORT_SYM_TO;
else if (!strcmp(sd->name, "dso_from"))
sort__first_dimension = SORT_DSO_FROM;
else if (!strcmp(sd->name, "dso_to"))
sort__first_dimension = SORT_DSO_TO;
else if (!strcmp(sd->name, "mispredict"))
sort__first_dimension = SORT_MISPREDICT;
}
list_add_tail(&sd->entry->list, &hist_entry__sort_list);
sd->taken = 1;
return 0;
}
return -ESRCH;
}
void setup_sorting(const char * const usagestr[], const struct option *opts)
{
char *tmp, *tok, *str = strdup(sort_order);
for (tok = strtok_r(str, ", ", &tmp);
tok; tok = strtok_r(NULL, ", ", &tmp)) {
if (sort_dimension__add(tok) < 0) {
error("Unknown --sort key: `%s'", tok);
usage_with_options(usagestr, opts);
}
}
free(str);
}
void sort_entry__setup_elide(struct sort_entry *self, struct strlist *list,
const char *list_name, FILE *fp)
{
if (list && strlist__nr_entries(list) == 1) {
if (fp != NULL)
fprintf(fp, "# %s: %s\n", list_name,
strlist__entry(list, 0)->s);
self->elide = true;
}
}
|
gpl-2.0
|
TaichiN/android_kernel_google_msm
|
arch/arm/mach-shark/leds.c
|
4859
|
3432
|
/*
* arch/arm/mach-shark/leds.c
* by Alexander Schulz
*
* derived from:
* arch/arm/kernel/leds-footbridge.c
* Copyright (C) 1998-1999 Russell King
*
* DIGITAL Shark LED control routines.
*
 * The LEDs are used as follows:
 * - Green front - toggles state every 50 timer interrupts
 * - Amber front - unused; this is a dual-color LED (amber/green)
 * - Amber back - on if the system is not idle
*
* Changelog:
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <asm/leds.h>
#define LED_STATE_ENABLED 1
#define LED_STATE_CLAIMED 2
#define SEQUOIA_LED_GREEN (1<<6)
#define SEQUOIA_LED_AMBER (1<<5)
#define SEQUOIA_LED_BACK (1<<7)
static char led_state;
static short hw_led_state;
static short saved_state;
static DEFINE_RAW_SPINLOCK(leds_lock);
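/*
 * The Sequoia registers are reached through an index/data port pair:
 * write the register index to I/O port 0x24, then read or write the
 * 16-bit value through port 0x26.
 */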
short sequoia_read(int addr)
{
	outw(addr, 0x24);
	return inw(0x26);
}
void sequoia_write(short value, short addr)
{
	outw(addr, 0x24);
	outw(value, 0x26);
}
static void sequoia_leds_event(led_event_t evt)
{
unsigned long flags;
raw_spin_lock_irqsave(&leds_lock, flags);
hw_led_state = sequoia_read(0x09);
switch (evt) {
case led_start:
hw_led_state |= SEQUOIA_LED_GREEN;
hw_led_state |= SEQUOIA_LED_AMBER;
#ifdef CONFIG_LEDS_CPU
hw_led_state |= SEQUOIA_LED_BACK;
#else
hw_led_state &= ~SEQUOIA_LED_BACK;
#endif
led_state |= LED_STATE_ENABLED;
break;
case led_stop:
hw_led_state &= ~SEQUOIA_LED_BACK;
hw_led_state |= SEQUOIA_LED_GREEN;
hw_led_state |= SEQUOIA_LED_AMBER;
led_state &= ~LED_STATE_ENABLED;
break;
case led_claim:
led_state |= LED_STATE_CLAIMED;
saved_state = hw_led_state;
hw_led_state &= ~SEQUOIA_LED_BACK;
hw_led_state |= SEQUOIA_LED_GREEN;
hw_led_state |= SEQUOIA_LED_AMBER;
break;
case led_release:
led_state &= ~LED_STATE_CLAIMED;
hw_led_state = saved_state;
break;
#ifdef CONFIG_LEDS_TIMER
case led_timer:
if (!(led_state & LED_STATE_CLAIMED))
hw_led_state ^= SEQUOIA_LED_GREEN;
break;
#endif
#ifdef CONFIG_LEDS_CPU
case led_idle_start:
if (!(led_state & LED_STATE_CLAIMED))
hw_led_state &= ~SEQUOIA_LED_BACK;
break;
case led_idle_end:
if (!(led_state & LED_STATE_CLAIMED))
hw_led_state |= SEQUOIA_LED_BACK;
break;
#endif
case led_green_on:
if (led_state & LED_STATE_CLAIMED)
hw_led_state &= ~SEQUOIA_LED_GREEN;
break;
case led_green_off:
if (led_state & LED_STATE_CLAIMED)
hw_led_state |= SEQUOIA_LED_GREEN;
break;
case led_amber_on:
if (led_state & LED_STATE_CLAIMED)
hw_led_state &= ~SEQUOIA_LED_AMBER;
break;
case led_amber_off:
if (led_state & LED_STATE_CLAIMED)
hw_led_state |= SEQUOIA_LED_AMBER;
break;
case led_red_on:
if (led_state & LED_STATE_CLAIMED)
hw_led_state |= SEQUOIA_LED_BACK;
break;
case led_red_off:
if (led_state & LED_STATE_CLAIMED)
hw_led_state &= ~SEQUOIA_LED_BACK;
break;
default:
break;
}
if (led_state & LED_STATE_ENABLED)
sequoia_write(hw_led_state,0x09);
raw_spin_unlock_irqrestore(&leds_lock, flags);
}
static int __init leds_init(void)
{
extern void (*leds_event)(led_event_t);
short temp;
leds_event = sequoia_leds_event;
/* Make LEDs independent of power-state */
request_region(0x24,4,"sequoia");
temp = sequoia_read(0x09);
temp |= 1<<10;
sequoia_write(temp,0x09);
leds_event(led_start);
return 0;
}
__initcall(leds_init);
|
gpl-2.0
|
flar2/m8-GPE-5.0.1
|
arch/arm/mach-omap2/clock3xxx.c
|
4859
|
2533
|
/*
* OMAP3-specific clock framework functions
*
* Copyright (C) 2007-2008 Texas Instruments, Inc.
* Copyright (C) 2007-2010 Nokia Corporation
*
* Paul Walmsley
* Jouni Högander
*
* Parts of this code are based on code written by
* Richard Woodruff, Tony Lindgren, Tuukka Tikkanen, Karthik Dasu
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#undef DEBUG
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <plat/hardware.h>
#include <plat/clock.h>
#include "clock.h"
#include "clock3xxx.h"
#include "prm2xxx_3xxx.h"
#include "prm-regbits-34xx.h"
#include "cm2xxx_3xxx.h"
#include "cm-regbits-34xx.h"
/*
* DPLL5_FREQ_FOR_USBHOST: USBHOST and USBTLL are the only clocks
* that are sourced by DPLL5, and both of these require this clock
* to be at 120 MHz for proper operation.
*/
#define DPLL5_FREQ_FOR_USBHOST 120000000
/* needed by omap3_core_dpll_m2_set_rate() */
struct clk *sdrc_ick_p, *arm_fck_p;
int omap3_dpll4_set_rate(struct clk *clk, unsigned long rate)
{
/*
* According to the 12-5 CDP code from TI, "Limitation 2.5"
* on 3430ES1 prevents us from changing DPLL multipliers or dividers
* on DPLL4.
*/
if (omap_rev() == OMAP3430_REV_ES1_0) {
pr_err("clock: DPLL4 cannot change rate due to "
"silicon 'Limitation 2.5' on 3430ES1.\n");
return -EINVAL;
}
return omap3_noncore_dpll_set_rate(clk, rate);
}
void __init omap3_clk_lock_dpll5(void)
{
struct clk *dpll5_clk;
struct clk *dpll5_m2_clk;
dpll5_clk = clk_get(NULL, "dpll5_ck");
clk_set_rate(dpll5_clk, DPLL5_FREQ_FOR_USBHOST);
clk_enable(dpll5_clk);
/* Program dpll5_m2_clk divider for no division */
dpll5_m2_clk = clk_get(NULL, "dpll5_m2_ck");
clk_enable(dpll5_m2_clk);
clk_set_rate(dpll5_m2_clk, DPLL5_FREQ_FOR_USBHOST);
clk_disable(dpll5_m2_clk);
clk_disable(dpll5_clk);
return;
}
/* Common clock code */
/*
* Switch the MPU rate if specified on cmdline. We cannot do this
* early until cmdline is parsed. XXX This should be removed from the
* clock code and handled by the OPP layer code in the near future.
*/
static int __init omap3xxx_clk_arch_init(void)
{
int ret;
if (!cpu_is_omap34xx())
return 0;
ret = omap2_clk_switch_mpurate_at_boot("dpll1_ck");
if (!ret)
omap2_clk_print_new_rates("osc_sys_ck", "core_ck", "arm_fck");
return ret;
}
arch_initcall(omap3xxx_clk_arch_init);
|
gpl-2.0
|
thanhphat11/Android_kernel_xiaomi_ALL
|
drivers/staging/bcm/CmHost.c
|
4859
|
103137
|
/************************************************************
* CMHOST.C
* This file contains the routines for handling Connection
* Management.
************************************************************/
/* #define CONN_MSG */
#include "headers.h"
enum E_CLASSIFIER_ACTION {
eInvalidClassifierAction,
eAddClassifier,
eReplaceClassifier,
eDeleteClassifier
};
static ULONG GetNextTargetBufferLocation(PMINI_ADAPTER Adapter, B_UINT16 tid);
/************************************************************
* Function - SearchSfid
*
 * Description - This routine searches the QoS queues having the
 * specified SFID given as the input parameter.
*
* Parameters - Adapter: Pointer to the Adapter structure
* uiSfid : Given SFID for matching
*
 * Returns - Queue index for this SFID (if matched),
 * else an invalid queue index (if not matched)
************************************************************/
int SearchSfid(PMINI_ADAPTER Adapter, UINT uiSfid)
{
int i;
for (i = (NO_OF_QUEUES-1); i >= 0; i--)
if (Adapter->PackInfo[i].ulSFID == uiSfid)
return i;
return NO_OF_QUEUES+1;
}
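/*
 * Note (illustrative, not from the original file): the search helpers in
 * this file signal "not found" with an out-of-range sentinel
 * (NO_OF_QUEUES+1 here, MAX_CLASSIFIERS+1 below) rather than a negative
 * value, so callers must compare against the table size:
 */
#if 0 /* example only */
int idx = SearchSfid(Adapter, uiSfid);
if (idx >= NO_OF_QUEUES) /* sentinel: no queue carries this SFID */
return -ENOENT; /* hypothetical error path */
use_queue(&Adapter->PackInfo[idx]); /* hypothetical consumer */
#endif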
/***************************************************************
* Function -SearchFreeSfid
*
 * Description - This routine searches for a free (available) SFID.
*
* Parameter - Adapter: Pointer to the Adapter structure
*
 * Returns - Queue index for the free SFID,
 * else an invalid index.
****************************************************************/
static int SearchFreeSfid(PMINI_ADAPTER Adapter)
{
int i;
for (i = 0; i < (NO_OF_QUEUES-1); i++)
if (Adapter->PackInfo[i].ulSFID == 0)
return i;
return NO_OF_QUEUES+1;
}
/*
* Function: SearchClsid
 * Description: This routine searches for the classifier having the specified ClassifierID
 * Input parameters: PMINI_ADAPTER Adapter - Adapter Context
 * unsigned int uiSfid - The SF in which the classifier is to be searched
 * B_UINT16 uiClassifierID - The classifier ID to be searched
 * Return: int : Classifier table index of the matching entry
*/
static int SearchClsid(PMINI_ADAPTER Adapter, ULONG ulSFID, B_UINT16 uiClassifierID)
{
int i;
for (i = 0; i < MAX_CLASSIFIERS; i++) {
if ((Adapter->astClassifierTable[i].bUsed) &&
(Adapter->astClassifierTable[i].uiClassifierRuleIndex == uiClassifierID) &&
(Adapter->astClassifierTable[i].ulSFID == ulSFID))
return i;
}
return MAX_CLASSIFIERS+1;
}
/*
* @ingroup ctrl_pkt_functions
 * This routine searches for a free (available) classifier entry in the classifier table.
* @return free Classifier Entry index in classifier table for specified SF
*/
static int SearchFreeClsid(PMINI_ADAPTER Adapter /**Adapter Context*/)
{
int i;
for (i = 0; i < MAX_CLASSIFIERS; i++) {
if (!Adapter->astClassifierTable[i].bUsed)
return i;
}
return MAX_CLASSIFIERS+1;
}
static VOID deleteSFBySfid(PMINI_ADAPTER Adapter, UINT uiSearchRuleIndex)
{
/* deleting all the packet held in the SF */
flush_queue(Adapter, uiSearchRuleIndex);
/* Deleting the all classifiers for this SF */
DeleteAllClassifiersForSF(Adapter, uiSearchRuleIndex);
/* Resetting only MIBS related entries in the SF */
memset((PVOID)&Adapter->PackInfo[uiSearchRuleIndex], 0, sizeof(S_MIBS_SERVICEFLOW_TABLE));
}
static inline VOID
CopyIpAddrToClassifier(S_CLASSIFIER_RULE *pstClassifierEntry,
B_UINT8 u8IpAddressLen, B_UINT8 *pu8IpAddressMaskSrc,
BOOLEAN bIpVersion6, E_IPADDR_CONTEXT eIpAddrContext)
{
int i = 0;
UINT nSizeOfIPAddressInBytes = IP_LENGTH_OF_ADDRESS;
UCHAR *ptrClassifierIpAddress = NULL;
UCHAR *ptrClassifierIpMask = NULL;
PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
if (bIpVersion6)
nSizeOfIPAddressInBytes = IPV6_ADDRESS_SIZEINBYTES;
/* Destination Ip Address */
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Ip Address Range Length:0x%X ", u8IpAddressLen);
if ((bIpVersion6 ? (IPV6_ADDRESS_SIZEINBYTES * MAX_IP_RANGE_LENGTH * 2) :
(TOTAL_MASKED_ADDRESS_IN_BYTES)) >= u8IpAddressLen) {
/*
 * checking both the mask and address together in Classification.
* So length will be : TotalLengthInBytes/nSizeOfIPAddressInBytes * 2
* (nSizeOfIPAddressInBytes for address and nSizeOfIPAddressInBytes for mask)
*/
if (eIpAddrContext == eDestIpAddress) {
pstClassifierEntry->ucIPDestinationAddressLength = u8IpAddressLen/(nSizeOfIPAddressInBytes * 2);
if (bIpVersion6) {
ptrClassifierIpAddress = pstClassifierEntry->stDestIpAddress.ucIpv6Address;
ptrClassifierIpMask = pstClassifierEntry->stDestIpAddress.ucIpv6Mask;
} else {
ptrClassifierIpAddress = pstClassifierEntry->stDestIpAddress.ucIpv4Address;
ptrClassifierIpMask = pstClassifierEntry->stDestIpAddress.ucIpv4Mask;
}
} else if (eIpAddrContext == eSrcIpAddress) {
pstClassifierEntry->ucIPSourceAddressLength = u8IpAddressLen/(nSizeOfIPAddressInBytes * 2);
if (bIpVersion6) {
ptrClassifierIpAddress = pstClassifierEntry->stSrcIpAddress.ucIpv6Address;
ptrClassifierIpMask = pstClassifierEntry->stSrcIpAddress.ucIpv6Mask;
} else {
ptrClassifierIpAddress = pstClassifierEntry->stSrcIpAddress.ucIpv4Address;
ptrClassifierIpMask = pstClassifierEntry->stSrcIpAddress.ucIpv4Mask;
}
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Address Length:0x%X\n", pstClassifierEntry->ucIPDestinationAddressLength);
while ((u8IpAddressLen >= nSizeOfIPAddressInBytes) && (i < MAX_IP_RANGE_LENGTH)) {
memcpy(ptrClassifierIpAddress +
(i * nSizeOfIPAddressInBytes),
(pu8IpAddressMaskSrc+(i*nSizeOfIPAddressInBytes*2)),
nSizeOfIPAddressInBytes);
if (!bIpVersion6) {
if (eIpAddrContext == eSrcIpAddress) {
pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[i] = ntohl(pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[i]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Src Ip Address:0x%luX ",
pstClassifierEntry->stSrcIpAddress.ulIpv4Addr[i]);
} else if (eIpAddrContext == eDestIpAddress) {
pstClassifierEntry->stDestIpAddress.ulIpv4Addr[i] = ntohl(pstClassifierEntry->stDestIpAddress.ulIpv4Addr[i]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Dest Ip Address:0x%luX ",
pstClassifierEntry->stDestIpAddress.ulIpv4Addr[i]);
}
}
u8IpAddressLen -= nSizeOfIPAddressInBytes;
if (u8IpAddressLen >= nSizeOfIPAddressInBytes) {
memcpy(ptrClassifierIpMask +
(i * nSizeOfIPAddressInBytes),
(pu8IpAddressMaskSrc+nSizeOfIPAddressInBytes +
(i*nSizeOfIPAddressInBytes*2)),
nSizeOfIPAddressInBytes);
if (!bIpVersion6) {
if (eIpAddrContext == eSrcIpAddress) {
pstClassifierEntry->stSrcIpAddress.ulIpv4Mask[i] =
ntohl(pstClassifierEntry->stSrcIpAddress.ulIpv4Mask[i]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Src Ip Mask Address:0x%luX ",
pstClassifierEntry->stSrcIpAddress.ulIpv4Mask[i]);
} else if (eIpAddrContext == eDestIpAddress) {
pstClassifierEntry->stDestIpAddress.ulIpv4Mask[i] =
ntohl(pstClassifierEntry->stDestIpAddress.ulIpv4Mask[i]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Dest Ip Mask Address:0x%luX ",
pstClassifierEntry->stDestIpAddress.ulIpv4Mask[i]);
}
}
u8IpAddressLen -= nSizeOfIPAddressInBytes;
}
if (u8IpAddressLen == 0)
pstClassifierEntry->bDestIpValid = TRUE;
i++;
}
if (bIpVersion6) {
/* Restore EndianNess of Struct */
for (i = 0; i < MAX_IP_RANGE_LENGTH * 4; i++) {
if (eIpAddrContext == eSrcIpAddress) {
pstClassifierEntry->stSrcIpAddress.ulIpv6Addr[i] = ntohl(pstClassifierEntry->stSrcIpAddress.ulIpv6Addr[i]);
pstClassifierEntry->stSrcIpAddress.ulIpv6Mask[i] = ntohl(pstClassifierEntry->stSrcIpAddress.ulIpv6Mask[i]);
} else if (eIpAddrContext == eDestIpAddress) {
pstClassifierEntry->stDestIpAddress.ulIpv6Addr[i] = ntohl(pstClassifierEntry->stDestIpAddress.ulIpv6Addr[i]);
pstClassifierEntry->stDestIpAddress.ulIpv6Mask[i] = ntohl(pstClassifierEntry->stDestIpAddress.ulIpv6Mask[i]);
}
}
}
}
}
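/*
 * Sketch (illustrative, not from the original file): CopyIpAddrToClassifier()
 * above consumes a buffer laid out as alternating address/mask pairs, each
 * nSizeOfIPAddressInBytes long: [addr0][mask0][addr1][mask1]... A minimal
 * standalone de-interleave of such a buffer, assuming 4-byte IPv4 entries,
 * would be:
 */
#if 0 /* example only */
#include <string.h>
#define ADDR_SZ 4 /* IPv4; IPv6 entries would be 16 bytes */
static unsigned int split_addr_mask_pairs(const unsigned char *src,
unsigned int len,
unsigned char addr[][ADDR_SZ],
unsigned char mask[][ADDR_SZ],
unsigned int max_pairs)
{
unsigned int n = 0;
/* each complete (address, mask) pair consumes 2*ADDR_SZ bytes */
while (len >= 2 * ADDR_SZ && n < max_pairs) {
memcpy(addr[n], src, ADDR_SZ);
memcpy(mask[n], src + ADDR_SZ, ADDR_SZ);
src += 2 * ADDR_SZ;
len -= 2 * ADDR_SZ;
n++;
}
return n; /* number of pairs recovered */
}
#endif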
void ClearTargetDSXBuffer(PMINI_ADAPTER Adapter, B_UINT16 TID, BOOLEAN bFreeAll)
{
int i;
for (i = 0; i < Adapter->ulTotalTargetBuffersAvailable; i++) {
if (Adapter->astTargetDsxBuffer[i].valid)
continue;
if ((bFreeAll) || (Adapter->astTargetDsxBuffer[i].tid == TID)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "ClearTargetDSXBuffer: found tid %d buffer cleared %lx\n",
TID, Adapter->astTargetDsxBuffer[i].ulTargetDsxBuffer);
Adapter->astTargetDsxBuffer[i].valid = 1;
Adapter->astTargetDsxBuffer[i].tid = 0;
Adapter->ulFreeTargetBufferCnt++;
}
}
}
/*
* @ingroup ctrl_pkt_functions
* copy classifier rule into the specified SF index
*/
static inline VOID CopyClassifierRuleToSF(PMINI_ADAPTER Adapter, stConvergenceSLTypes *psfCSType, UINT uiSearchRuleIndex, UINT nClassifierIndex)
{
S_CLASSIFIER_RULE *pstClassifierEntry = NULL;
/* VOID *pvPhsContext = NULL; */
int i;
/* UCHAR ucProtocolLength=0; */
/* ULONG ulPhsStatus; */
if (Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value == 0 ||
nClassifierIndex > (MAX_CLASSIFIERS-1))
return;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Storing Classifier Rule Index : %X",
ntohs(psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex));
if (nClassifierIndex > MAX_CLASSIFIERS-1)
return;
pstClassifierEntry = &Adapter->astClassifierTable[nClassifierIndex];
if (pstClassifierEntry) {
/* Store if Ipv6 */
pstClassifierEntry->bIpv6Protocol = (Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ? TRUE : FALSE;
/* Destination Port */
pstClassifierEntry->ucDestPortRangeLength = psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength / 4;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Destination Port Range Length:0x%X ", pstClassifierEntry->ucDestPortRangeLength);
if (psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength <= MAX_PORT_RANGE) {
for (i = 0; i < (pstClassifierEntry->ucDestPortRangeLength); i++) {
pstClassifierEntry->usDestPortRangeLo[i] = *((PUSHORT)(psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange+i));
pstClassifierEntry->usDestPortRangeHi[i] =
*((PUSHORT)(psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange+2+i));
pstClassifierEntry->usDestPortRangeLo[i] = ntohs(pstClassifierEntry->usDestPortRangeLo[i]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Destination Port Range Lo:0x%X ",
pstClassifierEntry->usDestPortRangeLo[i]);
pstClassifierEntry->usDestPortRangeHi[i] = ntohs(pstClassifierEntry->usDestPortRangeHi[i]);
}
} else {
pstClassifierEntry->ucDestPortRangeLength = 0;
}
/* Source Port */
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Source Port Range Length:0x%X ",
psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
if (psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength <= MAX_PORT_RANGE) {
pstClassifierEntry->ucSrcPortRangeLength = psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength/4;
for (i = 0; i < (pstClassifierEntry->ucSrcPortRangeLength); i++) {
pstClassifierEntry->usSrcPortRangeLo[i] =
*((PUSHORT)(psfCSType->cCPacketClassificationRule.
u8ProtocolSourcePortRange+i));
pstClassifierEntry->usSrcPortRangeHi[i] =
*((PUSHORT)(psfCSType->cCPacketClassificationRule.
u8ProtocolSourcePortRange+2+i));
pstClassifierEntry->usSrcPortRangeLo[i] =
ntohs(pstClassifierEntry->usSrcPortRangeLo[i]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Source Port Range Lo:0x%X ",
pstClassifierEntry->usSrcPortRangeLo[i]);
pstClassifierEntry->usSrcPortRangeHi[i] = ntohs(pstClassifierEntry->usSrcPortRangeHi[i]);
}
}
/* Destination Ip Address and Mask */
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Ip Destination Parameters : ");
CopyIpAddrToClassifier(pstClassifierEntry,
psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength,
psfCSType->cCPacketClassificationRule.u8IPDestinationAddress,
(Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ?
TRUE : FALSE, eDestIpAddress);
/* Source Ip Address and Mask */
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Ip Source Parameters : ");
CopyIpAddrToClassifier(pstClassifierEntry,
psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength,
psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress,
(Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion == IPV6) ? TRUE : FALSE,
eSrcIpAddress);
/* TOS */
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "TOS Length:0x%X ", psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
if (psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength == 3) {
pstClassifierEntry->ucIPTypeOfServiceLength = psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength;
pstClassifierEntry->ucTosLow = psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0];
pstClassifierEntry->ucTosHigh = psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1];
pstClassifierEntry->ucTosMask = psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2];
pstClassifierEntry->bTOSValid = TRUE;
}
if (psfCSType->cCPacketClassificationRule.u8Protocol == 0) {
/* the protocol field was not filled in by the BS */
pstClassifierEntry->ucProtocolLength = 0;
} else {
pstClassifierEntry->ucProtocolLength = 1; /* 1 valid protocol */
}
pstClassifierEntry->ucProtocol[0] = psfCSType->cCPacketClassificationRule.u8Protocol;
pstClassifierEntry->u8ClassifierRulePriority = psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority;
/* store the classifier rule ID and set this classifier entry as valid */
pstClassifierEntry->ucDirection = Adapter->PackInfo[uiSearchRuleIndex].ucDirection;
pstClassifierEntry->uiClassifierRuleIndex = ntohs(psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
pstClassifierEntry->usVCID_Value = Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
pstClassifierEntry->ulSFID = Adapter->PackInfo[uiSearchRuleIndex].ulSFID;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Search Index %d Dir: %d, Index: %d, Vcid: %d\n",
uiSearchRuleIndex, pstClassifierEntry->ucDirection,
pstClassifierEntry->uiClassifierRuleIndex,
pstClassifierEntry->usVCID_Value);
if (psfCSType->cCPacketClassificationRule.u8AssociatedPHSI)
pstClassifierEntry->u8AssociatedPHSI = psfCSType->cCPacketClassificationRule.u8AssociatedPHSI;
/* Copy ETH CS Parameters */
pstClassifierEntry->ucEthCSSrcMACLen = (psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddressLength);
memcpy(pstClassifierEntry->au8EThCSSrcMAC, psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress, MAC_ADDRESS_SIZE);
memcpy(pstClassifierEntry->au8EThCSSrcMACMask, psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress + MAC_ADDRESS_SIZE, MAC_ADDRESS_SIZE);
pstClassifierEntry->ucEthCSDestMACLen = (psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
memcpy(pstClassifierEntry->au8EThCSDestMAC, psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress, MAC_ADDRESS_SIZE);
memcpy(pstClassifierEntry->au8EThCSDestMACMask, psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress + MAC_ADDRESS_SIZE, MAC_ADDRESS_SIZE);
pstClassifierEntry->ucEtherTypeLen = (psfCSType->cCPacketClassificationRule.u8EthertypeLength);
memcpy(pstClassifierEntry->au8EthCSEtherType, psfCSType->cCPacketClassificationRule.u8Ethertype, NUM_ETHERTYPE_BYTES);
memcpy(pstClassifierEntry->usUserPriority, &psfCSType->cCPacketClassificationRule.u16UserPriority, 2);
pstClassifierEntry->usVLANID = ntohs(psfCSType->cCPacketClassificationRule.u16VLANID);
pstClassifierEntry->usValidityBitMap = ntohs(psfCSType->cCPacketClassificationRule.u16ValidityBitMap);
pstClassifierEntry->bUsed = TRUE;
}
}
/*
* @ingroup ctrl_pkt_functions
*/
static inline VOID DeleteClassifierRuleFromSF(PMINI_ADAPTER Adapter, UINT uiSearchRuleIndex, UINT nClassifierIndex)
{
S_CLASSIFIER_RULE *pstClassifierEntry = NULL;
B_UINT16 u16PacketClassificationRuleIndex;
USHORT usVCID;
/* VOID *pvPhsContext = NULL; */
/*ULONG ulPhsStatus; */
usVCID = Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
if (nClassifierIndex > MAX_CLASSIFIERS-1)
return;
if (usVCID == 0)
return;
u16PacketClassificationRuleIndex = Adapter->astClassifierTable[nClassifierIndex].uiClassifierRuleIndex;
pstClassifierEntry = &Adapter->astClassifierTable[nClassifierIndex];
if (pstClassifierEntry) {
pstClassifierEntry->bUsed = FALSE;
pstClassifierEntry->uiClassifierRuleIndex = 0;
memset(pstClassifierEntry, 0, sizeof(S_CLASSIFIER_RULE));
/* Delete the PHS Rule for this classifier */
PhsDeleteClassifierRule(&Adapter->stBCMPhsContext, usVCID, u16PacketClassificationRuleIndex);
}
}
/*
* @ingroup ctrl_pkt_functions
*/
VOID DeleteAllClassifiersForSF(PMINI_ADAPTER Adapter, UINT uiSearchRuleIndex)
{
S_CLASSIFIER_RULE *pstClassifierEntry = NULL;
int i;
/* B_UINT16 u16PacketClassificationRuleIndex; */
USHORT ulVCID;
/* VOID *pvPhsContext = NULL; */
/* ULONG ulPhsStatus; */
ulVCID = Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
if (ulVCID == 0)
return;
for (i = 0; i < MAX_CLASSIFIERS; i++) {
if (Adapter->astClassifierTable[i].usVCID_Value == ulVCID) {
pstClassifierEntry = &Adapter->astClassifierTable[i];
if (pstClassifierEntry->bUsed)
DeleteClassifierRuleFromSF(Adapter, uiSearchRuleIndex, i);
}
}
/* Delete All Phs Rules Associated with this SF */
PhsDeleteSFRules(&Adapter->stBCMPhsContext, ulVCID);
}
/*
 * This routine copies the Connection Management
* related data into the Adapter structure.
* @ingroup ctrl_pkt_functions
*/
static VOID CopyToAdapter(register PMINI_ADAPTER Adapter, /* <Pointer to the Adapter structure */
register pstServiceFlowParamSI psfLocalSet, /* <Pointer to the ServiceFlowParamSI structure */
register UINT uiSearchRuleIndex, /* <Index of Queue, to which this data belongs */
register UCHAR ucDsxType,
stLocalSFAddIndicationAlt *pstAddIndication) {
/* UCHAR ucProtocolLength = 0; */
ULONG ulSFID;
UINT nClassifierIndex = 0;
enum E_CLASSIFIER_ACTION eClassifierAction = eInvalidClassifierAction;
B_UINT16 u16PacketClassificationRuleIndex = 0;
int i;
stConvergenceSLTypes *psfCSType = NULL;
S_PHS_RULE sPhsRule;
USHORT uVCID = Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value;
UINT UGIValue = 0;
Adapter->PackInfo[uiSearchRuleIndex].bValid = TRUE;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Search Rule Index = %d\n", uiSearchRuleIndex);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "%s: SFID= %x ", __func__, ntohl(psfLocalSet->u32SFID));
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Updating Queue %d", uiSearchRuleIndex);
ulSFID = ntohl(psfLocalSet->u32SFID);
/* Store IP Version used */
/* Get The Version Of IP used (IPv6 or IPv4) from CSSpecification field of SF */
Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = 0;
Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = 0;
/* Enable IP/ETh CS Support As Required */
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "CopyToAdapter : u8CSSpecification : %X\n", psfLocalSet->u8CSSpecification);
switch (psfLocalSet->u8CSSpecification) {
case eCSPacketIPV4:
{
Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV4_CS;
break;
}
case eCSPacketIPV6:
{
Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV6_CS;
break;
}
case eCS802_3PacketEthernet:
case eCS802_1QPacketVLAN:
{
Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = ETH_CS_802_3;
break;
}
case eCSPacketIPV4Over802_1QVLAN:
case eCSPacketIPV4Over802_3Ethernet:
{
Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV4_CS;
Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = ETH_CS_802_3;
break;
}
case eCSPacketIPV6Over802_1QVLAN:
case eCSPacketIPV6Over802_3Ethernet:
{
Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV6_CS;
Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = ETH_CS_802_3;
break;
}
default:
{
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Error in value of CS Classification.. setting default to IP CS\n");
Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport = IPV4_CS;
break;
}
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "CopyToAdapter : Queue No : %X ETH CS Support : %X , IP CS Support : %X\n",
uiSearchRuleIndex,
Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport,
Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport);
/* Store IP Version used */
/* Get The Version Of IP used (IPv6 or IPv4) from CSSpecification field of SF */
if (Adapter->PackInfo[uiSearchRuleIndex].bIPCSSupport == IPV6_CS)
Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion = IPV6;
else
Adapter->PackInfo[uiSearchRuleIndex].ucIpVersion = IPV4;
/* Ensure that the ETH CS code doesn't get executed if the BS doesn't support ETH CS */
if (!Adapter->bETHCSEnabled)
Adapter->PackInfo[uiSearchRuleIndex].bEthCSSupport = 0;
if (psfLocalSet->u8ServiceClassNameLength > 0 && psfLocalSet->u8ServiceClassNameLength < 32)
memcpy(Adapter->PackInfo[uiSearchRuleIndex].ucServiceClassName, psfLocalSet->u8ServiceClassName, psfLocalSet->u8ServiceClassNameLength);
Adapter->PackInfo[uiSearchRuleIndex].u8QueueType = psfLocalSet->u8ServiceFlowSchedulingType;
if (Adapter->PackInfo[uiSearchRuleIndex].u8QueueType == BE && Adapter->PackInfo[uiSearchRuleIndex].ucDirection)
Adapter->usBestEffortQueueIndex = uiSearchRuleIndex;
Adapter->PackInfo[uiSearchRuleIndex].ulSFID = ntohl(psfLocalSet->u32SFID);
Adapter->PackInfo[uiSearchRuleIndex].u8TrafficPriority = psfLocalSet->u8TrafficPriority;
/* copy all the classifier in the Service Flow param structure */
for (i = 0; i < psfLocalSet->u8TotalClassifiers; i++) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Classifier index =%d", i);
psfCSType = &psfLocalSet->cConvergenceSLTypes[i];
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Classifier index =%d", i);
if (psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority)
Adapter->PackInfo[uiSearchRuleIndex].bClassifierPriority = TRUE;
if (psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority)
Adapter->PackInfo[uiSearchRuleIndex].bClassifierPriority = TRUE;
if (ucDsxType == DSA_ACK) {
eClassifierAction = eAddClassifier;
} else if (ucDsxType == DSC_ACK) {
switch (psfCSType->u8ClassfierDSCAction) {
case 0: /* DSC Add Classifier */
{
eClassifierAction = eAddClassifier;
}
break;
case 1: /* DSC Replace Classifier */
{
eClassifierAction = eReplaceClassifier;
}
break;
case 2: /* DSC Delete Classifier */
{
eClassifierAction = eDeleteClassifier;
}
break;
default:
{
eClassifierAction = eInvalidClassifierAction;
}
}
}
u16PacketClassificationRuleIndex = ntohs(psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
switch (eClassifierAction) {
case eAddClassifier:
{
/* Get a Free Classifier Index From Classifier table for this SF to add the Classifier */
/* Contained in this message */
nClassifierIndex = SearchClsid(Adapter, ulSFID, u16PacketClassificationRuleIndex);
if (nClassifierIndex > MAX_CLASSIFIERS) {
nClassifierIndex = SearchFreeClsid(Adapter);
if (nClassifierIndex > MAX_CLASSIFIERS) {
/* Failed To get a free Entry */
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Error Failed To get a free Classifier Entry");
break;
}
/* Copy the Classifier Rule for this service flow into our Classifier table maintained per SF. */
CopyClassifierRuleToSF(Adapter, psfCSType, uiSearchRuleIndex, nClassifierIndex);
} else {
/* This Classifier Already Exists and it is invalid to Add Classifier with existing PCRI */
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
"CopyToAdapter: Error The Specified Classifier Already Exists and attempted To Add Classifier with Same PCRI : 0x%x\n",
u16PacketClassificationRuleIndex);
}
}
break;
case eReplaceClassifier:
{
/* Get the Classifier Index From Classifier table for this SF and replace existing Classifier */
/* with the new classifier Contained in this message */
nClassifierIndex = SearchClsid(Adapter, ulSFID, u16PacketClassificationRuleIndex);
if (nClassifierIndex > MAX_CLASSIFIERS) {
/* Failed To search the classifier */
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Error Search for Classifier To be replaced failed");
break;
}
/* Copy the Classifier Rule for this service flow into our Classifier table maintained per SF. */
CopyClassifierRuleToSF(Adapter, psfCSType, uiSearchRuleIndex, nClassifierIndex);
}
break;
case eDeleteClassifier:
{
/* Get the Classifier Index from the Classifier table for this SF and delete the classifier */
/* specified in this message */
nClassifierIndex = SearchClsid(Adapter, ulSFID, u16PacketClassificationRuleIndex);
if (nClassifierIndex > MAX_CLASSIFIERS) {
/* Failed To search the classifier */
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Error Search for Classifier To be deleted failed");
break;
}
/* Delete This classifier */
DeleteClassifierRuleFromSF(Adapter, uiSearchRuleIndex, nClassifierIndex);
}
break;
default:
{
/* Invalid Action for classifier */
break;
}
}
}
/* Repeat parsing Classification Entries to process PHS Rules */
for (i = 0; i < psfLocalSet->u8TotalClassifiers; i++) {
psfCSType = &psfLocalSet->cConvergenceSLTypes[i];
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "psfCSType->u8PhsDSCAction : 0x%x\n", psfCSType->u8PhsDSCAction);
switch (psfCSType->u8PhsDSCAction) {
case eDeleteAllPHSRules:
{
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Deleting All PHS Rules For VCID: 0x%X\n", uVCID);
/* Delete All the PHS rules for this Service flow */
PhsDeleteSFRules(&Adapter->stBCMPhsContext, uVCID);
break;
}
case eDeletePHSRule:
{
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "PHS DSC Action = Delete PHS Rule\n");
if (psfCSType->cPhsRule.u8PHSI)
PhsDeletePHSRule(&Adapter->stBCMPhsContext, uVCID, psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
break;
}
default:
{
if (ucDsxType == DSC_ACK) {
/* BCM_DEBUG_PRINT(CONN_MSG,("Invalid PHS DSC Action For DSC\n",psfCSType->cPhsRule.u8PHSI)); */
break; /* For the DSC ACK case, the PHS DSC Action must be in the valid set */
}
}
/* Proceed to add the PHS rule for the DSA_ACK case even if the PHS DSC action is unspecified */
/* No break here, intentionally: fall through */
case eAddPHSRule:
case eSetPHSRule:
{
if (psfCSType->cPhsRule.u8PHSI) {
/* Apply This PHS Rule to all classifiers whose Associated PHSI Match */
unsigned int uiClassifierIndex = 0;
if (pstAddIndication->u8Direction == UPLINK_DIR) {
for (uiClassifierIndex = 0; uiClassifierIndex < MAX_CLASSIFIERS; uiClassifierIndex++) {
if ((Adapter->astClassifierTable[uiClassifierIndex].bUsed) &&
(Adapter->astClassifierTable[uiClassifierIndex].ulSFID == Adapter->PackInfo[uiSearchRuleIndex].ulSFID) &&
(Adapter->astClassifierTable[uiClassifierIndex].u8AssociatedPHSI == psfCSType->cPhsRule.u8PHSI)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL,
"Adding PHS Rule For Classifier: 0x%x cPhsRule.u8PHSI: 0x%x\n",
Adapter->astClassifierTable[uiClassifierIndex].uiClassifierRuleIndex,
psfCSType->cPhsRule.u8PHSI);
/* Update the PHS Rule for this classifier, as its Associated PHSI is defined */
/* Copy the PHS Rule */
sPhsRule.u8PHSI = psfCSType->cPhsRule.u8PHSI;
sPhsRule.u8PHSFLength = psfCSType->cPhsRule.u8PHSFLength;
sPhsRule.u8PHSMLength = psfCSType->cPhsRule.u8PHSMLength;
sPhsRule.u8PHSS = psfCSType->cPhsRule.u8PHSS;
sPhsRule.u8PHSV = psfCSType->cPhsRule.u8PHSV;
memcpy(sPhsRule.u8PHSF, psfCSType->cPhsRule.u8PHSF, MAX_PHS_LENGTHS);
memcpy(sPhsRule.u8PHSM, psfCSType->cPhsRule.u8PHSM, MAX_PHS_LENGTHS);
sPhsRule.u8RefCnt = 0;
sPhsRule.bUnclassifiedPHSRule = FALSE;
sPhsRule.PHSModifiedBytes = 0;
sPhsRule.PHSModifiedNumPackets = 0;
sPhsRule.PHSErrorNumPackets = 0;
/* bPHSRuleAssociated = TRUE; */
/* Store The PHS Rule for this classifier */
PhsUpdateClassifierRule(
&Adapter->stBCMPhsContext,
uVCID,
Adapter->astClassifierTable[uiClassifierIndex].uiClassifierRuleIndex,
&sPhsRule,
Adapter->astClassifierTable[uiClassifierIndex].u8AssociatedPHSI);
/* Update PHS Rule For the Classifier */
if (sPhsRule.u8PHSI) {
Adapter->astClassifierTable[uiClassifierIndex].u32PHSRuleID = sPhsRule.u8PHSI;
memcpy(&Adapter->astClassifierTable[uiClassifierIndex].sPhsRule, &sPhsRule, sizeof(S_PHS_RULE));
}
}
}
} else {
/* Error PHS Rule specified in signaling could not be applied to any classifier */
/* Copy the PHS Rule */
sPhsRule.u8PHSI = psfCSType->cPhsRule.u8PHSI;
sPhsRule.u8PHSFLength = psfCSType->cPhsRule.u8PHSFLength;
sPhsRule.u8PHSMLength = psfCSType->cPhsRule.u8PHSMLength;
sPhsRule.u8PHSS = psfCSType->cPhsRule.u8PHSS;
sPhsRule.u8PHSV = psfCSType->cPhsRule.u8PHSV;
memcpy(sPhsRule.u8PHSF, psfCSType->cPhsRule.u8PHSF, MAX_PHS_LENGTHS);
memcpy(sPhsRule.u8PHSM, psfCSType->cPhsRule.u8PHSM, MAX_PHS_LENGTHS);
sPhsRule.u8RefCnt = 0;
sPhsRule.bUnclassifiedPHSRule = TRUE;
sPhsRule.PHSModifiedBytes = 0;
sPhsRule.PHSModifiedNumPackets = 0;
sPhsRule.PHSErrorNumPackets = 0;
/* Store The PHS Rule for this classifier */
/*
* Passing the argument u8PHSI instead of clsid, because for DL with no classifier rule
* clsid will be zero, and hence we couldn't have multiple PHS rules for the same SF.
* To support multiple PHS rules, u8PHSI is passed instead.
*/
PhsUpdateClassifierRule(
&Adapter->stBCMPhsContext,
uVCID,
sPhsRule.u8PHSI,
&sPhsRule,
sPhsRule.u8PHSI);
}
}
}
break;
}
}
if (psfLocalSet->u32MaxSustainedTrafficRate == 0) {
/* No rate limit; set Max Sustained Traffic Rate to the maximum */
Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate = WIMAX_MAX_ALLOWED_RATE;
} else if (ntohl(psfLocalSet->u32MaxSustainedTrafficRate) > WIMAX_MAX_ALLOWED_RATE) {
/* Allowed rate specified is too large; limiting to the WiMAX maximum allowed rate */
Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate = WIMAX_MAX_ALLOWED_RATE;
} else {
Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate = ntohl(psfLocalSet->u32MaxSustainedTrafficRate);
}
Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency = ntohl(psfLocalSet->u32MaximumLatency);
if (Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency == 0) /* 0 should be treated as infinite */
Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency = MAX_LATENCY_ALLOWED;
if ((Adapter->PackInfo[uiSearchRuleIndex].u8QueueType == ERTPS ||
Adapter->PackInfo[uiSearchRuleIndex].u8QueueType == UGS))
UGIValue = ntohs(psfLocalSet->u16UnsolicitedGrantInterval);
if (UGIValue == 0)
UGIValue = DEFAULT_UG_INTERVAL;
/*
 * For UGI-based connections,
 * DEFAULT_UGI_FACTOR*UGIInterval worth of data is the max token count at the host.
 * The extra tokens ensure that a large amount of jitter won't cause a loss in throughput.
 * For non-UGI-based connections, 200 frames worth of data is the max token count at the host.
 */
Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize =
(DEFAULT_UGI_FACTOR*Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate*UGIValue)/1000;
if (Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize < WIMAX_MAX_MTU*8) {
UINT UGIFactor = 0;
/* Special handling to ensure the biggest packet size can go out from host to FW, as follows:
 * 1. Any packet from Host to FW can go out in a different packet size.
 * 2. So if the bucket count is smaller than the MTU, packets of size > TokenCount would get dropped.
 * 3. We can allow packets of MaxSize from Host->FW that go out from the FW in multiple SDUs, by fragmentation at the WiMAX layer.
 */
UGIFactor = (Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency/UGIValue + 1);
if (UGIFactor > DEFAULT_UGI_FACTOR)
Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize =
(UGIFactor*Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate*UGIValue)/1000;
if (Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize > WIMAX_MAX_MTU*8)
Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize = WIMAX_MAX_MTU*8;
}
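/*
 * Worked example (illustrative numbers, not from the original file): with a
 * hypothetical uiMaxAllowedRate of 1,000,000 bytes/sec, a UGIValue of 20 ms
 * and a stand-in factor of 5 for DEFAULT_UGI_FACTOR, the bucket above starts
 * at 5 * 1000000 * 20 / 1000 = 100,000 bytes; only if the result falls below
 * WIMAX_MAX_MTU*8 is it re-derived from uiMaxLatency/UGIValue and clamped.
 */
#if 0 /* example only */
unsigned int rate = 1000000; /* hypothetical bytes/sec */
unsigned int ugi = 20; /* hypothetical grant interval, ms */
unsigned int factor = 5; /* stand-in for DEFAULT_UGI_FACTOR */
unsigned int bucket = (factor * rate * ugi) / 1000; /* == 100000 bytes */
#endif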
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "LAT: %d, UGI: %d\n", Adapter->PackInfo[uiSearchRuleIndex].uiMaxLatency, UGIValue);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "uiMaxAllowedRate: 0x%x, u32MaxSustainedTrafficRate: 0x%x ,uiMaxBucketSize: 0x%x",
Adapter->PackInfo[uiSearchRuleIndex].uiMaxAllowedRate,
ntohl(psfLocalSet->u32MaxSustainedTrafficRate),
Adapter->PackInfo[uiSearchRuleIndex].uiMaxBucketSize);
/* copy the extended SF Parameters to Support MIBS */
CopyMIBSExtendedSFParameters(Adapter, psfLocalSet, uiSearchRuleIndex);
/* store header suppression enabled flag per SF */
Adapter->PackInfo[uiSearchRuleIndex].bHeaderSuppressionEnabled =
!(psfLocalSet->u8RequesttransmissionPolicy &
MASK_DISABLE_HEADER_SUPPRESSION);
kfree(Adapter->PackInfo[uiSearchRuleIndex].pstSFIndication);
Adapter->PackInfo[uiSearchRuleIndex].pstSFIndication = pstAddIndication;
/* Re Sort the SF list in PackInfo according to Traffic Priority */
SortPackInfo(Adapter);
/* Re Sort the Classifier Rules table and re - arrange
* according to Classifier Rule Priority
*/
SortClassifiers(Adapter);
DumpPhsRules(&Adapter->stBCMPhsContext);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "%s <=====", __func__);
}
/***********************************************************************
* Function - DumpCmControlPacket
*
 * Description - This routine dumps the contents of the AddIndication
* Structure in the Connection Management Control Packet
*
* Parameter - pvBuffer: Pointer to the buffer containing the
* AddIndication data.
*
* Returns - None
*************************************************************************/
static VOID DumpCmControlPacket(PVOID pvBuffer)
{
int uiLoopIndex;
int nIndex;
stLocalSFAddIndicationAlt *pstAddIndication;
UINT nCurClassifierCnt;
PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
pstAddIndication = (stLocalSFAddIndicationAlt *)pvBuffer;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "======>");
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Type: 0x%X", pstAddIndication->u8Type);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Direction: 0x%X", pstAddIndication->u8Direction);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TID: 0x%X", ntohs(pstAddIndication->u16TID));
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", ntohs(pstAddIndication->u16CID));
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VCID: 0x%X", ntohs(pstAddIndication->u16VCID));
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " AuthorizedSet--->");
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID: 0x%X", htonl(pstAddIndication->sfAuthorizedSet.u32SFID));
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", htons(pstAddIndication->sfAuthorizedSet.u16CID));
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength: 0x%X",
pstAddIndication->sfAuthorizedSet.u8ServiceClassNameLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName: 0x%X ,0x%X , 0x%X, 0x%X, 0x%X, 0x%X",
pstAddIndication->sfAuthorizedSet.u8ServiceClassName[0],
pstAddIndication->sfAuthorizedSet.u8ServiceClassName[1],
pstAddIndication->sfAuthorizedSet.u8ServiceClassName[2],
pstAddIndication->sfAuthorizedSet.u8ServiceClassName[3],
pstAddIndication->sfAuthorizedSet.u8ServiceClassName[4],
pstAddIndication->sfAuthorizedSet.u8ServiceClassName[5]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService: 0x%X", pstAddIndication->sfAuthorizedSet.u8MBSService);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet: 0x%X", pstAddIndication->sfAuthorizedSet.u8QosParamSet);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority: 0x%X, %p",
pstAddIndication->sfAuthorizedSet.u8TrafficPriority, &pstAddIndication->sfAuthorizedSet.u8TrafficPriority);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxSustainedTrafficRate: 0x%X 0x%p",
pstAddIndication->sfAuthorizedSet.u32MaxSustainedTrafficRate,
&pstAddIndication->sfAuthorizedSet.u32MaxSustainedTrafficRate);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst: 0x%X", pstAddIndication->sfAuthorizedSet.u32MaxTrafficBurst);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate : 0x%X",
pstAddIndication->sfAuthorizedSet.u32MinReservedTrafficRate);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength: 0x%X",
pstAddIndication->sfAuthorizedSet.u8VendorSpecificQoSParamLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam: 0x%X",
pstAddIndication->sfAuthorizedSet.u8VendorSpecificQoSParam[0]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType: 0x%X",
pstAddIndication->sfAuthorizedSet.u8ServiceFlowSchedulingType);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter: 0x%X", pstAddIndication->sfAuthorizedSet.u32ToleratedJitter);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency: 0x%X", pstAddIndication->sfAuthorizedSet.u32MaximumLatency);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%X",
pstAddIndication->sfAuthorizedSet.u8FixedLengthVSVariableLengthSDUIndicator);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize: 0x%X", pstAddIndication->sfAuthorizedSet.u8SDUSize);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TargetSAID: 0x%X", pstAddIndication->sfAuthorizedSet.u16TargetSAID);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQEnable: 0x%X", pstAddIndication->sfAuthorizedSet.u8ARQEnable);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQWindowSize: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQWindowSize);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryTxTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQRetryTxTimeOut);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryRxTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQRetryRxTimeOut);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockLifeTime: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQBlockLifeTime);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQSyncLossTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQSyncLossTimeOut);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQDeliverInOrder: 0x%X", pstAddIndication->sfAuthorizedSet.u8ARQDeliverInOrder);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRxPurgeTimeOut: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQRxPurgeTimeOut);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockSize: 0x%X", pstAddIndication->sfAuthorizedSet.u16ARQBlockSize);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8CSSpecification: 0x%X", pstAddIndication->sfAuthorizedSet.u8CSSpecification);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TypeOfDataDeliveryService: 0x%X",
pstAddIndication->sfAuthorizedSet.u8TypeOfDataDeliveryService);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16SDUInterArrivalTime: 0x%X", pstAddIndication->sfAuthorizedSet.u16SDUInterArrivalTime);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TimeBase: 0x%X", pstAddIndication->sfAuthorizedSet.u16TimeBase);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8PagingPreference: 0x%X", pstAddIndication->sfAuthorizedSet.u8PagingPreference);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UnsolicitedPollingInterval: 0x%X",
pstAddIndication->sfAuthorizedSet.u16UnsolicitedPollingInterval);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "sfAuthorizedSet.u8HARQChannelMapping %x %x %x ",
*(unsigned int *)pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping,
*(unsigned int *)&pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping[4],
*(USHORT *)&pstAddIndication->sfAuthorizedSet.u8HARQChannelMapping[8]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficIndicationPreference: 0x%X",
pstAddIndication->sfAuthorizedSet.u8TrafficIndicationPreference);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received: 0x%X", pstAddIndication->sfAuthorizedSet.u8TotalClassifiers);
nCurClassifierCnt = pstAddIndication->sfAuthorizedSet.u8TotalClassifiers;
if (nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
nCurClassifierCnt = MAX_CLASSIFIERS_IN_SF;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "pstAddIndication->sfAuthorizedSet.bValid %d", pstAddIndication->sfAuthorizedSet.bValid);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "pstAddIndication->sfAuthorizedSet.u16MacOverhead %x", pstAddIndication->sfAuthorizedSet.u16MacOverhead);
if (!pstAddIndication->sfAuthorizedSet.bValid)
pstAddIndication->sfAuthorizedSet.bValid = 1;
for (nIndex = 0; nIndex < nCurClassifierCnt; nIndex++) {
stConvergenceSLTypes *psfCSType = NULL;
psfCSType = &pstAddIndication->sfAuthorizedSet.cConvergenceSLTypes[nIndex];
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "psfCSType = %p", psfCSType);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "CCPacketClassificationRuleSI====>");
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ClassifierRulePriority: 0x%X ",
psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfServiceLength: 0x%X ",
psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfService[3]: 0x%X ,0x%X ,0x%X ",
psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
for (uiLoopIndex = 0; uiLoopIndex < 1; uiLoopIndex++)
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Protocol: 0x%02X ",
psfCSType->cCPacketClassificationRule.u8Protocol);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength: 0x%X ",
psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);
for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32]: 0x%02X ",
psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength: 0x%X ",
psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);
for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddress[32]: 0x%02X ",
psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRangeLength:0x%X ",
psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRange[4]: 0x%02X ,0x%02X ,0x%02X ,0x%02X ",
psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0],
psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1],
psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2],
psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRangeLength: 0x%02X ",
psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRange[4]: 0x%02X ,0x%02X ,0x%02X ,0x%02X ",
psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0],
psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1],
psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2],
psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddressLength: 0x%02X ",
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddress[6]: 0x %02X %02X %02X %02X %02X %02X",
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[0],
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[1],
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[2],
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[3],
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[4],
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[5]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddressLength: 0x%02X ",
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: 0x %02X %02X %02X %02X %02X %02X",
psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[0],
psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[1],
psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[2],
psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[3],
psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[4],
psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[5]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthertypeLength: 0x%02X ",
psfCSType->cCPacketClassificationRule.u8EthertypeLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Ethertype[3]: 0x%02X ,0x%02X ,0x%02X ",
psfCSType->cCPacketClassificationRule.u8Ethertype[0],
psfCSType->cCPacketClassificationRule.u8Ethertype[1],
psfCSType->cCPacketClassificationRule.u8Ethertype[2]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UserPriority: 0x%X ", psfCSType->cCPacketClassificationRule.u16UserPriority);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VLANID: 0x%X ", psfCSType->cCPacketClassificationRule.u16VLANID);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8AssociatedPHSI: 0x%02X ", psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16PacketClassificationRuleIndex: 0x%X ",
psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParamLength: 0x%X ",
psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParam[1]: 0x%X ",
psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
#ifdef VERSION_D5
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLableLength: 0x%X ",
psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLable[6]: 0x %02X %02X %02X %02X %02X %02X ",
psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[0],
psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[1],
psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[2],
psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[3],
psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[4],
psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[5]);
#endif
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "bValid: 0x%02X", pstAddIndication->sfAuthorizedSet.bValid);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "AdmittedSet--->");
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID: 0x%X", pstAddIndication->sfAdmittedSet.u32SFID);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", pstAddIndication->sfAdmittedSet.u16CID);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength: 0x%X",
pstAddIndication->sfAdmittedSet.u8ServiceClassNameLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName: 0x %02X %02X %02X %02X %02X %02X",
pstAddIndication->sfAdmittedSet.u8ServiceClassName[0],
pstAddIndication->sfAdmittedSet.u8ServiceClassName[1],
pstAddIndication->sfAdmittedSet.u8ServiceClassName[2],
pstAddIndication->sfAdmittedSet.u8ServiceClassName[3],
pstAddIndication->sfAdmittedSet.u8ServiceClassName[4],
pstAddIndication->sfAdmittedSet.u8ServiceClassName[5]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService: 0x%02X", pstAddIndication->sfAdmittedSet.u8MBSService);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet: 0x%02X", pstAddIndication->sfAdmittedSet.u8QosParamSet);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority: 0x%02X", pstAddIndication->sfAdmittedSet.u8TrafficPriority);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst: 0x%X", pstAddIndication->sfAdmittedSet.u32MaxTrafficBurst);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate: 0x%X",
pstAddIndication->sfAdmittedSet.u32MinReservedTrafficRate);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength: 0x%02X",
pstAddIndication->sfAdmittedSet.u8VendorSpecificQoSParamLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam: 0x%02X",
pstAddIndication->sfAdmittedSet.u8VendorSpecificQoSParam[0]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType: 0x%02X",
pstAddIndication->sfAdmittedSet.u8ServiceFlowSchedulingType);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter: 0x%X", pstAddIndication->sfAdmittedSet.u32ToleratedJitter);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency: 0x%X", pstAddIndication->sfAdmittedSet.u32MaximumLatency);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%02X",
pstAddIndication->sfAdmittedSet.u8FixedLengthVSVariableLengthSDUIndicator);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize: 0x%02X", pstAddIndication->sfAdmittedSet.u8SDUSize);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TargetSAID: 0x%02X", pstAddIndication->sfAdmittedSet.u16TargetSAID);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQEnable: 0x%02X", pstAddIndication->sfAdmittedSet.u8ARQEnable);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQWindowSize: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQWindowSize);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryTxTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQRetryTxTimeOut);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRetryRxTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQRetryRxTimeOut);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockLifeTime: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQBlockLifeTime);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQSyncLossTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQSyncLossTimeOut);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ARQDeliverInOrder: 0x%02X", pstAddIndication->sfAdmittedSet.u8ARQDeliverInOrder);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQRxPurgeTimeOut: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQRxPurgeTimeOut);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16ARQBlockSize: 0x%X", pstAddIndication->sfAdmittedSet.u16ARQBlockSize);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8CSSpecification: 0x%02X", pstAddIndication->sfAdmittedSet.u8CSSpecification);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TypeOfDataDeliveryService: 0x%02X",
pstAddIndication->sfAdmittedSet.u8TypeOfDataDeliveryService);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16SDUInterArrivalTime: 0x%X", pstAddIndication->sfAdmittedSet.u16SDUInterArrivalTime);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16TimeBase: 0x%X", pstAddIndication->sfAdmittedSet.u16TimeBase);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8PagingPreference: 0x%X", pstAddIndication->sfAdmittedSet.u8PagingPreference);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficIndicationPreference: 0x%02X",
pstAddIndication->sfAdmittedSet.u8TrafficIndicationPreference);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received: 0x%X", pstAddIndication->sfAdmittedSet.u8TotalClassifiers);
nCurClassifierCnt = pstAddIndication->sfAdmittedSet.u8TotalClassifiers;
if (nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
nCurClassifierCnt = MAX_CLASSIFIERS_IN_SF;
for (nIndex = 0; nIndex < nCurClassifierCnt; nIndex++) {
stConvergenceSLTypes *psfCSType = &pstAddIndication->sfAdmittedSet.cConvergenceSLTypes[nIndex];
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " CCPacketClassificationRuleSI====>");
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ClassifierRulePriority: 0x%02X ",
psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfServiceLength: 0x%02X",
psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPTypeOfService[3]: 0x%02X %02X %02X",
psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Protocol: 0x%02X ", psfCSType->cCPacketClassificationRule.u8Protocol);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength: 0x%02X ",
psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);
for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32]: 0x%02X ",
psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength: 0x%02X ",
psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);
for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddress[32]: 0x%02X ",
psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRangeLength: 0x%02X ",
psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolSourcePortRange[4]: 0x %02X %02X %02X %02X ",
psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0],
psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1],
psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2],
psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRangeLength: 0x%02X ",
psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ProtocolDestPortRange[4]: 0x %02X %02X %02X %02X ",
psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0],
psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1],
psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2],
psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddressLength: 0x%02X ",
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetDestMacAddress[6]: 0x %02X %02X %02X %02X %02X %02X",
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[0],
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[1],
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[2],
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[3],
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[4],
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[5]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddressLength: 0x%02X ",
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: 0x %02X %02X %02X %02X %02X %02X",
psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[0],
psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[1],
psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[2],
psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[3],
psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[4],
psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[5]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthertypeLength: 0x%02X ", psfCSType->cCPacketClassificationRule.u8EthertypeLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8Ethertype[3]: 0x%02X %02X %02X",
psfCSType->cCPacketClassificationRule.u8Ethertype[0],
psfCSType->cCPacketClassificationRule.u8Ethertype[1],
psfCSType->cCPacketClassificationRule.u8Ethertype[2]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16UserPriority: 0x%X ", psfCSType->cCPacketClassificationRule.u16UserPriority);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16VLANID: 0x%X ", psfCSType->cCPacketClassificationRule.u16VLANID);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8AssociatedPHSI: 0x%02X ", psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16PacketClassificationRuleIndex: 0x%X ",
psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParamLength: 0x%02X",
psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificClassifierParam[1]: 0x%02X ",
psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
#ifdef VERSION_D5
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLableLength: 0x%X ",
psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPv6FlowLable[6]: 0x %02X %02X %02X %02X %02X %02X ",
psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[0],
psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[1],
psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[2],
psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[3],
psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[4],
psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[5]);
#endif
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "bValid: 0x%X", pstAddIndication->sfAdmittedSet.bValid);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " ActiveSet--->");
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32SFID: 0x%X", pstAddIndication->sfActiveSet.u32SFID);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u16CID: 0x%X", pstAddIndication->sfActiveSet.u16CID);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassNameLength: 0x%X", pstAddIndication->sfActiveSet.u8ServiceClassNameLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceClassName: 0x %02X %02X %02X %02X %02X %02X",
pstAddIndication->sfActiveSet.u8ServiceClassName[0],
pstAddIndication->sfActiveSet.u8ServiceClassName[1],
pstAddIndication->sfActiveSet.u8ServiceClassName[2],
pstAddIndication->sfActiveSet.u8ServiceClassName[3],
pstAddIndication->sfActiveSet.u8ServiceClassName[4],
pstAddIndication->sfActiveSet.u8ServiceClassName[5]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8MBSService: 0x%02X", pstAddIndication->sfActiveSet.u8MBSService);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8QosParamSet: 0x%02X", pstAddIndication->sfActiveSet.u8QosParamSet);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8TrafficPriority: 0x%02X", pstAddIndication->sfActiveSet.u8TrafficPriority);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaxTrafficBurst: 0x%X", pstAddIndication->sfActiveSet.u32MaxTrafficBurst);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MinReservedTrafficRate: 0x%X",
pstAddIndication->sfActiveSet.u32MinReservedTrafficRate);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParamLength: 0x%02X",
pstAddIndication->sfActiveSet.u8VendorSpecificQoSParamLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8VendorSpecificQoSParam: 0x%02X",
pstAddIndication->sfActiveSet.u8VendorSpecificQoSParam[0]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8ServiceFlowSchedulingType: 0x%02X",
pstAddIndication->sfActiveSet.u8ServiceFlowSchedulingType);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32ToleratedJitter: 0x%X", pstAddIndication->sfActiveSet.u32ToleratedJitter);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u32MaximumLatency: 0x%X", pstAddIndication->sfActiveSet.u32MaximumLatency);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8FixedLengthVSVariableLengthSDUIndicator: 0x%02X",
pstAddIndication->sfActiveSet.u8FixedLengthVSVariableLengthSDUIndicator);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8SDUSize: 0x%X", pstAddIndication->sfActiveSet.u8SDUSize);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16TargetSAID: 0x%X", pstAddIndication->sfActiveSet.u16TargetSAID);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ARQEnable: 0x%X", pstAddIndication->sfActiveSet.u8ARQEnable);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQWindowSize: 0x%X", pstAddIndication->sfActiveSet.u16ARQWindowSize);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRetryTxTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQRetryTxTimeOut);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRetryRxTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQRetryRxTimeOut);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQBlockLifeTime: 0x%X", pstAddIndication->sfActiveSet.u16ARQBlockLifeTime);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQSyncLossTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQSyncLossTimeOut);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ARQDeliverInOrder: 0x%X", pstAddIndication->sfActiveSet.u8ARQDeliverInOrder);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQRxPurgeTimeOut: 0x%X", pstAddIndication->sfActiveSet.u16ARQRxPurgeTimeOut);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16ARQBlockSize: 0x%X", pstAddIndication->sfActiveSet.u16ARQBlockSize);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8CSSpecification: 0x%X", pstAddIndication->sfActiveSet.u8CSSpecification);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8TypeOfDataDeliveryService: 0x%X",
pstAddIndication->sfActiveSet.u8TypeOfDataDeliveryService);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16SDUInterArrivalTime: 0x%X", pstAddIndication->sfActiveSet.u16SDUInterArrivalTime);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16TimeBase: 0x%X", pstAddIndication->sfActiveSet.u16TimeBase);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8PagingPreference: 0x%X", pstAddIndication->sfActiveSet.u8PagingPreference);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8TrafficIndicationPreference: 0x%X",
pstAddIndication->sfActiveSet.u8TrafficIndicationPreference);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " Total Classifiers Received: 0x%X", pstAddIndication->sfActiveSet.u8TotalClassifiers);
nCurClassifierCnt = pstAddIndication->sfActiveSet.u8TotalClassifiers;
if (nCurClassifierCnt > MAX_CLASSIFIERS_IN_SF)
nCurClassifierCnt = MAX_CLASSIFIERS_IN_SF;
for (nIndex = 0; nIndex < nCurClassifierCnt; nIndex++) {
stConvergenceSLTypes *psfCSType = &pstAddIndication->sfActiveSet.cConvergenceSLTypes[nIndex];
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " CCPacketClassificationRuleSI====>");
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ClassifierRulePriority: 0x%X ",
psfCSType->cCPacketClassificationRule.u8ClassifierRulePriority);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPTypeOfServiceLength: 0x%X ",
psfCSType->cCPacketClassificationRule.u8IPTypeOfServiceLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPTypeOfService[3]: 0x%X ,0x%X ,0x%X ",
psfCSType->cCPacketClassificationRule.u8IPTypeOfService[0],
psfCSType->cCPacketClassificationRule.u8IPTypeOfService[1],
psfCSType->cCPacketClassificationRule.u8IPTypeOfService[2]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8Protocol: 0x%X ", psfCSType->cCPacketClassificationRule.u8Protocol);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddressLength: 0x%X ",
psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddressLength);
for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPMaskedSourceAddress[32]: 0x%X ",
psfCSType->cCPacketClassificationRule.u8IPMaskedSourceAddress[uiLoopIndex]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8IPDestinationAddressLength: 0x%02X ",
psfCSType->cCPacketClassificationRule.u8IPDestinationAddressLength);
for (uiLoopIndex = 0; uiLoopIndex < 32; uiLoopIndex++)
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPDestinationAddress[32]:0x%X ",
psfCSType->cCPacketClassificationRule.u8IPDestinationAddress[uiLoopIndex]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolSourcePortRangeLength: 0x%X ",
psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRangeLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolSourcePortRange[4]: 0x%X ,0x%X ,0x%X ,0x%X ",
psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[0],
psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[1],
psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[2],
psfCSType->cCPacketClassificationRule.u8ProtocolSourcePortRange[3]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolDestPortRangeLength: 0x%X ",
psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRangeLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8ProtocolDestPortRange[4]: 0x%X ,0x%X ,0x%X ,0x%X ",
psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[0],
psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[1],
psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[2],
psfCSType->cCPacketClassificationRule.u8ProtocolDestPortRange[3]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthernetDestMacAddressLength: 0x%X ",
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthernetDestMacAddress[6]: 0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X",
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[0],
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[1],
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[2],
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[3],
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[4],
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddress[5]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthernetSourceMACAddressLength: 0x%X ",
psfCSType->cCPacketClassificationRule.u8EthernetDestMacAddressLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, "u8EthernetSourceMACAddress[6]: 0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X",
psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[0],
psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[1],
psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[2],
psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[3],
psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[4],
psfCSType->cCPacketClassificationRule.u8EthernetSourceMACAddress[5]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8EthertypeLength: 0x%X ",
psfCSType->cCPacketClassificationRule.u8EthertypeLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8Ethertype[3]: 0x%X ,0x%X ,0x%X ",
psfCSType->cCPacketClassificationRule.u8Ethertype[0],
psfCSType->cCPacketClassificationRule.u8Ethertype[1],
psfCSType->cCPacketClassificationRule.u8Ethertype[2]);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16UserPriority: 0x%X ",
psfCSType->cCPacketClassificationRule.u16UserPriority);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16VLANID: 0x%X ", psfCSType->cCPacketClassificationRule.u16VLANID);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8AssociatedPHSI: 0x%X ", psfCSType->cCPacketClassificationRule.u8AssociatedPHSI);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u16PacketClassificationRuleIndex:0x%X ",
psfCSType->cCPacketClassificationRule.u16PacketClassificationRuleIndex);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8VendorSpecificClassifierParamLength:0x%X ",
psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParamLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8VendorSpecificClassifierParam[1]:0x%X ",
psfCSType->cCPacketClassificationRule.u8VendorSpecificClassifierParam[0]);
#ifdef VERSION_D5
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPv6FlowLableLength: 0x%X ",
psfCSType->cCPacketClassificationRule.u8IPv6FlowLableLength);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " u8IPv6FlowLable[6]: 0x%X ,0x%X ,0x%X ,0x%X ,0x%X ,0x%X ",
psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[0],
psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[1],
psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[2],
psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[3],
psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[4],
psfCSType->cCPacketClassificationRule.u8IPv6FlowLable[5]);
#endif
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_CONTROL, DBG_LVL_ALL, " bValid: 0x%X", pstAddIndication->sfActiveSet.bValid);
}
static inline ULONG RestoreSFParam(PMINI_ADAPTER Adapter, ULONG ulAddrSFParamSet, PUCHAR pucDestBuffer)
{
UINT nBytesToRead = sizeof(stServiceFlowParamSI);
if (ulAddrSFParamSet == 0 || NULL == pucDestBuffer) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Got Param address as 0!!");
return 0;
}
ulAddrSFParamSet = ntohl(ulAddrSFParamSet);
/* Read out the SF Param Set At the indicated Location */
if (rdm(Adapter, ulAddrSFParamSet, (PUCHAR)pucDestBuffer, nBytesToRead) < 0)
return STATUS_FAILURE;
return 1;
}
static ULONG StoreSFParam(PMINI_ADAPTER Adapter, PUCHAR pucSrcBuffer, ULONG ulAddrSFParamSet)
{
UINT nBytesToWrite = sizeof(stServiceFlowParamSI);
int ret = 0;
if (ulAddrSFParamSet == 0 || NULL == pucSrcBuffer)
return 0;
ret = wrm(Adapter, ulAddrSFParamSet, (u8 *)pucSrcBuffer, nBytesToWrite);
if (ret < 0) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "%s:%d WRM failed", __func__, __LINE__);
return ret;
}
return 1;
}
ULONG StoreCmControlResponseMessage(PMINI_ADAPTER Adapter, PVOID pvBuffer, UINT *puBufferLength)
{
stLocalSFAddIndicationAlt *pstAddIndicationAlt = NULL;
stLocalSFAddIndication *pstAddIndication = NULL;
stLocalSFDeleteRequest *pstDeletionRequest;
UINT uiSearchRuleIndex;
ULONG ulSFID;
pstAddIndicationAlt = (stLocalSFAddIndicationAlt *)(pvBuffer);
/*
 * In the case of a DSD request from the MS, delete this SF immediately
 * so that no further packets are classified to it.
 */
if (pstAddIndicationAlt->u8Type == DSD_REQ) {
pstDeletionRequest = (stLocalSFDeleteRequest *)pvBuffer;
ulSFID = ntohl(pstDeletionRequest->u32SFID);
uiSearchRuleIndex = SearchSfid(Adapter, ulSFID);
if (uiSearchRuleIndex < NO_OF_QUEUES) {
deleteSFBySfid(Adapter, uiSearchRuleIndex);
Adapter->u32TotalDSD++;
}
return 1;
}
if ((pstAddIndicationAlt->u8Type == DSD_RSP) ||
(pstAddIndicationAlt->u8Type == DSD_ACK)) {
/* No special handling; send the message on as it is */
return 1;
}
/* For DSA_REQ, only up to "psfAuthorizedSet" parameter should be accessed by driver! */
pstAddIndication = kmalloc(sizeof(*pstAddIndication), GFP_KERNEL);
if (pstAddIndication == NULL)
return 0;
/* AUTHORIZED SET */
pstAddIndication->psfAuthorizedSet = (stServiceFlowParamSI *)
GetNextTargetBufferLocation(Adapter, pstAddIndicationAlt->u16TID);
if (!pstAddIndication->psfAuthorizedSet) {
kfree(pstAddIndication);
return 0;
}
if (StoreSFParam(Adapter, (PUCHAR)&pstAddIndicationAlt->sfAuthorizedSet,
(ULONG)pstAddIndication->psfAuthorizedSet) != 1) {
kfree(pstAddIndication);
return 0;
}
/* this can't possibly be right */
pstAddIndication->psfAuthorizedSet = (stServiceFlowParamSI *)ntohl((ULONG)pstAddIndication->psfAuthorizedSet);
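/*
 * Presumably the target firmware expects this buffer address in network
 * (big-endian) byte order, so the 32-bit device address is byte-swapped
 * in place through the pointer cast; this only round-trips safely when
 * host pointers are 32 bits wide, hence the note above.
 */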
if (pstAddIndicationAlt->u8Type == DSA_REQ) {
stLocalSFAddRequest AddRequest;
AddRequest.u8Type = pstAddIndicationAlt->u8Type;
AddRequest.eConnectionDir = pstAddIndicationAlt->u8Direction;
AddRequest.u16TID = pstAddIndicationAlt->u16TID;
AddRequest.u16CID = pstAddIndicationAlt->u16CID;
AddRequest.u16VCID = pstAddIndicationAlt->u16VCID;
AddRequest.psfParameterSet = pstAddIndication->psfAuthorizedSet;
(*puBufferLength) = sizeof(stLocalSFAddRequest);
memcpy(pvBuffer, &AddRequest, sizeof(stLocalSFAddRequest));
kfree(pstAddIndication);
return 1;
}
/* Since it's not a DSA_REQ, we can access all fields in pstAddIndicationAlt */
/* We need to extract the structure from the buffer and pack it differently */
pstAddIndication->u8Type = pstAddIndicationAlt->u8Type;
pstAddIndication->eConnectionDir = pstAddIndicationAlt->u8Direction;
pstAddIndication->u16TID = pstAddIndicationAlt->u16TID;
pstAddIndication->u16CID = pstAddIndicationAlt->u16CID;
pstAddIndication->u16VCID = pstAddIndicationAlt->u16VCID;
pstAddIndication->u8CC = pstAddIndicationAlt->u8CC;
/* ADMITTED SET */
pstAddIndication->psfAdmittedSet = (stServiceFlowParamSI *)
GetNextTargetBufferLocation(Adapter, pstAddIndicationAlt->u16TID);
if (!pstAddIndication->psfAdmittedSet) {
kfree(pstAddIndication);
return 0;
}
if (StoreSFParam(Adapter, (PUCHAR)&pstAddIndicationAlt->sfAdmittedSet, (ULONG)pstAddIndication->psfAdmittedSet) != 1) {
kfree(pstAddIndication);
return 0;
}
pstAddIndication->psfAdmittedSet = (stServiceFlowParamSI *)ntohl((ULONG)pstAddIndication->psfAdmittedSet);
/* ACTIVE SET */
pstAddIndication->psfActiveSet = (stServiceFlowParamSI *)
GetNextTargetBufferLocation(Adapter, pstAddIndicationAlt->u16TID);
if (!pstAddIndication->psfActiveSet) {
kfree(pstAddIndication);
return 0;
}
if (StoreSFParam(Adapter, (PUCHAR)&pstAddIndicationAlt->sfActiveSet, (ULONG)pstAddIndication->psfActiveSet) != 1) {
kfree(pstAddIndication);
return 0;
}
pstAddIndication->psfActiveSet = (stServiceFlowParamSI *)ntohl((ULONG)pstAddIndication->psfActiveSet);
(*puBufferLength) = sizeof(stLocalSFAddIndication);
*(stLocalSFAddIndication *)pvBuffer = *pstAddIndication;
kfree(pstAddIndication);
return 1;
}
static inline stLocalSFAddIndicationAlt
*RestoreCmControlResponseMessage(register PMINI_ADAPTER Adapter, register PVOID pvBuffer)
{
ULONG ulStatus = 0;
stLocalSFAddIndication *pstAddIndication = NULL;
stLocalSFAddIndicationAlt *pstAddIndicationDest = NULL;
pstAddIndication = (stLocalSFAddIndication *)(pvBuffer);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "=====>");
if ((pstAddIndication->u8Type == DSD_REQ) ||
(pstAddIndication->u8Type == DSD_RSP) ||
(pstAddIndication->u8Type == DSD_ACK))
return (stLocalSFAddIndicationAlt *)pvBuffer;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Inside RestoreCmControlResponseMessage ");
/*
 * Allocate the SF Add Indication structure from the heap; these
 * structures are far too large to live on the stack.
 */
pstAddIndicationDest = kzalloc(sizeof(stLocalSFAddIndicationAlt), GFP_KERNEL);
if (!pstAddIndicationDest) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Failed to allocate memory for SF Add Indication Structure ");
return NULL;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8Type : 0x%X", pstAddIndication->u8Type);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8Direction : 0x%X", pstAddIndication->eConnectionDir);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8TID : 0x%X", ntohs(pstAddIndication->u16TID));
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u8CID : 0x%X", ntohs(pstAddIndication->u16CID));
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-u16VCID : 0x%X", ntohs(pstAddIndication->u16VCID));
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-autorized set loc : %p", pstAddIndication->psfAuthorizedSet);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-admitted set loc : %p", pstAddIndication->psfAdmittedSet);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "AddIndication-Active set loc : %p", pstAddIndication->psfActiveSet);
pstAddIndicationDest->u8Type = pstAddIndication->u8Type;
pstAddIndicationDest->u8Direction = pstAddIndication->eConnectionDir;
pstAddIndicationDest->u16TID = pstAddIndication->u16TID;
pstAddIndicationDest->u16CID = pstAddIndication->u16CID;
pstAddIndicationDest->u16VCID = pstAddIndication->u16VCID;
pstAddIndicationDest->u8CC = pstAddIndication->u8CC;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Restoring Active Set ");
ulStatus = RestoreSFParam(Adapter, (ULONG)pstAddIndication->psfActiveSet, (PUCHAR)&pstAddIndicationDest->sfActiveSet);
if (ulStatus != 1)
goto failed_restore_sf_param;
if (pstAddIndicationDest->sfActiveSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
pstAddIndicationDest->sfActiveSet.u8TotalClassifiers = MAX_CLASSIFIERS_IN_SF;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Restoring Admitted Set ");
ulStatus = RestoreSFParam(Adapter, (ULONG)pstAddIndication->psfAdmittedSet, (PUCHAR)&pstAddIndicationDest->sfAdmittedSet);
if (ulStatus != 1)
goto failed_restore_sf_param;
if (pstAddIndicationDest->sfAdmittedSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
pstAddIndicationDest->sfAdmittedSet.u8TotalClassifiers = MAX_CLASSIFIERS_IN_SF;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Restoring Authorized Set ");
ulStatus = RestoreSFParam(Adapter, (ULONG)pstAddIndication->psfAuthorizedSet, (PUCHAR)&pstAddIndicationDest->sfAuthorizedSet);
if (ulStatus != 1)
goto failed_restore_sf_param;
if (pstAddIndicationDest->sfAuthorizedSet.u8TotalClassifiers > MAX_CLASSIFIERS_IN_SF)
pstAddIndicationDest->sfAuthorizedSet.u8TotalClassifiers = MAX_CLASSIFIERS_IN_SF;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Dumping the whole raw packet");
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "============================================================");
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " pstAddIndicationDest->sfActiveSet size %zx %p", sizeof(*pstAddIndicationDest), pstAddIndicationDest);
/* BCM_DEBUG_PRINT_BUFFER(Adapter,DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, (unsigned char *)pstAddIndicationDest, sizeof(*pstAddIndicationDest)); */
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "============================================================");
return pstAddIndicationDest;
failed_restore_sf_param:
kfree(pstAddIndicationDest);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "<=====");
return NULL;
}
ULONG SetUpTargetDsxBuffers(PMINI_ADAPTER Adapter)
{
ULONG ulTargetDsxBuffersBase = 0;
ULONG ulCntTargetBuffers;
ULONG i;
int Status;
if (!Adapter) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Adapter was NULL!!!");
return 0;
}
if (Adapter->astTargetDsxBuffer[0].ulTargetDsxBuffer)
return 1;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Size of Each DSX Buffer(Also size of ServiceFlowParamSI): %zx ", sizeof(stServiceFlowParamSI));
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Reading DSX buffer From Target location %x ", DSX_MESSAGE_EXCHANGE_BUFFER);
Status = rdmalt(Adapter, DSX_MESSAGE_EXCHANGE_BUFFER, (PUINT)&ulTargetDsxBuffersBase, sizeof(UINT));
if (Status < 0) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "RDM failed!!");
return 0;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Base Address Of DSX Target Buffer : 0x%lx", ulTargetDsxBuffersBase);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Tgt Buffer is Now %lx :", ulTargetDsxBuffersBase);
ulCntTargetBuffers = DSX_MESSAGE_EXCHANGE_BUFFER_SIZE / sizeof(stServiceFlowParamSI);
Adapter->ulTotalTargetBuffersAvailable =
ulCntTargetBuffers > MAX_TARGET_DSX_BUFFERS ?
MAX_TARGET_DSX_BUFFERS : ulCntTargetBuffers;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " Total Target DSX Buffer setup %lx ", Adapter->ulTotalTargetBuffersAvailable);
for (i = 0; i < Adapter->ulTotalTargetBuffersAvailable; i++) {
Adapter->astTargetDsxBuffer[i].ulTargetDsxBuffer = ulTargetDsxBuffersBase;
Adapter->astTargetDsxBuffer[i].valid = 1;
Adapter->astTargetDsxBuffer[i].tid = 0;
ulTargetDsxBuffersBase += sizeof(stServiceFlowParamSI);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " Target DSX Buffer %lx setup at 0x%lx",
i, Adapter->astTargetDsxBuffer[i].ulTargetDsxBuffer);
}
Adapter->ulCurrentTargetBuffer = 0;
Adapter->ulFreeTargetBufferCnt = Adapter->ulTotalTargetBuffersAvailable;
return 1;
}
static ULONG GetNextTargetBufferLocation(PMINI_ADAPTER Adapter, B_UINT16 tid)
{
ULONG ulTargetDSXBufferAddress;
ULONG ulTargetDsxBufferIndexToUse, ulMaxTry;
if ((Adapter->ulTotalTargetBuffersAvailable == 0) || (Adapter->ulFreeTargetBufferCnt == 0)) {
ClearTargetDSXBuffer(Adapter, tid, FALSE);
return 0;
}
ulTargetDsxBufferIndexToUse = Adapter->ulCurrentTargetBuffer;
ulMaxTry = Adapter->ulTotalTargetBuffersAvailable;
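/* Round-robin scan for the next free (valid) target buffer, starting at the cursor; ulMaxTry bounds the search to one full pass over the table. */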
while ((ulMaxTry) && (Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].valid != 1)) {
ulTargetDsxBufferIndexToUse = (ulTargetDsxBufferIndexToUse+1) % Adapter->ulTotalTargetBuffersAvailable;
ulMaxTry--;
}
if (ulMaxTry == 0) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "\n GetNextTargetBufferLocation : Error No Free Target DSX Buffers FreeCnt : %lx ", Adapter->ulFreeTargetBufferCnt);
ClearTargetDSXBuffer(Adapter, tid, FALSE);
return 0;
}
ulTargetDSXBufferAddress = Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].ulTargetDsxBuffer;
Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].valid = 0;
Adapter->astTargetDsxBuffer[ulTargetDsxBufferIndexToUse].tid = tid;
Adapter->ulFreeTargetBufferCnt--;
ulTargetDsxBufferIndexToUse = (ulTargetDsxBufferIndexToUse+1)%Adapter->ulTotalTargetBuffersAvailable;
Adapter->ulCurrentTargetBuffer = ulTargetDsxBufferIndexToUse;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "GetNextTargetBufferLocation :Returning address %lx tid %d\n", ulTargetDSXBufferAddress, tid);
return ulTargetDSXBufferAddress;
}
int AllocAdapterDsxBuffer(PMINI_ADAPTER Adapter)
{
/*
 * Allocate the DSX request/response buffer from the heap; these
 * structures are far too large to live on the stack.
 */
Adapter->caDsxReqResp = kmalloc(sizeof(stLocalSFAddIndicationAlt)+LEADER_SIZE, GFP_KERNEL);
if (!Adapter->caDsxReqResp)
return -ENOMEM;
return 0;
}
int FreeAdapterDsxBuffer(PMINI_ADAPTER Adapter)
{
kfree(Adapter->caDsxReqResp);
return 0;
}
/*
 * @ingroup ctrl_pkt_functions
 * This routine processes the Control responses
 * for the Connection Management.
 * @return - TRUE if the response was handled, FALSE otherwise.
 */
BOOLEAN CmControlResponseMessage(PMINI_ADAPTER Adapter, /* <Pointer to the Adapter structure */
PVOID pvBuffer /* Starting Address of the Buffer, that contains the AddIndication Data */)
{
stServiceFlowParamSI *psfLocalSet = NULL;
stLocalSFAddIndicationAlt *pstAddIndication = NULL;
stLocalSFChangeIndicationAlt *pstChangeIndication = NULL;
PLEADER pLeader = NULL;
/*
 * The message contains target addresses from which we need to read
 * out the rest of the service flow param structures.
 */
pstAddIndication = RestoreCmControlResponseMessage(Adapter, pvBuffer);
if (pstAddIndication == NULL) {
ClearTargetDSXBuffer(Adapter, ((stLocalSFAddIndication *)pvBuffer)->u16TID, FALSE);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Error in restoring Service Flow param structure from DSx message");
return FALSE;
}
DumpCmControlPacket(pstAddIndication);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "====>");
pLeader = (PLEADER)Adapter->caDsxReqResp;
pLeader->Status = CM_CONTROL_NEWDSX_MULTICLASSIFIER_REQ;
pLeader->Vcid = 0;
ClearTargetDSXBuffer(Adapter, pstAddIndication->u16TID, FALSE);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "### TID RECEIVED %d\n", pstAddIndication->u16TID);
switch (pstAddIndication->u8Type) {
case DSA_REQ:
{
pLeader->PLength = sizeof(stLocalSFAddIndicationAlt);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Sending DSA Response....\n");
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA RESPONSE TO MAC %d", pLeader->PLength);
*((stLocalSFAddIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))
= *pstAddIndication;
((stLocalSFAddIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSA_RSP;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, " VCID = %x", ntohs(pstAddIndication->u16VCID));
CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp);
kfree(pstAddIndication);
}
break;
case DSA_RSP:
{
pLeader->PLength = sizeof(stLocalSFAddIndicationAlt);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSA ACK TO MAC %d",
pLeader->PLength);
*((stLocalSFAddIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))
= *pstAddIndication;
((stLocalSFAddIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSA_ACK;
} /* no break here; fall through to DSA_ACK */
case DSA_ACK:
{
UINT uiSearchRuleIndex = 0;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "VCID:0x%X",
ntohs(pstAddIndication->u16VCID));
uiSearchRuleIndex = SearchFreeSfid(Adapter);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "uiSearchRuleIndex:0x%X ",
uiSearchRuleIndex);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Direction:0x%X ",
pstAddIndication->u8Direction);
if ((uiSearchRuleIndex < NO_OF_QUEUES)) {
Adapter->PackInfo[uiSearchRuleIndex].ucDirection =
pstAddIndication->u8Direction;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "bValid:0x%X ",
pstAddIndication->sfActiveSet.bValid);
if (pstAddIndication->sfActiveSet.bValid == TRUE)
Adapter->PackInfo[uiSearchRuleIndex].bActiveSet = TRUE;
if (pstAddIndication->sfAuthorizedSet.bValid == TRUE)
Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet = TRUE;
if (pstAddIndication->sfAdmittedSet.bValid == TRUE)
Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet = TRUE;
if (pstAddIndication->sfActiveSet.bValid == FALSE) {
Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = FALSE;
if (pstAddIndication->sfAdmittedSet.bValid)
psfLocalSet = &pstAddIndication->sfAdmittedSet;
else if (pstAddIndication->sfAuthorizedSet.bValid)
psfLocalSet = &pstAddIndication->sfAuthorizedSet;
} else {
psfLocalSet = &pstAddIndication->sfActiveSet;
Adapter->PackInfo[uiSearchRuleIndex].bActive = TRUE;
}
if (!psfLocalSet) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No set is valid\n");
Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
Adapter->PackInfo[uiSearchRuleIndex].bValid = FALSE;
Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 0;
kfree(pstAddIndication);
} else if (psfLocalSet->bValid && (pstAddIndication->u8CC == 0)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSA ACK");
Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = ntohs(pstAddIndication->u16VCID);
Adapter->PackInfo[uiSearchRuleIndex].usCID = ntohs(pstAddIndication->u16CID);
if (UPLINK_DIR == pstAddIndication->u8Direction)
atomic_set(&Adapter->PackInfo[uiSearchRuleIndex].uiPerSFTxResourceCount, DEFAULT_PERSFCOUNT);
CopyToAdapter(Adapter, psfLocalSet, uiSearchRuleIndex, DSA_ACK, pstAddIndication);
/* don't free pstAddIndication */
/* Inside CopyToAdapter, Sorting of all the SFs take place.
* Hence any access to the newly added SF through uiSearchRuleIndex is invalid.
* SHOULD BE STRICTLY AVOIDED.
*/
/* *(PULONG)(((PUCHAR)pvBuffer)+1)=psfLocalSet->u32SFID; */
memcpy((((PUCHAR)pvBuffer)+1), &psfLocalSet->u32SFID, 4);
if (pstAddIndication->sfActiveSet.bValid == TRUE) {
if (UPLINK_DIR == pstAddIndication->u8Direction) {
if (!Adapter->LinkUpStatus) {
netif_carrier_on(Adapter->dev);
netif_start_queue(Adapter->dev);
Adapter->LinkUpStatus = 1;
if (netif_msg_link(Adapter))
pr_info(PFX "%s: link up\n", Adapter->dev->name);
atomic_set(&Adapter->TxPktAvail, 1);
wake_up(&Adapter->tx_packet_wait_queue);
Adapter->liTimeSinceLastNetEntry = get_seconds();
}
}
}
} else {
Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
Adapter->PackInfo[uiSearchRuleIndex].bValid = FALSE;
Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 0;
kfree(pstAddIndication);
}
} else {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "DSA ACK did not get valid SFID");
kfree(pstAddIndication);
return FALSE;
}
}
break;
case DSC_REQ:
{
pLeader->PLength = sizeof(stLocalSFChangeIndicationAlt);
pstChangeIndication = (stLocalSFChangeIndicationAlt *)pstAddIndication;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC RESPONSE TO MAC %d", pLeader->PLength);
*((stLocalSFChangeIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *pstChangeIndication;
((stLocalSFChangeIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_RSP;
CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp);
kfree(pstAddIndication);
}
break;
case DSC_RSP:
{
pLeader->PLength = sizeof(stLocalSFChangeIndicationAlt);
pstChangeIndication = (stLocalSFChangeIndicationAlt *)pstAddIndication;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSC ACK TO MAC %d", pLeader->PLength);
*((stLocalSFChangeIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *pstChangeIndication;
((stLocalSFChangeIndicationAlt *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSC_ACK;
}
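/* no break here; fall through to DSC_ACK */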
case DSC_ACK:
{
UINT uiSearchRuleIndex = 0;
pstChangeIndication = (stLocalSFChangeIndicationAlt *)pstAddIndication;
uiSearchRuleIndex = SearchSfid(Adapter, ntohl(pstChangeIndication->sfActiveSet.u32SFID));
if (uiSearchRuleIndex > NO_OF_QUEUES-1)
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "SF doesn't exist for which DSC_ACK is received");
if ((uiSearchRuleIndex < NO_OF_QUEUES)) {
Adapter->PackInfo[uiSearchRuleIndex].ucDirection = pstChangeIndication->u8Direction;
if (pstChangeIndication->sfActiveSet.bValid == TRUE)
Adapter->PackInfo[uiSearchRuleIndex].bActiveSet = TRUE;
if (pstChangeIndication->sfAuthorizedSet.bValid == TRUE)
Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet = TRUE;
if (pstChangeIndication->sfAdmittedSet.bValid == TRUE)
Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet = TRUE;
if (pstChangeIndication->sfActiveSet.bValid == FALSE) {
Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
Adapter->PackInfo[uiSearchRuleIndex].bActivateRequestSent = FALSE;
if (pstChangeIndication->sfAdmittedSet.bValid)
psfLocalSet = &pstChangeIndication->sfAdmittedSet;
else if (pstChangeIndication->sfAuthorizedSet.bValid)
psfLocalSet = &pstChangeIndication->sfAuthorizedSet;
} else {
psfLocalSet = &pstChangeIndication->sfActiveSet;
Adapter->PackInfo[uiSearchRuleIndex].bActive = TRUE;
}
if (!psfLocalSet) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No set is valid\n");
Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
Adapter->PackInfo[uiSearchRuleIndex].bValid = FALSE;
Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = 0;
kfree(pstAddIndication);
} else if (psfLocalSet->bValid && (pstChangeIndication->u8CC == 0)) {
Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = ntohs(pstChangeIndication->u16VCID);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "CC field is %d bvalid = %d\n",
pstChangeIndication->u8CC, psfLocalSet->bValid);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "VCID= %d\n", ntohs(pstChangeIndication->u16VCID));
Adapter->PackInfo[uiSearchRuleIndex].usCID = ntohs(pstChangeIndication->u16CID);
CopyToAdapter(Adapter, psfLocalSet, uiSearchRuleIndex, DSC_ACK, pstAddIndication);
*(PULONG)(((PUCHAR)pvBuffer)+1) = psfLocalSet->u32SFID;
} else if (pstChangeIndication->u8CC == 6) {
deleteSFBySfid(Adapter, uiSearchRuleIndex);
kfree(pstAddIndication);
}
} else {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "DSC ACK did not get valid SFID");
kfree(pstAddIndication);
return FALSE;
}
}
break;
case DSD_REQ:
{
UINT uiSearchRuleIndex;
ULONG ulSFID;
pLeader->PLength = sizeof(stLocalSFDeleteIndication);
*((stLocalSFDeleteIndication *)&(Adapter->caDsxReqResp[LEADER_SIZE])) = *((stLocalSFDeleteIndication *)pstAddIndication);
ulSFID = ntohl(((stLocalSFDeleteIndication *)pstAddIndication)->u32SFID);
uiSearchRuleIndex = SearchSfid(Adapter, ulSFID);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD - Removing connection %x", uiSearchRuleIndex);
if (uiSearchRuleIndex < NO_OF_QUEUES) {
/* Delete All Classifiers Associated with this SFID */
deleteSFBySfid(Adapter, uiSearchRuleIndex);
Adapter->u32TotalDSD++;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SENDING DSD RESPONSE TO MAC");
((stLocalSFDeleteIndication *)&(Adapter->caDsxReqResp[LEADER_SIZE]))->u8Type = DSD_RSP;
CopyBufferToControlPacket(Adapter, (PVOID)Adapter->caDsxReqResp);
}
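/* no break here; fall through to DSD_RSP (which does nothing further) */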
case DSD_RSP:
{
/* Do nothing as SF has already got Deleted */
}
break;
case DSD_ACK:
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "DSD ACK Rcd, let App handle it\n");
break;
default:
kfree(pstAddIndication);
return FALSE;
}
return TRUE;
}
int get_dsx_sf_data_to_application(PMINI_ADAPTER Adapter, UINT uiSFId, void __user *user_buffer)
{
int status = 0;
struct _packet_info *psSfInfo = NULL;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "status =%d", status);
status = SearchSfid(Adapter, uiSFId);
if (status >= NO_OF_QUEUES) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SFID %d not present in queue !!!", uiSFId);
return -EINVAL;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "status =%d", status);
psSfInfo = &Adapter->PackInfo[status];
if (psSfInfo->pstSFIndication && copy_to_user(user_buffer,
psSfInfo->pstSFIndication, sizeof(stLocalSFAddIndicationAlt))) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "copy to user failed SFID %d, present in queue !!!", uiSFId);
status = -EFAULT;
return status;
}
return STATUS_SUCCESS;
}
VOID OverrideServiceFlowParams(PMINI_ADAPTER Adapter, PUINT puiBuffer)
{
B_UINT32 u32NumofSFsinMsg = ntohl(*(puiBuffer + 1));
stIM_SFHostNotify *pHostInfo = NULL;
UINT uiSearchRuleIndex = 0;
ULONG ulSFID = 0;
puiBuffer += 2;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "u32NumofSFsinMsg: 0x%x\n", u32NumofSFsinMsg);
while (u32NumofSFsinMsg != 0 && u32NumofSFsinMsg < NO_OF_QUEUES) {
u32NumofSFsinMsg--;
pHostInfo = (stIM_SFHostNotify *)puiBuffer;
puiBuffer = (PUINT)(pHostInfo + 1);
ulSFID = ntohl(pHostInfo->SFID);
uiSearchRuleIndex = SearchSfid(Adapter, ulSFID);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "SFID: 0x%lx\n", ulSFID);
if (uiSearchRuleIndex >= NO_OF_QUEUES || uiSearchRuleIndex == HiPriority) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "The SFID <%lx> doesn't exist in host entry or is Invalid\n", ulSFID);
continue;
}
if (pHostInfo->RetainSF == FALSE) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "Going to Delete SF");
deleteSFBySfid(Adapter, uiSearchRuleIndex);
} else {
Adapter->PackInfo[uiSearchRuleIndex].usVCID_Value = ntohs(pHostInfo->VCID);
Adapter->PackInfo[uiSearchRuleIndex].usCID = ntohs(pHostInfo->newCID);
Adapter->PackInfo[uiSearchRuleIndex].bActive = FALSE;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CONN_MSG, DBG_LVL_ALL, "pHostInfo->QoSParamSet: 0x%x\n", pHostInfo->QoSParamSet);
if (pHostInfo->QoSParamSet & 0x1)
Adapter->PackInfo[uiSearchRuleIndex].bAuthorizedSet = TRUE;
if (pHostInfo->QoSParamSet & 0x2)
Adapter->PackInfo[uiSearchRuleIndex].bAdmittedSet = TRUE;
if (pHostInfo->QoSParamSet & 0x4) {
Adapter->PackInfo[uiSearchRuleIndex].bActiveSet = TRUE;
Adapter->PackInfo[uiSearchRuleIndex].bActive = TRUE;
}
}
}
}
|
gpl-2.0
|
syhost/A810S_KERNEL_3.4
|
drivers/infiniband/hw/qib/qib_pcie.c
|
4859
|
20989
|
/*
* Copyright (c) 2008, 2009 QLogic Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <linux/aer.h>
#include <linux/module.h>
#include "qib.h"
/*
* This file contains PCIe utility routines that are common to the
* various QLogic InfiniPath adapters
*/
/*
* Code to adjust PCIe capabilities.
* To minimize the change footprint, we call it
* from qib_pcie_params, which every chip-specific
* file calls, even though this violates some
* expectations of harmlessness.
*/
static int qib_tune_pcie_caps(struct qib_devdata *);
static int qib_tune_pcie_coalesce(struct qib_devdata *);
/*
* Do all the common PCIe setup and initialization.
* devdata is not yet allocated, and is not allocated until after this
* routine returns success. Therefore qib_dev_err() can't be used for error
* printing.
*/
int qib_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int ret;
ret = pci_enable_device(pdev);
if (ret) {
/*
* This can happen (in theory) iff:
* We did a chip reset, and then failed to reprogram the
* BAR, or the chip reset due to an internal error. We then
* unloaded the driver and reloaded it.
*
* Both reset cases set the BAR back to initial state. For
* the latter case, the AER sticky error bit at offset 0x718
* should be set, but the Linux kernel doesn't yet know
* about that, it appears. If the original BAR was retained
* in the kernel data structures, this may be OK.
*/
qib_early_err(&pdev->dev, "pci enable failed: error %d\n",
-ret);
goto done;
}
ret = pci_request_regions(pdev, QIB_DRV_NAME);
if (ret) {
qib_devinfo(pdev, "pci_request_regions fails: err %d\n", -ret);
goto bail;
}
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (ret) {
/*
* If the 64 bit setup fails, try 32 bit. Some systems
* do not setup 64 bit maps on systems with 2GB or less
* memory installed.
*/
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret) {
qib_devinfo(pdev, "Unable to set DMA mask: %d\n", ret);
goto bail;
}
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
} else
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (ret) {
qib_early_err(&pdev->dev,
"Unable to set DMA consistent mask: %d\n", ret);
goto bail;
}
pci_set_master(pdev);
ret = pci_enable_pcie_error_reporting(pdev);
if (ret) {
qib_early_err(&pdev->dev,
"Unable to enable pcie error reporting: %d\n",
ret);
ret = 0;
}
goto done;
bail:
pci_disable_device(pdev);
pci_release_regions(pdev);
done:
return ret;
}
/*
* Do remaining PCIe setup, once dd is allocated, and save away
* fields required to re-initialize after a chip reset, or for
* various other purposes
*/
int qib_pcie_ddinit(struct qib_devdata *dd, struct pci_dev *pdev,
const struct pci_device_id *ent)
{
unsigned long len;
resource_size_t addr;
dd->pcidev = pdev;
pci_set_drvdata(pdev, dd);
addr = pci_resource_start(pdev, 0);
len = pci_resource_len(pdev, 0);
#if defined(__powerpc__)
/* There isn't a generic way to specify writethrough mappings */
dd->kregbase = __ioremap(addr, len, _PAGE_NO_CACHE | _PAGE_WRITETHRU);
#else
dd->kregbase = ioremap_nocache(addr, len);
#endif
if (!dd->kregbase)
return -ENOMEM;
dd->kregend = (u64 __iomem *)((void __iomem *) dd->kregbase + len);
dd->physaddr = addr; /* used for io_remap, etc. */
/*
* Save BARs to rewrite after device reset. Save all 64 bits of
* BAR, just in case.
*/
dd->pcibar0 = addr;
dd->pcibar1 = addr >> 32;
dd->deviceid = ent->device; /* save for later use */
dd->vendorid = ent->vendor;
return 0;
}
/*
 * Do PCIe cleanup, after chip-specific cleanup, etc. Just prior
 * to releasing the dd memory.
 * void because none of the core PCIe cleanup calls return errors
 * that we could act on.
 */
void qib_pcie_ddcleanup(struct qib_devdata *dd)
{
u64 __iomem *base = (void __iomem *) dd->kregbase;
dd->kregbase = NULL;
iounmap(base);
if (dd->piobase)
iounmap(dd->piobase);
if (dd->userbase)
iounmap(dd->userbase);
if (dd->piovl15base)
iounmap(dd->piovl15base);
pci_disable_device(dd->pcidev);
pci_release_regions(dd->pcidev);
pci_set_drvdata(dd->pcidev, NULL);
}
static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
struct qib_msix_entry *qib_msix_entry)
{
int ret;
u32 tabsize = 0;
u16 msix_flags;
struct msix_entry *msix_entry;
int i;
/* We can't pass the qib_msix_entry array to pci_enable_msix()
 * directly, so use a temporary msix_entry array and copy the
 * allocated vectors back into the qib_msix_entry array. */
msix_entry = kmalloc(*msixcnt * sizeof(*msix_entry), GFP_KERNEL);
if (!msix_entry) {
ret = -ENOMEM;
goto do_intx;
}
for (i = 0; i < *msixcnt; i++)
msix_entry[i] = qib_msix_entry[i].msix;
pci_read_config_word(dd->pcidev, pos + PCI_MSIX_FLAGS, &msix_flags);
tabsize = 1 + (msix_flags & PCI_MSIX_FLAGS_QSIZE);
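/* the MSI-X Table Size field is encoded as (N - 1), hence the +1 above */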
if (tabsize > *msixcnt)
tabsize = *msixcnt;
ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
if (ret > 0) {
tabsize = ret;
ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
}
do_intx:
if (ret) {
qib_dev_err(dd, "pci_enable_msix %d vectors failed: %d, "
"falling back to INTx\n", tabsize, ret);
tabsize = 0;
}
for (i = 0; i < tabsize; i++)
qib_msix_entry[i].msix = msix_entry[i];
kfree(msix_entry);
*msixcnt = tabsize;
if (ret)
qib_enable_intx(dd->pcidev);
}
/*
 * We save the MSI lo and hi values, so we can restore them after
 * chip reset (the kernel PCI infrastructure doesn't yet handle that
 * correctly).
 */
static int qib_msi_setup(struct qib_devdata *dd, int pos)
{
struct pci_dev *pdev = dd->pcidev;
u16 control;
int ret;
ret = pci_enable_msi(pdev);
if (ret)
qib_dev_err(dd, "pci_enable_msi failed: %d, "
"interrupts may not work\n", ret);
/* continue even if it fails, we may still be OK... */
pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_LO,
&dd->msi_lo);
pci_read_config_dword(pdev, pos + PCI_MSI_ADDRESS_HI,
&dd->msi_hi);
pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
/* now save the data (vector) info */
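/* (the data register follows a 64-bit address field at offset 12, or a 32-bit address field at offset 8) */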
pci_read_config_word(pdev, pos + ((control & PCI_MSI_FLAGS_64BIT)
? 12 : 8),
&dd->msi_data);
return ret;
}
int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
struct qib_msix_entry *entry)
{
u16 linkstat, speed;
int pos = 0, pose, ret = 1;
pose = pci_pcie_cap(dd->pcidev);
if (!pose) {
qib_dev_err(dd, "Can't find PCI Express capability!\n");
/* set up something... */
dd->lbus_width = 1;
dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
goto bail;
}
pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSIX);
if (nent && *nent && pos) {
qib_msix_setup(dd, pos, nent, entry);
ret = 0; /* did it, either MSIx or INTx */
} else {
pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
if (pos)
ret = qib_msi_setup(dd, pos);
else
qib_dev_err(dd, "No PCI MSI or MSIx capability!\n");
}
if (!pos)
qib_enable_intx(dd->pcidev);
pci_read_config_word(dd->pcidev, pose + PCI_EXP_LNKSTA, &linkstat);
/*
* speed is bits 0-3, linkwidth is bits 4-8
* no defines for them in headers
*/
speed = linkstat & 0xf;
linkstat >>= 4;
linkstat &= 0x1f;
dd->lbus_width = linkstat;
switch (speed) {
case 1:
dd->lbus_speed = 2500; /* Gen1, 2.5GHz */
break;
case 2:
dd->lbus_speed = 5000; /* Gen2, 5GHz */
break;
default: /* not defined, assume gen1 */
dd->lbus_speed = 2500;
break;
}
/*
* Check against expected pcie width and complain if "wrong"
* on first initialization, not afterwards (i.e., reset).
*/
if (minw && linkstat < minw)
qib_dev_err(dd,
"PCIe width %u (x%u HCA), performance reduced\n",
linkstat, minw);
qib_tune_pcie_caps(dd);
qib_tune_pcie_coalesce(dd);
bail:
/* fill in string, even on errors */
snprintf(dd->lbus_info, sizeof(dd->lbus_info),
"PCIe,%uMHz,x%u\n", dd->lbus_speed, dd->lbus_width);
return ret;
}
/*
* Setup pcie interrupt stuff again after a reset. I'd like to just call
* pci_enable_msi() again for msi, but when I do that,
* the MSI enable bit doesn't get set in the command word, and
* we switch to a different interrupt vector, which is confusing,
* so I instead just do it all inline. Perhaps somehow can tie this
* into the PCIe hotplug support at some point
*/
int qib_reinit_intr(struct qib_devdata *dd)
{
int pos;
u16 control;
int ret = 0;
/* If we aren't using MSI, don't restore it */
if (!dd->msi_lo)
goto bail;
pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
if (!pos) {
qib_dev_err(dd, "Can't find MSI capability, "
"can't restore MSI settings\n");
ret = 0;
/* nothing special for MSIx, just MSI */
goto bail;
}
pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_LO,
dd->msi_lo);
pci_write_config_dword(dd->pcidev, pos + PCI_MSI_ADDRESS_HI,
dd->msi_hi);
pci_read_config_word(dd->pcidev, pos + PCI_MSI_FLAGS, &control);
if (!(control & PCI_MSI_FLAGS_ENABLE)) {
control |= PCI_MSI_FLAGS_ENABLE;
pci_write_config_word(dd->pcidev, pos + PCI_MSI_FLAGS,
control);
}
/* now rewrite the data (vector) info */
pci_write_config_word(dd->pcidev, pos +
((control & PCI_MSI_FLAGS_64BIT) ? 12 : 8),
dd->msi_data);
ret = 1;
bail:
if (!ret && (dd->flags & QIB_HAS_INTX)) {
qib_enable_intx(dd->pcidev);
ret = 1;
}
/* and now set the pci master bit again */
pci_set_master(dd->pcidev);
return ret;
}
/*
* Disable msi interrupt if enabled, and clear msi_lo.
* This is used primarily for the fallback to INTx, but
* is also used in reinit after reset, and during cleanup.
*/
void qib_nomsi(struct qib_devdata *dd)
{
dd->msi_lo = 0;
pci_disable_msi(dd->pcidev);
}
/*
 * Same as qib_nomsi, but for MSIx.
 */
void qib_nomsix(struct qib_devdata *dd)
{
pci_disable_msix(dd->pcidev);
}
/*
* Similar to pci_intx(pdev, 1), except that we make sure
* msi(x) is off.
*/
void qib_enable_intx(struct pci_dev *pdev)
{
u16 cw, new;
int pos;
/* first, turn on INTx */
pci_read_config_word(pdev, PCI_COMMAND, &cw);
new = cw & ~PCI_COMMAND_INTX_DISABLE;
if (new != cw)
pci_write_config_word(pdev, PCI_COMMAND, new);
pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
if (pos) {
/* then turn off MSI */
pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
new = cw & ~PCI_MSI_FLAGS_ENABLE;
if (new != cw)
pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new);
}
pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
if (pos) {
/* then turn off MSIx */
pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &cw);
new = cw & ~PCI_MSIX_FLAGS_ENABLE;
if (new != cw)
pci_write_config_word(pdev, pos + PCI_MSIX_FLAGS, new);
}
}
/*
* These two routines are helper routines for the device reset code
* to move all the pcie code out of the chip-specific driver code.
*/
void qib_pcie_getcmd(struct qib_devdata *dd, u16 *cmd, u8 *iline, u8 *cline)
{
pci_read_config_word(dd->pcidev, PCI_COMMAND, cmd);
pci_read_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
pci_read_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
}
void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
{
int r;
r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
dd->pcibar0);
if (r)
qib_dev_err(dd, "rewrite of BAR0 failed: %d\n", r);
r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1,
dd->pcibar1);
if (r)
qib_dev_err(dd, "rewrite of BAR1 failed: %d\n", r);
/* now re-enable memory access, and restore cosmetic settings */
pci_write_config_word(dd->pcidev, PCI_COMMAND, cmd);
pci_write_config_byte(dd->pcidev, PCI_INTERRUPT_LINE, iline);
pci_write_config_byte(dd->pcidev, PCI_CACHE_LINE_SIZE, cline);
r = pci_enable_device(dd->pcidev);
if (r)
qib_dev_err(dd, "pci_enable_device failed after "
"reset: %d\n", r);
}
/* code to adjust PCIe capabilities. */
/* extract the field selected by @mask from @wd, shifted down to bit 0 */
static int fld2val(int wd, int mask)
{
int lsbmask;
if (!mask)
return 0;
wd &= mask;
lsbmask = mask ^ (mask & (mask - 1));
wd /= lsbmask;
return wd;
}
/* place @wd into the field position selected by @mask */
static int val2fld(int wd, int mask)
{
int lsbmask;
if (!mask)
return 0;
lsbmask = mask ^ (mask & (mask - 1));
wd *= lsbmask;
return wd;
}
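/*
 * Illustration (not in the original source): with mask = PCI_EXP_DEVCTL_PAYLOAD
 * (0x00e0, bits 5-7), fld2val(0x40, 0xe0) returns 2 and val2fld(2, 0xe0)
 * returns 0x40, so the pair round-trips a field value through its register.
 */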
static int qib_pcie_coalesce;
module_param_named(pcie_coalesce, qib_pcie_coalesce, int, S_IRUGO);
MODULE_PARM_DESC(pcie_coalesce, "tune PCIe coalescing on some Intel chipsets");
/*
* Enable PCIe completion and data coalescing, on Intel 5x00 and 7300
* chipsets. This is known to be unsafe for some revisions of some
* of these chipsets, with some BIOS settings, and enabling it on those
* systems may result in the system crashing, and/or data corruption.
*/
static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
{
struct pci_dev *parent;
int ppos;
u16 devid;
u32 mask, bits, val;
if (!qib_pcie_coalesce)
return 0;
/* Find out supported and configured values for parent (root) */
parent = dd->pcidev->bus->self;
if (parent->bus->parent) {
qib_devinfo(dd->pcidev, "Parent not root\n");
return 1;
}
ppos = pci_pcie_cap(parent);
if (!ppos)
return 1;
if (parent->vendor != 0x8086)
return 1;
/*
* - bit 12: Max_rdcmp_Imt_EN: need to set to 1
* - bit 11: COALESCE_FORCE: need to set to 0
* - bit 10: COALESCE_EN: need to set to 1
	 *  (but with limitations on some chipsets)
	 *
	 * On the Intel 5000, 5100, and 7300 chipsets, there is also:
	 * - bit 25:24: COALESCE_MODE, need to set to 0
*/
devid = parent->device;
if (devid >= 0x25e2 && devid <= 0x25fa) {
/* 5000 P/V/X/Z */
if (parent->revision <= 0xb2)
bits = 1U << 10;
else
bits = 7U << 10;
mask = (3U << 24) | (7U << 10);
} else if (devid >= 0x65e2 && devid <= 0x65fa) {
/* 5100 */
bits = 1U << 10;
mask = (3U << 24) | (7U << 10);
} else if (devid >= 0x4021 && devid <= 0x402e) {
/* 5400 */
bits = 7U << 10;
mask = 7U << 10;
} else if (devid >= 0x3604 && devid <= 0x360a) {
/* 7300 */
bits = 7U << 10;
mask = (3U << 24) | (7U << 10);
} else {
/* not one of the chipsets that we know about */
return 1;
}
pci_read_config_dword(parent, 0x48, &val);
val &= ~mask;
val |= bits;
	pci_write_config_dword(parent, 0x48, val);
return 0;
}
/*
* BIOS may not set PCIe bus-utilization parameters for best performance.
* Check and optionally adjust them to maximize our throughput.
*/
static int qib_pcie_caps;
module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO);
MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)");
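/*
 * Illustrative encoding (example values, not from the original source):
 * the codes are log2(bytes) - 7, so pcie_caps=0x51 requests a 256-byte
 * Max Payload (code 1, bits 0-2) and a 4096-byte Max Read Request
 * (code 5, bits 4-6).
 */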
static int qib_tune_pcie_caps(struct qib_devdata *dd)
{
int ret = 1; /* Assume the worst */
struct pci_dev *parent;
int ppos, epos;
u16 pcaps, pctl, ecaps, ectl;
int rc_sup, ep_sup;
int rc_cur, ep_cur;
/* Find out supported and configured values for parent (root) */
parent = dd->pcidev->bus->self;
if (parent->bus->parent) {
qib_devinfo(dd->pcidev, "Parent not root\n");
goto bail;
}
ppos = pci_pcie_cap(parent);
if (ppos) {
pci_read_config_word(parent, ppos + PCI_EXP_DEVCAP, &pcaps);
pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl);
} else
goto bail;
/* Find out supported and configured values for endpoint (us) */
epos = pci_pcie_cap(dd->pcidev);
if (epos) {
pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCAP, &ecaps);
pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, &ectl);
} else
goto bail;
ret = 0;
/* Find max payload supported by root, endpoint */
rc_sup = fld2val(pcaps, PCI_EXP_DEVCAP_PAYLOAD);
ep_sup = fld2val(ecaps, PCI_EXP_DEVCAP_PAYLOAD);
if (rc_sup > ep_sup)
rc_sup = ep_sup;
rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_PAYLOAD);
ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_PAYLOAD);
	/* If the supported value exceeds the module-param limit, clamp it */
if (rc_sup > (qib_pcie_caps & 7))
rc_sup = qib_pcie_caps & 7;
	/* If current is below min(allowed, supported), bump the root payload */
if (rc_sup > rc_cur) {
rc_cur = rc_sup;
pctl = (pctl & ~PCI_EXP_DEVCTL_PAYLOAD) |
val2fld(rc_cur, PCI_EXP_DEVCTL_PAYLOAD);
pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl);
}
	/* If current is below min(allowed, supported), bump the endpoint payload */
if (rc_sup > ep_cur) {
ep_cur = rc_sup;
ectl = (ectl & ~PCI_EXP_DEVCTL_PAYLOAD) |
val2fld(ep_cur, PCI_EXP_DEVCTL_PAYLOAD);
pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl);
}
/*
* Now the Read Request size.
* No field for max supported, but PCIe spec limits it to 4096,
* which is code '5' (log2(4096) - 7)
*/
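	/* i.e. encoded size = 128 << code, and 128 << 5 = 4096 bytes */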
rc_sup = 5;
if (rc_sup > ((qib_pcie_caps >> 4) & 7))
rc_sup = (qib_pcie_caps >> 4) & 7;
rc_cur = fld2val(pctl, PCI_EXP_DEVCTL_READRQ);
ep_cur = fld2val(ectl, PCI_EXP_DEVCTL_READRQ);
if (rc_sup > rc_cur) {
rc_cur = rc_sup;
pctl = (pctl & ~PCI_EXP_DEVCTL_READRQ) |
val2fld(rc_cur, PCI_EXP_DEVCTL_READRQ);
pci_write_config_word(parent, ppos + PCI_EXP_DEVCTL, pctl);
}
if (rc_sup > ep_cur) {
ep_cur = rc_sup;
ectl = (ectl & ~PCI_EXP_DEVCTL_READRQ) |
val2fld(ep_cur, PCI_EXP_DEVCTL_READRQ);
pci_write_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, ectl);
}
bail:
return ret;
}
/* End of PCIe capability tuning */
/*
 * Everything from here through the qib_pci_err_handler definition is
 * invoked via the PCI error recovery infrastructure, registered with
 * the PCI core.
 */
static pci_ers_result_t
qib_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
struct qib_devdata *dd = pci_get_drvdata(pdev);
pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
switch (state) {
case pci_channel_io_normal:
qib_devinfo(pdev, "State Normal, ignoring\n");
break;
case pci_channel_io_frozen:
qib_devinfo(pdev, "State Frozen, requesting reset\n");
pci_disable_device(pdev);
ret = PCI_ERS_RESULT_NEED_RESET;
break;
case pci_channel_io_perm_failure:
qib_devinfo(pdev, "State Permanent Failure, disabling\n");
if (dd) {
/* no more register accesses! */
dd->flags &= ~QIB_PRESENT;
qib_disable_after_error(dd);
}
/* else early, or other problem */
ret = PCI_ERS_RESULT_DISCONNECT;
break;
default: /* shouldn't happen */
qib_devinfo(pdev, "QIB PCI errors detected (state %d)\n",
state);
break;
}
return ret;
}
static pci_ers_result_t
qib_pci_mmio_enabled(struct pci_dev *pdev)
{
u64 words = 0U;
struct qib_devdata *dd = pci_get_drvdata(pdev);
pci_ers_result_t ret = PCI_ERS_RESULT_RECOVERED;
if (dd && dd->pport) {
words = dd->f_portcntr(dd->pport, QIBPORTCNTR_WORDRCV);
if (words == ~0ULL)
ret = PCI_ERS_RESULT_NEED_RESET;
}
qib_devinfo(pdev, "QIB mmio_enabled function called, "
"read wordscntr %Lx, returning %d\n", words, ret);
return ret;
}
static pci_ers_result_t
qib_pci_slot_reset(struct pci_dev *pdev)
{
qib_devinfo(pdev, "QIB link_reset function called, ignored\n");
return PCI_ERS_RESULT_CAN_RECOVER;
}
static pci_ers_result_t
qib_pci_link_reset(struct pci_dev *pdev)
{
qib_devinfo(pdev, "QIB link_reset function called, ignored\n");
return PCI_ERS_RESULT_CAN_RECOVER;
}
static void
qib_pci_resume(struct pci_dev *pdev)
{
struct qib_devdata *dd = pci_get_drvdata(pdev);
qib_devinfo(pdev, "QIB resume function called\n");
pci_cleanup_aer_uncorrect_error_status(pdev);
	/*
	 * Running jobs will fail, since this reset is asynchronous,
	 * unlike a sysfs-requested reset. Still better than
	 * doing nothing.
	 */
qib_init(dd, 1); /* same as re-init after reset */
}
struct pci_error_handlers qib_pci_err_handler = {
.error_detected = qib_pci_error_detected,
.mmio_enabled = qib_pci_mmio_enabled,
.link_reset = qib_pci_link_reset,
.slot_reset = qib_pci_slot_reset,
.resume = qib_pci_resume,
};
|
gpl-2.0
|
corphish/zapdos_condor
|
arch/x86/platform/uv/uv_irq.c
|
4859
|
7095
|
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* SGI UV IRQ functions
*
* Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <asm/apic.h>
#include <asm/uv/uv_irq.h>
#include <asm/uv/uv_hub.h>
/* MMR offset and pnode of hub sourcing interrupts for a given irq */
struct uv_irq_2_mmr_pnode {
struct rb_node list;
unsigned long offset;
int pnode;
int irq;
};
static DEFINE_SPINLOCK(uv_irq_lock);
static struct rb_root uv_irq_root;
static int uv_set_irq_affinity(struct irq_data *, const struct cpumask *, bool);
static void uv_noop(struct irq_data *data) { }
static void uv_ack_apic(struct irq_data *data)
{
ack_APIC_irq();
}
static struct irq_chip uv_irq_chip = {
.name = "UV-CORE",
.irq_mask = uv_noop,
.irq_unmask = uv_noop,
.irq_eoi = uv_ack_apic,
.irq_set_affinity = uv_set_irq_affinity,
};
/*
* Add offset and pnode information of the hub sourcing interrupts to the
* rb tree for a specific irq.
*/
static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade)
{
struct rb_node **link = &uv_irq_root.rb_node;
struct rb_node *parent = NULL;
struct uv_irq_2_mmr_pnode *n;
struct uv_irq_2_mmr_pnode *e;
unsigned long irqflags;
n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL,
uv_blade_to_memory_nid(blade));
if (!n)
return -ENOMEM;
n->irq = irq;
n->offset = offset;
n->pnode = uv_blade_to_pnode(blade);
spin_lock_irqsave(&uv_irq_lock, irqflags);
/* Find the right place in the rbtree: */
while (*link) {
parent = *link;
e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list);
if (unlikely(irq == e->irq)) {
/* irq entry exists */
e->pnode = uv_blade_to_pnode(blade);
e->offset = offset;
spin_unlock_irqrestore(&uv_irq_lock, irqflags);
kfree(n);
return 0;
}
if (irq < e->irq)
link = &(*link)->rb_left;
else
link = &(*link)->rb_right;
}
/* Insert the node into the rbtree. */
rb_link_node(&n->list, parent, link);
rb_insert_color(&n->list, &uv_irq_root);
spin_unlock_irqrestore(&uv_irq_lock, irqflags);
return 0;
}
/* Retrieve offset and pnode information from the rb tree for a specific irq */
int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
{
struct uv_irq_2_mmr_pnode *e;
struct rb_node *n;
unsigned long irqflags;
spin_lock_irqsave(&uv_irq_lock, irqflags);
n = uv_irq_root.rb_node;
while (n) {
e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
if (e->irq == irq) {
*offset = e->offset;
*pnode = e->pnode;
spin_unlock_irqrestore(&uv_irq_lock, irqflags);
return 0;
}
if (irq < e->irq)
n = n->rb_left;
else
n = n->rb_right;
}
spin_unlock_irqrestore(&uv_irq_lock, irqflags);
return -1;
}
/*
* Re-target the irq to the specified CPU and enable the specified MMR located
* on the specified blade to allow the sending of MSIs to the specified CPU.
*/
static int
arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
unsigned long mmr_offset, int limit)
{
const struct cpumask *eligible_cpu = cpumask_of(cpu);
struct irq_cfg *cfg = irq_get_chip_data(irq);
unsigned long mmr_value;
struct uv_IO_APIC_route_entry *entry;
int mmr_pnode, err;
BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
sizeof(unsigned long));
err = assign_irq_vector(irq, cfg, eligible_cpu);
if (err != 0)
return err;
if (limit == UV_AFFINITY_CPU)
irq_set_status_flags(irq, IRQ_NO_BALANCING);
else
irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
irq_set_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
irq_name);
mmr_value = 0;
entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
entry->vector = cfg->vector;
entry->delivery_mode = apic->irq_delivery_mode;
entry->dest_mode = apic->irq_dest_mode;
entry->polarity = 0;
entry->trigger = 0;
entry->mask = 0;
entry->dest = apic->cpu_mask_to_apicid(eligible_cpu);
mmr_pnode = uv_blade_to_pnode(mmr_blade);
uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
if (cfg->move_in_progress)
send_cleanup_vector(cfg);
return irq;
}
/*
 * Disable the specified MMR located on the specified blade so that MSIs are
 * no longer allowed to be sent.
*/
static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
{
unsigned long mmr_value;
struct uv_IO_APIC_route_entry *entry;
BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
sizeof(unsigned long));
mmr_value = 0;
entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
entry->mask = 1;
uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
}
static int
uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
struct irq_cfg *cfg = data->chip_data;
unsigned int dest;
unsigned long mmr_value, mmr_offset;
struct uv_IO_APIC_route_entry *entry;
int mmr_pnode;
if (__ioapic_set_affinity(data, mask, &dest))
return -1;
mmr_value = 0;
entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
entry->vector = cfg->vector;
entry->delivery_mode = apic->irq_delivery_mode;
entry->dest_mode = apic->irq_dest_mode;
entry->polarity = 0;
entry->trigger = 0;
entry->mask = 0;
entry->dest = dest;
/* Get previously stored MMR and pnode of hub sourcing interrupts */
if (uv_irq_2_mmr_info(data->irq, &mmr_offset, &mmr_pnode))
return -1;
uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
if (cfg->move_in_progress)
send_cleanup_vector(cfg);
return 0;
}
/*
* Set up a mapping of an available irq and vector, and enable the specified
* MMR that defines the MSI that is to be sent to the specified CPU when an
* interrupt is raised.
*/
int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
unsigned long mmr_offset, int limit)
{
int irq, ret;
irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade));
if (irq <= 0)
return -EBUSY;
ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
limit);
if (ret == irq)
uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
else
destroy_irq(irq);
return ret;
}
EXPORT_SYMBOL_GPL(uv_setup_irq);
/*
* Tear down a mapping of an irq and vector, and disable the specified MMR that
* defined the MSI that was to be sent to the specified CPU when an interrupt
* was raised.
*
* Set mmr_blade and mmr_offset to what was passed in on uv_setup_irq().
*/
void uv_teardown_irq(unsigned int irq)
{
struct uv_irq_2_mmr_pnode *e;
struct rb_node *n;
unsigned long irqflags;
spin_lock_irqsave(&uv_irq_lock, irqflags);
n = uv_irq_root.rb_node;
while (n) {
e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
if (e->irq == irq) {
arch_disable_uv_irq(e->pnode, e->offset);
rb_erase(n, &uv_irq_root);
kfree(e);
break;
}
if (irq < e->irq)
n = n->rb_left;
else
n = n->rb_right;
}
spin_unlock_irqrestore(&uv_irq_lock, irqflags);
destroy_irq(irq);
}
EXPORT_SYMBOL_GPL(uv_teardown_irq);
|
gpl-2.0
|
Eagerestwolf/android_kernel_huawei_y301a1
|
drivers/staging/sbe-2t3e3/intr.c
|
4859
|
18875
|
/*
* SBE 2T3E3 synchronous serial card driver for Linux
*
* Copyright (C) 2009-2010 Krzysztof Halasa <khc@pm.waw.pl>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* This code is based on a driver written by SBE Inc.
*/
#include <linux/hdlc.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include "2t3e3.h"
irqreturn_t t3e3_intr(int irq, void *dev_instance)
{
struct channel *sc = dev_to_priv(dev_instance);
u32 val;
irqreturn_t ret = IRQ_NONE;
sc->interrupt_active = 1;
val = cpld_read(sc, SBE_2T3E3_CPLD_REG_PICSR);
if (val & SBE_2T3E3_CPLD_VAL_RECEIVE_LOSS_OF_SIGNAL_CHANGE) {
dev_dbg(&sc->pdev->dev,
"Rx LOS Chng Int r=%02x (LOS|OOF=%02x)\n",
val, (sc->s.LOS << 4) | sc->s.OOF);
cpld_LOS_update(sc);
ret = IRQ_HANDLED;
}
if (val & SBE_2T3E3_CPLD_VAL_INTERRUPT_FROM_ETHERNET_ASSERTED) {
dc_intr(sc);
ret = IRQ_HANDLED;
}
if (val & SBE_2T3E3_CPLD_VAL_INTERRUPT_FROM_FRAMER_ASSERTED) {
exar7250_intr(sc);
ret = IRQ_HANDLED;
}
	/*
	 * We don't care about the other interrupt sources (DMO, LOS, LCV)
	 * because they are handled by the framer too.
	 */
sc->interrupt_active = 0;
return ret;
}
void dc_intr(struct channel *sc)
{
u32 val;
/* disable ethernet interrupts */
/* grrr this clears interrupt summary bits !!! */
dc_write(sc->addr, SBE_2T3E3_21143_REG_INTERRUPT_ENABLE, 0);
while ((val = dc_read(sc->addr, SBE_2T3E3_21143_REG_STATUS)) &
(SBE_2T3E3_21143_VAL_RECEIVE_PROCESS_STOPPED |
SBE_2T3E3_21143_VAL_RECEIVE_BUFFER_UNAVAILABLE |
SBE_2T3E3_21143_VAL_RECEIVE_INTERRUPT |
SBE_2T3E3_21143_VAL_TRANSMIT_UNDERFLOW |
SBE_2T3E3_21143_VAL_TRANSMIT_BUFFER_UNAVAILABLE |
SBE_2T3E3_21143_VAL_TRANSMIT_PROCESS_STOPPED |
SBE_2T3E3_21143_VAL_TRANSMIT_INTERRUPT)) {
dc_write(sc->addr, SBE_2T3E3_21143_REG_STATUS, val);
dev_dbg(&sc->pdev->dev, "SBE 2T3E3: Ethernet controller interrupt! (CSR5 = %08X)\n",
val);
if (val & (SBE_2T3E3_21143_VAL_RECEIVE_INTERRUPT |
SBE_2T3E3_21143_VAL_RECEIVE_BUFFER_UNAVAILABLE |
SBE_2T3E3_21143_VAL_RECEIVE_PROCESS_STOPPED)) {
if (val & SBE_2T3E3_21143_VAL_RECEIVE_INTERRUPT)
dev_dbg(&sc->pdev->dev,
"Receive interrupt (LOS=%d, OOF=%d)\n",
sc->s.LOS, sc->s.OOF);
if (val & SBE_2T3E3_21143_VAL_RECEIVE_BUFFER_UNAVAILABLE)
dev_dbg(&sc->pdev->dev,
"Receive buffer unavailable\n");
if (val & SBE_2T3E3_21143_VAL_RECEIVE_PROCESS_STOPPED)
dev_dbg(&sc->pdev->dev,
"Receive process stopped\n");
dc_intr_rx(sc);
}
if (val & SBE_2T3E3_21143_VAL_TRANSMIT_UNDERFLOW) {
dev_dbg(&sc->pdev->dev, "Transmit underflow\n");
dc_intr_tx_underflow(sc);
}
if (val & (SBE_2T3E3_21143_VAL_TRANSMIT_BUFFER_UNAVAILABLE |
SBE_2T3E3_21143_VAL_TRANSMIT_INTERRUPT |
SBE_2T3E3_21143_VAL_TRANSMIT_PROCESS_STOPPED)) {
if (val & SBE_2T3E3_21143_VAL_TRANSMIT_INTERRUPT)
dev_dbg(&sc->pdev->dev, "Transmit interrupt\n");
if (val & SBE_2T3E3_21143_VAL_TRANSMIT_BUFFER_UNAVAILABLE)
dev_dbg(&sc->pdev->dev,
"Transmit buffer unavailable\n");
if (val & SBE_2T3E3_21143_VAL_TRANSMIT_PROCESS_STOPPED)
dev_dbg(&sc->pdev->dev,
"Transmit process stopped\n");
dc_intr_tx(sc);
}
}
/* enable ethernet interrupts */
dc_write(sc->addr, SBE_2T3E3_21143_REG_INTERRUPT_ENABLE,
sc->ether.interrupt_enable_mask);
}
void dc_intr_rx(struct channel *sc)
{
u32 current_read;
u32 error_mask, error;
t3e3_rx_desc_t *current_desc;
struct sk_buff *m, *m2;
unsigned rcv_len;
sc->rcv_count++; /* for the activity LED */
current_read = sc->ether.rx_ring_current_read;
dev_dbg(&sc->pdev->dev, "intr_rx current_read = %d\n", current_read);
/* when ethernet loopback is set, ignore framer signals */
if ((sc->p.loopback != SBE_2T3E3_LOOPBACK_ETHERNET) && sc->s.OOF) {
while (!(sc->ether.rx_ring[current_read].rdes0 &
SBE_2T3E3_RX_DESC_21143_OWN)) {
current_desc = &sc->ether.rx_ring[current_read];
current_desc->rdes1 &= SBE_2T3E3_RX_DESC_END_OF_RING |
SBE_2T3E3_RX_DESC_SECOND_ADDRESS_CHAINED;
current_desc->rdes1 |= SBE_2T3E3_MTU;
current_desc->rdes0 = SBE_2T3E3_RX_DESC_21143_OWN;
current_read = (current_read + 1) % SBE_2T3E3_RX_DESC_RING_SIZE;
}
sc->ether.rx_ring_current_read = current_read;
return;
}
while (!(sc->ether.rx_ring[current_read].rdes0 &
SBE_2T3E3_RX_DESC_21143_OWN)) {
current_desc = &sc->ether.rx_ring[current_read];
dev_dbg(&sc->pdev->dev, "rdes0: %08X rdes1: %08X\n",
current_desc->rdes0, current_desc->rdes1);
m = sc->ether.rx_data[current_read];
rcv_len = (current_desc->rdes0 & SBE_2T3E3_RX_DESC_FRAME_LENGTH) >>
SBE_2T3E3_RX_DESC_FRAME_LENGTH_SHIFT;
dev_dbg(&sc->pdev->dev, "mbuf was received (mbuf len = %d)\n",
rcv_len);
switch (sc->p.crc) {
case SBE_2T3E3_CRC_16:
rcv_len -= SBE_2T3E3_CRC16_LENGTH;
break;
case SBE_2T3E3_CRC_32:
rcv_len -= SBE_2T3E3_CRC32_LENGTH;
break;
default:
break;
}
if (current_desc->rdes0 & SBE_2T3E3_RX_DESC_LAST_DESC) {
/* TODO: is collision possible? */
error_mask = SBE_2T3E3_RX_DESC_DESC_ERROR |
SBE_2T3E3_RX_DESC_COLLISION_SEEN |
SBE_2T3E3_RX_DESC_DRIBBLING_BIT;
switch (sc->p.frame_mode) {
case SBE_2T3E3_FRAME_MODE_HDLC:
error_mask |= SBE_2T3E3_RX_DESC_MII_ERROR;
if (sc->p.crc == SBE_2T3E3_CRC_32)
error_mask |= SBE_2T3E3_RX_DESC_CRC_ERROR;
break;
case SBE_2T3E3_FRAME_MODE_TRANSPARENT:
case SBE_2T3E3_FRAME_MODE_RAW:
break;
default:
error_mask = 0;
}
if (sc->s.LOS) {
error_mask &= ~(SBE_2T3E3_RX_DESC_DRIBBLING_BIT |
SBE_2T3E3_RX_DESC_MII_ERROR);
}
error = current_desc->rdes0 & error_mask;
if (error) {
sc->s.in_errors++;
dev_dbg(&sc->pdev->dev,
"error interrupt: NO_ERROR_MESSAGE = %d\n",
sc->r.flags & SBE_2T3E3_FLAG_NO_ERROR_MESSAGES ? 1 : 0);
current_desc->rdes1 &= SBE_2T3E3_RX_DESC_END_OF_RING |
SBE_2T3E3_RX_DESC_SECOND_ADDRESS_CHAINED;
current_desc->rdes1 |= SBE_2T3E3_MTU;
current_desc->rdes0 = SBE_2T3E3_RX_DESC_21143_OWN;
if (error & SBE_2T3E3_RX_DESC_DESC_ERROR) {
if (!(sc->r.flags & SBE_2T3E3_FLAG_NO_ERROR_MESSAGES))
dev_err(&sc->pdev->dev,
"SBE 2T3E3: descriptor error\n");
sc->s.in_error_desc++;
}
if (error & SBE_2T3E3_RX_DESC_COLLISION_SEEN) {
if (!(sc->r.flags & SBE_2T3E3_FLAG_NO_ERROR_MESSAGES))
dev_err(&sc->pdev->dev,
"SBE 2T3E3: collision seen\n");
sc->s.in_error_coll++;
} else {
if (error & SBE_2T3E3_RX_DESC_DRIBBLING_BIT) {
if (!(sc->r.flags & SBE_2T3E3_FLAG_NO_ERROR_MESSAGES))
dev_err(&sc->pdev->dev,
"SBE 2T3E3: dribbling bits error\n");
sc->s.in_error_drib++;
}
if (error & SBE_2T3E3_RX_DESC_CRC_ERROR) {
if (!(sc->r.flags & SBE_2T3E3_FLAG_NO_ERROR_MESSAGES))
dev_err(&sc->pdev->dev,
"SBE 2T3E3: crc error\n");
sc->s.in_error_crc++;
}
}
if (error & SBE_2T3E3_RX_DESC_MII_ERROR) {
if (!(sc->r.flags & SBE_2T3E3_FLAG_NO_ERROR_MESSAGES))
dev_err(&sc->pdev->dev, "SBE 2T3E3: mii error\n");
sc->s.in_error_mii++;
}
current_read = (current_read + 1) % SBE_2T3E3_RX_DESC_RING_SIZE;
sc->r.flags |= SBE_2T3E3_FLAG_NO_ERROR_MESSAGES;
continue;
}
}
current_desc->rdes1 &= SBE_2T3E3_RX_DESC_END_OF_RING |
SBE_2T3E3_RX_DESC_SECOND_ADDRESS_CHAINED;
current_desc->rdes1 |= SBE_2T3E3_MTU;
if (rcv_len > 1600) {
sc->s.in_errors++;
sc->s.in_dropped++;
if (!(sc->r.flags & SBE_2T3E3_FLAG_NO_ERROR_MESSAGES))
dev_err(&sc->pdev->dev, "SBE 2T3E3: oversized rx: rdes0 = %08X\n",
current_desc->rdes0);
} else {
m2 = dev_alloc_skb(MCLBYTES);
if (m2 != NULL) {
current_desc->rdes2 = virt_to_phys(m2->data);
sc->ether.rx_data[current_read] = m2;
sc->s.in_packets++;
sc->s.in_bytes += rcv_len;
m->dev = sc->dev;
skb_put(m, rcv_len);
skb_reset_mac_header(m);
m->protocol = hdlc_type_trans(m, m->dev);
netif_rx(m);
/* good packet was received so we will show error messages again... */
if (sc->r.flags & SBE_2T3E3_FLAG_NO_ERROR_MESSAGES) {
dev_dbg(&sc->pdev->dev,
"setting ERROR_MESSAGES->0\n");
sc->r.flags &= ~SBE_2T3E3_FLAG_NO_ERROR_MESSAGES;
}
} else {
sc->s.in_errors++;
sc->s.in_dropped++;
}
}
current_desc->rdes0 = SBE_2T3E3_RX_DESC_21143_OWN;
current_read = (current_read + 1) % SBE_2T3E3_RX_DESC_RING_SIZE;
}
sc->ether.rx_ring_current_read = current_read;
dc_write(sc->addr, SBE_2T3E3_21143_REG_RECEIVE_POLL_DEMAND, 0xFFFFFFFF);
}
void dc_intr_tx(struct channel *sc)
{
u32 current_read, current_write;
u32 last_segment, error;
t3e3_tx_desc_t *current_desc;
spin_lock(&sc->ether.tx_lock);
current_read = sc->ether.tx_ring_current_read;
current_write = sc->ether.tx_ring_current_write;
while (current_read != current_write) {
current_desc = &sc->ether.tx_ring[current_read];
if (current_desc->tdes0 & SBE_2T3E3_RX_DESC_21143_OWN)
break;
dev_dbg(&sc->pdev->dev,
"txeof: tdes0 = %08X tdes1 = %08X\n",
current_desc->tdes0, current_desc->tdes1);
error = current_desc->tdes0 & (SBE_2T3E3_TX_DESC_ERROR_SUMMARY |
SBE_2T3E3_TX_DESC_TRANSMIT_JABBER_TIMEOUT |
SBE_2T3E3_TX_DESC_LOSS_OF_CARRIER |
SBE_2T3E3_TX_DESC_NO_CARRIER |
SBE_2T3E3_TX_DESC_LINK_FAIL_REPORT |
SBE_2T3E3_TX_DESC_UNDERFLOW_ERROR |
SBE_2T3E3_TX_DESC_DEFFERED);
last_segment = current_desc->tdes1 & SBE_2T3E3_TX_DESC_LAST_SEGMENT;
current_desc->tdes0 = 0;
current_desc->tdes1 &= SBE_2T3E3_TX_DESC_END_OF_RING |
SBE_2T3E3_TX_DESC_SECOND_ADDRESS_CHAINED;
current_desc->tdes2 = 0;
sc->ether.tx_free_cnt++;
if (last_segment != SBE_2T3E3_TX_DESC_LAST_SEGMENT) {
current_read = (current_read + 1) % SBE_2T3E3_TX_DESC_RING_SIZE;
continue;
}
if (sc->ether.tx_data[current_read]) {
sc->s.out_packets++;
sc->s.out_bytes += sc->ether.tx_data[current_read]->len;
dev_kfree_skb_any(sc->ether.tx_data[current_read]);
sc->ether.tx_data[current_read] = NULL;
}
if (error > 0) {
sc->s.out_errors++;
if (error & SBE_2T3E3_TX_DESC_TRANSMIT_JABBER_TIMEOUT) {
dev_err(&sc->pdev->dev, "SBE 2T3E3: transmit jabber timeout\n");
sc->s.out_error_jab++;
}
if (sc->p.loopback != SBE_2T3E3_LOOPBACK_ETHERNET) {
if (error & SBE_2T3E3_TX_DESC_LOSS_OF_CARRIER) {
dev_err(&sc->pdev->dev, "SBE 2T3E3: loss of carrier\n");
sc->s.out_error_lost_carr++;
}
if (error & SBE_2T3E3_TX_DESC_NO_CARRIER) {
dev_err(&sc->pdev->dev, "SBE 2T3E3: no carrier\n");
sc->s.out_error_no_carr++;
}
}
if (error & SBE_2T3E3_TX_DESC_LINK_FAIL_REPORT) {
dev_err(&sc->pdev->dev, "SBE 2T3E3: link fail report\n");
sc->s.out_error_link_fail++;
}
if (error & SBE_2T3E3_TX_DESC_UNDERFLOW_ERROR) {
dev_err(&sc->pdev->dev, "SBE 2T3E3:"
" transmission underflow error\n");
sc->s.out_error_underflow++;
spin_unlock(&sc->ether.tx_lock);
dc_restart(sc);
return;
}
if (error & SBE_2T3E3_TX_DESC_DEFFERED) {
dev_err(&sc->pdev->dev, "SBE 2T3E3: transmission deferred\n");
sc->s.out_error_dereferred++;
}
}
current_read = (current_read + 1) % SBE_2T3E3_TX_DESC_RING_SIZE;
}
sc->ether.tx_ring_current_read = current_read;
/* Relieve flow control when the TX queue is drained at least half way */
if (sc->ether.tx_full &&
(sc->ether.tx_free_cnt >= (SBE_2T3E3_TX_DESC_RING_SIZE / 2))) {
sc->ether.tx_full = 0;
netif_wake_queue(sc->dev);
}
spin_unlock(&sc->ether.tx_lock);
}
void dc_intr_tx_underflow(struct channel *sc)
{
u32 val;
dc_transmitter_onoff(sc, SBE_2T3E3_OFF);
val = dc_read(sc->addr, SBE_2T3E3_21143_REG_OPERATION_MODE);
dc_clear_bits(sc->addr, SBE_2T3E3_21143_REG_OPERATION_MODE,
SBE_2T3E3_21143_VAL_THRESHOLD_CONTROL_BITS);
switch (val & SBE_2T3E3_21143_VAL_THRESHOLD_CONTROL_BITS) {
case SBE_2T3E3_21143_VAL_THRESHOLD_CONTROL_BITS_1:
dc_set_bits(sc->addr, SBE_2T3E3_21143_REG_OPERATION_MODE,
SBE_2T3E3_21143_VAL_THRESHOLD_CONTROL_BITS_2);
break;
case SBE_2T3E3_21143_VAL_THRESHOLD_CONTROL_BITS_2:
dc_set_bits(sc->addr, SBE_2T3E3_21143_REG_OPERATION_MODE,
SBE_2T3E3_21143_VAL_THRESHOLD_CONTROL_BITS_3);
break;
case SBE_2T3E3_21143_VAL_THRESHOLD_CONTROL_BITS_3:
dc_set_bits(sc->addr, SBE_2T3E3_21143_REG_OPERATION_MODE,
SBE_2T3E3_21143_VAL_THRESHOLD_CONTROL_BITS_4);
break;
case SBE_2T3E3_21143_VAL_THRESHOLD_CONTROL_BITS_4:
default:
dc_set_bits(sc->addr, SBE_2T3E3_21143_REG_OPERATION_MODE,
SBE_2T3E3_21143_VAL_STORE_AND_FORWARD);
break;
}
dc_transmitter_onoff(sc, SBE_2T3E3_ON);
}
void exar7250_intr(struct channel *sc)
{
u32 status, old_OOF;
#if 0
/* disable interrupts */
exar7250_write(sc, SBE_2T3E3_FRAMER_REG_BLOCK_INTERRUPT_ENABLE, 0);
#endif
old_OOF = sc->s.OOF;
status = exar7250_read(sc, SBE_2T3E3_FRAMER_REG_BLOCK_INTERRUPT_STATUS);
dev_dbg(&sc->pdev->dev, "SBE 2T3E3: Framer interrupt! (REG[0x05] = %02X)\n", status);
switch (sc->p.frame_type) {
case SBE_2T3E3_FRAME_TYPE_E3_G751:
case SBE_2T3E3_FRAME_TYPE_E3_G832:
exar7250_E3_intr(sc, status);
break;
case SBE_2T3E3_FRAME_TYPE_T3_CBIT:
case SBE_2T3E3_FRAME_TYPE_T3_M13:
exar7250_T3_intr(sc, status);
break;
default:
break;
}
if (sc->s.OOF != old_OOF) {
if (sc->s.OOF) {
if (sc->p.loopback == SBE_2T3E3_LOOPBACK_NONE) {
dev_dbg(&sc->pdev->dev, "SBE 2T3E3: Disabling eth interrupts\n");
/* turn off ethernet interrupts */
dc_stop_intr(sc);
}
} else if (sc->r.flags & SBE_2T3E3_FLAG_NETWORK_UP) {
dev_dbg(&sc->pdev->dev, "SBE 2T3E3: Enabling eth interrupts\n");
			/*
			 * Temporarily fake OOF so dc_intr_rx() just recycles
			 * the rx descriptor ring, then restart interrupts.
			 */
sc->s.OOF = 1;
dc_intr_rx(sc);
sc->s.OOF = 0;
if (sc->p.receiver_on) {
dc_receiver_onoff(sc, SBE_2T3E3_OFF);
dc_receiver_onoff(sc, SBE_2T3E3_ON);
}
dc_start_intr(sc);
}
}
#if 0
/* reenable interrupts */
exar7250_write(sc, SBE_2T3E3_FRAMER_REG_BLOCK_INTERRUPT_ENABLE,
SBE_2T3E3_FRAMER_VAL_RX_INTERRUPT_ENABLE |
SBE_2T3E3_FRAMER_VAL_TX_INTERRUPT_ENABLE
);
#endif
}
void exar7250_T3_intr(struct channel *sc, u32 block_status)
{
u32 status, result;
if (block_status & SBE_2T3E3_FRAMER_VAL_RX_INTERRUPT_STATUS) {
status = exar7250_read(sc, SBE_2T3E3_FRAMER_REG_T3_RX_INTERRUPT_STATUS);
if (status) {
dev_dbg(&sc->pdev->dev,
"Framer interrupt T3 RX (REG[0x13] = %02X)\n",
status);
result = exar7250_read(sc, SBE_2T3E3_FRAMER_REG_T3_RX_CONFIGURATION_STATUS);
#if 0
if (status & SBE_2T3E3_FRAMER_VAL_T3_RX_LOS_INTERRUPT_STATUS) {
dev_dbg(&sc->pdev->dev,
"Framer interrupt T3: LOS\n");
sc->s.LOS = result & SBE_2T3E3_FRAMER_VAL_T3_RX_LOS ? 1 : 0;
}
#else
cpld_LOS_update(sc);
#endif
if (status & SBE_2T3E3_FRAMER_VAL_T3_RX_OOF_INTERRUPT_STATUS) {
sc->s.OOF = result & SBE_2T3E3_FRAMER_VAL_T3_RX_OOF ? 1 : 0;
dev_dbg(&sc->pdev->dev,
"Framer interrupt T3: OOF (%d)\n",
sc->s.OOF);
}
exar7250_write(sc, SBE_2T3E3_FRAMER_REG_T3_RX_INTERRUPT_ENABLE,
SBE_2T3E3_FRAMER_VAL_T3_RX_LOS_INTERRUPT_ENABLE |
SBE_2T3E3_FRAMER_VAL_T3_RX_OOF_INTERRUPT_ENABLE);
#if 0
SBE_2T3E3_FRAMER_VAL_T3_RX_CP_BIT_ERROR_INTERRUPT_ENABLE |
SBE_2T3E3_FRAMER_VAL_T3_RX_LOS_INTERRUPT_ENABLE |
SBE_2T3E3_FRAMER_VAL_T3_RX_AIS_INTERRUPT_ENABLE |
SBE_2T3E3_FRAMER_VAL_T3_RX_IDLE_INTERRUPT_ENABLE |
SBE_2T3E3_FRAMER_VAL_T3_RX_FERF_INTERRUPT_ENABLE |
SBE_2T3E3_FRAMER_VAL_T3_RX_AIC_INTERRUPT_ENABLE |
SBE_2T3E3_FRAMER_VAL_T3_RX_OOF_INTERRUPT_ENABLE |
SBE_2T3E3_FRAMER_VAL_T3_RX_P_BIT_INTERRUPT_ENABLE
#endif
}
status = exar7250_read(sc, SBE_2T3E3_FRAMER_REG_T3_RX_FEAC_INTERRUPT_ENABLE_STATUS);
if (status) {
dev_dbg(&sc->pdev->dev,
"Framer interrupt T3 RX (REG[0x17] = %02X)\n",
status);
#if 0
exar7250_write(sc, SBE_2T3E3_FRAMER_REG_T3_RX_FEAC_INTERRUPT_ENABLE_STATUS,
SBE_2T3E3_FRAMER_VAL_T3_RX_FEAC_REMOVE_INTERRUPT_ENABLE |
SBE_2T3E3_FRAMER_VAL_T3_RX_FEAC_VALID_INTERRUPT_ENABLE
);
#endif
}
status = exar7250_read(sc, SBE_2T3E3_FRAMER_REG_T3_RX_LAPD_CONTROL);
if (status)
dev_dbg(&sc->pdev->dev,
"Framer interrupt T3 RX (REG[0x18] = %02X)\n",
status);
}
if (block_status & SBE_2T3E3_FRAMER_VAL_TX_INTERRUPT_STATUS) {
status = exar7250_read(sc, SBE_2T3E3_FRAMER_REG_T3_TX_FEAC_CONFIGURATION_STATUS);
dev_dbg(&sc->pdev->dev, "SBE 2T3E3: Framer interrupt T3 TX (REG[0x31] = %02X)\n",
status);
status = exar7250_read(sc, SBE_2T3E3_FRAMER_REG_T3_TX_LAPD_STATUS);
dev_dbg(&sc->pdev->dev, "SBE 2T3E3: Framer interrupt T3 TX (REG[0x34] = %02X)\n",
status);
}
}
void exar7250_E3_intr(struct channel *sc, u32 block_status)
{
u32 status, result;
if (block_status & SBE_2T3E3_FRAMER_VAL_RX_INTERRUPT_STATUS) {
status = exar7250_read(sc, SBE_2T3E3_FRAMER_REG_E3_RX_INTERRUPT_STATUS_1);
if (status) {
dev_dbg(&sc->pdev->dev,
"Framer interrupt E3 RX (REG[0x14] = %02X)\n",
status);
result = exar7250_read(sc, SBE_2T3E3_FRAMER_REG_E3_RX_CONFIGURATION_STATUS_2);
#if 0
if (status & SBE_2T3E3_FRAMER_VAL_E3_RX_LOS_INTERRUPT_STATUS) {
dev_dbg(&sc->pdev->dev,
"Framer interrupt E3: LOS\n");
sc->s.LOS = result & SBE_2T3E3_FRAMER_VAL_E3_RX_LOS ? 1 : 0;
}
#else
cpld_LOS_update(sc);
#endif
if (status & SBE_2T3E3_FRAMER_VAL_E3_RX_OOF_INTERRUPT_STATUS) {
sc->s.OOF = result & SBE_2T3E3_FRAMER_VAL_E3_RX_OOF ? 1 : 0;
dev_dbg(&sc->pdev->dev,
"Framer interrupt E3: OOF (%d)\n",
sc->s.OOF);
}
exar7250_write(sc, SBE_2T3E3_FRAMER_REG_E3_RX_INTERRUPT_ENABLE_1,
SBE_2T3E3_FRAMER_VAL_E3_RX_OOF_INTERRUPT_ENABLE |
SBE_2T3E3_FRAMER_VAL_E3_RX_LOS_INTERRUPT_ENABLE
);
#if 0
SBE_2T3E3_FRAMER_VAL_E3_RX_COFA_INTERRUPT_ENABLE |
SBE_2T3E3_FRAMER_VAL_E3_RX_OOF_INTERRUPT_ENABLE |
SBE_2T3E3_FRAMER_VAL_E3_RX_LOF_INTERRUPT_ENABLE |
SBE_2T3E3_FRAMER_VAL_E3_RX_LOS_INTERRUPT_ENABLE |
SBE_2T3E3_FRAMER_VAL_E3_RX_AIS_INTERRUPT_ENABLE
#endif
}
status = exar7250_read(sc, SBE_2T3E3_FRAMER_REG_E3_RX_INTERRUPT_STATUS_2);
if (status) {
dev_dbg(&sc->pdev->dev,
"Framer interrupt E3 RX (REG[0x15] = %02X)\n",
status);
#if 0
exar7250_write(sc, SBE_2T3E3_FRAMER_REG_E3_RX_INTERRUPT_ENABLE_2,
SBE_2T3E3_FRAMER_VAL_E3_RX_FEBE_INTERRUPT_ENABLE |
SBE_2T3E3_FRAMER_VAL_E3_RX_FERF_INTERRUPT_ENABLE |
SBE_2T3E3_FRAMER_VAL_E3_RX_FRAMING_BYTE_ERROR_INTERRUPT_ENABLE);
#endif
}
}
if (block_status & SBE_2T3E3_FRAMER_VAL_TX_INTERRUPT_STATUS) {
status = exar7250_read(sc, SBE_2T3E3_FRAMER_REG_E3_TX_LAPD_STATUS);
dev_dbg(&sc->pdev->dev, "SBE 2T3E3: Framer interrupt E3 TX (REG[0x34] = %02X)\n",
status);
}
}
|
gpl-2.0
|
GeeteshKhatavkar/gh0st_kernel_samsung_royxx
|
drivers/media/rc/keymaps/rc-msi-digivox-ii.c
|
4859
|
1971
|
/*
* MSI DIGIVOX mini II remote controller keytable
*
* Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <media/rc-map.h>
#include <linux/module.h>
static struct rc_map_table msi_digivox_ii[] = {
{ 0x0002, KEY_2 },
{ 0x0003, KEY_UP }, /* up */
{ 0x0004, KEY_3 },
{ 0x0005, KEY_CHANNELDOWN },
{ 0x0008, KEY_5 },
{ 0x0009, KEY_0 },
{ 0x000b, KEY_8 },
{ 0x000d, KEY_DOWN }, /* down */
{ 0x0010, KEY_9 },
{ 0x0011, KEY_7 },
{ 0x0014, KEY_VOLUMEUP },
{ 0x0015, KEY_CHANNELUP },
{ 0x0016, KEY_OK },
{ 0x0017, KEY_POWER2 },
{ 0x001a, KEY_1 },
{ 0x001c, KEY_4 },
{ 0x001d, KEY_6 },
{ 0x001f, KEY_VOLUMEDOWN },
};
static struct rc_map_list msi_digivox_ii_map = {
.map = {
.scan = msi_digivox_ii,
.size = ARRAY_SIZE(msi_digivox_ii),
.rc_type = RC_TYPE_NEC,
.name = RC_MAP_MSI_DIGIVOX_II,
}
};
static int __init init_rc_map_msi_digivox_ii(void)
{
return rc_map_register(&msi_digivox_ii_map);
}
static void __exit exit_rc_map_msi_digivox_ii(void)
{
rc_map_unregister(&msi_digivox_ii_map);
}
module_init(init_rc_map_msi_digivox_ii)
module_exit(exit_rc_map_msi_digivox_ii)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
|
gpl-2.0
|
NooNameR/k2.6.35.14-ICS-
|
drivers/isdn/pcbit/module.c
|
5115
|
2362
|
/*
* PCBIT-D module support
*
* Copyright (C) 1996 Universidade de Lisboa
*
* Written by Pedro Roque Marques (roque@di.fc.ul.pt)
*
* This software may be used and distributed according to the terms of
* the GNU General Public License, incorporated herein by reference.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/isdnif.h>
#include "pcbit.h"
MODULE_DESCRIPTION("ISDN4Linux: Driver for PCBIT-T card");
MODULE_AUTHOR("Pedro Roque Marques");
MODULE_LICENSE("GPL");
static int mem[MAX_PCBIT_CARDS];
static int irq[MAX_PCBIT_CARDS];
module_param_array(mem, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
static int num_boards;
struct pcbit_dev * dev_pcbit[MAX_PCBIT_CARDS];
static int __init pcbit_init(void)
{
int board;
num_boards = 0;
printk(KERN_NOTICE
"PCBIT-D device driver v 0.5-fjpc0 19991204 - "
"Copyright (C) 1996 Universidade de Lisboa\n");
if (mem[0] || irq[0])
{
for (board=0; board < MAX_PCBIT_CARDS && mem[board] && irq[board]; board++)
{
if (!mem[board])
mem[board] = 0xD0000;
if (!irq[board])
irq[board] = 5;
if (pcbit_init_dev(board, mem[board], irq[board]) == 0)
num_boards++;
else
{
printk(KERN_WARNING
"pcbit_init failed for dev %d",
board + 1);
return -EIO;
}
}
}
	/* Fall back to probing with the hardcoded default settings */
if (!num_boards)
{
printk(KERN_INFO
"Trying to detect board using default settings\n");
if (pcbit_init_dev(0, 0xD0000, 5) == 0)
num_boards++;
else
return -EIO;
}
return 0;
}
static void __exit pcbit_exit(void)
{
#ifdef MODULE
int board;
for (board = 0; board < num_boards; board++)
pcbit_terminate(board);
printk(KERN_NOTICE
"PCBIT-D module unloaded\n");
#endif
}
#ifndef MODULE
#define MAX_PARA (MAX_PCBIT_CARDS * 2)
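/*
 * The boot line takes <mem>,<irq> pairs, one pair per board, e.g.
 * "pcbit=0xD0000,5" (illustrative values matching the driver's
 * built-in defaults).
 */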
static int __init pcbit_setup(char *line)
{
int i, j, argc;
char *str;
int ints[MAX_PARA+1];
str = get_options(line, MAX_PARA, ints);
argc = ints[0];
i = 0;
j = 1;
	while (argc && (i < MAX_PCBIT_CARDS)) {
if (argc) {
mem[i] = ints[j];
j++; argc--;
}
if (argc) {
irq[i] = ints[j];
j++; argc--;
}
i++;
}
	return 1;
}
__setup("pcbit=", pcbit_setup);
#endif
module_init(pcbit_init);
module_exit(pcbit_exit);
|
gpl-2.0
|
wuxianlin/android_kernel_zte_pluto
|
drivers/s390/char/sclp_ocf.c
|
7931
|
3636
|
/*
* drivers/s390/char/sclp_ocf.c
* SCLP OCF communication parameters sysfs interface
*
* Copyright IBM Corp. 2011
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#define KMSG_COMPONENT "sclp_ocf"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/timer.h>
#include <linux/err.h>
#include <asm/ebcdic.h>
#include <asm/sclp.h>
#include "sclp.h"
#define OCF_LENGTH_HMC_NETWORK 8UL
#define OCF_LENGTH_CPC_NAME 8UL
static char hmc_network[OCF_LENGTH_HMC_NETWORK + 1];
static char cpc_name[OCF_LENGTH_CPC_NAME + 1];
static DEFINE_SPINLOCK(sclp_ocf_lock);
static struct work_struct sclp_ocf_change_work;
static struct kset *ocf_kset;
static void sclp_ocf_change_notify(struct work_struct *work)
{
kobject_uevent(&ocf_kset->kobj, KOBJ_CHANGE);
}
/* Handler for OCF event. Look for the CPC image name. */
static void sclp_ocf_handler(struct evbuf_header *evbuf)
{
struct gds_vector *v;
struct gds_subvector *sv, *netid, *cpc;
size_t size;
/* Find the 0x9f00 block. */
v = sclp_find_gds_vector(evbuf + 1, (void *) evbuf + evbuf->length,
0x9f00);
if (!v)
return;
/* Find the 0x9f22 block inside the 0x9f00 block. */
v = sclp_find_gds_vector(v + 1, (void *) v + v->length, 0x9f22);
if (!v)
return;
/* Find the 0x81 block inside the 0x9f22 block. */
sv = sclp_find_gds_subvector(v + 1, (void *) v + v->length, 0x81);
if (!sv)
return;
/* Find the 0x01 block inside the 0x81 block. */
netid = sclp_find_gds_subvector(sv + 1, (void *) sv + sv->length, 1);
/* Find the 0x02 block inside the 0x81 block. */
cpc = sclp_find_gds_subvector(sv + 1, (void *) sv + sv->length, 2);
/* Copy network name and cpc name. */
spin_lock(&sclp_ocf_lock);
if (netid) {
size = min(OCF_LENGTH_HMC_NETWORK, (size_t) netid->length);
memcpy(hmc_network, netid + 1, size);
EBCASC(hmc_network, size);
hmc_network[size] = 0;
}
if (cpc) {
size = min(OCF_LENGTH_CPC_NAME, (size_t) cpc->length);
memcpy(cpc_name, cpc + 1, size);
EBCASC(cpc_name, size);
cpc_name[size] = 0;
}
spin_unlock(&sclp_ocf_lock);
schedule_work(&sclp_ocf_change_work);
}
static struct sclp_register sclp_ocf_event = {
.receive_mask = EVTYP_OCF_MASK,
.receiver_fn = sclp_ocf_handler,
};
static ssize_t cpc_name_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
int rc;
spin_lock_irq(&sclp_ocf_lock);
rc = snprintf(page, PAGE_SIZE, "%s\n", cpc_name);
spin_unlock_irq(&sclp_ocf_lock);
return rc;
}
static struct kobj_attribute cpc_name_attr =
__ATTR(cpc_name, 0444, cpc_name_show, NULL);
static ssize_t hmc_network_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
int rc;
spin_lock_irq(&sclp_ocf_lock);
rc = snprintf(page, PAGE_SIZE, "%s\n", hmc_network);
spin_unlock_irq(&sclp_ocf_lock);
return rc;
}
static struct kobj_attribute hmc_network_attr =
__ATTR(hmc_network, 0444, hmc_network_show, NULL);
static struct attribute *ocf_attrs[] = {
&cpc_name_attr.attr,
&hmc_network_attr.attr,
NULL,
};
static struct attribute_group ocf_attr_group = {
.attrs = ocf_attrs,
};
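/* the attributes appear as /sys/firmware/ocf/{cpc_name,hmc_network} */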
static int __init ocf_init(void)
{
int rc;
INIT_WORK(&sclp_ocf_change_work, sclp_ocf_change_notify);
ocf_kset = kset_create_and_add("ocf", NULL, firmware_kobj);
if (!ocf_kset)
return -ENOMEM;
rc = sysfs_create_group(&ocf_kset->kobj, &ocf_attr_group);
if (rc) {
kset_unregister(ocf_kset);
return rc;
}
return sclp_register(&sclp_ocf_event);
}
device_initcall(ocf_init);
|
gpl-2.0
|
aznrice/android_kernel_samsung_afyonltetmo
|
arch/powerpc/perf/mpc7450-pmu.c
|
7931
|
10402
|
/*
* Performance counter support for MPC7450-family processors.
*
* Copyright 2008-2009 Paul Mackerras, IBM Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/string.h>
#include <linux/perf_event.h>
#include <asm/reg.h>
#include <asm/cputable.h>
#define N_COUNTER 6 /* Number of hardware counters */
#define MAX_ALT 3 /* Maximum number of event alternative codes */
/*
* Bits in event code for MPC7450 family
*/
#define PM_THRMULT_MSKS 0x40000
#define PM_THRESH_SH 12
#define PM_THRESH_MSK 0x3f
#define PM_PMC_SH 8
#define PM_PMC_MSK 7
#define PM_PMCSEL_MSK 0x7f
/*
* Classify events according to how specific their PMC requirements are.
* Result is:
* 0: can go on any PMC
* 1: can go on PMCs 1-4
* 2: can go on PMCs 1,2,4
* 3: can go on PMCs 1 or 2
* 4: can only go on one PMC
* -1: event code is invalid
*/
#define N_CLASSES 5
static int mpc7450_classify_event(u32 event)
{
int pmc;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
if (pmc) {
if (pmc > N_COUNTER)
return -1;
return 4;
}
event &= PM_PMCSEL_MSK;
if (event <= 1)
return 0;
if (event <= 7)
return 1;
if (event <= 13)
return 2;
if (event <= 22)
return 3;
return -1;
}
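/*
 * For example, event code 0x217 (PM_L1_DCACHE_MISS) has PMC field
 * (0x217 >> 8) & 7 == 2, so it falls in class 4: it can only go on PMC2.
 */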
/*
* Events using threshold and possible threshold scale:
* code scale? name
* 11e N PM_INSTQ_EXCEED_CYC
* 11f N PM_ALTV_IQ_EXCEED_CYC
* 128 Y PM_DTLB_SEARCH_EXCEED_CYC
* 12b Y PM_LD_MISS_EXCEED_L1_CYC
* 220 N PM_CQ_EXCEED_CYC
* 30c N PM_GPR_RB_EXCEED_CYC
* 30d ? PM_FPR_IQ_EXCEED_CYC ?
* 311 Y PM_ITLB_SEARCH_EXCEED
* 410 N PM_GPR_IQ_EXCEED_CYC
*/
/*
* Return use of threshold and threshold scale bits:
* 0 = uses neither, 1 = uses threshold, 2 = uses both
*/
static int mpc7450_threshold_use(u32 event)
{
int pmc, sel;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
sel = event & PM_PMCSEL_MSK;
switch (pmc) {
case 1:
if (sel == 0x1e || sel == 0x1f)
return 1;
if (sel == 0x28 || sel == 0x2b)
return 2;
break;
case 2:
if (sel == 0x20)
return 1;
break;
case 3:
if (sel == 0xc || sel == 0xd)
return 1;
if (sel == 0x11)
return 2;
break;
case 4:
if (sel == 0x10)
return 1;
break;
}
return 0;
}
/*
* Layout of constraint bits:
* 33222222222211111111110000000000
* 10987654321098765432109876543210
* |< >< > < > < ><><><><><><>
* TS TV G4 G3 G2P6P5P4P3P2P1
*
* P1 - P6
* 0 - 11: Count of events needing PMC1 .. PMC6
*
* G2
* 12 - 14: Count of events needing PMC1 or PMC2
*
* G3
* 16 - 18: Count of events needing PMC1, PMC2 or PMC4
*
* G4
* 20 - 23: Count of events needing PMC1, PMC2, PMC3 or PMC4
*
* TV
* 24 - 29: Threshold value requested
*
* TS
* 30: Threshold scale value requested
*/
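/*
 * Illustration (not part of the original comment): each value word below
 * adds 1 to the counters for the groups an event can use -- for PMC1,
 * 0x00111001 bumps P1 (bit 0), G2 (bit 12), G3 (bit 16) and G4 (bit 20) --
 * while the matching mask word marks the bit just above each field, where
 * the generic constraint code detects that a group is over-committed.
 */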
static u32 pmcbits[N_COUNTER][2] = {
{ 0x00844002, 0x00111001 }, /* PMC1 mask, value: P1,G2,G3,G4 */
{ 0x00844008, 0x00111004 }, /* PMC2: P2,G2,G3,G4 */
{ 0x00800020, 0x00100010 }, /* PMC3: P3,G4 */
{ 0x00840080, 0x00110040 }, /* PMC4: P4,G3,G4 */
{ 0x00000200, 0x00000100 }, /* PMC5: P5 */
{ 0x00000800, 0x00000400 } /* PMC6: P6 */
};
static u32 classbits[N_CLASSES - 1][2] = {
{ 0x00000000, 0x00000000 }, /* class 0: no constraint */
{ 0x00800000, 0x00100000 }, /* class 1: G4 */
{ 0x00040000, 0x00010000 }, /* class 2: G3 */
{ 0x00004000, 0x00001000 }, /* class 3: G2 */
};
static int mpc7450_get_constraint(u64 event, unsigned long *maskp,
unsigned long *valp)
{
int pmc, class;
u32 mask, value;
int thresh, tuse;
class = mpc7450_classify_event(event);
if (class < 0)
return -1;
if (class == 4) {
pmc = ((unsigned int)event >> PM_PMC_SH) & PM_PMC_MSK;
mask = pmcbits[pmc - 1][0];
value = pmcbits[pmc - 1][1];
} else {
mask = classbits[class][0];
value = classbits[class][1];
}
tuse = mpc7450_threshold_use(event);
if (tuse) {
thresh = ((unsigned int)event >> PM_THRESH_SH) & PM_THRESH_MSK;
mask |= 0x3f << 24;
value |= thresh << 24;
if (tuse == 2) {
mask |= 0x40000000;
if ((unsigned int)event & PM_THRMULT_MSKS)
value |= 0x40000000;
}
}
*maskp = mask;
*valp = value;
return 0;
}
static const unsigned int event_alternatives[][MAX_ALT] = {
{ 0x217, 0x317 }, /* PM_L1_DCACHE_MISS */
{ 0x418, 0x50f, 0x60f }, /* PM_SNOOP_RETRY */
{ 0x502, 0x602 }, /* PM_L2_HIT */
{ 0x503, 0x603 }, /* PM_L3_HIT */
{ 0x504, 0x604 }, /* PM_L2_ICACHE_MISS */
{ 0x505, 0x605 }, /* PM_L3_ICACHE_MISS */
{ 0x506, 0x606 }, /* PM_L2_DCACHE_MISS */
{ 0x507, 0x607 }, /* PM_L3_DCACHE_MISS */
{ 0x50a, 0x623 }, /* PM_LD_HIT_L3 */
{ 0x50b, 0x624 }, /* PM_ST_HIT_L3 */
{ 0x50d, 0x60d }, /* PM_L2_TOUCH_HIT */
{ 0x50e, 0x60e }, /* PM_L3_TOUCH_HIT */
{ 0x512, 0x612 }, /* PM_INT_LOCAL */
{ 0x513, 0x61d }, /* PM_L2_MISS */
{ 0x514, 0x61e }, /* PM_L3_MISS */
};
/*
* Scan the alternatives table for a match and return the
* index into the alternatives table if found, else -1.
*/
static int find_alternative(u32 event)
{
int i, j;
for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
if (event < event_alternatives[i][0])
break;
for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
if (event == event_alternatives[i][j])
return i;
}
return -1;
}
static int mpc7450_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
int i, j, nalt = 1;
u32 ae;
	alt[0] = event;
i = find_alternative((u32)event);
if (i >= 0) {
for (j = 0; j < MAX_ALT; ++j) {
ae = event_alternatives[i][j];
if (ae && ae != (u32)event)
alt[nalt++] = ae;
}
}
return nalt;
}
/*
* Bitmaps of which PMCs each class can use for classes 0 - 3.
* Bit i is set if PMC i+1 is usable.
*/
static const u8 classmap[N_CLASSES] = {
0x3f, 0x0f, 0x0b, 0x03, 0
};
/* Bit position and width of each PMCSEL field */
static const int pmcsel_shift[N_COUNTER] = {
6, 0, 27, 22, 17, 11
};
static const u32 pmcsel_mask[N_COUNTER] = {
0x7f, 0x3f, 0x1f, 0x1f, 0x1f, 0x3f
};
/*
* Compute MMCR0/1/2 values for a set of events.
*/
static int mpc7450_compute_mmcr(u64 event[], int n_ev,
unsigned int hwc[], unsigned long mmcr[])
{
u8 event_index[N_CLASSES][N_COUNTER];
int n_classevent[N_CLASSES];
int i, j, class, tuse;
u32 pmc_inuse = 0, pmc_avail;
u32 mmcr0 = 0, mmcr1 = 0, mmcr2 = 0;
u32 ev, pmc, thresh;
if (n_ev > N_COUNTER)
return -1;
/* First pass: count usage in each class */
for (i = 0; i < N_CLASSES; ++i)
n_classevent[i] = 0;
for (i = 0; i < n_ev; ++i) {
class = mpc7450_classify_event(event[i]);
if (class < 0)
return -1;
j = n_classevent[class]++;
event_index[class][j] = i;
}
/* Second pass: allocate PMCs from most specific event to least */
for (class = N_CLASSES - 1; class >= 0; --class) {
for (i = 0; i < n_classevent[class]; ++i) {
ev = event[event_index[class][i]];
if (class == 4) {
pmc = (ev >> PM_PMC_SH) & PM_PMC_MSK;
if (pmc_inuse & (1 << (pmc - 1)))
return -1;
} else {
/* Find a suitable PMC */
pmc_avail = classmap[class] & ~pmc_inuse;
if (!pmc_avail)
return -1;
pmc = ffs(pmc_avail);
}
pmc_inuse |= 1 << (pmc - 1);
tuse = mpc7450_threshold_use(ev);
if (tuse) {
thresh = (ev >> PM_THRESH_SH) & PM_THRESH_MSK;
mmcr0 |= thresh << 16;
if (tuse == 2 && (ev & PM_THRMULT_MSKS))
mmcr2 = 0x80000000;
}
ev &= pmcsel_mask[pmc - 1];
ev <<= pmcsel_shift[pmc - 1];
if (pmc <= 2)
mmcr0 |= ev;
else
mmcr1 |= ev;
hwc[event_index[class][i]] = pmc - 1;
}
}
if (pmc_inuse & 1)
mmcr0 |= MMCR0_PMC1CE;
if (pmc_inuse & 0x3e)
mmcr0 |= MMCR0_PMCnCE;
/* Return MMCRx values */
mmcr[0] = mmcr0;
mmcr[1] = mmcr1;
mmcr[2] = mmcr2;
return 0;
}
/*
* Disable counting by a PMC.
* Note that the pmc argument is 0-based here, not 1-based.
*/
static void mpc7450_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
if (pmc <= 1)
mmcr[0] &= ~(pmcsel_mask[pmc] << pmcsel_shift[pmc]);
else
mmcr[1] &= ~(pmcsel_mask[pmc] << pmcsel_shift[pmc]);
}
static int mpc7450_generic_events[] = {
[PERF_COUNT_HW_CPU_CYCLES] = 1,
[PERF_COUNT_HW_INSTRUCTIONS] = 2,
[PERF_COUNT_HW_CACHE_MISSES] = 0x217, /* PM_L1_DCACHE_MISS */
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x122, /* PM_BR_CMPL */
[PERF_COUNT_HW_BRANCH_MISSES] = 0x41c, /* PM_BR_MPRED */
};
#define C(x) PERF_COUNT_HW_CACHE_##x
/*
* Table of generalized cache-related events.
* 0 means not supported, -1 means nonsensical, other values
* are event codes.
*/
static int mpc7450_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0, 0x225 },
[C(OP_WRITE)] = { 0, 0x227 },
[C(OP_PREFETCH)] = { 0, 0 },
},
[C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0x129, 0x115 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { 0x634, 0 },
},
[C(LL)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0, 0 },
[C(OP_WRITE)] = { 0, 0 },
[C(OP_PREFETCH)] = { 0, 0 },
},
[C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0, 0x312 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
[C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0, 0x223 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
[C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { 0x122, 0x41c },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
[C(NODE)] = { /* RESULT_ACCESS RESULT_MISS */
[C(OP_READ)] = { -1, -1 },
[C(OP_WRITE)] = { -1, -1 },
[C(OP_PREFETCH)] = { -1, -1 },
},
};
struct power_pmu mpc7450_pmu = {
.name = "MPC7450 family",
.n_counter = N_COUNTER,
.max_alternatives = MAX_ALT,
.add_fields = 0x00111555ul,
.test_adder = 0x00301000ul,
.compute_mmcr = mpc7450_compute_mmcr,
.get_constraint = mpc7450_get_constraint,
.get_alternatives = mpc7450_get_alternatives,
.disable_pmc = mpc7450_disable_pmc,
.n_generic = ARRAY_SIZE(mpc7450_generic_events),
.generic_events = mpc7450_generic_events,
.cache_events = &mpc7450_cache_events,
};
static int __init init_mpc7450_pmu(void)
{
if (!cur_cpu_spec->oprofile_cpu_type ||
strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/7450"))
return -ENODEV;
return register_power_pmu(&mpc7450_pmu);
}
early_initcall(init_mpc7450_pmu);
|
gpl-2.0
|
iconiaN/android_kernel_acer_a700
|
net/netfilter/ipvs/ip_vs_sched.c
|
8187
|
5860
|
/*
* IPVS An implementation of the IP virtual server support for the
* LINUX operating system. IPVS is now implemented as a module
* over the Netfilter framework. IPVS can be used to build a
* high-performance and highly available server based on a
* cluster of servers.
*
* Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
* Peter Kese <peter.kese@ijs.si>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Changes:
*
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <asm/string.h>
#include <linux/kmod.h>
#include <linux/sysctl.h>
#include <net/ip_vs.h>
EXPORT_SYMBOL(ip_vs_scheduler_err);
/*
* IPVS scheduler list
*/
static LIST_HEAD(ip_vs_schedulers);
/* lock for the scheduler list */
static DEFINE_SPINLOCK(ip_vs_sched_lock);
/*
 *  Bind a service to a scheduler
*/
int ip_vs_bind_scheduler(struct ip_vs_service *svc,
struct ip_vs_scheduler *scheduler)
{
int ret;
svc->scheduler = scheduler;
if (scheduler->init_service) {
ret = scheduler->init_service(svc);
if (ret) {
pr_err("%s(): init error\n", __func__);
return ret;
}
}
return 0;
}
/*
 *  Unbind a service from its scheduler
*/
int ip_vs_unbind_scheduler(struct ip_vs_service *svc)
{
struct ip_vs_scheduler *sched = svc->scheduler;
if (!sched)
return 0;
if (sched->done_service) {
if (sched->done_service(svc) != 0) {
pr_err("%s(): done error\n", __func__);
return -EINVAL;
}
}
svc->scheduler = NULL;
return 0;
}
/*
* Get scheduler in the scheduler list by name
*/
static struct ip_vs_scheduler *ip_vs_sched_getbyname(const char *sched_name)
{
struct ip_vs_scheduler *sched;
IP_VS_DBG(2, "%s(): sched_name \"%s\"\n", __func__, sched_name);
spin_lock_bh(&ip_vs_sched_lock);
list_for_each_entry(sched, &ip_vs_schedulers, n_list) {
		/*
		 * Test and take the module reference atomically
		 */
		if (sched->module && !try_module_get(sched->module)) {
			/*
			 * This scheduler has just been deleted
			 */
continue;
}
		if (strcmp(sched_name, sched->name) == 0) {
/* HIT */
spin_unlock_bh(&ip_vs_sched_lock);
return sched;
}
if (sched->module)
module_put(sched->module);
}
spin_unlock_bh(&ip_vs_sched_lock);
return NULL;
}
/*
* Lookup scheduler and try to load it if it doesn't exist
*/
struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name)
{
struct ip_vs_scheduler *sched;
/*
* Search for the scheduler by sched_name
*/
sched = ip_vs_sched_getbyname(sched_name);
/*
* If scheduler not found, load the module and search again
*/
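	/* e.g. a lookup for "rr" autoloads the ip_vs_rr module */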
if (sched == NULL) {
request_module("ip_vs_%s", sched_name);
sched = ip_vs_sched_getbyname(sched_name);
}
return sched;
}
void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)
{
if (scheduler && scheduler->module)
module_put(scheduler->module);
}
/*
* Common error output helper for schedulers
*/
void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg)
{
if (svc->fwmark) {
IP_VS_ERR_RL("%s: FWM %u 0x%08X - %s\n",
svc->scheduler->name, svc->fwmark,
svc->fwmark, msg);
#ifdef CONFIG_IP_VS_IPV6
} else if (svc->af == AF_INET6) {
IP_VS_ERR_RL("%s: %s [%pI6]:%d - %s\n",
svc->scheduler->name,
ip_vs_proto_name(svc->protocol),
&svc->addr.in6, ntohs(svc->port), msg);
#endif
} else {
IP_VS_ERR_RL("%s: %s %pI4:%d - %s\n",
svc->scheduler->name,
ip_vs_proto_name(svc->protocol),
&svc->addr.ip, ntohs(svc->port), msg);
}
}
/*
* Register a scheduler in the scheduler list
*/
int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
{
struct ip_vs_scheduler *sched;
if (!scheduler) {
pr_err("%s(): NULL arg\n", __func__);
return -EINVAL;
}
if (!scheduler->name) {
pr_err("%s(): NULL scheduler_name\n", __func__);
return -EINVAL;
}
/* increase the module use count */
ip_vs_use_count_inc();
spin_lock_bh(&ip_vs_sched_lock);
if (!list_empty(&scheduler->n_list)) {
spin_unlock_bh(&ip_vs_sched_lock);
ip_vs_use_count_dec();
pr_err("%s(): [%s] scheduler already linked\n",
__func__, scheduler->name);
return -EINVAL;
}
/*
* Make sure that the scheduler with this name doesn't exist
* in the scheduler list.
*/
list_for_each_entry(sched, &ip_vs_schedulers, n_list) {
if (strcmp(scheduler->name, sched->name) == 0) {
spin_unlock_bh(&ip_vs_sched_lock);
ip_vs_use_count_dec();
pr_err("%s(): [%s] scheduler already existed "
"in the system\n", __func__, scheduler->name);
return -EINVAL;
}
}
/*
	 *	Add it into the doubly-linked scheduler list
*/
list_add(&scheduler->n_list, &ip_vs_schedulers);
spin_unlock_bh(&ip_vs_sched_lock);
pr_info("[%s] scheduler registered.\n", scheduler->name);
return 0;
}
/*
* Unregister a scheduler from the scheduler list
*/
int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
{
if (!scheduler) {
pr_err("%s(): NULL arg\n", __func__);
return -EINVAL;
}
spin_lock_bh(&ip_vs_sched_lock);
if (list_empty(&scheduler->n_list)) {
spin_unlock_bh(&ip_vs_sched_lock);
pr_err("%s(): [%s] scheduler is not in the list. failed\n",
__func__, scheduler->name);
return -EINVAL;
}
/*
	 *	Remove it from the doubly-linked scheduler list
*/
list_del(&scheduler->n_list);
spin_unlock_bh(&ip_vs_sched_lock);
/* decrease the module use count */
ip_vs_use_count_dec();
pr_info("[%s] scheduler unregistered.\n", scheduler->name);
return 0;
}
|
gpl-2.0
|
3EleVen/android_kernel_google_msm
|
drivers/mtd/maps/rpxlite.c
|
8187
|
1371
|
/*
* Handle mapping of the flash on the RPX Lite and CLLF boards
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#define WINDOW_ADDR 0xfe000000
#define WINDOW_SIZE 0x800000
static struct mtd_info *mymtd;
static struct map_info rpxlite_map = {
.name = "RPX",
.size = WINDOW_SIZE,
.bankwidth = 4,
.phys = WINDOW_ADDR,
};
static int __init init_rpxlite(void)
{
printk(KERN_NOTICE "RPX Lite or CLLF flash device: %x at %x\n", WINDOW_SIZE*4, WINDOW_ADDR);
rpxlite_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4);
if (!rpxlite_map.virt) {
printk("Failed to ioremap\n");
return -EIO;
}
simple_map_init(&rpxlite_map);
mymtd = do_map_probe("cfi_probe", &rpxlite_map);
if (mymtd) {
mymtd->owner = THIS_MODULE;
mtd_device_register(mymtd, NULL, 0);
return 0;
}
iounmap((void *)rpxlite_map.virt);
return -ENXIO;
}
static void __exit cleanup_rpxlite(void)
{
if (mymtd) {
mtd_device_unregister(mymtd);
map_destroy(mymtd);
}
if (rpxlite_map.virt) {
iounmap((void *)rpxlite_map.virt);
rpxlite_map.virt = NULL;
}
}
module_init(init_rpxlite);
module_exit(cleanup_rpxlite);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnold Christensen <AKC@pel.dk>");
MODULE_DESCRIPTION("MTD map driver for RPX Lite and CLLF boards");
|
gpl-2.0
|
RichardWithnell/mptcp
|
mm/failslab.c
|
8955
|
1316
|
#include <linux/fault-inject.h>
#include <linux/slab.h>
static struct {
struct fault_attr attr;
u32 ignore_gfp_wait;
int cache_filter;
} failslab = {
.attr = FAULT_ATTR_INITIALIZER,
.ignore_gfp_wait = 1,
.cache_filter = 0,
};
bool should_failslab(size_t size, gfp_t gfpflags, unsigned long cache_flags)
{
if (gfpflags & __GFP_NOFAIL)
return false;
if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT))
return false;
if (failslab.cache_filter && !(cache_flags & SLAB_FAILSLAB))
return false;
return should_fail(&failslab.attr, size);
}
static int __init setup_failslab(char *str)
{
return setup_fault_attr(&failslab.attr, str);
}
__setup("failslab=", setup_failslab);
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
static int __init failslab_debugfs_init(void)
{
struct dentry *dir;
umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
dir = fault_create_debugfs_attr("failslab", NULL, &failslab.attr);
if (IS_ERR(dir))
return PTR_ERR(dir);
if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
&failslab.ignore_gfp_wait))
goto fail;
if (!debugfs_create_bool("cache-filter", mode, dir,
&failslab.cache_filter))
goto fail;
return 0;
fail:
debugfs_remove_recursive(dir);
return -ENOMEM;
}
late_initcall(failslab_debugfs_init);
#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
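/*
 * Illustrative sketch (not part of the original file): how an
 * allocator hot path consults should_failslab() above. The
 * example_slab_alloc() wrapper is hypothetical; the real hooks live
 * in the slab allocators themselves, so the block is guarded out.
 */
#if 0
static void *example_slab_alloc(size_t size, gfp_t gfpflags,
unsigned long cache_flags)
{
/* Fault injection may ask us to simulate an allocation failure. */
if (should_failslab(size, gfpflags, cache_flags))
return NULL;
/* ... continue with the normal allocation path ... */
return NULL;
}
#endif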
|
gpl-2.0
|
i2t/rmptcp
|
drivers/scsi/libfc/fc_elsct.c
|
9467
|
3696
|
/*
* Copyright(c) 2008 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
*
* Maintained at www.Open-FCoE.org
*/
/*
* Provide interface to send ELS/CT FC frames
*/
#include <linux/export.h>
#include <asm/unaligned.h>
#include <scsi/fc/fc_gs.h>
#include <scsi/fc/fc_ns.h>
#include <scsi/fc/fc_els.h>
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
#include "fc_libfc.h"
/**
* fc_elsct_send() - Send an ELS or CT frame
* @lport: The local port to send the frame on
* @did: The destination ID for the frame
* @fp: The frame to be sent
* @op: The operational code
* @resp: The callback routine when the response is received
* @arg: The argument to pass to the response callback routine
* @timer_msec: The timeout period for the frame (in msecs)
*/
struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did,
struct fc_frame *fp, unsigned int op,
void (*resp)(struct fc_seq *,
struct fc_frame *,
void *),
void *arg, u32 timer_msec)
{
enum fc_rctl r_ctl;
enum fc_fh_type fh_type;
int rc;
/* ELS requests */
if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
rc = fc_els_fill(lport, did, fp, op, &r_ctl, &fh_type);
} else {
/* CT requests */
rc = fc_ct_fill(lport, did, fp, op, &r_ctl, &fh_type, &did);
}
if (rc) {
fc_frame_free(fp);
return NULL;
}
fc_fill_fc_hdr(fp, r_ctl, did, lport->port_id, fh_type,
FC_FCTL_REQ, 0);
return lport->tt.exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
}
EXPORT_SYMBOL(fc_elsct_send);
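/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller sending a fabric login (FLOGI) through the elsct_send hook
 * installed by fc_elsct_init() below. The response handler and error
 * handling are simplified, so the block is guarded out.
 */
#if 0
static void example_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
void *arg)
{
/* fc_els_resp_type() below yields a printable label for fp. */
}
static int example_send_flogi(struct fc_lport *lport)
{
struct fc_frame *fp;
fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
if (!fp)
return -ENOMEM;
/* FC_FID_FLOGI is the well-known fabric login address. */
if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_FLOGI,
example_flogi_resp, lport, 2000))
return -EIO;
return 0;
}
#endif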
/**
* fc_elsct_init() - Initialize the ELS/CT layer
* @lport: The local port to initialize the ELS/CT layer for
*/
int fc_elsct_init(struct fc_lport *lport)
{
if (!lport->tt.elsct_send)
lport->tt.elsct_send = fc_elsct_send;
return 0;
}
EXPORT_SYMBOL(fc_elsct_init);
/**
* fc_els_resp_type() - Return a string describing the ELS response
* @fp: The frame pointer or possible error code
*/
const char *fc_els_resp_type(struct fc_frame *fp)
{
const char *msg;
struct fc_frame_header *fh;
struct fc_ct_hdr *ct;
if (IS_ERR(fp)) {
switch (-PTR_ERR(fp)) {
case FC_NO_ERR:
msg = "response no error";
break;
case FC_EX_TIMEOUT:
msg = "response timeout";
break;
case FC_EX_CLOSED:
msg = "response closed";
break;
default:
msg = "response unknown error";
break;
}
} else {
fh = fc_frame_header_get(fp);
switch (fh->fh_type) {
case FC_TYPE_ELS:
switch (fc_frame_payload_op(fp)) {
case ELS_LS_ACC:
msg = "accept";
break;
case ELS_LS_RJT:
msg = "reject";
break;
default:
msg = "response unknown ELS";
break;
}
break;
case FC_TYPE_CT:
ct = fc_frame_payload_get(fp, sizeof(*ct));
if (ct) {
switch (ntohs(ct->ct_cmd)) {
case FC_FS_ACC:
msg = "CT accept";
break;
case FC_FS_RJT:
msg = "CT reject";
break;
default:
msg = "response unknown CT";
break;
}
} else {
msg = "short CT response";
}
break;
default:
msg = "response not ELS or CT";
break;
}
}
return msg;
}
|
gpl-2.0
|
zparallax/amplitude_kernel_tw_exynos
|
drivers/net/wireless/b43/radio_2055.c
|
10235
|
54026
|
/*
Broadcom B43 wireless driver
IEEE 802.11n PHY and radio device data tables
Copyright (c) 2008 Michael Buesch <m@bues.ch>
Copyright (c) 2010 Rafał Miłecki <zajec5@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.
*/
#include "b43.h"
#include "radio_2055.h"
#include "phy_common.h"
struct b2055_inittab_entry {
/* Value to write if we use the 5GHz band. */
u16 ghz5;
/* Value to write if we use the 2.4GHz band. */
u16 ghz2;
/* Flags */
u8 flags;
#define B2055_INITTAB_ENTRY_OK 0x01
#define B2055_INITTAB_UPLOAD 0x02
};
#define UPLOAD .flags = B2055_INITTAB_ENTRY_OK | B2055_INITTAB_UPLOAD
#define NOUPLOAD .flags = B2055_INITTAB_ENTRY_OK
static const struct b2055_inittab_entry b2055_inittab[] = {
[B2055_SP_PINPD] = { .ghz5 = 0x0080, .ghz2 = 0x0080, NOUPLOAD, },
[B2055_C1_SP_RSSI] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_C1_SP_PDMISC] = { .ghz5 = 0x0027, .ghz2 = 0x0027, NOUPLOAD, },
[B2055_C2_SP_RSSI] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_C2_SP_PDMISC] = { .ghz5 = 0x0027, .ghz2 = 0x0027, NOUPLOAD, },
[B2055_C1_SP_RXGC1] = { .ghz5 = 0x007F, .ghz2 = 0x007F, UPLOAD, },
[B2055_C1_SP_RXGC2] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
[B2055_C2_SP_RXGC1] = { .ghz5 = 0x007F, .ghz2 = 0x007F, UPLOAD, },
[B2055_C2_SP_RXGC2] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
[B2055_C1_SP_LPFBWSEL] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
[B2055_C2_SP_LPFBWSEL] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
[B2055_C1_SP_TXGC1] = { .ghz5 = 0x004F, .ghz2 = 0x004F, UPLOAD, },
[B2055_C1_SP_TXGC2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, UPLOAD, },
[B2055_C2_SP_TXGC1] = { .ghz5 = 0x004F, .ghz2 = 0x004F, UPLOAD, },
[B2055_C2_SP_TXGC2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, UPLOAD, },
[B2055_MASTER1] = { .ghz5 = 0x00D0, .ghz2 = 0x00D0, NOUPLOAD, },
[B2055_MASTER2] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
[B2055_PD_LGEN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_PD_PLLTS] = { .ghz5 = 0x0040, .ghz2 = 0x0040, NOUPLOAD, },
[B2055_C1_PD_LGBUF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_C1_PD_TX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_C1_PD_RXTX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_C1_PD_RSSIMISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_C2_PD_LGBUF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_C2_PD_TX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_C2_PD_RXTX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_C2_PD_RSSIMISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_PWRDET_LGEN] = { .ghz5 = 0x00C0, .ghz2 = 0x00C0, NOUPLOAD, },
[B2055_C1_PWRDET_LGBUF] = { .ghz5 = 0x00FF, .ghz2 = 0x00FF, NOUPLOAD, },
[B2055_C1_PWRDET_RXTX] = { .ghz5 = 0x00C0, .ghz2 = 0x00C0, NOUPLOAD, },
[B2055_C2_PWRDET_LGBUF] = { .ghz5 = 0x00FF, .ghz2 = 0x00FF, NOUPLOAD, },
[B2055_C2_PWRDET_RXTX] = { .ghz5 = 0x00C0, .ghz2 = 0x00C0, NOUPLOAD, },
[B2055_RRCCAL_CS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_RRCCAL_NOPTSEL] = { .ghz5 = 0x002C, .ghz2 = 0x002C, NOUPLOAD, },
[B2055_CAL_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_CAL_COUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_CAL_COUT2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_CAL_CVARCTL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_CAL_RVARCTL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_CAL_LPOCTL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_CAL_TS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_CAL_RCCALRTS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_CAL_RCALRTS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_PADDRV] = { .ghz5 = 0x00A4, .ghz2 = 0x00A4, NOUPLOAD, },
[B2055_XOCTL1] = { .ghz5 = 0x0038, .ghz2 = 0x0038, NOUPLOAD, },
[B2055_XOCTL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_XOREGUL] = { .ghz5 = 0x0004, .ghz2 = 0x0004, UPLOAD, },
[B2055_XOMISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_PLL_LFC1] = { .ghz5 = 0x000A, .ghz2 = 0x000A, NOUPLOAD, },
[B2055_PLL_CALVTH] = { .ghz5 = 0x0087, .ghz2 = 0x0087, NOUPLOAD, },
[B2055_PLL_LFC2] = { .ghz5 = 0x0009, .ghz2 = 0x0009, NOUPLOAD, },
[B2055_PLL_REF] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
[B2055_PLL_LFR1] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
[B2055_PLL_PFDCP] = { .ghz5 = 0x0018, .ghz2 = 0x0018, UPLOAD, },
[B2055_PLL_IDAC_CPOPAMP] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
[B2055_PLL_CPREG] = { .ghz5 = 0x0004, .ghz2 = 0x0004, UPLOAD, },
[B2055_PLL_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
[B2055_RF_PLLMOD0] = { .ghz5 = 0x009E, .ghz2 = 0x009E, NOUPLOAD, },
[B2055_RF_PLLMOD1] = { .ghz5 = 0x0009, .ghz2 = 0x0009, NOUPLOAD, },
[B2055_RF_MMDIDAC1] = { .ghz5 = 0x00C8, .ghz2 = 0x00C8, UPLOAD, },
[B2055_RF_MMDIDAC0] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
[B2055_RF_MMDSP] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_VCO_CAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_VCO_CAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_VCO_CAL3] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
[B2055_VCO_CAL4] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
[B2055_VCO_CAL5] = { .ghz5 = 0x0096, .ghz2 = 0x0096, NOUPLOAD, },
[B2055_VCO_CAL6] = { .ghz5 = 0x003E, .ghz2 = 0x003E, NOUPLOAD, },
[B2055_VCO_CAL7] = { .ghz5 = 0x003E, .ghz2 = 0x003E, NOUPLOAD, },
[B2055_VCO_CAL8] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
[B2055_VCO_CAL9] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
[B2055_VCO_CAL10] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
[B2055_VCO_CAL11] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
[B2055_VCO_CAL12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_VCO_CAL13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_VCO_CAL14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_VCO_CAL15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_VCO_CAL16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_VCO_KVCO] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
[B2055_VCO_CAPTAIL] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
[B2055_VCO_IDACVCO] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
[B2055_VCO_REG] = { .ghz5 = 0x0084, .ghz2 = 0x0084, UPLOAD, },
[B2055_PLL_RFVTH] = { .ghz5 = 0x00C3, .ghz2 = 0x00C3, NOUPLOAD, },
[B2055_LGBUF_CENBUF] = { .ghz5 = 0x008F, .ghz2 = 0x008F, NOUPLOAD, },
[B2055_LGEN_TUNE1] = { .ghz5 = 0x00FF, .ghz2 = 0x00FF, NOUPLOAD, },
[B2055_LGEN_TUNE2] = { .ghz5 = 0x00FF, .ghz2 = 0x00FF, NOUPLOAD, },
[B2055_LGEN_IDAC1] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
[B2055_LGEN_IDAC2] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
[B2055_LGEN_BIASC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_LGEN_BIASIDAC] = { .ghz5 = 0x00CC, .ghz2 = 0x00CC, NOUPLOAD, },
[B2055_LGEN_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
[B2055_LGEN_DIV] = { .ghz5 = 0x0080, .ghz2 = 0x0080, NOUPLOAD, },
[B2055_LGEN_SPARE2] = { .ghz5 = 0x0080, .ghz2 = 0x0080, NOUPLOAD, },
[B2055_C1_LGBUF_ATUNE] = { .ghz5 = 0x00F8, .ghz2 = 0x00F8, NOUPLOAD, },
[B2055_C1_LGBUF_GTUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
[B2055_C1_LGBUF_DIV] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
[B2055_C1_LGBUF_AIDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0008, UPLOAD, },
[B2055_C1_LGBUF_GIDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
[B2055_C1_LGBUF_IDACFO] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_C1_LGBUF_SPARE] = { .ghz5 = 0x0001, .ghz2 = 0x0001, UPLOAD, },
[B2055_C1_RX_RFSPC1] = { .ghz5 = 0x008A, .ghz2 = 0x008A, NOUPLOAD, },
[B2055_C1_RX_RFR1] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
[B2055_C1_RX_RFR2] = { .ghz5 = 0x0083, .ghz2 = 0x0083, NOUPLOAD, },
[B2055_C1_RX_RFRCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
[B2055_C1_RX_BB_BLCMP] = { .ghz5 = 0x00A0, .ghz2 = 0x00A0, NOUPLOAD, },
[B2055_C1_RX_BB_LPF] = { .ghz5 = 0x000A, .ghz2 = 0x000A, NOUPLOAD, },
[B2055_C1_RX_BB_MIDACHP] = { .ghz5 = 0x0087, .ghz2 = 0x0087, UPLOAD, },
[B2055_C1_RX_BB_VGA1IDAC] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
[B2055_C1_RX_BB_VGA2IDAC] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
[B2055_C1_RX_BB_VGA3IDAC] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
[B2055_C1_RX_BB_BUFOCTL] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
[B2055_C1_RX_BB_RCCALCTL] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
[B2055_C1_RX_BB_RSSICTL1] = { .ghz5 = 0x006A, .ghz2 = 0x006A, UPLOAD, },
[B2055_C1_RX_BB_RSSICTL2] = { .ghz5 = 0x00AB, .ghz2 = 0x00AB, UPLOAD, },
[B2055_C1_RX_BB_RSSICTL3] = { .ghz5 = 0x0013, .ghz2 = 0x0013, UPLOAD, },
[B2055_C1_RX_BB_RSSICTL4] = { .ghz5 = 0x00C1, .ghz2 = 0x00C1, UPLOAD, },
[B2055_C1_RX_BB_RSSICTL5] = { .ghz5 = 0x00AA, .ghz2 = 0x00AA, UPLOAD, },
[B2055_C1_RX_BB_REG] = { .ghz5 = 0x0087, .ghz2 = 0x0087, UPLOAD, },
[B2055_C1_RX_BB_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_C1_RX_TXBBRCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
[B2055_C1_TX_RF_SPGA] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
[B2055_C1_TX_RF_SPAD] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
[B2055_C1_TX_RF_CNTPGA1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
[B2055_C1_TX_RF_CNTPAD1] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
[B2055_C1_TX_RF_PGAIDAC] = { .ghz5 = 0x0097, .ghz2 = 0x0097, UPLOAD, },
[B2055_C1_TX_PGAPADTN] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
[B2055_C1_TX_PADIDAC1] = { .ghz5 = 0x0014, .ghz2 = 0x0014, UPLOAD, },
[B2055_C1_TX_PADIDAC2] = { .ghz5 = 0x0033, .ghz2 = 0x0033, NOUPLOAD, },
[B2055_C1_TX_MXBGTRIM] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
[B2055_C1_TX_RF_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
[B2055_C1_TX_RF_PADTSSI1] = { .ghz5 = 0x0003, .ghz2 = 0x0003, UPLOAD, },
[B2055_C1_TX_RF_PADTSSI2] = { .ghz5 = 0x000A, .ghz2 = 0x000A, NOUPLOAD, },
[B2055_C1_TX_RF_SPARE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, UPLOAD, },
[B2055_C1_TX_RF_IQCAL1] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
[B2055_C1_TX_RF_IQCAL2] = { .ghz5 = 0x00A4, .ghz2 = 0x00A4, NOUPLOAD, },
[B2055_C1_TXBB_RCCAL] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
[B2055_C1_TXBB_LPF1] = { .ghz5 = 0x0028, .ghz2 = 0x0028, NOUPLOAD, },
[B2055_C1_TX_VOSCNCL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_C1_TX_LPF_MXGMIDAC] = { .ghz5 = 0x004A, .ghz2 = 0x004A, NOUPLOAD, },
[B2055_C1_TX_BB_MXGM] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_C2_LGBUF_ATUNE] = { .ghz5 = 0x00F8, .ghz2 = 0x00F8, NOUPLOAD, },
[B2055_C2_LGBUF_GTUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
[B2055_C2_LGBUF_DIV] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
[B2055_C2_LGBUF_AIDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0008, UPLOAD, },
[B2055_C2_LGBUF_GIDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
[B2055_C2_LGBUF_IDACFO] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_C2_LGBUF_SPARE] = { .ghz5 = 0x0001, .ghz2 = 0x0001, UPLOAD, },
[B2055_C2_RX_RFSPC1] = { .ghz5 = 0x008A, .ghz2 = 0x008A, NOUPLOAD, },
[B2055_C2_RX_RFR1] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
[B2055_C2_RX_RFR2] = { .ghz5 = 0x0083, .ghz2 = 0x0083, NOUPLOAD, },
[B2055_C2_RX_RFRCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
[B2055_C2_RX_BB_BLCMP] = { .ghz5 = 0x00A0, .ghz2 = 0x00A0, NOUPLOAD, },
[B2055_C2_RX_BB_LPF] = { .ghz5 = 0x000A, .ghz2 = 0x000A, NOUPLOAD, },
[B2055_C2_RX_BB_MIDACHP] = { .ghz5 = 0x0087, .ghz2 = 0x0087, UPLOAD, },
[B2055_C2_RX_BB_VGA1IDAC] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
[B2055_C2_RX_BB_VGA2IDAC] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
[B2055_C2_RX_BB_VGA3IDAC] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
[B2055_C2_RX_BB_BUFOCTL] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
[B2055_C2_RX_BB_RCCALCTL] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
[B2055_C2_RX_BB_RSSICTL1] = { .ghz5 = 0x006A, .ghz2 = 0x006A, UPLOAD, },
[B2055_C2_RX_BB_RSSICTL2] = { .ghz5 = 0x00AB, .ghz2 = 0x00AB, UPLOAD, },
[B2055_C2_RX_BB_RSSICTL3] = { .ghz5 = 0x0013, .ghz2 = 0x0013, UPLOAD, },
[B2055_C2_RX_BB_RSSICTL4] = { .ghz5 = 0x00C1, .ghz2 = 0x00C1, UPLOAD, },
[B2055_C2_RX_BB_RSSICTL5] = { .ghz5 = 0x00AA, .ghz2 = 0x00AA, UPLOAD, },
[B2055_C2_RX_BB_REG] = { .ghz5 = 0x0087, .ghz2 = 0x0087, UPLOAD, },
[B2055_C2_RX_BB_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_C2_RX_TXBBRCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
[B2055_C2_TX_RF_SPGA] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
[B2055_C2_TX_RF_SPAD] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
[B2055_C2_TX_RF_CNTPGA1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
[B2055_C2_TX_RF_CNTPAD1] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
[B2055_C2_TX_RF_PGAIDAC] = { .ghz5 = 0x0097, .ghz2 = 0x0097, UPLOAD, },
[B2055_C2_TX_PGAPADTN] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
[B2055_C2_TX_PADIDAC1] = { .ghz5 = 0x0014, .ghz2 = 0x0014, UPLOAD, },
[B2055_C2_TX_PADIDAC2] = { .ghz5 = 0x0033, .ghz2 = 0x0033, NOUPLOAD, },
[B2055_C2_TX_MXBGTRIM] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
[B2055_C2_TX_RF_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
[B2055_C2_TX_RF_PADTSSI1] = { .ghz5 = 0x0003, .ghz2 = 0x0003, UPLOAD, },
[B2055_C2_TX_RF_PADTSSI2] = { .ghz5 = 0x000A, .ghz2 = 0x000A, NOUPLOAD, },
[B2055_C2_TX_RF_SPARE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, UPLOAD, },
[B2055_C2_TX_RF_IQCAL1] = { .ghz5 = 0x002A, .ghz2 = 0x002A, NOUPLOAD, },
[B2055_C2_TX_RF_IQCAL2] = { .ghz5 = 0x00A4, .ghz2 = 0x00A4, NOUPLOAD, },
[B2055_C2_TXBB_RCCAL] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
[B2055_C2_TXBB_LPF1] = { .ghz5 = 0x0028, .ghz2 = 0x0028, NOUPLOAD, },
[B2055_C2_TX_VOSCNCL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_C2_TX_LPF_MXGMIDAC] = { .ghz5 = 0x004A, .ghz2 = 0x004A, NOUPLOAD, },
[B2055_C2_TX_BB_MXGM] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_PRG_GCHP21] = { .ghz5 = 0x0071, .ghz2 = 0x0071, NOUPLOAD, },
[B2055_PRG_GCHP22] = { .ghz5 = 0x0072, .ghz2 = 0x0072, NOUPLOAD, },
[B2055_PRG_GCHP23] = { .ghz5 = 0x0073, .ghz2 = 0x0073, NOUPLOAD, },
[B2055_PRG_GCHP24] = { .ghz5 = 0x0074, .ghz2 = 0x0074, NOUPLOAD, },
[B2055_PRG_GCHP25] = { .ghz5 = 0x0075, .ghz2 = 0x0075, NOUPLOAD, },
[B2055_PRG_GCHP26] = { .ghz5 = 0x0076, .ghz2 = 0x0076, NOUPLOAD, },
[B2055_PRG_GCHP27] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
[B2055_PRG_GCHP28] = { .ghz5 = 0x0078, .ghz2 = 0x0078, NOUPLOAD, },
[B2055_PRG_GCHP29] = { .ghz5 = 0x0079, .ghz2 = 0x0079, NOUPLOAD, },
[B2055_PRG_GCHP30] = { .ghz5 = 0x007A, .ghz2 = 0x007A, NOUPLOAD, },
[0xC7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[0xC8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[0xC9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[0xCA] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[0xCB] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[0xCC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_C1_LNA_GAINBST] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[0xCE] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
[0xCF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[0xD0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[0xD1] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
[B2055_C1_B0NB_RSSIVCM] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
[0xD3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[0xD4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[0xD5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_C1_GENSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[0xD7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[0xD8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_C2_LNA_GAINBST] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[0xDA] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
[0xDB] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[0xDC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[0xDD] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
[B2055_C2_B0NB_RSSIVCM] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
[0xDF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[0xE0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[0xE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
[B2055_C2_GENSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
};
#define RADIOREGS(r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, \
r12, r13, r14, r15, r16, r17, r18, r19, r20, r21) \
.radio_pll_ref = r0, \
.radio_rf_pllmod0 = r1, \
.radio_rf_pllmod1 = r2, \
.radio_vco_captail = r3, \
.radio_vco_cal1 = r4, \
.radio_vco_cal2 = r5, \
.radio_pll_lfc1 = r6, \
.radio_pll_lfr1 = r7, \
.radio_pll_lfc2 = r8, \
.radio_lgbuf_cenbuf = r9, \
.radio_lgen_tune1 = r10, \
.radio_lgen_tune2 = r11, \
.radio_c1_lgbuf_atune = r12, \
.radio_c1_lgbuf_gtune = r13, \
.radio_c1_rx_rfr1 = r14, \
.radio_c1_tx_pgapadtn = r15, \
.radio_c1_tx_mxbgtrim = r16, \
.radio_c2_lgbuf_atune = r17, \
.radio_c2_lgbuf_gtune = r18, \
.radio_c2_rx_rfr1 = r19, \
.radio_c2_tx_pgapadtn = r20, \
.radio_c2_tx_mxbgtrim = r21
#define PHYREGS(r0, r1, r2, r3, r4, r5) \
.phy_regs.phy_bw1a = r0, \
.phy_regs.phy_bw2 = r1, \
.phy_regs.phy_bw3 = r2, \
.phy_regs.phy_bw4 = r3, \
.phy_regs.phy_bw5 = r4, \
.phy_regs.phy_bw6 = r5
static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] = {
{ .channel = 184,
.freq = 4920, /* MHz */
.unk2 = 3280,
RADIOREGS(0x71, 0xEC, 0x01, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xFF, 0xFF, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
PHYREGS(0x07B4, 0x07B0, 0x07AC, 0x0214, 0x0215, 0x0216),
},
{ .channel = 186,
.freq = 4930, /* MHz */
.unk2 = 3287,
RADIOREGS(0x71, 0xED, 0x01, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xFF, 0xFF, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
PHYREGS(0x07B8, 0x07B4, 0x07B0, 0x0213, 0x0214, 0x0215),
},
{ .channel = 188,
.freq = 4940, /* MHz */
.unk2 = 3293,
RADIOREGS(0x71, 0xEE, 0x01, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
PHYREGS(0x07BC, 0x07B8, 0x07B4, 0x0212, 0x0213, 0x0214),
},
{ .channel = 190,
.freq = 4950, /* MHz */
.unk2 = 3300,
RADIOREGS(0x71, 0xEF, 0x01, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
PHYREGS(0x07C0, 0x07BC, 0x07B8, 0x0211, 0x0212, 0x0213),
},
{ .channel = 192,
.freq = 4960, /* MHz */
.unk2 = 3307,
RADIOREGS(0x71, 0xF0, 0x01, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
PHYREGS(0x07C4, 0x07C0, 0x07BC, 0x020F, 0x0211, 0x0212),
},
{ .channel = 194,
.freq = 4970, /* MHz */
.unk2 = 3313,
RADIOREGS(0x71, 0xF1, 0x01, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
PHYREGS(0x07C8, 0x07C4, 0x07C0, 0x020E, 0x020F, 0x0211),
},
{ .channel = 196,
.freq = 4980, /* MHz */
.unk2 = 3320,
RADIOREGS(0x71, 0xF2, 0x01, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
PHYREGS(0x07CC, 0x07C8, 0x07C4, 0x020D, 0x020E, 0x020F),
},
{ .channel = 198,
.freq = 4990, /* MHz */
.unk2 = 3327,
RADIOREGS(0x71, 0xF3, 0x01, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
PHYREGS(0x07D0, 0x07CC, 0x07C8, 0x020C, 0x020D, 0x020E),
},
{ .channel = 200,
.freq = 5000, /* MHz */
.unk2 = 3333,
RADIOREGS(0x71, 0xF4, 0x01, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
PHYREGS(0x07D4, 0x07D0, 0x07CC, 0x020B, 0x020C, 0x020D),
},
{ .channel = 202,
.freq = 5010, /* MHz */
.unk2 = 3340,
RADIOREGS(0x71, 0xF5, 0x01, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
PHYREGS(0x07D8, 0x07D4, 0x07D0, 0x020A, 0x020B, 0x020C),
},
{ .channel = 204,
.freq = 5020, /* MHz */
.unk2 = 3347,
RADIOREGS(0x71, 0xF6, 0x01, 0x0E, 0xF7, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
PHYREGS(0x07DC, 0x07D8, 0x07D4, 0x0209, 0x020A, 0x020B),
},
{ .channel = 206,
.freq = 5030, /* MHz */
.unk2 = 3353,
RADIOREGS(0x71, 0xF7, 0x01, 0x0E, 0xF7, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
PHYREGS(0x07E0, 0x07DC, 0x07D8, 0x0208, 0x0209, 0x020A),
},
{ .channel = 208,
.freq = 5040, /* MHz */
.unk2 = 3360,
RADIOREGS(0x71, 0xF8, 0x01, 0x0D, 0xEF, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
PHYREGS(0x07E4, 0x07E0, 0x07DC, 0x0207, 0x0208, 0x0209),
},
{ .channel = 210,
.freq = 5050, /* MHz */
.unk2 = 3367,
RADIOREGS(0x71, 0xF9, 0x01, 0x0D, 0xEF, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F,
0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
PHYREGS(0x07E8, 0x07E4, 0x07E0, 0x0206, 0x0207, 0x0208),
},
{ .channel = 212,
.freq = 5060, /* MHz */
.unk2 = 3373,
RADIOREGS(0x71, 0xFA, 0x01, 0x0D, 0xE6, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xBB, 0xBB, 0xFF, 0x00, 0x0E, 0x0F,
0x8E, 0xFF, 0x00, 0x0E, 0x0F, 0x8E),
PHYREGS(0x07EC, 0x07E8, 0x07E4, 0x0205, 0x0206, 0x0207),
},
{ .channel = 214,
.freq = 5070, /* MHz */
.unk2 = 3380,
RADIOREGS(0x71, 0xFB, 0x01, 0x0D, 0xE6, 0x01, 0x04, 0x0A,
0x00, 0x8F, 0xBB, 0xBB, 0xFF, 0x00, 0x0E, 0x0F,
0x8E, 0xFF, 0x00, 0x0E, 0x0F, 0x8E),
PHYREGS(0x07F0, 0x07EC, 0x07E8, 0x0204, 0x0205, 0x0206),
},
{ .channel = 216,
.freq = 5080, /* MHz */
.unk2 = 3387,
RADIOREGS(0x71, 0xFC, 0x01, 0x0D, 0xDE, 0x01, 0x04, 0x0A,
0x00, 0x8E, 0xBB, 0xBB, 0xEE, 0x00, 0x0E, 0x0F,
0x8D, 0xEE, 0x00, 0x0E, 0x0F, 0x8D),
PHYREGS(0x07F4, 0x07F0, 0x07EC, 0x0203, 0x0204, 0x0205),
},
{ .channel = 218,
.freq = 5090, /* MHz */
.unk2 = 3393,
RADIOREGS(0x71, 0xFD, 0x01, 0x0D, 0xDE, 0x01, 0x04, 0x0A,
0x00, 0x8E, 0xBB, 0xBB, 0xEE, 0x00, 0x0E, 0x0F,
0x8D, 0xEE, 0x00, 0x0E, 0x0F, 0x8D),
PHYREGS(0x07F8, 0x07F4, 0x07F0, 0x0202, 0x0203, 0x0204),
},
{ .channel = 220,
.freq = 5100, /* MHz */
.unk2 = 3400,
RADIOREGS(0x71, 0xFE, 0x01, 0x0C, 0xD6, 0x01, 0x04, 0x0A,
0x00, 0x8E, 0xAA, 0xAA, 0xEE, 0x00, 0x0D, 0x0F,
0x8D, 0xEE, 0x00, 0x0D, 0x0F, 0x8D),
PHYREGS(0x07FC, 0x07F8, 0x07F4, 0x0201, 0x0202, 0x0203),
},
{ .channel = 222,
.freq = 5110, /* MHz */
.unk2 = 3407,
RADIOREGS(0x71, 0xFF, 0x01, 0x0C, 0xD6, 0x01, 0x04, 0x0A,
0x00, 0x8E, 0xAA, 0xAA, 0xEE, 0x00, 0x0D, 0x0F,
0x8D, 0xEE, 0x00, 0x0D, 0x0F, 0x8D),
PHYREGS(0x0800, 0x07FC, 0x07F8, 0x0200, 0x0201, 0x0202),
},
{ .channel = 224,
.freq = 5120, /* MHz */
.unk2 = 3413,
RADIOREGS(0x71, 0x00, 0x02, 0x0C, 0xCE, 0x01, 0x04, 0x0A,
0x00, 0x8D, 0xAA, 0xAA, 0xDD, 0x00, 0x0D, 0x0F,
0x8C, 0xDD, 0x00, 0x0D, 0x0F, 0x8C),
PHYREGS(0x0804, 0x0800, 0x07FC, 0x01FF, 0x0200, 0x0201),
},
{ .channel = 226,
.freq = 5130, /* MHz */
.unk2 = 3420,
RADIOREGS(0x71, 0x01, 0x02, 0x0C, 0xCE, 0x01, 0x04, 0x0A,
0x00, 0x8D, 0xAA, 0xAA, 0xDD, 0x00, 0x0D, 0x0F,
0x8C, 0xDD, 0x00, 0x0D, 0x0F, 0x8C),
PHYREGS(0x0808, 0x0804, 0x0800, 0x01FE, 0x01FF, 0x0200),
},
{ .channel = 228,
.freq = 5140, /* MHz */
.unk2 = 3427,
RADIOREGS(0x71, 0x02, 0x02, 0x0C, 0xC6, 0x01, 0x04, 0x0A,
0x00, 0x8D, 0x99, 0x99, 0xDD, 0x00, 0x0C, 0x0E,
0x8B, 0xDD, 0x00, 0x0C, 0x0E, 0x8B),
PHYREGS(0x080C, 0x0808, 0x0804, 0x01FD, 0x01FE, 0x01FF),
},
{ .channel = 32,
.freq = 5160, /* MHz */
.unk2 = 3440,
RADIOREGS(0x71, 0x04, 0x02, 0x0B, 0xBE, 0x01, 0x04, 0x0A,
0x00, 0x8C, 0x99, 0x99, 0xCC, 0x00, 0x0B, 0x0D,
0x8A, 0xCC, 0x00, 0x0B, 0x0D, 0x8A),
PHYREGS(0x0814, 0x0810, 0x080C, 0x01FB, 0x01FC, 0x01FD),
},
{ .channel = 34,
.freq = 5170, /* MHz */
.unk2 = 3447,
RADIOREGS(0x71, 0x05, 0x02, 0x0B, 0xBE, 0x01, 0x04, 0x0A,
0x00, 0x8C, 0x99, 0x99, 0xCC, 0x00, 0x0B, 0x0D,
0x8A, 0xCC, 0x00, 0x0B, 0x0D, 0x8A),
PHYREGS(0x0818, 0x0814, 0x0810, 0x01FA, 0x01FB, 0x01FC),
},
{ .channel = 36,
.freq = 5180, /* MHz */
.unk2 = 3453,
RADIOREGS(0x71, 0x06, 0x02, 0x0B, 0xB6, 0x01, 0x04, 0x0A,
0x00, 0x8C, 0x88, 0x88, 0xCC, 0x00, 0x0B, 0x0C,
0x89, 0xCC, 0x00, 0x0B, 0x0C, 0x89),
PHYREGS(0x081C, 0x0818, 0x0814, 0x01F9, 0x01FA, 0x01FB),
},
{ .channel = 38,
.freq = 5190, /* MHz */
.unk2 = 3460,
RADIOREGS(0x71, 0x07, 0x02, 0x0B, 0xB6, 0x01, 0x04, 0x0A,
0x00, 0x8C, 0x88, 0x88, 0xCC, 0x00, 0x0B, 0x0C,
0x89, 0xCC, 0x00, 0x0B, 0x0C, 0x89),
PHYREGS(0x0820, 0x081C, 0x0818, 0x01F8, 0x01F9, 0x01FA),
},
{ .channel = 40,
.freq = 5200, /* MHz */
.unk2 = 3467,
RADIOREGS(0x71, 0x08, 0x02, 0x0B, 0xAF, 0x01, 0x04, 0x0A,
0x00, 0x8B, 0x88, 0x88, 0xBB, 0x00, 0x0A, 0x0B,
0x89, 0xBB, 0x00, 0x0A, 0x0B, 0x89),
PHYREGS(0x0824, 0x0820, 0x081C, 0x01F7, 0x01F8, 0x01F9),
},
{ .channel = 42,
.freq = 5210, /* MHz */
.unk2 = 3473,
RADIOREGS(0x71, 0x09, 0x02, 0x0B, 0xAF, 0x01, 0x04, 0x0A,
0x00, 0x8B, 0x88, 0x88, 0xBB, 0x00, 0x0A, 0x0B,
0x89, 0xBB, 0x00, 0x0A, 0x0B, 0x89),
PHYREGS(0x0828, 0x0824, 0x0820, 0x01F6, 0x01F7, 0x01F8),
},
{ .channel = 44,
.freq = 5220, /* MHz */
.unk2 = 3480,
RADIOREGS(0x71, 0x0A, 0x02, 0x0A, 0xA7, 0x01, 0x04, 0x0A,
0x00, 0x8B, 0x77, 0x77, 0xBB, 0x00, 0x09, 0x0A,
0x88, 0xBB, 0x00, 0x09, 0x0A, 0x88),
PHYREGS(0x082C, 0x0828, 0x0824, 0x01F5, 0x01F6, 0x01F7),
},
{ .channel = 46,
.freq = 5230, /* MHz */
.unk2 = 3487,
RADIOREGS(0x71, 0x0B, 0x02, 0x0A, 0xA7, 0x01, 0x04, 0x0A,
0x00, 0x8B, 0x77, 0x77, 0xBB, 0x00, 0x09, 0x0A,
0x88, 0xBB, 0x00, 0x09, 0x0A, 0x88),
PHYREGS(0x0830, 0x082C, 0x0828, 0x01F4, 0x01F5, 0x01F6),
},
{ .channel = 48,
.freq = 5240, /* MHz */
.unk2 = 3493,
RADIOREGS(0x71, 0x0C, 0x02, 0x0A, 0xA0, 0x01, 0x04, 0x0A,
0x00, 0x8A, 0x77, 0x77, 0xAA, 0x00, 0x09, 0x0A,
0x87, 0xAA, 0x00, 0x09, 0x0A, 0x87),
PHYREGS(0x0834, 0x0830, 0x082C, 0x01F3, 0x01F4, 0x01F5),
},
{ .channel = 50,
.freq = 5250, /* MHz */
.unk2 = 3500,
RADIOREGS(0x71, 0x0D, 0x02, 0x0A, 0xA0, 0x01, 0x04, 0x0A,
0x00, 0x8A, 0x77, 0x77, 0xAA, 0x00, 0x09, 0x0A,
0x87, 0xAA, 0x00, 0x09, 0x0A, 0x87),
PHYREGS(0x0838, 0x0834, 0x0830, 0x01F2, 0x01F3, 0x01F4),
},
{ .channel = 52,
.freq = 5260, /* MHz */
.unk2 = 3507,
RADIOREGS(0x71, 0x0E, 0x02, 0x0A, 0x98, 0x01, 0x04, 0x0A,
0x00, 0x8A, 0x66, 0x66, 0xAA, 0x00, 0x08, 0x09,
0x87, 0xAA, 0x00, 0x08, 0x09, 0x87),
PHYREGS(0x083C, 0x0838, 0x0834, 0x01F1, 0x01F2, 0x01F3),
},
{ .channel = 54,
.freq = 5270, /* MHz */
.unk2 = 3513,
RADIOREGS(0x71, 0x0F, 0x02, 0x0A, 0x98, 0x01, 0x04, 0x0A,
0x00, 0x8A, 0x66, 0x66, 0xAA, 0x00, 0x08, 0x09,
0x87, 0xAA, 0x00, 0x08, 0x09, 0x87),
PHYREGS(0x0840, 0x083C, 0x0838, 0x01F0, 0x01F1, 0x01F2),
},
{ .channel = 56,
.freq = 5280, /* MHz */
.unk2 = 3520,
RADIOREGS(0x71, 0x10, 0x02, 0x09, 0x91, 0x01, 0x04, 0x0A,
0x00, 0x89, 0x66, 0x66, 0x99, 0x00, 0x08, 0x08,
0x86, 0x99, 0x00, 0x08, 0x08, 0x86),
PHYREGS(0x0844, 0x0840, 0x083C, 0x01F0, 0x01F0, 0x01F1),
},
{ .channel = 58,
.freq = 5290, /* MHz */
.unk2 = 3527,
RADIOREGS(0x71, 0x11, 0x02, 0x09, 0x91, 0x01, 0x04, 0x0A,
0x00, 0x89, 0x66, 0x66, 0x99, 0x00, 0x08, 0x08,
0x86, 0x99, 0x00, 0x08, 0x08, 0x86),
PHYREGS(0x0848, 0x0844, 0x0840, 0x01EF, 0x01F0, 0x01F0),
},
{ .channel = 60,
.freq = 5300, /* MHz */
.unk2 = 3533,
RADIOREGS(0x71, 0x12, 0x02, 0x09, 0x8A, 0x01, 0x04, 0x0A,
0x00, 0x89, 0x55, 0x55, 0x99, 0x00, 0x08, 0x07,
0x85, 0x99, 0x00, 0x08, 0x07, 0x85),
PHYREGS(0x084C, 0x0848, 0x0844, 0x01EE, 0x01EF, 0x01F0),
},
{ .channel = 62,
.freq = 5310, /* MHz */
.unk2 = 3540,
RADIOREGS(0x71, 0x13, 0x02, 0x09, 0x8A, 0x01, 0x04, 0x0A,
0x00, 0x89, 0x55, 0x55, 0x99, 0x00, 0x08, 0x07,
0x85, 0x99, 0x00, 0x08, 0x07, 0x85),
PHYREGS(0x0850, 0x084C, 0x0848, 0x01ED, 0x01EE, 0x01EF),
},
{ .channel = 64,
.freq = 5320, /* MHz */
.unk2 = 3547,
RADIOREGS(0x71, 0x14, 0x02, 0x09, 0x83, 0x01, 0x04, 0x0A,
0x00, 0x88, 0x55, 0x55, 0x88, 0x00, 0x07, 0x07,
0x84, 0x88, 0x00, 0x07, 0x07, 0x84),
PHYREGS(0x0854, 0x0850, 0x084C, 0x01EC, 0x01ED, 0x01EE),
},
{ .channel = 66,
.freq = 5330, /* MHz */
.unk2 = 3553,
RADIOREGS(0x71, 0x15, 0x02, 0x09, 0x83, 0x01, 0x04, 0x0A,
0x00, 0x88, 0x55, 0x55, 0x88, 0x00, 0x07, 0x07,
0x84, 0x88, 0x00, 0x07, 0x07, 0x84),
PHYREGS(0x0858, 0x0854, 0x0850, 0x01EB, 0x01EC, 0x01ED),
},
{ .channel = 68,
.freq = 5340, /* MHz */
.unk2 = 3560,
RADIOREGS(0x71, 0x16, 0x02, 0x08, 0x7C, 0x01, 0x04, 0x0A,
0x00, 0x88, 0x44, 0x44, 0x88, 0x00, 0x07, 0x06,
0x84, 0x88, 0x00, 0x07, 0x06, 0x84),
PHYREGS(0x085C, 0x0858, 0x0854, 0x01EA, 0x01EB, 0x01EC),
},
{ .channel = 70,
.freq = 5350, /* MHz */
.unk2 = 3567,
RADIOREGS(0x71, 0x17, 0x02, 0x08, 0x7C, 0x01, 0x04, 0x0A,
0x00, 0x88, 0x44, 0x44, 0x88, 0x00, 0x07, 0x06,
0x84, 0x88, 0x00, 0x07, 0x06, 0x84),
PHYREGS(0x0860, 0x085C, 0x0858, 0x01E9, 0x01EA, 0x01EB),
},
{ .channel = 72,
.freq = 5360, /* MHz */
.unk2 = 3573,
RADIOREGS(0x71, 0x18, 0x02, 0x08, 0x75, 0x01, 0x04, 0x0A,
0x00, 0x87, 0x44, 0x44, 0x77, 0x00, 0x06, 0x05,
0x83, 0x77, 0x00, 0x06, 0x05, 0x83),
PHYREGS(0x0864, 0x0860, 0x085C, 0x01E8, 0x01E9, 0x01EA),
},
{ .channel = 74,
.freq = 5370, /* MHz */
.unk2 = 3580,
RADIOREGS(0x71, 0x19, 0x02, 0x08, 0x75, 0x01, 0x04, 0x0A,
0x00, 0x87, 0x44, 0x44, 0x77, 0x00, 0x06, 0x05,
0x83, 0x77, 0x00, 0x06, 0x05, 0x83),
PHYREGS(0x0868, 0x0864, 0x0860, 0x01E7, 0x01E8, 0x01E9),
},
{ .channel = 76,
.freq = 5380, /* MHz */
.unk2 = 3587,
RADIOREGS(0x71, 0x1A, 0x02, 0x08, 0x6E, 0x01, 0x04, 0x0A,
0x00, 0x87, 0x33, 0x33, 0x77, 0x00, 0x06, 0x04,
0x82, 0x77, 0x00, 0x06, 0x04, 0x82),
PHYREGS(0x086C, 0x0868, 0x0864, 0x01E6, 0x01E7, 0x01E8),
},
{ .channel = 78,
.freq = 5390, /* MHz */
.unk2 = 3593,
RADIOREGS(0x71, 0x1B, 0x02, 0x08, 0x6E, 0x01, 0x04, 0x0A,
0x00, 0x87, 0x33, 0x33, 0x77, 0x00, 0x06, 0x04,
0x82, 0x77, 0x00, 0x06, 0x04, 0x82),
PHYREGS(0x0870, 0x086C, 0x0868, 0x01E5, 0x01E6, 0x01E7),
},
{ .channel = 80,
.freq = 5400, /* MHz */
.unk2 = 3600,
RADIOREGS(0x71, 0x1C, 0x02, 0x07, 0x67, 0x01, 0x04, 0x0A,
0x00, 0x86, 0x33, 0x33, 0x66, 0x00, 0x05, 0x04,
0x81, 0x66, 0x00, 0x05, 0x04, 0x81),
PHYREGS(0x0874, 0x0870, 0x086C, 0x01E5, 0x01E5, 0x01E6),
},
{ .channel = 82,
.freq = 5410, /* MHz */
.unk2 = 3607,
RADIOREGS(0x71, 0x1D, 0x02, 0x07, 0x67, 0x01, 0x04, 0x0A,
0x00, 0x86, 0x33, 0x33, 0x66, 0x00, 0x05, 0x04,
0x81, 0x66, 0x00, 0x05, 0x04, 0x81),
PHYREGS(0x0878, 0x0874, 0x0870, 0x01E4, 0x01E5, 0x01E5),
},
{ .channel = 84,
.freq = 5420, /* MHz */
.unk2 = 3613,
RADIOREGS(0x71, 0x1E, 0x02, 0x07, 0x61, 0x01, 0x04, 0x0A,
0x00, 0x86, 0x22, 0x22, 0x66, 0x00, 0x05, 0x03,
0x80, 0x66, 0x00, 0x05, 0x03, 0x80),
PHYREGS(0x087C, 0x0878, 0x0874, 0x01E3, 0x01E4, 0x01E5),
},
{ .channel = 86,
.freq = 5430, /* MHz */
.unk2 = 3620,
RADIOREGS(0x71, 0x1F, 0x02, 0x07, 0x61, 0x01, 0x04, 0x0A,
0x00, 0x86, 0x22, 0x22, 0x66, 0x00, 0x05, 0x03,
0x80, 0x66, 0x00, 0x05, 0x03, 0x80),
PHYREGS(0x0880, 0x087C, 0x0878, 0x01E2, 0x01E3, 0x01E4),
},
{ .channel = 88,
.freq = 5440, /* MHz */
.unk2 = 3627,
RADIOREGS(0x71, 0x20, 0x02, 0x07, 0x5A, 0x01, 0x04, 0x0A,
0x00, 0x85, 0x22, 0x22, 0x55, 0x00, 0x04, 0x02,
0x80, 0x55, 0x00, 0x04, 0x02, 0x80),
PHYREGS(0x0884, 0x0880, 0x087C, 0x01E1, 0x01E2, 0x01E3),
},
{ .channel = 90,
.freq = 5450, /* MHz */
.unk2 = 3633,
RADIOREGS(0x71, 0x21, 0x02, 0x07, 0x5A, 0x01, 0x04, 0x0A,
0x00, 0x85, 0x22, 0x22, 0x55, 0x00, 0x04, 0x02,
0x80, 0x55, 0x00, 0x04, 0x02, 0x80),
PHYREGS(0x0888, 0x0884, 0x0880, 0x01E0, 0x01E1, 0x01E2),
},
{ .channel = 92,
.freq = 5460, /* MHz */
.unk2 = 3640,
RADIOREGS(0x71, 0x22, 0x02, 0x06, 0x53, 0x01, 0x04, 0x0A,
0x00, 0x85, 0x11, 0x11, 0x55, 0x00, 0x04, 0x01,
0x80, 0x55, 0x00, 0x04, 0x01, 0x80),
PHYREGS(0x088C, 0x0888, 0x0884, 0x01DF, 0x01E0, 0x01E1),
},
{ .channel = 94,
.freq = 5470, /* MHz */
.unk2 = 3647,
RADIOREGS(0x71, 0x23, 0x02, 0x06, 0x53, 0x01, 0x04, 0x0A,
0x00, 0x85, 0x11, 0x11, 0x55, 0x00, 0x04, 0x01,
0x80, 0x55, 0x00, 0x04, 0x01, 0x80),
PHYREGS(0x0890, 0x088C, 0x0888, 0x01DE, 0x01DF, 0x01E0),
},
{ .channel = 96,
.freq = 5480, /* MHz */
.unk2 = 3653,
RADIOREGS(0x71, 0x24, 0x02, 0x06, 0x4D, 0x01, 0x04, 0x0A,
0x00, 0x84, 0x11, 0x11, 0x44, 0x00, 0x03, 0x00,
0x80, 0x44, 0x00, 0x03, 0x00, 0x80),
PHYREGS(0x0894, 0x0890, 0x088C, 0x01DD, 0x01DE, 0x01DF),
},
{ .channel = 98,
.freq = 5490, /* MHz */
.unk2 = 3660,
RADIOREGS(0x71, 0x25, 0x02, 0x06, 0x4D, 0x01, 0x04, 0x0A,
0x00, 0x84, 0x11, 0x11, 0x44, 0x00, 0x03, 0x00,
0x80, 0x44, 0x00, 0x03, 0x00, 0x80),
PHYREGS(0x0898, 0x0894, 0x0890, 0x01DD, 0x01DD, 0x01DE),
},
{ .channel = 100,
.freq = 5500, /* MHz */
.unk2 = 3667,
RADIOREGS(0x71, 0x26, 0x02, 0x06, 0x47, 0x01, 0x04, 0x0A,
0x00, 0x84, 0x00, 0x00, 0x44, 0x00, 0x03, 0x00,
0x80, 0x44, 0x00, 0x03, 0x00, 0x80),
PHYREGS(0x089C, 0x0898, 0x0894, 0x01DC, 0x01DD, 0x01DD),
},
{ .channel = 102,
.freq = 5510, /* MHz */
.unk2 = 3673,
RADIOREGS(0x71, 0x27, 0x02, 0x06, 0x47, 0x01, 0x04, 0x0A,
0x00, 0x84, 0x00, 0x00, 0x44, 0x00, 0x03, 0x00,
0x80, 0x44, 0x00, 0x03, 0x00, 0x80),
PHYREGS(0x08A0, 0x089C, 0x0898, 0x01DB, 0x01DC, 0x01DD),
},
{ .channel = 104,
.freq = 5520, /* MHz */
.unk2 = 3680,
RADIOREGS(0x71, 0x28, 0x02, 0x05, 0x40, 0x01, 0x04, 0x0A,
0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00,
0x80, 0x33, 0x00, 0x02, 0x00, 0x80),
PHYREGS(0x08A4, 0x08A0, 0x089C, 0x01DA, 0x01DB, 0x01DC),
},
{ .channel = 106,
.freq = 5530, /* MHz */
.unk2 = 3687,
RADIOREGS(0x71, 0x29, 0x02, 0x05, 0x40, 0x01, 0x04, 0x0A,
0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00,
0x80, 0x33, 0x00, 0x02, 0x00, 0x80),
PHYREGS(0x08A8, 0x08A4, 0x08A0, 0x01D9, 0x01DA, 0x01DB),
},
{ .channel = 108,
.freq = 5540, /* MHz */
.unk2 = 3693,
RADIOREGS(0x71, 0x2A, 0x02, 0x05, 0x3A, 0x01, 0x04, 0x0A,
0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00,
0x80, 0x33, 0x00, 0x02, 0x00, 0x80),
PHYREGS(0x08AC, 0x08A8, 0x08A4, 0x01D8, 0x01D9, 0x01DA),
},
{ .channel = 110,
.freq = 5550, /* MHz */
.unk2 = 3700,
RADIOREGS(0x71, 0x2B, 0x02, 0x05, 0x3A, 0x01, 0x04, 0x0A,
0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00,
0x80, 0x33, 0x00, 0x02, 0x00, 0x80),
PHYREGS(0x08B0, 0x08AC, 0x08A8, 0x01D7, 0x01D8, 0x01D9),
},
{ .channel = 112,
.freq = 5560, /* MHz */
.unk2 = 3707,
RADIOREGS(0x71, 0x2C, 0x02, 0x05, 0x34, 0x01, 0x04, 0x0A,
0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00,
0x80, 0x22, 0x00, 0x01, 0x00, 0x80),
PHYREGS(0x08B4, 0x08B0, 0x08AC, 0x01D7, 0x01D7, 0x01D8),
},
{ .channel = 114,
.freq = 5570, /* MHz */
.unk2 = 3713,
RADIOREGS(0x71, 0x2D, 0x02, 0x05, 0x34, 0x01, 0x04, 0x0A,
0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00,
0x80, 0x22, 0x00, 0x01, 0x00, 0x80),
PHYREGS(0x08B8, 0x08B4, 0x08B0, 0x01D6, 0x01D7, 0x01D7),
},
{ .channel = 116,
.freq = 5580, /* MHz */
.unk2 = 3720,
RADIOREGS(0x71, 0x2E, 0x02, 0x04, 0x2E, 0x01, 0x04, 0x0A,
0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00,
0x80, 0x22, 0x00, 0x01, 0x00, 0x80),
PHYREGS(0x08BC, 0x08B8, 0x08B4, 0x01D5, 0x01D6, 0x01D7),
},
{ .channel = 118,
.freq = 5590, /* MHz */
.unk2 = 3727,
RADIOREGS(0x71, 0x2F, 0x02, 0x04, 0x2E, 0x01, 0x04, 0x0A,
0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00,
0x80, 0x22, 0x00, 0x01, 0x00, 0x80),
PHYREGS(0x08C0, 0x08BC, 0x08B8, 0x01D4, 0x01D5, 0x01D6),
},
{ .channel = 120,
.freq = 5600, /* MHz */
.unk2 = 3733,
RADIOREGS(0x71, 0x30, 0x02, 0x04, 0x28, 0x01, 0x04, 0x0A,
0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x01, 0x00,
0x80, 0x11, 0x00, 0x01, 0x00, 0x80),
PHYREGS(0x08C4, 0x08C0, 0x08BC, 0x01D3, 0x01D4, 0x01D5),
},
{ .channel = 122,
.freq = 5610, /* MHz */
.unk2 = 3740,
RADIOREGS(0x71, 0x31, 0x02, 0x04, 0x28, 0x01, 0x04, 0x0A,
0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x01, 0x00,
0x80, 0x11, 0x00, 0x01, 0x00, 0x80),
PHYREGS(0x08C8, 0x08C4, 0x08C0, 0x01D2, 0x01D3, 0x01D4),
},
{ .channel = 124,
.freq = 5620, /* MHz */
.unk2 = 3747,
RADIOREGS(0x71, 0x32, 0x02, 0x04, 0x21, 0x01, 0x04, 0x0A,
0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
0x80, 0x11, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x08CC, 0x08C8, 0x08C4, 0x01D2, 0x01D2, 0x01D3),
},
{ .channel = 126,
.freq = 5630, /* MHz */
.unk2 = 3753,
RADIOREGS(0x71, 0x33, 0x02, 0x04, 0x21, 0x01, 0x04, 0x0A,
0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
0x80, 0x11, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x08D0, 0x08CC, 0x08C8, 0x01D1, 0x01D2, 0x01D2),
},
{ .channel = 128,
.freq = 5640, /* MHz */
.unk2 = 3760,
RADIOREGS(0x71, 0x34, 0x02, 0x03, 0x1C, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x08D4, 0x08D0, 0x08CC, 0x01D0, 0x01D1, 0x01D2),
},
{ .channel = 130,
.freq = 5650, /* MHz */
.unk2 = 3767,
RADIOREGS(0x71, 0x35, 0x02, 0x03, 0x1C, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x08D8, 0x08D4, 0x08D0, 0x01CF, 0x01D0, 0x01D1),
},
{ .channel = 132,
.freq = 5660, /* MHz */
.unk2 = 3773,
RADIOREGS(0x71, 0x36, 0x02, 0x03, 0x16, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x08DC, 0x08D8, 0x08D4, 0x01CE, 0x01CF, 0x01D0),
},
{ .channel = 134,
.freq = 5670, /* MHz */
.unk2 = 3780,
RADIOREGS(0x71, 0x37, 0x02, 0x03, 0x16, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x08E0, 0x08DC, 0x08D8, 0x01CE, 0x01CE, 0x01CF),
},
{ .channel = 136,
.freq = 5680, /* MHz */
.unk2 = 3787,
RADIOREGS(0x71, 0x38, 0x02, 0x03, 0x10, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x08E4, 0x08E0, 0x08DC, 0x01CD, 0x01CE, 0x01CE),
},
{ .channel = 138,
.freq = 5690, /* MHz */
.unk2 = 3793,
RADIOREGS(0x71, 0x39, 0x02, 0x03, 0x10, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x08E8, 0x08E4, 0x08E0, 0x01CC, 0x01CD, 0x01CE),
},
{ .channel = 140,
.freq = 5700, /* MHz */
.unk2 = 3800,
RADIOREGS(0x71, 0x3A, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x08EC, 0x08E8, 0x08E4, 0x01CB, 0x01CC, 0x01CD),
},
{ .channel = 142,
.freq = 5710, /* MHz */
.unk2 = 3807,
RADIOREGS(0x71, 0x3B, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x08F0, 0x08EC, 0x08E8, 0x01CA, 0x01CB, 0x01CC),
},
{ .channel = 144,
.freq = 5720, /* MHz */
.unk2 = 3813,
RADIOREGS(0x71, 0x3C, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x08F4, 0x08F0, 0x08EC, 0x01C9, 0x01CA, 0x01CB),
},
{ .channel = 145,
.freq = 5725, /* MHz */
.unk2 = 3817,
RADIOREGS(0x72, 0x79, 0x04, 0x02, 0x03, 0x01, 0x03, 0x14,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x08F6, 0x08F2, 0x08EE, 0x01C9, 0x01CA, 0x01CB),
},
{ .channel = 146,
.freq = 5730, /* MHz */
.unk2 = 3820,
RADIOREGS(0x71, 0x3D, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x08F8, 0x08F4, 0x08F0, 0x01C9, 0x01C9, 0x01CA),
},
{ .channel = 147,
.freq = 5735, /* MHz */
.unk2 = 3823,
RADIOREGS(0x72, 0x7B, 0x04, 0x02, 0x03, 0x01, 0x03, 0x14,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x08FA, 0x08F6, 0x08F2, 0x01C8, 0x01C9, 0x01CA),
},
{ .channel = 148,
.freq = 5740, /* MHz */
.unk2 = 3827,
RADIOREGS(0x71, 0x3E, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x08FC, 0x08F8, 0x08F4, 0x01C8, 0x01C9, 0x01C9),
},
{ .channel = 149,
.freq = 5745, /* MHz */
.unk2 = 3830,
RADIOREGS(0x72, 0x7D, 0x04, 0x02, 0xFE, 0x00, 0x03, 0x14,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x08FE, 0x08FA, 0x08F6, 0x01C8, 0x01C8, 0x01C9),
},
{ .channel = 150,
.freq = 5750, /* MHz */
.unk2 = 3833,
RADIOREGS(0x71, 0x3F, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x0900, 0x08FC, 0x08F8, 0x01C7, 0x01C8, 0x01C9),
},
{ .channel = 151,
.freq = 5755, /* MHz */
.unk2 = 3837,
RADIOREGS(0x72, 0x7F, 0x04, 0x02, 0xFE, 0x00, 0x03, 0x14,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x0902, 0x08FE, 0x08FA, 0x01C7, 0x01C8, 0x01C8),
},
{ .channel = 152,
.freq = 5760, /* MHz */
.unk2 = 3840,
RADIOREGS(0x71, 0x40, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x0904, 0x0900, 0x08FC, 0x01C6, 0x01C7, 0x01C8),
},
{ .channel = 153,
.freq = 5765, /* MHz */
.unk2 = 3843,
RADIOREGS(0x72, 0x81, 0x04, 0x02, 0xF8, 0x00, 0x03, 0x14,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x0906, 0x0902, 0x08FE, 0x01C6, 0x01C7, 0x01C8),
},
{ .channel = 154,
.freq = 5770, /* MHz */
.unk2 = 3847,
RADIOREGS(0x71, 0x41, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x0908, 0x0904, 0x0900, 0x01C6, 0x01C6, 0x01C7),
},
{ .channel = 155,
.freq = 5775, /* MHz */
.unk2 = 3850,
RADIOREGS(0x72, 0x83, 0x04, 0x02, 0xF8, 0x00, 0x03, 0x14,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x090A, 0x0906, 0x0902, 0x01C5, 0x01C6, 0x01C7),
},
{ .channel = 156,
.freq = 5780, /* MHz */
.unk2 = 3853,
RADIOREGS(0x71, 0x42, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x090C, 0x0908, 0x0904, 0x01C5, 0x01C6, 0x01C6),
},
{ .channel = 157,
.freq = 5785, /* MHz */
.unk2 = 3857,
RADIOREGS(0x72, 0x85, 0x04, 0x02, 0xF2, 0x00, 0x03, 0x14,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x090E, 0x090A, 0x0906, 0x01C4, 0x01C5, 0x01C6),
},
{ .channel = 158,
.freq = 5790, /* MHz */
.unk2 = 3860,
RADIOREGS(0x71, 0x43, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x0910, 0x090C, 0x0908, 0x01C4, 0x01C5, 0x01C6),
},
{ .channel = 159,
.freq = 5795, /* MHz */
.unk2 = 3863,
RADIOREGS(0x72, 0x87, 0x04, 0x02, 0xF2, 0x00, 0x03, 0x14,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x0912, 0x090E, 0x090A, 0x01C4, 0x01C4, 0x01C5),
},
{ .channel = 160,
.freq = 5800, /* MHz */
.unk2 = 3867,
RADIOREGS(0x71, 0x44, 0x02, 0x01, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x0914, 0x0910, 0x090C, 0x01C3, 0x01C4, 0x01C5),
},
{ .channel = 161,
.freq = 5805, /* MHz */
.unk2 = 3870,
RADIOREGS(0x72, 0x89, 0x04, 0x01, 0xED, 0x00, 0x03, 0x14,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x0916, 0x0912, 0x090E, 0x01C3, 0x01C4, 0x01C4),
},
{ .channel = 162,
.freq = 5810, /* MHz */
.unk2 = 3873,
RADIOREGS(0x71, 0x45, 0x02, 0x01, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x0918, 0x0914, 0x0910, 0x01C2, 0x01C3, 0x01C4),
},
{ .channel = 163,
.freq = 5815, /* MHz */
.unk2 = 3877,
RADIOREGS(0x72, 0x8B, 0x04, 0x01, 0xED, 0x00, 0x03, 0x14,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x091A, 0x0916, 0x0912, 0x01C2, 0x01C3, 0x01C4),
},
{ .channel = 164,
.freq = 5820, /* MHz */
.unk2 = 3880,
RADIOREGS(0x71, 0x46, 0x02, 0x01, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x091C, 0x0918, 0x0914, 0x01C2, 0x01C2, 0x01C3),
},
{ .channel = 165,
.freq = 5825, /* MHz */
.unk2 = 3883,
RADIOREGS(0x72, 0x8D, 0x04, 0x01, 0xED, 0x00, 0x03, 0x14,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x091E, 0x091A, 0x0916, 0x01C1, 0x01C2, 0x01C3),
},
{ .channel = 166,
.freq = 5830, /* MHz */
.unk2 = 3887,
RADIOREGS(0x71, 0x47, 0x02, 0x01, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x0920, 0x091C, 0x0918, 0x01C1, 0x01C2, 0x01C2),
},
{ .channel = 168,
.freq = 5840, /* MHz */
.unk2 = 3893,
RADIOREGS(0x71, 0x48, 0x02, 0x01, 0x0A, 0x01, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x0924, 0x0920, 0x091C, 0x01C0, 0x01C1, 0x01C2),
},
{ .channel = 170,
.freq = 5850, /* MHz */
.unk2 = 3900,
RADIOREGS(0x71, 0x49, 0x02, 0x01, 0xE0, 0x00, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x0928, 0x0924, 0x0920, 0x01BF, 0x01C0, 0x01C1),
},
{ .channel = 172,
.freq = 5860, /* MHz */
.unk2 = 3907,
RADIOREGS(0x71, 0x4A, 0x02, 0x01, 0xDE, 0x00, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x092C, 0x0928, 0x0924, 0x01BF, 0x01BF, 0x01C0),
},
{ .channel = 174,
.freq = 5870, /* MHz */
.unk2 = 3913,
RADIOREGS(0x71, 0x4B, 0x02, 0x00, 0xDB, 0x00, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x0930, 0x092C, 0x0928, 0x01BE, 0x01BF, 0x01BF),
},
{ .channel = 176,
.freq = 5880, /* MHz */
.unk2 = 3920,
RADIOREGS(0x71, 0x4C, 0x02, 0x00, 0xD8, 0x00, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x0934, 0x0930, 0x092C, 0x01BD, 0x01BE, 0x01BF),
},
{ .channel = 178,
.freq = 5890, /* MHz */
.unk2 = 3927,
RADIOREGS(0x71, 0x4D, 0x02, 0x00, 0xD6, 0x00, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x0938, 0x0934, 0x0930, 0x01BC, 0x01BD, 0x01BE),
},
{ .channel = 180,
.freq = 5900, /* MHz */
.unk2 = 3933,
RADIOREGS(0x71, 0x4E, 0x02, 0x00, 0xD3, 0x00, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x093C, 0x0938, 0x0934, 0x01BC, 0x01BC, 0x01BD),
},
{ .channel = 182,
.freq = 5910, /* MHz */
.unk2 = 3940,
RADIOREGS(0x71, 0x4F, 0x02, 0x00, 0xD6, 0x00, 0x04, 0x0A,
0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
PHYREGS(0x0940, 0x093C, 0x0938, 0x01BB, 0x01BC, 0x01BC),
},
{ .channel = 1,
.freq = 2412, /* MHz */
.unk2 = 3216,
RADIOREGS(0x73, 0x6C, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0D, 0x0C,
0x80, 0xFF, 0x88, 0x0D, 0x0C, 0x80),
PHYREGS(0x03C9, 0x03C5, 0x03C1, 0x043A, 0x043F, 0x0443),
},
{ .channel = 2,
.freq = 2417, /* MHz */
.unk2 = 3223,
RADIOREGS(0x73, 0x71, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x0B,
0x80, 0xFF, 0x88, 0x0C, 0x0B, 0x80),
PHYREGS(0x03CB, 0x03C7, 0x03C3, 0x0438, 0x043D, 0x0441),
},
{ .channel = 3,
.freq = 2422, /* MHz */
.unk2 = 3229,
RADIOREGS(0x73, 0x76, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x0A,
0x80, 0xFF, 0x88, 0x0C, 0x0A, 0x80),
PHYREGS(0x03CD, 0x03C9, 0x03C5, 0x0436, 0x043A, 0x043F),
},
{ .channel = 4,
.freq = 2427, /* MHz */
.unk2 = 3236,
RADIOREGS(0x73, 0x7B, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x0A,
0x80, 0xFF, 0x88, 0x0C, 0x0A, 0x80),
PHYREGS(0x03CF, 0x03CB, 0x03C7, 0x0434, 0x0438, 0x043D),
},
{ .channel = 5,
.freq = 2432, /* MHz */
.unk2 = 3243,
RADIOREGS(0x73, 0x80, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x09,
0x80, 0xFF, 0x88, 0x0C, 0x09, 0x80),
PHYREGS(0x03D1, 0x03CD, 0x03C9, 0x0431, 0x0436, 0x043A),
},
{ .channel = 6,
.freq = 2437, /* MHz */
.unk2 = 3249,
RADIOREGS(0x73, 0x85, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0B, 0x08,
0x80, 0xFF, 0x88, 0x0B, 0x08, 0x80),
PHYREGS(0x03D3, 0x03CF, 0x03CB, 0x042F, 0x0434, 0x0438),
},
{ .channel = 7,
.freq = 2442, /* MHz */
.unk2 = 3256,
RADIOREGS(0x73, 0x8A, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0A, 0x07,
0x80, 0xFF, 0x88, 0x0A, 0x07, 0x80),
PHYREGS(0x03D5, 0x03D1, 0x03CD, 0x042D, 0x0431, 0x0436),
},
{ .channel = 8,
.freq = 2447, /* MHz */
.unk2 = 3263,
RADIOREGS(0x73, 0x8F, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0A, 0x06,
0x80, 0xFF, 0x88, 0x0A, 0x06, 0x80),
PHYREGS(0x03D7, 0x03D3, 0x03CF, 0x042B, 0x042F, 0x0434),
},
{ .channel = 9,
.freq = 2452, /* MHz */
.unk2 = 3269,
RADIOREGS(0x73, 0x94, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x09, 0x06,
0x80, 0xFF, 0x88, 0x09, 0x06, 0x80),
PHYREGS(0x03D9, 0x03D5, 0x03D1, 0x0429, 0x042D, 0x0431),
},
{ .channel = 10,
.freq = 2457, /* MHz */
.unk2 = 3276,
RADIOREGS(0x73, 0x99, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x08, 0x05,
0x80, 0xFF, 0x88, 0x08, 0x05, 0x80),
PHYREGS(0x03DB, 0x03D7, 0x03D3, 0x0427, 0x042B, 0x042F),
},
{ .channel = 11,
.freq = 2462, /* MHz */
.unk2 = 3283,
RADIOREGS(0x73, 0x9E, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x08, 0x04,
0x80, 0xFF, 0x88, 0x08, 0x04, 0x80),
PHYREGS(0x03DD, 0x03D9, 0x03D5, 0x0424, 0x0429, 0x042D),
},
{ .channel = 12,
.freq = 2467, /* MHz */
.unk2 = 3289,
RADIOREGS(0x73, 0xA3, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x08, 0x03,
0x80, 0xFF, 0x88, 0x08, 0x03, 0x80),
PHYREGS(0x03DF, 0x03DB, 0x03D7, 0x0422, 0x0427, 0x042B),
},
{ .channel = 13,
.freq = 2472, /* MHz */
.unk2 = 3296,
RADIOREGS(0x73, 0xA8, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x07, 0x03,
0x80, 0xFF, 0x88, 0x07, 0x03, 0x80),
PHYREGS(0x03E1, 0x03DD, 0x03D9, 0x0420, 0x0424, 0x0429),
},
{ .channel = 14,
.freq = 2484, /* MHz */
.unk2 = 3312,
RADIOREGS(0x73, 0xB4, 0x09, 0x0F, 0xFF, 0x01, 0x07, 0x15,
0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x07, 0x01,
0x80, 0xFF, 0x88, 0x07, 0x01, 0x80),
PHYREGS(0x03E6, 0x03E2, 0x03DE, 0x041B, 0x041F, 0x0424),
},
};
void b2055_upload_inittab(struct b43_wldev *dev,
bool ghz5, bool ignore_uploadflag)
{
const struct b2055_inittab_entry *e;
unsigned int i, writes = 0;
u16 value;
for (i = 0; i < ARRAY_SIZE(b2055_inittab); i++) {
e = &(b2055_inittab[i]);
if (!(e->flags & B2055_INITTAB_ENTRY_OK))
continue;
if ((e->flags & B2055_INITTAB_UPLOAD) || ignore_uploadflag) {
if (ghz5)
value = e->ghz5;
else
value = e->ghz2;
b43_radio_write16(dev, i, value);
if (++writes % 4 == 0)
b43_read32(dev, B43_MMIO_MACCTL); /* flush */
}
}
}
const struct b43_nphy_channeltab_entry_rev2 *
b43_nphy_get_chantabent_rev2(struct b43_wldev *dev, u8 channel)
{
const struct b43_nphy_channeltab_entry_rev2 *e;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(b43_nphy_channeltab_rev2); i++) {
e = &(b43_nphy_channeltab_rev2[i]);
if (e->channel == channel)
return e;
}
return NULL;
}
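/*
 * Illustrative sketch (not part of the original file): how a channel
 * switch might combine the two helpers above. example_tune_2055() is
 * hypothetical; the real sequencing lives in the N-PHY code, so the
 * block is guarded out.
 */
#if 0
static int example_tune_2055(struct b43_wldev *dev, u8 channel, bool ghz5)
{
const struct b43_nphy_channeltab_entry_rev2 *e;
/* Re-upload only the registers flagged B2055_INITTAB_UPLOAD. */
b2055_upload_inittab(dev, ghz5, false);
e = b43_nphy_get_chantabent_rev2(dev, channel);
if (!e)
return -ESRCH;
/* ... write e->radio_* values and e->phy_regs to the hardware ... */
return 0;
}
#endif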
|
gpl-2.0
|
moonlightly/AK-OnePone
|
drivers/char/agp/frontend.c
|
10491
|
24472
|
/*
* AGPGART driver frontend
* Copyright (C) 2004 Silicon Graphics, Inc.
* Copyright (C) 2002-2003 Dave Jones
* Copyright (C) 1999 Jeff Hartmann
* Copyright (C) 1999 Precision Insight, Inc.
* Copyright (C) 1999 Xi Graphics, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
* OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/agp_backend.h>
#include <linux/agpgart.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include "agp.h"
struct agp_front_data agp_fe;
struct agp_memory *agp_find_mem_by_key(int key)
{
struct agp_memory *curr;
if (agp_fe.current_controller == NULL)
return NULL;
curr = agp_fe.current_controller->pool;
while (curr != NULL) {
if (curr->key == key)
break;
curr = curr->next;
}
DBG("key=%d -> mem=%p", key, curr);
return curr;
}
static void agp_remove_from_pool(struct agp_memory *temp)
{
struct agp_memory *prev;
struct agp_memory *next;
/* Check to see if this is even in the memory pool */
DBG("mem=%p", temp);
if (agp_find_mem_by_key(temp->key) != NULL) {
next = temp->next;
prev = temp->prev;
if (prev != NULL) {
prev->next = next;
if (next != NULL)
next->prev = prev;
} else {
/* This is the first item on the list */
if (next != NULL)
next->prev = NULL;
agp_fe.current_controller->pool = next;
}
}
}
/*
* Routines for managing each client's segment list -
 * These routines handle adding segments to and removing
 * segments from each auth'ed client.
*/
static struct
agp_segment_priv *agp_find_seg_in_client(const struct agp_client *client,
unsigned long offset,
int size, pgprot_t page_prot)
{
struct agp_segment_priv *seg;
int num_segments, i;
off_t pg_start;
size_t pg_count;
pg_start = offset / 4096;
pg_count = size / 4096;
seg = *(client->segments);
	num_segments = client->num_segments;
	for (i = 0; i < num_segments; i++) {
if ((seg[i].pg_start == pg_start) &&
(seg[i].pg_count == pg_count) &&
(pgprot_val(seg[i].prot) == pgprot_val(page_prot))) {
return seg + i;
}
}
return NULL;
}
static void agp_remove_seg_from_client(struct agp_client *client)
{
DBG("client=%p", client);
if (client->segments != NULL) {
if (*(client->segments) != NULL) {
DBG("Freeing %p from client %p", *(client->segments), client);
kfree(*(client->segments));
}
DBG("Freeing %p from client %p", client->segments, client);
kfree(client->segments);
client->segments = NULL;
}
}
static void agp_add_seg_to_client(struct agp_client *client,
struct agp_segment_priv ** seg, int num_segments)
{
struct agp_segment_priv **prev_seg;
prev_seg = client->segments;
if (prev_seg != NULL)
agp_remove_seg_from_client(client);
DBG("Adding seg %p (%d segments) to client %p", seg, num_segments, client);
client->num_segments = num_segments;
client->segments = seg;
}
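/*
 * Convert mmap() PROT_* flags to a pgprot_t: calc_vm_prot_bits() maps
 * PROT_READ/WRITE/EXEC onto the matching VM_* bits, and VM_SHARED is
 * forced because AGP segments are always shared mappings. E.g.
 * PROT_READ | PROT_WRITE yields the page protection for
 * VM_READ | VM_WRITE | VM_SHARED.
 */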
static pgprot_t agp_convert_mmap_flags(int prot)
{
unsigned long prot_bits;
prot_bits = calc_vm_prot_bits(prot) | VM_SHARED;
return vm_get_page_prot(prot_bits);
}
int agp_create_segment(struct agp_client *client, struct agp_region *region)
{
struct agp_segment_priv **ret_seg;
struct agp_segment_priv *seg;
struct agp_segment *user_seg;
size_t i;
seg = kzalloc((sizeof(struct agp_segment_priv) * region->seg_count), GFP_KERNEL);
if (seg == NULL) {
kfree(region->seg_list);
region->seg_list = NULL;
return -ENOMEM;
}
user_seg = region->seg_list;
for (i = 0; i < region->seg_count; i++) {
seg[i].pg_start = user_seg[i].pg_start;
seg[i].pg_count = user_seg[i].pg_count;
seg[i].prot = agp_convert_mmap_flags(user_seg[i].prot);
}
kfree(region->seg_list);
region->seg_list = NULL;
ret_seg = kmalloc(sizeof(void *), GFP_KERNEL);
if (ret_seg == NULL) {
kfree(seg);
return -ENOMEM;
}
*ret_seg = seg;
agp_add_seg_to_client(client, ret_seg, region->seg_count);
return 0;
}
/* End - Routines for managing each client's segment list */
/* This function must only be called when current_controller != NULL */
static void agp_insert_into_pool(struct agp_memory * temp)
{
struct agp_memory *prev;
prev = agp_fe.current_controller->pool;
if (prev != NULL) {
prev->prev = temp;
temp->next = prev;
}
agp_fe.current_controller->pool = temp;
}
/* File private list routines */
struct agp_file_private *agp_find_private(pid_t pid)
{
struct agp_file_private *curr;
curr = agp_fe.file_priv_list;
while (curr != NULL) {
if (curr->my_pid == pid)
return curr;
curr = curr->next;
}
return NULL;
}
static void agp_insert_file_private(struct agp_file_private * priv)
{
struct agp_file_private *prev;
prev = agp_fe.file_priv_list;
if (prev != NULL)
prev->prev = priv;
priv->next = prev;
agp_fe.file_priv_list = priv;
}
static void agp_remove_file_private(struct agp_file_private * priv)
{
struct agp_file_private *next;
struct agp_file_private *prev;
next = priv->next;
prev = priv->prev;
if (prev != NULL) {
prev->next = next;
if (next != NULL)
next->prev = prev;
} else {
if (next != NULL)
next->prev = NULL;
agp_fe.file_priv_list = next;
}
}
/* End - File private list routines */
/*
* Wrappers for agp_free_memory & agp_allocate_memory
* These make sure that internal lists are kept updated.
*/
void agp_free_memory_wrap(struct agp_memory *memory)
{
agp_remove_from_pool(memory);
agp_free_memory(memory);
}
struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type)
{
struct agp_memory *memory;
memory = agp_allocate_memory(agp_bridge, pg_count, type);
if (memory == NULL)
return NULL;
agp_insert_into_pool(memory);
return memory;
}
/* Routines for managing the list of controllers -
* These routines manage the current controller, and the list of
* controllers
*/
static struct agp_controller *agp_find_controller_by_pid(pid_t id)
{
struct agp_controller *controller;
controller = agp_fe.controllers;
while (controller != NULL) {
if (controller->pid == id)
return controller;
controller = controller->next;
}
return NULL;
}
static struct agp_controller *agp_create_controller(pid_t id)
{
struct agp_controller *controller;
controller = kzalloc(sizeof(struct agp_controller), GFP_KERNEL);
if (controller == NULL)
return NULL;
controller->pid = id;
return controller;
}
static int agp_insert_controller(struct agp_controller *controller)
{
struct agp_controller *prev_controller;
prev_controller = agp_fe.controllers;
controller->next = prev_controller;
if (prev_controller != NULL)
prev_controller->prev = controller;
agp_fe.controllers = controller;
return 0;
}
static void agp_remove_all_clients(struct agp_controller *controller)
{
struct agp_client *client;
struct agp_client *temp;
client = controller->clients;
while (client) {
struct agp_file_private *priv;
temp = client;
agp_remove_seg_from_client(temp);
priv = agp_find_private(temp->pid);
if (priv != NULL) {
clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
clear_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
}
client = client->next;
kfree(temp);
}
}
static void agp_remove_all_memory(struct agp_controller *controller)
{
struct agp_memory *memory;
struct agp_memory *temp;
memory = controller->pool;
while (memory) {
temp = memory;
memory = memory->next;
agp_free_memory_wrap(temp);
}
}
static int agp_remove_controller(struct agp_controller *controller)
{
struct agp_controller *prev_controller;
struct agp_controller *next_controller;
prev_controller = controller->prev;
next_controller = controller->next;
if (prev_controller != NULL) {
prev_controller->next = next_controller;
if (next_controller != NULL)
next_controller->prev = prev_controller;
} else {
if (next_controller != NULL)
next_controller->prev = NULL;
agp_fe.controllers = next_controller;
}
agp_remove_all_memory(controller);
agp_remove_all_clients(controller);
if (agp_fe.current_controller == controller) {
agp_fe.current_controller = NULL;
agp_fe.backend_acquired = false;
agp_backend_release(agp_bridge);
}
kfree(controller);
return 0;
}
static void agp_controller_make_current(struct agp_controller *controller)
{
struct agp_client *clients;
clients = controller->clients;
while (clients != NULL) {
struct agp_file_private *priv;
priv = agp_find_private(clients->pid);
if (priv != NULL) {
set_bit(AGP_FF_IS_VALID, &priv->access_flags);
set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
}
clients = clients->next;
}
agp_fe.current_controller = controller;
}
static void agp_controller_release_current(struct agp_controller *controller,
struct agp_file_private *controller_priv)
{
struct agp_client *clients;
clear_bit(AGP_FF_IS_VALID, &controller_priv->access_flags);
clients = controller->clients;
while (clients != NULL) {
struct agp_file_private *priv;
priv = agp_find_private(clients->pid);
if (priv != NULL)
clear_bit(AGP_FF_IS_VALID, &priv->access_flags);
clients = clients->next;
}
agp_fe.current_controller = NULL;
agp_fe.used_by_controller = false;
agp_backend_release(agp_bridge);
}
/*
* Routines for managing client lists -
* These routines are for managing the list of auth'ed clients.
*/
static struct agp_client
*agp_find_client_in_controller(struct agp_controller *controller, pid_t id)
{
struct agp_client *client;
if (controller == NULL)
return NULL;
client = controller->clients;
while (client != NULL) {
if (client->pid == id)
return client;
client = client->next;
}
return NULL;
}
static struct agp_controller *agp_find_controller_for_client(pid_t id)
{
struct agp_controller *controller;
controller = agp_fe.controllers;
while (controller != NULL) {
if ((agp_find_client_in_controller(controller, id)) != NULL)
return controller;
controller = controller->next;
}
return NULL;
}
struct agp_client *agp_find_client_by_pid(pid_t id)
{
struct agp_client *temp;
if (agp_fe.current_controller == NULL)
return NULL;
temp = agp_find_client_in_controller(agp_fe.current_controller, id);
return temp;
}
static void agp_insert_client(struct agp_client *client)
{
struct agp_client *prev_client;
prev_client = agp_fe.current_controller->clients;
client->next = prev_client;
if (prev_client != NULL)
prev_client->prev = client;
agp_fe.current_controller->clients = client;
agp_fe.current_controller->num_clients++;
}
struct agp_client *agp_create_client(pid_t id)
{
struct agp_client *new_client;
new_client = kzalloc(sizeof(struct agp_client), GFP_KERNEL);
if (new_client == NULL)
return NULL;
new_client->pid = id;
agp_insert_client(new_client);
return new_client;
}
int agp_remove_client(pid_t id)
{
struct agp_client *client;
struct agp_client *prev_client;
struct agp_client *next_client;
struct agp_controller *controller;
controller = agp_find_controller_for_client(id);
if (controller == NULL)
return -EINVAL;
client = agp_find_client_in_controller(controller, id);
if (client == NULL)
return -EINVAL;
prev_client = client->prev;
next_client = client->next;
if (prev_client != NULL) {
prev_client->next = next_client;
if (next_client != NULL)
next_client->prev = prev_client;
} else {
if (next_client != NULL)
next_client->prev = NULL;
controller->clients = next_client;
}
controller->num_clients--;
agp_remove_seg_from_client(client);
kfree(client);
return 0;
}
/* End - Routines for managing client lists */
/* File Operations */
static int agp_mmap(struct file *file, struct vm_area_struct *vma)
{
unsigned int size, current_size;
unsigned long offset;
struct agp_client *client;
struct agp_file_private *priv = file->private_data;
struct agp_kern_info kerninfo;
mutex_lock(&(agp_fe.agp_mutex));
if (agp_fe.backend_acquired != true)
goto out_eperm;
if (!(test_bit(AGP_FF_IS_VALID, &priv->access_flags)))
goto out_eperm;
agp_copy_info(agp_bridge, &kerninfo);
size = vma->vm_end - vma->vm_start;
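	/* kerninfo.aper_size is in megabytes; the lines below convert it
	 * to bytes (0x100000 == 1 MiB) */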
current_size = kerninfo.aper_size;
current_size = current_size * 0x100000;
offset = vma->vm_pgoff << PAGE_SHIFT;
DBG("%lx:%lx", offset, offset+size);
if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags)) {
if ((size + offset) > current_size)
goto out_inval;
client = agp_find_client_by_pid(current->pid);
if (client == NULL)
goto out_eperm;
if (!agp_find_seg_in_client(client, offset, size, vma->vm_page_prot))
goto out_inval;
DBG("client vm_ops=%p", kerninfo.vm_ops);
if (kerninfo.vm_ops) {
vma->vm_ops = kerninfo.vm_ops;
} else if (io_remap_pfn_range(vma, vma->vm_start,
(kerninfo.aper_base + offset) >> PAGE_SHIFT,
size, vma->vm_page_prot)) {
goto out_again;
}
mutex_unlock(&(agp_fe.agp_mutex));
return 0;
}
if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
if (size != current_size)
goto out_inval;
DBG("controller vm_ops=%p", kerninfo.vm_ops);
if (kerninfo.vm_ops) {
vma->vm_ops = kerninfo.vm_ops;
} else if (io_remap_pfn_range(vma, vma->vm_start,
kerninfo.aper_base >> PAGE_SHIFT,
size, vma->vm_page_prot)) {
goto out_again;
}
mutex_unlock(&(agp_fe.agp_mutex));
return 0;
}
out_eperm:
mutex_unlock(&(agp_fe.agp_mutex));
return -EPERM;
out_inval:
mutex_unlock(&(agp_fe.agp_mutex));
return -EINVAL;
out_again:
mutex_unlock(&(agp_fe.agp_mutex));
return -EAGAIN;
}
static int agp_release(struct inode *inode, struct file *file)
{
struct agp_file_private *priv = file->private_data;
mutex_lock(&(agp_fe.agp_mutex));
DBG("priv=%p", priv);
if (test_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags)) {
struct agp_controller *controller;
controller = agp_find_controller_by_pid(priv->my_pid);
if (controller != NULL) {
if (controller == agp_fe.current_controller)
agp_controller_release_current(controller, priv);
agp_remove_controller(controller);
controller = NULL;
}
}
if (test_bit(AGP_FF_IS_CLIENT, &priv->access_flags))
agp_remove_client(priv->my_pid);
agp_remove_file_private(priv);
kfree(priv);
file->private_data = NULL;
mutex_unlock(&(agp_fe.agp_mutex));
return 0;
}
static int agp_open(struct inode *inode, struct file *file)
{
int minor = iminor(inode);
struct agp_file_private *priv;
struct agp_client *client;
if (minor != AGPGART_MINOR)
return -ENXIO;
mutex_lock(&(agp_fe.agp_mutex));
priv = kzalloc(sizeof(struct agp_file_private), GFP_KERNEL);
if (priv == NULL) {
mutex_unlock(&(agp_fe.agp_mutex));
return -ENOMEM;
}
set_bit(AGP_FF_ALLOW_CLIENT, &priv->access_flags);
priv->my_pid = current->pid;
if (capable(CAP_SYS_RAWIO))
/* Root priv, can be controller */
set_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags);
client = agp_find_client_by_pid(current->pid);
if (client != NULL) {
set_bit(AGP_FF_IS_CLIENT, &priv->access_flags);
set_bit(AGP_FF_IS_VALID, &priv->access_flags);
}
file->private_data = (void *) priv;
agp_insert_file_private(priv);
DBG("private=%p, client=%p", priv, client);
mutex_unlock(&(agp_fe.agp_mutex));
return 0;
}
static ssize_t agp_read(struct file *file, char __user *buf,
size_t count, loff_t * ppos)
{
return -EINVAL;
}
static ssize_t agp_write(struct file *file, const char __user *buf,
size_t count, loff_t * ppos)
{
return -EINVAL;
}
static int agpioc_info_wrap(struct agp_file_private *priv, void __user *arg)
{
struct agp_info userinfo;
struct agp_kern_info kerninfo;
agp_copy_info(agp_bridge, &kerninfo);
userinfo.version.major = kerninfo.version.major;
userinfo.version.minor = kerninfo.version.minor;
userinfo.bridge_id = kerninfo.device->vendor |
(kerninfo.device->device << 16);
userinfo.agp_mode = kerninfo.mode;
userinfo.aper_base = kerninfo.aper_base;
userinfo.aper_size = kerninfo.aper_size;
userinfo.pg_total = userinfo.pg_system = kerninfo.max_memory;
userinfo.pg_used = kerninfo.current_memory;
if (copy_to_user(arg, &userinfo, sizeof(struct agp_info)))
return -EFAULT;
return 0;
}
int agpioc_acquire_wrap(struct agp_file_private *priv)
{
struct agp_controller *controller;
DBG("");
if (!(test_bit(AGP_FF_ALLOW_CONTROLLER, &priv->access_flags)))
return -EPERM;
if (agp_fe.current_controller != NULL)
return -EBUSY;
if (!agp_bridge)
return -ENODEV;
if (atomic_read(&agp_bridge->agp_in_use))
return -EBUSY;
atomic_inc(&agp_bridge->agp_in_use);
agp_fe.backend_acquired = true;
controller = agp_find_controller_by_pid(priv->my_pid);
if (controller != NULL) {
agp_controller_make_current(controller);
} else {
controller = agp_create_controller(priv->my_pid);
if (controller == NULL) {
agp_fe.backend_acquired = false;
agp_backend_release(agp_bridge);
return -ENOMEM;
}
agp_insert_controller(controller);
agp_controller_make_current(controller);
}
set_bit(AGP_FF_IS_CONTROLLER, &priv->access_flags);
set_bit(AGP_FF_IS_VALID, &priv->access_flags);
return 0;
}
int agpioc_release_wrap(struct agp_file_private *priv)
{
DBG("");
agp_controller_release_current(agp_fe.current_controller, priv);
return 0;
}
int agpioc_setup_wrap(struct agp_file_private *priv, void __user *arg)
{
struct agp_setup mode;
DBG("");
if (copy_from_user(&mode, arg, sizeof(struct agp_setup)))
return -EFAULT;
agp_enable(agp_bridge, mode.agp_mode);
return 0;
}
static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
{
struct agp_region reserve;
struct agp_client *client;
struct agp_file_private *client_priv;
DBG("");
if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
return -EFAULT;
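	/* Guard the kmalloc() size computation below against overflow:
	 * seg_count * sizeof(struct agp_segment) must fit in an unsigned int */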
if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
return -EFAULT;
client = agp_find_client_by_pid(reserve.pid);
if (reserve.seg_count == 0) {
/* remove a client */
client_priv = agp_find_private(reserve.pid);
if (client_priv != NULL) {
set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
}
if (client == NULL) {
/* client is already removed */
return 0;
}
return agp_remove_client(reserve.pid);
} else {
struct agp_segment *segment;
if (reserve.seg_count >= 16384)
return -EINVAL;
segment = kmalloc((sizeof(struct agp_segment) * reserve.seg_count),
GFP_KERNEL);
if (segment == NULL)
return -ENOMEM;
if (copy_from_user(segment, (void __user *) reserve.seg_list,
sizeof(struct agp_segment) * reserve.seg_count)) {
kfree(segment);
return -EFAULT;
}
reserve.seg_list = segment;
if (client == NULL) {
/* Create the client and add the segment */
client = agp_create_client(reserve.pid);
if (client == NULL) {
kfree(segment);
return -ENOMEM;
}
client_priv = agp_find_private(reserve.pid);
if (client_priv != NULL) {
set_bit(AGP_FF_IS_CLIENT, &client_priv->access_flags);
set_bit(AGP_FF_IS_VALID, &client_priv->access_flags);
}
}
return agp_create_segment(client, &reserve);
}
/* Will never really happen */
return -EINVAL;
}
int agpioc_protect_wrap(struct agp_file_private *priv)
{
DBG("");
/* This function is not currently implemented */
return -EINVAL;
}
static int agpioc_allocate_wrap(struct agp_file_private *priv, void __user *arg)
{
struct agp_memory *memory;
struct agp_allocate alloc;
DBG("");
if (copy_from_user(&alloc, arg, sizeof(struct agp_allocate)))
return -EFAULT;
if (alloc.type >= AGP_USER_TYPES)
return -EINVAL;
memory = agp_allocate_memory_wrap(alloc.pg_count, alloc.type);
if (memory == NULL)
return -ENOMEM;
alloc.key = memory->key;
alloc.physical = memory->physical;
if (copy_to_user(arg, &alloc, sizeof(struct agp_allocate))) {
agp_free_memory_wrap(memory);
return -EFAULT;
}
return 0;
}
int agpioc_deallocate_wrap(struct agp_file_private *priv, int arg)
{
struct agp_memory *memory;
DBG("");
memory = agp_find_mem_by_key(arg);
if (memory == NULL)
return -EINVAL;
agp_free_memory_wrap(memory);
return 0;
}
static int agpioc_bind_wrap(struct agp_file_private *priv, void __user *arg)
{
struct agp_bind bind_info;
struct agp_memory *memory;
DBG("");
if (copy_from_user(&bind_info, arg, sizeof(struct agp_bind)))
return -EFAULT;
memory = agp_find_mem_by_key(bind_info.key);
if (memory == NULL)
return -EINVAL;
return agp_bind_memory(memory, bind_info.pg_start);
}
static int agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
{
struct agp_memory *memory;
struct agp_unbind unbind;
DBG("");
if (copy_from_user(&unbind, arg, sizeof(struct agp_unbind)))
return -EFAULT;
memory = agp_find_mem_by_key(unbind.key);
if (memory == NULL)
return -EINVAL;
return agp_unbind_memory(memory);
}
static long agp_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
struct agp_file_private *curr_priv = file->private_data;
int ret_val = -ENOTTY;
DBG("priv=%p, cmd=%x", curr_priv, cmd);
mutex_lock(&(agp_fe.agp_mutex));
if ((agp_fe.current_controller == NULL) &&
(cmd != AGPIOC_ACQUIRE)) {
ret_val = -EINVAL;
goto ioctl_out;
}
if ((agp_fe.backend_acquired != true) &&
(cmd != AGPIOC_ACQUIRE)) {
ret_val = -EBUSY;
goto ioctl_out;
}
if (cmd != AGPIOC_ACQUIRE) {
if (!(test_bit(AGP_FF_IS_CONTROLLER, &curr_priv->access_flags))) {
ret_val = -EPERM;
goto ioctl_out;
}
/* Use the original pid of the controller,
* in case it's threaded */
if (agp_fe.current_controller->pid != curr_priv->my_pid) {
ret_val = -EBUSY;
goto ioctl_out;
}
}
switch (cmd) {
case AGPIOC_INFO:
ret_val = agpioc_info_wrap(curr_priv, (void __user *) arg);
break;
case AGPIOC_ACQUIRE:
ret_val = agpioc_acquire_wrap(curr_priv);
break;
case AGPIOC_RELEASE:
ret_val = agpioc_release_wrap(curr_priv);
break;
case AGPIOC_SETUP:
ret_val = agpioc_setup_wrap(curr_priv, (void __user *) arg);
break;
case AGPIOC_RESERVE:
ret_val = agpioc_reserve_wrap(curr_priv, (void __user *) arg);
break;
case AGPIOC_PROTECT:
ret_val = agpioc_protect_wrap(curr_priv);
break;
case AGPIOC_ALLOCATE:
ret_val = agpioc_allocate_wrap(curr_priv, (void __user *) arg);
break;
case AGPIOC_DEALLOCATE:
ret_val = agpioc_deallocate_wrap(curr_priv, (int) arg);
break;
case AGPIOC_BIND:
ret_val = agpioc_bind_wrap(curr_priv, (void __user *) arg);
break;
case AGPIOC_UNBIND:
ret_val = agpioc_unbind_wrap(curr_priv, (void __user *) arg);
break;
case AGPIOC_CHIPSET_FLUSH:
break;
}
ioctl_out:
DBG("ioctl returns %d\n", ret_val);
mutex_unlock(&(agp_fe.agp_mutex));
return ret_val;
}
static const struct file_operations agp_fops =
{
.owner = THIS_MODULE,
.llseek = no_llseek,
.read = agp_read,
.write = agp_write,
.unlocked_ioctl = agp_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_agp_ioctl,
#endif
.mmap = agp_mmap,
.open = agp_open,
.release = agp_release,
};
static struct miscdevice agp_miscdev =
{
.minor = AGPGART_MINOR,
.name = "agpgart",
.fops = &agp_fops
};
int agp_frontend_initialize(void)
{
memset(&agp_fe, 0, sizeof(struct agp_front_data));
mutex_init(&(agp_fe.agp_mutex));
if (misc_register(&agp_miscdev)) {
printk(KERN_ERR PFX "unable to get minor: %d\n", AGPGART_MINOR);
return -EIO;
}
return 0;
}
void agp_frontend_cleanup(void)
{
misc_deregister(&agp_miscdev);
}
|
gpl-2.0
|
davidmueller13/kernel_samsung_trelte
|
drivers/hid/hid-lg4ff.c
|
252
|
19533
|
/*
* Force feedback support for Logitech Gaming Wheels
*
* Including G27, G25, DFP, DFGT, FFEX, Momo, Momo2 &
* Speed Force Wireless (WiiWheel)
*
* Copyright (c) 2010 Simon Wood <simon@mungewell.org>
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/input.h>
#include <linux/usb.h>
#include <linux/hid.h>
#include "usbhid/usbhid.h"
#include "hid-lg.h"
#include "hid-ids.h"
#define DFGT_REV_MAJ 0x13
#define DFGT_REV_MIN 0x22
#define DFGT2_REV_MIN 0x26
#define DFP_REV_MAJ 0x11
#define DFP_REV_MIN 0x06
#define FFEX_REV_MAJ 0x21
#define FFEX_REV_MIN 0x00
#define G25_REV_MAJ 0x12
#define G25_REV_MIN 0x22
#define G27_REV_MAJ 0x12
#define G27_REV_MIN 0x38
#define to_hid_device(pdev) container_of(pdev, struct hid_device, dev)
static void hid_lg4ff_set_range_dfp(struct hid_device *hid, u16 range);
static void hid_lg4ff_set_range_g25(struct hid_device *hid, u16 range);
static ssize_t lg4ff_range_show(struct device *dev, struct device_attribute *attr, char *buf);
static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count);
static DEVICE_ATTR(range, S_IRWXU | S_IRWXG | S_IRWXO, lg4ff_range_show, lg4ff_range_store);
struct lg4ff_device_entry {
__u32 product_id;
__u16 range;
__u16 min_range;
__u16 max_range;
#ifdef CONFIG_LEDS_CLASS
__u8 led_state;
struct led_classdev *led[5];
#endif
struct list_head list;
void (*set_range)(struct hid_device *hid, u16 range);
};
static const signed short lg4ff_wheel_effects[] = {
FF_CONSTANT,
FF_AUTOCENTER,
-1
};
struct lg4ff_wheel {
const __u32 product_id;
const signed short *ff_effects;
const __u16 min_range;
const __u16 max_range;
void (*set_range)(struct hid_device *hid, u16 range);
};
static const struct lg4ff_wheel lg4ff_devices[] = {
{USB_DEVICE_ID_LOGITECH_WHEEL, lg4ff_wheel_effects, 40, 270, NULL},
{USB_DEVICE_ID_LOGITECH_MOMO_WHEEL, lg4ff_wheel_effects, 40, 270, NULL},
{USB_DEVICE_ID_LOGITECH_DFP_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_dfp},
{USB_DEVICE_ID_LOGITECH_G25_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_g25},
{USB_DEVICE_ID_LOGITECH_DFGT_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_g25},
{USB_DEVICE_ID_LOGITECH_G27_WHEEL, lg4ff_wheel_effects, 40, 900, hid_lg4ff_set_range_g25},
{USB_DEVICE_ID_LOGITECH_MOMO_WHEEL2, lg4ff_wheel_effects, 40, 270, NULL},
{USB_DEVICE_ID_LOGITECH_WII_WHEEL, lg4ff_wheel_effects, 40, 270, NULL}
};
struct lg4ff_native_cmd {
const __u8 cmd_num; /* Number of commands to send */
const __u8 cmd[];
};
struct lg4ff_usb_revision {
const __u16 rev_maj;
const __u16 rev_min;
const struct lg4ff_native_cmd *command;
};
static const struct lg4ff_native_cmd native_dfp = {
1,
{0xf8, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00}
};
static const struct lg4ff_native_cmd native_dfgt = {
2,
{0xf8, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, /* 1st command */
0xf8, 0x09, 0x03, 0x01, 0x00, 0x00, 0x00} /* 2nd command */
};
static const struct lg4ff_native_cmd native_g25 = {
1,
{0xf8, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00}
};
static const struct lg4ff_native_cmd native_g27 = {
2,
{0xf8, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, /* 1st command */
0xf8, 0x09, 0x04, 0x01, 0x00, 0x00, 0x00} /* 2nd command */
};
static const struct lg4ff_usb_revision lg4ff_revs[] = {
{DFGT_REV_MAJ, DFGT_REV_MIN, &native_dfgt}, /* Driving Force GT */
{DFGT_REV_MAJ, DFGT2_REV_MIN, &native_dfgt}, /* Driving Force GT v2 */
{DFP_REV_MAJ, DFP_REV_MIN, &native_dfp}, /* Driving Force Pro */
{G25_REV_MAJ, G25_REV_MIN, &native_g25}, /* G25 */
{G27_REV_MAJ, G27_REV_MIN, &native_g27}, /* G27 */
};
/* Recalculates the X axis value according to the currently selected range */
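/*
 * The wheel reports a 14-bit position (0..16383, centre 8192) scaled to
 * the currently selected range, so the value is stretched back out to
 * the containing 200- or 900-degree band here. E.g. with range == 450
 * (so max_range == 900), an input of 12288 becomes
 * 8192 + (12288 - 8192) * 900 / 450 = 16384, which is clamped to 16383.
 */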
static __s32 lg4ff_adjust_dfp_x_axis(__s32 value, __u16 range)
{
__u16 max_range;
__s32 new_value;
if (range == 900)
return value;
else if (range == 200)
return value;
else if (range < 200)
max_range = 200;
else
max_range = 900;
new_value = 8192 + mult_frac(value - 8192, max_range, range);
if (new_value < 0)
return 0;
else if (new_value > 16383)
return 16383;
else
return new_value;
}
int lg4ff_adjust_input_event(struct hid_device *hid, struct hid_field *field,
struct hid_usage *usage, __s32 value, struct lg_drv_data *drv_data)
{
struct lg4ff_device_entry *entry = drv_data->device_props;
__s32 new_value = 0;
if (!entry) {
hid_err(hid, "Device properties not found");
return 0;
}
switch (entry->product_id) {
case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
switch (usage->code) {
case ABS_X:
new_value = lg4ff_adjust_dfp_x_axis(value, entry->range);
input_event(field->hidinput->input, usage->type, usage->code, new_value);
return 1;
default:
return 0;
}
default:
return 0;
}
}
static int hid_lg4ff_play(struct input_dev *dev, void *data, struct ff_effect *effect)
{
struct hid_device *hid = input_get_drvdata(dev);
struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
__s32 *value = report->field[0]->value;
int x;
#define CLAMP(x) do { if (x < 0) x = 0; else if (x > 0xff) x = 0xff; } while (0)
switch (effect->type) {
case FF_CONSTANT:
x = effect->u.ramp.start_level + 0x80; /* 0x80 is no force */
CLAMP(x);
value[0] = 0x11; /* Slot 1 */
value[1] = 0x08;
value[2] = x;
value[3] = 0x80;
value[4] = 0x00;
value[5] = 0x00;
value[6] = 0x00;
hid_hw_request(hid, report, HID_REQ_SET_REPORT);
break;
}
return 0;
}
/* Sends default autocentering command compatible with
* all wheels except Formula Force EX */
static void hid_lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitude)
{
struct hid_device *hid = input_get_drvdata(dev);
struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
__s32 *value = report->field[0]->value;
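	/* Scale the 16-bit magnitude down for the report: bytes 2 and 3
	 * carry the top three bits (0..7), byte 4 the top eight bits
	 * (0..255). E.g. magnitude == 0x8000 gives value[2] = value[3] = 4
	 * and value[4] = 0x80. */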
value[0] = 0xfe;
value[1] = 0x0d;
value[2] = magnitude >> 13;
value[3] = magnitude >> 13;
value[4] = magnitude >> 8;
value[5] = 0x00;
value[6] = 0x00;
hid_hw_request(hid, report, HID_REQ_SET_REPORT);
}
/* Sends autocentering command compatible with Formula Force EX */
static void hid_lg4ff_set_autocenter_ffex(struct input_dev *dev, u16 magnitude)
{
struct hid_device *hid = input_get_drvdata(dev);
struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
__s32 *value = report->field[0]->value;
magnitude = magnitude * 90 / 65535;
value[0] = 0xfe;
value[1] = 0x03;
value[2] = magnitude >> 14;
value[3] = magnitude >> 14;
value[4] = magnitude;
value[5] = 0x00;
value[6] = 0x00;
hid_hw_request(hid, report, HID_REQ_SET_REPORT);
}
/* Sends command to set range compatible with G25/G27/Driving Force GT */
static void hid_lg4ff_set_range_g25(struct hid_device *hid, u16 range)
{
struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
__s32 *value = report->field[0]->value;
dbg_hid("G25/G27/DFGT: setting range to %u\n", range);
value[0] = 0xf8;
value[1] = 0x81;
value[2] = range & 0x00ff;
value[3] = (range & 0xff00) >> 8;
value[4] = 0x00;
value[5] = 0x00;
value[6] = 0x00;
hid_hw_request(hid, report, HID_REQ_SET_REPORT);
}
/* Sends commands to set range compatible with Driving Force Pro wheel */
static void hid_lg4ff_set_range_dfp(struct hid_device *hid, __u16 range)
{
struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
int start_left, start_right, full_range;
__s32 *value = report->field[0]->value;
dbg_hid("Driving Force Pro: setting range to %u\n", range);
/* Prepare "coarse" limit command */
value[0] = 0xf8;
value[1] = 0x00; /* Set later */
value[2] = 0x00;
value[3] = 0x00;
value[4] = 0x00;
value[5] = 0x00;
value[6] = 0x00;
if (range > 200) {
report->field[0]->value[1] = 0x03;
full_range = 900;
} else {
report->field[0]->value[1] = 0x02;
full_range = 200;
}
hid_hw_request(hid, report, HID_REQ_SET_REPORT);
/* Prepare "fine" limit command */
value[0] = 0x81;
value[1] = 0x0b;
value[2] = 0x00;
value[3] = 0x00;
value[4] = 0x00;
value[5] = 0x00;
value[6] = 0x00;
if (range == 200 || range == 900) { /* Do not apply any fine limit */
hid_hw_request(hid, report, HID_REQ_SET_REPORT);
return;
}
/* Construct fine limit command */
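	/* Worked example for range == 540 (full_range == 900):
	 *   start_left  = ((900 - 540 + 1) * 2047) / 900 = 821   (0x335)
	 *   start_right = 0xfff - 821                    = 3274  (0xcca)
	 * so value[2] = 0x33, value[3] = 0xcc and value[5] = 0xa4. */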
start_left = (((full_range - range + 1) * 2047) / full_range);
start_right = 0xfff - start_left;
value[2] = start_left >> 4;
value[3] = start_right >> 4;
value[4] = 0xff;
value[5] = (start_right & 0xe) << 4 | (start_left & 0xe);
value[6] = 0xff;
hid_hw_request(hid, report, HID_REQ_SET_REPORT);
}
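/* Switch the wheel to native mode: cmd->cmd holds cmd_num consecutive
 * 7-byte commands, each sent as one output report. */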
static void hid_lg4ff_switch_native(struct hid_device *hid, const struct lg4ff_native_cmd *cmd)
{
struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
__u8 i, j;
j = 0;
while (j < 7*cmd->cmd_num) {
for (i = 0; i < 7; i++)
report->field[0]->value[i] = cmd->cmd[j++];
hid_hw_request(hid, report, HID_REQ_SET_REPORT);
}
}
/* Read the current range and display it in the terminal */
static ssize_t lg4ff_range_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hid_device *hid = to_hid_device(dev);
struct lg4ff_device_entry *entry;
struct lg_drv_data *drv_data;
size_t count;
drv_data = hid_get_drvdata(hid);
if (!drv_data) {
hid_err(hid, "Private driver data not found!\n");
return 0;
}
entry = drv_data->device_props;
if (!entry) {
hid_err(hid, "Device properties not found!\n");
return 0;
}
count = scnprintf(buf, PAGE_SIZE, "%u\n", entry->range);
return count;
}
/* Set the range to a user-specified value, calling the appropriate
 * function according to the type of the wheel */
static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct hid_device *hid = to_hid_device(dev);
struct lg4ff_device_entry *entry;
struct lg_drv_data *drv_data;
__u16 range = simple_strtoul(buf, NULL, 10);
drv_data = hid_get_drvdata(hid);
if (!drv_data) {
hid_err(hid, "Private driver data not found!\n");
return 0;
}
entry = drv_data->device_props;
if (!entry) {
hid_err(hid, "Device properties not found!\n");
return 0;
}
if (range == 0)
range = entry->max_range;
/* Check if the wheel supports range setting
* and that the range is within limits for the wheel */
if (entry->set_range != NULL && range >= entry->min_range && range <= entry->max_range) {
entry->set_range(hid, range);
entry->range = range;
}
return count;
}
#ifdef CONFIG_LEDS_CLASS
static void lg4ff_set_leds(struct hid_device *hid, __u8 leds)
{
struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
__s32 *value = report->field[0]->value;
value[0] = 0xf8;
value[1] = 0x12;
value[2] = leds;
value[3] = 0x00;
value[4] = 0x00;
value[5] = 0x00;
value[6] = 0x00;
hid_hw_request(hid, report, HID_REQ_SET_REPORT);
}
static void lg4ff_led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness value)
{
struct device *dev = led_cdev->dev->parent;
struct hid_device *hid = container_of(dev, struct hid_device, dev);
struct lg_drv_data *drv_data = hid_get_drvdata(hid);
struct lg4ff_device_entry *entry;
int i, state = 0;
if (!drv_data) {
hid_err(hid, "Device data not found.");
return;
}
entry = (struct lg4ff_device_entry *)drv_data->device_props;
if (!entry) {
hid_err(hid, "Device properties not found.");
return;
}
for (i = 0; i < 5; i++) {
if (led_cdev != entry->led[i])
continue;
state = (entry->led_state >> i) & 1;
if (value == LED_OFF && state) {
entry->led_state &= ~(1 << i);
lg4ff_set_leds(hid, entry->led_state);
} else if (value != LED_OFF && !state) {
entry->led_state |= 1 << i;
lg4ff_set_leds(hid, entry->led_state);
}
break;
}
}
static enum led_brightness lg4ff_led_get_brightness(struct led_classdev *led_cdev)
{
struct device *dev = led_cdev->dev->parent;
struct hid_device *hid = container_of(dev, struct hid_device, dev);
struct lg_drv_data *drv_data = hid_get_drvdata(hid);
struct lg4ff_device_entry *entry;
int i, value = 0;
if (!drv_data) {
hid_err(hid, "Device data not found.");
return LED_OFF;
}
entry = (struct lg4ff_device_entry *)drv_data->device_props;
if (!entry) {
hid_err(hid, "Device properties not found.");
return LED_OFF;
}
for (i = 0; i < 5; i++)
if (led_cdev == entry->led[i]) {
value = (entry->led_state >> i) & 1;
break;
}
return value ? LED_FULL : LED_OFF;
}
#endif
int lg4ff_init(struct hid_device *hid)
{
struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
struct input_dev *dev = hidinput->input;
struct hid_report *report;
struct hid_field *field;
struct lg4ff_device_entry *entry;
struct lg_drv_data *drv_data;
struct usb_device_descriptor *udesc;
int error, i, j;
__u16 bcdDevice, rev_maj, rev_min;
/* Find the report to use */
if (list_empty(report_list)) {
hid_err(hid, "No output report found\n");
return -1;
}
/* Check that the report looks ok */
report = list_entry(report_list->next, struct hid_report, list);
if (!report) {
hid_err(hid, "NULL output report\n");
return -1;
}
field = report->field[0];
if (!field) {
hid_err(hid, "NULL field\n");
return -1;
}
/* Check what wheel has been connected */
for (i = 0; i < ARRAY_SIZE(lg4ff_devices); i++) {
if (hid->product == lg4ff_devices[i].product_id) {
dbg_hid("Found compatible device, product ID %04X\n", lg4ff_devices[i].product_id);
break;
}
}
if (i == ARRAY_SIZE(lg4ff_devices)) {
hid_err(hid, "Device is not supported by lg4ff driver. If you think it should be, consider reporting a bug to"
"LKML, Simon Wood <simon@mungewell.org> or Michal Maly <madcatxster@gmail.com>\n");
return -1;
}
/* Attempt to switch wheel to native mode when applicable */
udesc = &(hid_to_usb_dev(hid)->descriptor);
if (!udesc) {
hid_err(hid, "NULL USB device descriptor\n");
return -1;
}
bcdDevice = le16_to_cpu(udesc->bcdDevice);
rev_maj = bcdDevice >> 8;
rev_min = bcdDevice & 0xff;
if (lg4ff_devices[i].product_id == USB_DEVICE_ID_LOGITECH_WHEEL) {
dbg_hid("Generic wheel detected, can it do native?\n");
dbg_hid("USB revision: %2x.%02x\n", rev_maj, rev_min);
for (j = 0; j < ARRAY_SIZE(lg4ff_revs); j++) {
if (lg4ff_revs[j].rev_maj == rev_maj && lg4ff_revs[j].rev_min == rev_min) {
hid_lg4ff_switch_native(hid, lg4ff_revs[j].command);
hid_info(hid, "Switched to native mode\n");
}
}
}
/* Set supported force feedback capabilities */
for (j = 0; lg4ff_devices[i].ff_effects[j] >= 0; j++)
set_bit(lg4ff_devices[i].ff_effects[j], dev->ffbit);
error = input_ff_create_memless(dev, NULL, hid_lg4ff_play);
if (error)
return error;
/* Check if autocentering is available and
* set the centering force to zero by default */
if (test_bit(FF_AUTOCENTER, dev->ffbit)) {
if (rev_maj == FFEX_REV_MAJ && rev_min == FFEX_REV_MIN) /* Formula Force EX expects different autocentering command */
dev->ff->set_autocenter = hid_lg4ff_set_autocenter_ffex;
else
dev->ff->set_autocenter = hid_lg4ff_set_autocenter_default;
dev->ff->set_autocenter(dev, 0);
}
/* Get private driver data */
drv_data = hid_get_drvdata(hid);
if (!drv_data) {
hid_err(hid, "Cannot add device, private driver data not allocated\n");
return -1;
}
/* Initialize device properties */
entry = kzalloc(sizeof(struct lg4ff_device_entry), GFP_KERNEL);
if (!entry) {
hid_err(hid, "Cannot add device, insufficient memory to allocate device properties.\n");
return -ENOMEM;
}
drv_data->device_props = entry;
entry->product_id = lg4ff_devices[i].product_id;
entry->min_range = lg4ff_devices[i].min_range;
entry->max_range = lg4ff_devices[i].max_range;
entry->set_range = lg4ff_devices[i].set_range;
/* Create sysfs interface */
	error = device_create_file(&hid->dev, &dev_attr_range);
	if (error) {
		drv_data->device_props = NULL;
		kfree(entry);	/* don't leak the entry if sysfs creation fails */
		return error;
	}
dbg_hid("sysfs interface created\n");
/* Set the maximum range to start with */
entry->range = entry->max_range;
if (entry->set_range != NULL)
entry->set_range(hid, entry->range);
#ifdef CONFIG_LEDS_CLASS
/* register led subsystem - G27 only */
entry->led_state = 0;
for (j = 0; j < 5; j++)
entry->led[j] = NULL;
if (lg4ff_devices[i].product_id == USB_DEVICE_ID_LOGITECH_G27_WHEEL) {
struct led_classdev *led;
size_t name_sz;
char *name;
lg4ff_set_leds(hid, 0);
name_sz = strlen(dev_name(&hid->dev)) + 8;
for (j = 0; j < 5; j++) {
led = kzalloc(sizeof(struct led_classdev)+name_sz, GFP_KERNEL);
if (!led) {
hid_err(hid, "can't allocate memory for LED %d\n", j);
goto err;
}
name = (void *)(&led[1]);
snprintf(name, name_sz, "%s::RPM%d", dev_name(&hid->dev), j+1);
led->name = name;
led->brightness = 0;
led->max_brightness = 1;
led->brightness_get = lg4ff_led_get_brightness;
led->brightness_set = lg4ff_led_set_brightness;
entry->led[j] = led;
error = led_classdev_register(&hid->dev, led);
if (error) {
hid_err(hid, "failed to register LED %d. Aborting.\n", j);
err:
/* Deregister LEDs (if any) */
for (j = 0; j < 5; j++) {
led = entry->led[j];
entry->led[j] = NULL;
if (!led)
continue;
led_classdev_unregister(led);
kfree(led);
}
goto out; /* Let the driver continue without LEDs */
}
}
}
out:
#endif
hid_info(hid, "Force feedback support for Logitech Gaming Wheels\n");
return 0;
}
int lg4ff_deinit(struct hid_device *hid)
{
struct lg4ff_device_entry *entry;
struct lg_drv_data *drv_data;
device_remove_file(&hid->dev, &dev_attr_range);
drv_data = hid_get_drvdata(hid);
if (!drv_data) {
hid_err(hid, "Error while deinitializing device, no private driver data.\n");
return -1;
}
entry = drv_data->device_props;
if (!entry) {
hid_err(hid, "Error while deinitializing device, no device properties data.\n");
return -1;
}
#ifdef CONFIG_LEDS_CLASS
{
int j;
struct led_classdev *led;
/* Deregister LEDs (if any) */
for (j = 0; j < 5; j++) {
led = entry->led[j];
entry->led[j] = NULL;
if (!led)
continue;
led_classdev_unregister(led);
kfree(led);
}
}
#endif
/* Deallocate memory */
kfree(entry);
dbg_hid("Device successfully unregistered\n");
return 0;
}
|
gpl-2.0
|
bergwolf/redpatch
|
drivers/media/video/adv7180.c
|
508
|
5299
|
/*
* adv7180.c Analog Devices ADV7180 video decoder driver
* Copyright (c) 2009 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/i2c-id.h>
#include <media/v4l2-ioctl.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
#define DRIVER_NAME "adv7180"
#define ADV7180_INPUT_CONTROL_REG 0x00
#define ADV7180_INPUT_CONTROL_PAL_BG_NTSC_J_SECAM 0x00
#define ADV7180_AUTODETECT_ENABLE_REG 0x07
#define ADV7180_AUTODETECT_DEFAULT 0x7f
#define ADV7180_STATUS1_REG 0x10
#define ADV7180_STATUS1_AUTOD_MASK 0x70
#define ADV7180_STATUS1_AUTOD_NTSM_M_J 0x00
#define ADV7180_STATUS1_AUTOD_NTSC_4_43 0x10
#define ADV7180_STATUS1_AUTOD_PAL_M 0x20
#define ADV7180_STATUS1_AUTOD_PAL_60 0x30
#define ADV7180_STATUS1_AUTOD_PAL_B_G 0x40
#define ADV7180_STATUS1_AUTOD_SECAM 0x50
#define ADV7180_STATUS1_AUTOD_PAL_COMB 0x60
#define ADV7180_STATUS1_AUTOD_SECAM_525 0x70
#define ADV7180_IDENT_REG 0x11
#define ADV7180_ID_7180 0x18
struct adv7180_state {
struct v4l2_subdev sd;
};
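/*
 * Map the autodetection result in STATUS1 to a v4l2_std_id. Note that
 * both SECAM variants (AUTOD_SECAM and AUTOD_SECAM_525) are reported as
 * plain V4L2_STD_SECAM.
 */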
static v4l2_std_id determine_norm(struct i2c_client *client)
{
u8 status1 = i2c_smbus_read_byte_data(client, ADV7180_STATUS1_REG);
switch (status1 & ADV7180_STATUS1_AUTOD_MASK) {
case ADV7180_STATUS1_AUTOD_NTSM_M_J:
return V4L2_STD_NTSC_M_JP;
case ADV7180_STATUS1_AUTOD_NTSC_4_43:
return V4L2_STD_NTSC_443;
case ADV7180_STATUS1_AUTOD_PAL_M:
return V4L2_STD_PAL_M;
case ADV7180_STATUS1_AUTOD_PAL_60:
return V4L2_STD_PAL_60;
case ADV7180_STATUS1_AUTOD_PAL_B_G:
return V4L2_STD_PAL;
case ADV7180_STATUS1_AUTOD_SECAM:
return V4L2_STD_SECAM;
case ADV7180_STATUS1_AUTOD_PAL_COMB:
return V4L2_STD_PAL_Nc | V4L2_STD_PAL_N;
case ADV7180_STATUS1_AUTOD_SECAM_525:
return V4L2_STD_SECAM;
default:
return V4L2_STD_UNKNOWN;
}
}
static inline struct adv7180_state *to_state(struct v4l2_subdev *sd)
{
return container_of(sd, struct adv7180_state, sd);
}
static int adv7180_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
*std = determine_norm(client);
return 0;
}
static int adv7180_g_chip_ident(struct v4l2_subdev *sd,
struct v4l2_dbg_chip_ident *chip)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_ADV7180, 0);
}
static const struct v4l2_subdev_video_ops adv7180_video_ops = {
.querystd = adv7180_querystd,
};
static const struct v4l2_subdev_core_ops adv7180_core_ops = {
.g_chip_ident = adv7180_g_chip_ident,
};
static const struct v4l2_subdev_ops adv7180_ops = {
.core = &adv7180_core_ops,
.video = &adv7180_video_ops,
};
/*
* Generic i2c probe
* concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
*/
static int adv7180_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct adv7180_state *state;
struct v4l2_subdev *sd;
int ret;
/* Check if the adapter supports the needed features */
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
v4l_info(client, "chip found @ 0x%02x (%s)\n",
client->addr << 1, client->adapter->name);
state = kzalloc(sizeof(struct adv7180_state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &adv7180_ops);
/* Initialize adv7180 */
/* enable autodetection */
ret = i2c_smbus_write_byte_data(client, ADV7180_INPUT_CONTROL_REG,
ADV7180_INPUT_CONTROL_PAL_BG_NTSC_J_SECAM);
	if (ret >= 0)	/* i2c_smbus_write_byte_data() returns 0 on success */
ret = i2c_smbus_write_byte_data(client,
ADV7180_AUTODETECT_ENABLE_REG,
ADV7180_AUTODETECT_DEFAULT);
	if (ret < 0) {
		printk(KERN_ERR DRIVER_NAME
		       ": Failed to communicate with chip: %d\n", ret);
		kfree(state);	/* probe failed; don't leak the state */
		return ret;
	}
return 0;
}
static int adv7180_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
v4l2_device_unregister_subdev(sd);
kfree(to_state(sd));
return 0;
}
static const struct i2c_device_id adv7180_id[] = {
{DRIVER_NAME, 0},
{},
};
MODULE_DEVICE_TABLE(i2c, adv7180_id);
static struct i2c_driver adv7180_driver = {
.driver = {
.owner = THIS_MODULE,
.name = DRIVER_NAME,
},
.probe = adv7180_probe,
.remove = adv7180_remove,
.id_table = adv7180_id,
};
static __init int adv7180_init(void)
{
return i2c_add_driver(&adv7180_driver);
}
static __exit void adv7180_exit(void)
{
i2c_del_driver(&adv7180_driver);
}
module_init(adv7180_init);
module_exit(adv7180_exit);
MODULE_DESCRIPTION("Analog Devices ADV7180 video decoder driver");
MODULE_AUTHOR("Mocean Laboratories");
MODULE_LICENSE("GPL v2");
|
gpl-2.0
|
s0be/kernel_htc_msm7227
|
block/blk-lib.c
|
764
|
5475
|
/*
 * Functions related to generic helper functions
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include "blk.h"
static void blkdev_discard_end_io(struct bio *bio, int err)
{
if (err) {
if (err == -EOPNOTSUPP)
set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
clear_bit(BIO_UPTODATE, &bio->bi_flags);
}
if (bio->bi_private)
complete(bio->bi_private);
__free_page(bio_page(bio));
bio_put(bio);
}
/**
* blkdev_issue_discard - queue a discard
* @bdev: blockdev to issue discard for
* @sector: start sector
* @nr_sects: number of sectors to discard
* @gfp_mask: memory allocation flags (for bio_alloc)
* @flags: BLKDEV_IFL_* flags to control behaviour
*
* Description:
* Issue a discard request for the sectors in question.
*/
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q = bdev_get_queue(bdev);
int type = flags & BLKDEV_IFL_BARRIER ?
DISCARD_BARRIER : DISCARD_NOBARRIER;
struct bio *bio;
struct page *page;
int ret = 0;
if (!q)
return -ENXIO;
if (!blk_queue_discard(q))
return -EOPNOTSUPP;
while (nr_sects && !ret) {
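		/* bi_size is an unsigned int counted in bytes, so each bio is
		 * capped at UINT_MAX >> 9 sectors on top of the queue's own
		 * max_discard_sectors limit. */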
unsigned int sector_size = q->limits.logical_block_size;
unsigned int max_discard_sectors =
min(q->limits.max_discard_sectors, UINT_MAX >> 9);
bio = bio_alloc(gfp_mask, 1);
if (!bio)
goto out;
bio->bi_sector = sector;
bio->bi_end_io = blkdev_discard_end_io;
bio->bi_bdev = bdev;
if (flags & BLKDEV_IFL_WAIT)
bio->bi_private = &wait;
/*
* Add a zeroed one-sector payload as that's what
		 * our current implementations need. If we ever need
		 * more, the interface will need revisiting.
*/
page = alloc_page(gfp_mask | __GFP_ZERO);
if (!page)
goto out_free_bio;
if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
goto out_free_page;
/*
		 * And override the bio size - the way discard works, we
* touch many more blocks on disk than the actual payload
* length.
*/
if (nr_sects > max_discard_sectors) {
bio->bi_size = max_discard_sectors << 9;
nr_sects -= max_discard_sectors;
sector += max_discard_sectors;
} else {
bio->bi_size = nr_sects << 9;
nr_sects = 0;
}
bio_get(bio);
submit_bio(type, bio);
if (flags & BLKDEV_IFL_WAIT)
wait_for_completion(&wait);
if (bio_flagged(bio, BIO_EOPNOTSUPP))
ret = -EOPNOTSUPP;
else if (!bio_flagged(bio, BIO_UPTODATE))
ret = -EIO;
bio_put(bio);
}
return ret;
out_free_page:
__free_page(page);
out_free_bio:
bio_put(bio);
out:
return -ENOMEM;
}
EXPORT_SYMBOL(blkdev_issue_discard);
struct bio_batch
{
atomic_t done;
unsigned long flags;
struct completion *wait;
bio_end_io_t *end_io;
};
static void bio_batch_end_io(struct bio *bio, int err)
{
struct bio_batch *bb = bio->bi_private;
	/* Check bb before touching it: bi_private is only set when the
	 * caller waits on the batch, so it may legitimately be NULL */
	if (bb) {
		if (err) {
			if (err == -EOPNOTSUPP)
				set_bit(BIO_EOPNOTSUPP, &bb->flags);
			else
				clear_bit(BIO_UPTODATE, &bb->flags);
		}
		if (bb->end_io)
			bb->end_io(bio, err);
		atomic_inc(&bb->done);
		complete(bb->wait);
	}
bio_put(bio);
}
/**
 * blkdev_issue_zeroout - generate a number of zero-filled write bios
* @bdev: blockdev to issue
* @sector: start sector
* @nr_sects: number of sectors to write
* @gfp_mask: memory allocation flags (for bio_alloc)
* @flags: BLKDEV_IFL_* flags to control behaviour
*
* Description:
 * Generate and issue a number of bios with zero-filled pages. Send a
 * barrier at the beginning and at the end if requested; this guarantees
 * correct request ordering. An empty barrier allows us to avoid a
 * post-queue flush.
*/
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
int ret = 0;
struct bio *bio;
struct bio_batch bb;
unsigned int sz, issued = 0;
DECLARE_COMPLETION_ONSTACK(wait);
atomic_set(&bb.done, 0);
bb.flags = 1 << BIO_UPTODATE;
bb.wait = &wait;
bb.end_io = NULL;
if (flags & BLKDEV_IFL_BARRIER) {
/* issue async barrier before the data */
ret = blkdev_issue_flush(bdev, gfp_mask, NULL, 0);
if (ret)
return ret;
}
submit:
while (nr_sects != 0) {
bio = bio_alloc(gfp_mask,
min(nr_sects, (sector_t)BIO_MAX_PAGES));
if (!bio)
break;
bio->bi_sector = sector;
bio->bi_bdev = bdev;
bio->bi_end_io = bio_batch_end_io;
if (flags & BLKDEV_IFL_WAIT)
bio->bi_private = &bb;
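		/* Fill the bio with ZERO_PAGE pages. bio_add_page() returns
		 * the number of bytes actually added, so convert that back to
		 * sectors (512 bytes each) and move on once the bio is full. */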
while (nr_sects != 0) {
			sz = min((sector_t)PAGE_SIZE >> 9, nr_sects);
if (sz == 0)
/* bio has maximum size possible */
break;
ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0);
nr_sects -= ret >> 9;
sector += ret >> 9;
if (ret < (sz << 9))
break;
}
issued++;
submit_bio(WRITE, bio);
}
/*
	 * When all data bios are in flight, send the final barrier if requested.
*/
if (nr_sects == 0 && flags & BLKDEV_IFL_BARRIER)
ret = blkdev_issue_flush(bdev, gfp_mask, NULL,
flags & BLKDEV_IFL_WAIT);
if (flags & BLKDEV_IFL_WAIT)
/* Wait for bios in-flight */
		while (issued != atomic_read(&bb.done))
wait_for_completion(&wait);
if (!test_bit(BIO_UPTODATE, &bb.flags))
		/* One of the bios in the batch completed with an error. */
ret = -EIO;
if (ret)
goto out;
if (test_bit(BIO_EOPNOTSUPP, &bb.flags)) {
ret = -EOPNOTSUPP;
goto out;
}
if (nr_sects != 0)
goto submit;
out:
return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
|
gpl-2.0
|
sktjdgns1189/android_kernel_samsung_SHW-M290S
|
drivers/media/video/ov511.c
|
764
|
146000
|
/*
* OmniVision OV511 Camera-to-USB Bridge Driver
*
* Copyright (c) 1999-2003 Mark W. McClelland
* Original decompression code Copyright 1998-2000 OmniVision Technologies
* Many improvements by Bret Wallach <bwallac1@san.rr.com>
 * Color fixes by Orion Sky Lawlor <olawlor@acm.org> (2/26/2000)
* Snapshot code by Kevin Moore
* OV7620 fixes by Charl P. Botha <cpbotha@ieee.org>
* Changes by Claudio Matsuoka <claudio@conectiva.com>
* Original SAA7111A code by Dave Perks <dperks@ibm.net>
* URB error messages from pwc driver by Nemosoft
* generic_ioctl() code from videodev.c by Gerd Knorr and Alan Cox
* Memory management (rvmalloc) code from bttv driver, by Gerd Knorr and others
*
* Based on the Linux CPiA driver written by Peter Pregler,
* Scott J. Bertin and Johannes Erdfelt.
*
* Please see the file: Documentation/usb/ov511.txt
* and the website at: http://alpha.dyndns.org/ov511
* for more info.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/pagemap.h>
#include <asm/processor.h>
#include <linux/mm.h>
#include <linux/device.h>
#if defined (__i386__)
#include <asm/cpufeature.h>
#endif
#include "ov511.h"
/*
* Version Information
*/
#define DRIVER_VERSION "v1.64 for Linux 2.5"
#define EMAIL "mark@alpha.dyndns.org"
#define DRIVER_AUTHOR "Mark McClelland <mark@alpha.dyndns.org> & Bret Wallach \
& Orion Sky Lawlor <olawlor@acm.org> & Kevin Moore & Charl P. Botha \
<cpbotha@ieee.org> & Claudio Matsuoka <claudio@conectiva.com>"
#define DRIVER_DESC "ov511 USB Camera Driver"
#define OV511_I2C_RETRIES 3
#define ENABLE_Y_QUANTABLE 1
#define ENABLE_UV_QUANTABLE 1
#define OV511_MAX_UNIT_VIDEO 16
/* Pixel count * bytes per YUV420 pixel (1.5) */
#define MAX_FRAME_SIZE(w, h) ((w) * (h) * 3 / 2)
#define MAX_DATA_SIZE(w, h) (MAX_FRAME_SIZE(w, h) + sizeof(struct timeval))
/* Max size * bytes per YUV420 pixel (1.5) + one extra isoc frame for safety */
#define MAX_RAW_DATA_SIZE(w, h) ((w) * (h) * 3 / 2 + 1024)
#define FATAL_ERROR(rc) ((rc) < 0 && (rc) != -EPERM)
/**********************************************************************
* Module Parameters
* (See ov511.txt for detailed descriptions of these)
**********************************************************************/
/* These variables (and all static globals) default to zero */
static int autobright = 1;
static int autogain = 1;
static int autoexp = 1;
static int debug;
static int snapshot;
static int cams = 1;
static int compress;
static int testpat;
static int dumppix;
static int led = 1;
static int dump_bridge;
static int dump_sensor;
static int printph;
static int phy = 0x1f;
static int phuv = 0x05;
static int pvy = 0x06;
static int pvuv = 0x06;
static int qhy = 0x14;
static int qhuv = 0x03;
static int qvy = 0x04;
static int qvuv = 0x04;
static int lightfreq;
static int bandingfilter;
static int clockdiv = -1;
static int packetsize = -1;
static int framedrop = -1;
static int fastset;
static int force_palette;
static int backlight;
/* Bitmask marking allocated devices from 0 to OV511_MAX_UNIT_VIDEO */
static unsigned long ov511_devused;
static int unit_video[OV511_MAX_UNIT_VIDEO];
static int remove_zeros;
static int mirror;
static int ov518_color;
module_param(autobright, int, 0);
MODULE_PARM_DESC(autobright, "Sensor automatically changes brightness");
module_param(autogain, int, 0);
MODULE_PARM_DESC(autogain, "Sensor automatically changes gain");
module_param(autoexp, int, 0);
MODULE_PARM_DESC(autoexp, "Sensor automatically changes exposure");
module_param(debug, int, 0);
MODULE_PARM_DESC(debug,
"Debug level: 0=none, 1=inits, 2=warning, 3=config, 4=functions, 5=max");
module_param(snapshot, int, 0);
MODULE_PARM_DESC(snapshot, "Enable snapshot mode");
module_param(cams, int, 0);
MODULE_PARM_DESC(cams, "Number of simultaneous cameras");
module_param(compress, int, 0);
MODULE_PARM_DESC(compress, "Turn on compression");
module_param(testpat, int, 0);
MODULE_PARM_DESC(testpat,
"Replace image with vertical bar testpattern (only partially working)");
module_param(dumppix, int, 0);
MODULE_PARM_DESC(dumppix, "Dump raw pixel data");
module_param(led, int, 0);
MODULE_PARM_DESC(led,
"LED policy (OV511+ or later). 0=off, 1=on (default), 2=auto (on when open)");
module_param(dump_bridge, int, 0);
MODULE_PARM_DESC(dump_bridge, "Dump the bridge registers");
module_param(dump_sensor, int, 0);
MODULE_PARM_DESC(dump_sensor, "Dump the sensor registers");
module_param(printph, int, 0);
MODULE_PARM_DESC(printph, "Print frame start/end headers");
module_param(phy, int, 0);
MODULE_PARM_DESC(phy, "Prediction range (horiz. Y)");
module_param(phuv, int, 0);
MODULE_PARM_DESC(phuv, "Prediction range (horiz. UV)");
module_param(pvy, int, 0);
MODULE_PARM_DESC(pvy, "Prediction range (vert. Y)");
module_param(pvuv, int, 0);
MODULE_PARM_DESC(pvuv, "Prediction range (vert. UV)");
module_param(qhy, int, 0);
MODULE_PARM_DESC(qhy, "Quantization threshold (horiz. Y)");
module_param(qhuv, int, 0);
MODULE_PARM_DESC(qhuv, "Quantization threshold (horiz. UV)");
module_param(qvy, int, 0);
MODULE_PARM_DESC(qvy, "Quantization threshold (vert. Y)");
module_param(qvuv, int, 0);
MODULE_PARM_DESC(qvuv, "Quantization threshold (vert. UV)");
module_param(lightfreq, int, 0);
MODULE_PARM_DESC(lightfreq,
"Light frequency. Set to 50 or 60 Hz, or zero for default settings");
module_param(bandingfilter, int, 0);
MODULE_PARM_DESC(bandingfilter,
"Enable banding filter (to reduce effects of fluorescent lighting)");
module_param(clockdiv, int, 0);
MODULE_PARM_DESC(clockdiv, "Force pixel clock divisor to a specific value");
module_param(packetsize, int, 0);
MODULE_PARM_DESC(packetsize, "Force a specific isoc packet size");
module_param(framedrop, int, 0);
MODULE_PARM_DESC(framedrop, "Force a specific frame drop register setting");
module_param(fastset, int, 0);
MODULE_PARM_DESC(fastset, "Allows picture settings to take effect immediately");
module_param(force_palette, int, 0);
MODULE_PARM_DESC(force_palette, "Force the palette to a specific value");
module_param(backlight, int, 0);
MODULE_PARM_DESC(backlight, "For objects that are lit from behind");
static unsigned int num_uv;
module_param_array(unit_video, int, &num_uv, 0);
MODULE_PARM_DESC(unit_video,
"Force use of specific minor number(s). 0 is not allowed.");
module_param(remove_zeros, int, 0);
MODULE_PARM_DESC(remove_zeros,
"Remove zero-padding from uncompressed incoming data");
module_param(mirror, int, 0);
MODULE_PARM_DESC(mirror, "Reverse image horizontally");
module_param(ov518_color, int, 0);
MODULE_PARM_DESC(ov518_color, "Enable OV518 color (experimental)");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
/**********************************************************************
* Miscellaneous Globals
**********************************************************************/
static struct usb_driver ov511_driver;
/* Number of times to retry a failed I2C transaction. Increase this if you
* are getting "Failed to read sensor ID..." */
static const int i2c_detect_tries = 5;
static struct usb_device_id device_table [] = {
{ USB_DEVICE(VEND_OMNIVISION, PROD_OV511) },
{ USB_DEVICE(VEND_OMNIVISION, PROD_OV511PLUS) },
{ USB_DEVICE(VEND_MATTEL, PROD_ME2CAM) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE (usb, device_table);
static unsigned char yQuanTable511[] = OV511_YQUANTABLE;
static unsigned char uvQuanTable511[] = OV511_UVQUANTABLE;
static unsigned char yQuanTable518[] = OV518_YQUANTABLE;
static unsigned char uvQuanTable518[] = OV518_UVQUANTABLE;
/**********************************************************************
* Symbolic Names
**********************************************************************/
/* Known OV511-based cameras */
static struct symbolic_list camlist[] = {
{ 0, "Generic Camera (no ID)" },
{ 1, "Mustek WCam 3X" },
{ 3, "D-Link DSB-C300" },
{ 4, "Generic OV511/OV7610" },
{ 5, "Puretek PT-6007" },
{ 6, "Lifeview USB Life TV (NTSC)" },
{ 21, "Creative Labs WebCam 3" },
{ 22, "Lifeview USB Life TV (PAL D/K+B/G)" },
{ 36, "Koala-Cam" },
{ 38, "Lifeview USB Life TV (PAL)" },
{ 41, "Samsung Anycam MPC-M10" },
{ 43, "Mtekvision Zeca MV402" },
{ 46, "Suma eON" },
{ 70, "Lifeview USB Life TV (PAL/SECAM)" },
{ 100, "Lifeview RoboCam" },
{ 102, "AverMedia InterCam Elite" },
{ 112, "MediaForte MV300" }, /* or OV7110 evaluation kit */
{ 134, "Ezonics EZCam II" },
{ 192, "Webeye 2000B" },
{ 253, "Alpha Vision Tech. AlphaCam SE" },
{ -1, NULL }
};
/* Video4Linux1 Palettes */
static struct symbolic_list v4l1_plist[] = {
{ VIDEO_PALETTE_GREY, "GREY" },
{ VIDEO_PALETTE_HI240, "HI240" },
{ VIDEO_PALETTE_RGB565, "RGB565" },
{ VIDEO_PALETTE_RGB24, "RGB24" },
{ VIDEO_PALETTE_RGB32, "RGB32" },
{ VIDEO_PALETTE_RGB555, "RGB555" },
{ VIDEO_PALETTE_YUV422, "YUV422" },
{ VIDEO_PALETTE_YUYV, "YUYV" },
{ VIDEO_PALETTE_UYVY, "UYVY" },
{ VIDEO_PALETTE_YUV420, "YUV420" },
{ VIDEO_PALETTE_YUV411, "YUV411" },
{ VIDEO_PALETTE_RAW, "RAW" },
{ VIDEO_PALETTE_YUV422P,"YUV422P" },
{ VIDEO_PALETTE_YUV411P,"YUV411P" },
{ VIDEO_PALETTE_YUV420P,"YUV420P" },
{ VIDEO_PALETTE_YUV410P,"YUV410P" },
{ -1, NULL }
};
static struct symbolic_list brglist[] = {
{ BRG_OV511, "OV511" },
{ BRG_OV511PLUS, "OV511+" },
{ BRG_OV518, "OV518" },
{ BRG_OV518PLUS, "OV518+" },
{ -1, NULL }
};
static struct symbolic_list senlist[] = {
{ SEN_OV76BE, "OV76BE" },
{ SEN_OV7610, "OV7610" },
{ SEN_OV7620, "OV7620" },
{ SEN_OV7620AE, "OV7620AE" },
{ SEN_OV6620, "OV6620" },
{ SEN_OV6630, "OV6630" },
{ SEN_OV6630AE, "OV6630AE" },
{ SEN_OV6630AF, "OV6630AF" },
{ SEN_OV8600, "OV8600" },
{ SEN_KS0127, "KS0127" },
{ SEN_KS0127B, "KS0127B" },
{ SEN_SAA7111A, "SAA7111A" },
{ -1, NULL }
};
/* URB error codes: */
static struct symbolic_list urb_errlist[] = {
{ -ENOSR, "Buffer error (overrun)" },
{ -EPIPE, "Stalled (device not responding)" },
{ -EOVERFLOW, "Babble (device sends too much data)" },
{ -EPROTO, "Bit-stuff error (bad cable?)" },
{ -EILSEQ, "CRC/Timeout (bad cable?)" },
{ -ETIME, "Device does not respond to token" },
{ -ETIMEDOUT, "Device does not respond to command" },
{ -1, NULL }
};
/**********************************************************************
* Memory management
**********************************************************************/
static void *
rvmalloc(unsigned long size)
{
void *mem;
unsigned long adr;
size = PAGE_ALIGN(size);
mem = vmalloc_32(size);
if (!mem)
return NULL;
memset(mem, 0, size); /* Clear the ram out, no junk to the user */
adr = (unsigned long) mem;
while (size > 0) {
SetPageReserved(vmalloc_to_page((void *)adr));
adr += PAGE_SIZE;
size -= PAGE_SIZE;
}
return mem;
}
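/* Marking the pages reserved keeps the VM from touching them; this is what
* historically allowed vmalloc'ed capture buffers like this one to be
* remapped into user space for mmap()-based capture. */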
static void
rvfree(void *mem, unsigned long size)
{
unsigned long adr;
if (!mem)
return;
adr = (unsigned long) mem;
while ((long) size > 0) {
ClearPageReserved(vmalloc_to_page((void *)adr));
adr += PAGE_SIZE;
size -= PAGE_SIZE;
}
vfree(mem);
}
/**********************************************************************
*
* Register I/O
*
**********************************************************************/
/* Write an OV51x register */
static int
reg_w(struct usb_ov511 *ov, unsigned char reg, unsigned char value)
{
int rc;
PDEBUG(5, "0x%02X:0x%02X", reg, value);
mutex_lock(&ov->cbuf_lock);
ov->cbuf[0] = value;
rc = usb_control_msg(ov->dev,
usb_sndctrlpipe(ov->dev, 0),
(ov->bclass == BCL_OV518)?1:2 /* REG_IO */,
USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, (__u16)reg, &ov->cbuf[0], 1, 1000);
mutex_unlock(&ov->cbuf_lock);
if (rc < 0)
err("reg write: error %d: %s", rc, symbolic(urb_errlist, rc));
return rc;
}
/* Read from an OV51x register */
/* returns: negative is error, pos or zero is data */
static int
reg_r(struct usb_ov511 *ov, unsigned char reg)
{
int rc;
mutex_lock(&ov->cbuf_lock);
rc = usb_control_msg(ov->dev,
usb_rcvctrlpipe(ov->dev, 0),
(ov->bclass == BCL_OV518)?1:3 /* REG_IO */,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, (__u16)reg, &ov->cbuf[0], 1, 1000);
if (rc < 0) {
err("reg read: error %d: %s", rc, symbolic(urb_errlist, rc));
} else {
rc = ov->cbuf[0];
PDEBUG(5, "0x%02X:0x%02X", reg, ov->cbuf[0]);
}
mutex_unlock(&ov->cbuf_lock);
return rc;
}
/*
* Writes bits at positions specified by mask to an OV51x reg. Bits that are in
* the same position as 1's in "mask" are cleared and set to "value". Bits
* that are in the same position as 0's in "mask" are preserved, regardless
* of their respective state in "value".
*/
static int
reg_w_mask(struct usb_ov511 *ov,
unsigned char reg,
unsigned char value,
unsigned char mask)
{
int ret;
unsigned char oldval, newval;
ret = reg_r(ov, reg);
if (ret < 0)
return ret;
oldval = (unsigned char) ret;
oldval &= (~mask); /* Clear the masked bits */
value &= mask; /* Enforce mask on value */
newval = oldval | value; /* Set the desired bits */
return (reg_w(ov, reg, newval));
}
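/* Worked example with hypothetical values: if the register currently reads
* 0x24 and reg_w_mask() is called with value 0x40 and mask 0x60, then
* oldval = 0x24 & ~0x60 = 0x04, value = 0x40 & 0x60 = 0x40, and the
* register is rewritten as 0x04 | 0x40 = 0x44; bit 0x04, outside the mask,
* is preserved. */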
/*
* Writes a multi-byte (n-byte) value to a single register. Only valid with
* certain registers (0x30 and 0xc4 - 0xce).
*/
static int
ov518_reg_w32(struct usb_ov511 *ov, unsigned char reg, u32 val, int n)
{
int rc;
PDEBUG(5, "0x%02X:%7d, n=%d", reg, val, n);
mutex_lock(&ov->cbuf_lock);
*((__le32 *)ov->cbuf) = __cpu_to_le32(val);
rc = usb_control_msg(ov->dev,
usb_sndctrlpipe(ov->dev, 0),
1 /* REG_IO */,
USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, (__u16)reg, ov->cbuf, n, 1000);
mutex_unlock(&ov->cbuf_lock);
if (rc < 0)
err("reg write multiple: error %d: %s", rc,
symbolic(urb_errlist, rc));
return rc;
}
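/* The value is transferred little-endian: e.g. ov518_reg_w32(ov, 0xc4,
* 400, 2) (400 == 0x0190) sends 0x90 as the first byte on the wire and
* 0x01 as the second. */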
static int
ov511_upload_quan_tables(struct usb_ov511 *ov)
{
unsigned char *pYTable = yQuanTable511;
unsigned char *pUVTable = uvQuanTable511;
unsigned char val0, val1;
int i, rc, reg = R511_COMP_LUT_BEGIN;
PDEBUG(4, "Uploading quantization tables");
for (i = 0; i < OV511_QUANTABLESIZE / 2; i++) {
if (ENABLE_Y_QUANTABLE) {
val0 = *pYTable++;
val1 = *pYTable++;
val0 &= 0x0f;
val1 &= 0x0f;
val0 |= val1 << 4;
rc = reg_w(ov, reg, val0);
if (rc < 0)
return rc;
}
if (ENABLE_UV_QUANTABLE) {
val0 = *pUVTable++;
val1 = *pUVTable++;
val0 &= 0x0f;
val1 &= 0x0f;
val0 |= val1 << 4;
rc = reg_w(ov, reg + OV511_QUANTABLESIZE/2, val0);
if (rc < 0)
return rc;
}
reg++;
}
return 0;
}
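/* Each LUT register packs two consecutive 4-bit table entries, low nibble
* first. For example (hypothetical table bytes), entries 0x03 and 0x08
* become (0x03 & 0x0f) | ((0x08 & 0x0f) << 4) = 0x83 in a single register
* write. */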
/* OV518 quantization tables are 8x4 (instead of 8x8) */
static int
ov518_upload_quan_tables(struct usb_ov511 *ov)
{
unsigned char *pYTable = yQuanTable518;
unsigned char *pUVTable = uvQuanTable518;
unsigned char val0, val1;
int i, rc, reg = R511_COMP_LUT_BEGIN;
PDEBUG(4, "Uploading quantization tables");
for (i = 0; i < OV518_QUANTABLESIZE / 2; i++) {
if (ENABLE_Y_QUANTABLE) {
val0 = *pYTable++;
val1 = *pYTable++;
val0 &= 0x0f;
val1 &= 0x0f;
val0 |= val1 << 4;
rc = reg_w(ov, reg, val0);
if (rc < 0)
return rc;
}
if (ENABLE_UV_QUANTABLE) {
val0 = *pUVTable++;
val1 = *pUVTable++;
val0 &= 0x0f;
val1 &= 0x0f;
val0 |= val1 << 4;
rc = reg_w(ov, reg + OV518_QUANTABLESIZE/2, val0);
if (rc < 0)
return rc;
}
reg++;
}
return 0;
}
static int
ov51x_reset(struct usb_ov511 *ov, unsigned char reset_type)
{
int rc;
/* Setting bit 0 not allowed on 518/518Plus */
if (ov->bclass == BCL_OV518)
reset_type &= 0xfe;
PDEBUG(4, "Reset: type=0x%02X", reset_type);
rc = reg_w(ov, R51x_SYS_RESET, reset_type);
rc = reg_w(ov, R51x_SYS_RESET, 0);
if (rc < 0)
err("reset: command failed");
return rc;
}
/**********************************************************************
*
* Low-level I2C I/O functions
*
**********************************************************************/
/* NOTE: Do not call this function directly!
* The OV518 I2C I/O procedure is different, hence this separate function.
* It is normally only called from i2c_w(). Unlike the OV511 path, it does
* not check the I2C ack status, so it reports success even if the sensor
* is absent or not responding.
*/
static int
ov518_i2c_write_internal(struct usb_ov511 *ov,
unsigned char reg,
unsigned char value)
{
int rc;
PDEBUG(5, "0x%02X:0x%02X", reg, value);
/* Select camera register */
rc = reg_w(ov, R51x_I2C_SADDR_3, reg);
if (rc < 0)
return rc;
/* Write "value" to I2C data port of OV511 */
rc = reg_w(ov, R51x_I2C_DATA, value);
if (rc < 0)
return rc;
/* Initiate 3-byte write cycle */
rc = reg_w(ov, R518_I2C_CTL, 0x01);
if (rc < 0)
return rc;
return 0;
}
/* NOTE: Do not call this function directly! */
static int
ov511_i2c_write_internal(struct usb_ov511 *ov,
unsigned char reg,
unsigned char value)
{
int rc, retries;
PDEBUG(5, "0x%02X:0x%02X", reg, value);
/* Three byte write cycle */
for (retries = OV511_I2C_RETRIES; ; ) {
/* Select camera register */
rc = reg_w(ov, R51x_I2C_SADDR_3, reg);
if (rc < 0)
break;
/* Write "value" to I2C data port of OV511 */
rc = reg_w(ov, R51x_I2C_DATA, value);
if (rc < 0)
break;
/* Initiate 3-byte write cycle */
rc = reg_w(ov, R511_I2C_CTL, 0x01);
if (rc < 0)
break;
/* Retry until idle */
do {
rc = reg_r(ov, R511_I2C_CTL);
} while (rc > 0 && ((rc&1) == 0));
if (rc < 0)
break;
/* Ack? */
if ((rc&2) == 0) {
rc = 0;
break;
}
#if 0
/* I2C abort */
reg_w(ov, R511_I2C_CTL, 0x10);
#endif
if (--retries < 0) {
err("i2c write retries exhausted");
rc = -1;
break;
}
}
return rc;
}
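/* As used above, R511_I2C_CTL appears to expose bit 0 as "transaction
* done" (the polling loop waits for it to be set) and bit 1 as "no ack
* from the slave" (a set bit triggers a retry). This reading is inferred
* from the polling logic rather than from a datasheet. */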
/* NOTE: Do not call this function directly!
* The OV518 I2C I/O procedure is different, hence this separate function.
* It is normally only called from i2c_r(). Unlike the OV511 path, it does
* not check the I2C ack status, so it reports success even if the sensor
* is absent or not responding.
*/
static int
ov518_i2c_read_internal(struct usb_ov511 *ov, unsigned char reg)
{
int rc, value;
/* Select camera register */
rc = reg_w(ov, R51x_I2C_SADDR_2, reg);
if (rc < 0)
return rc;
/* Initiate 2-byte write cycle */
rc = reg_w(ov, R518_I2C_CTL, 0x03);
if (rc < 0)
return rc;
/* Initiate 2-byte read cycle */
rc = reg_w(ov, R518_I2C_CTL, 0x05);
if (rc < 0)
return rc;
value = reg_r(ov, R51x_I2C_DATA);
PDEBUG(5, "0x%02X:0x%02X", reg, value);
return value;
}
/* NOTE: Do not call this function directly!
* returns: negative is error, pos or zero is data */
static int
ov511_i2c_read_internal(struct usb_ov511 *ov, unsigned char reg)
{
int rc, value, retries;
/* Two byte write cycle */
for (retries = OV511_I2C_RETRIES; ; ) {
/* Select camera register */
rc = reg_w(ov, R51x_I2C_SADDR_2, reg);
if (rc < 0)
return rc;
/* Initiate 2-byte write cycle */
rc = reg_w(ov, R511_I2C_CTL, 0x03);
if (rc < 0)
return rc;
/* Retry until idle */
do {
rc = reg_r(ov, R511_I2C_CTL);
} while (rc > 0 && ((rc & 1) == 0));
if (rc < 0)
return rc;
if ((rc&2) == 0) /* Ack? */
break;
/* I2C abort */
reg_w(ov, R511_I2C_CTL, 0x10);
if (--retries < 0) {
err("i2c write retries exhausted");
return -1;
}
}
/* Two byte read cycle */
for (retries = OV511_I2C_RETRIES; ; ) {
/* Initiate 2-byte read cycle */
rc = reg_w(ov, R511_I2C_CTL, 0x05);
if (rc < 0)
return rc;
/* Retry until idle */
do {
rc = reg_r(ov, R511_I2C_CTL);
} while (rc > 0 && ((rc&1) == 0));
if (rc < 0)
return rc;
if ((rc&2) == 0) /* Ack? */
break;
/* I2C abort */
rc = reg_w(ov, R511_I2C_CTL, 0x10);
if (rc < 0)
return rc;
if (--retries < 0) {
err("i2c read retries exhausted");
return -1;
}
}
value = reg_r(ov, R51x_I2C_DATA);
PDEBUG(5, "0x%02X:0x%02X", reg, value);
/* This is needed to make i2c_w() work */
rc = reg_w(ov, R511_I2C_CTL, 0x05);
if (rc < 0)
return rc;
return value;
}
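/* To summarize the protocol implemented above: a 2-byte write cycle
* (slave address + register index) sets the register pointer, then a
* separate 2-byte read cycle fetches the data; each phase retries on a
* missing ack, and the final 0x05 write leaves the I2C engine in the
* state that subsequent i2c_w() calls expect. */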
/* returns: negative is error, pos or zero is data */
static int
i2c_r(struct usb_ov511 *ov, unsigned char reg)
{
int rc;
mutex_lock(&ov->i2c_lock);
if (ov->bclass == BCL_OV518)
rc = ov518_i2c_read_internal(ov, reg);
else
rc = ov511_i2c_read_internal(ov, reg);
mutex_unlock(&ov->i2c_lock);
return rc;
}
static int
i2c_w(struct usb_ov511 *ov, unsigned char reg, unsigned char value)
{
int rc;
mutex_lock(&ov->i2c_lock);
if (ov->bclass == BCL_OV518)
rc = ov518_i2c_write_internal(ov, reg, value);
else
rc = ov511_i2c_write_internal(ov, reg, value);
mutex_unlock(&ov->i2c_lock);
return rc;
}
/* Do not call this function directly! */
static int
ov51x_i2c_write_mask_internal(struct usb_ov511 *ov,
unsigned char reg,
unsigned char value,
unsigned char mask)
{
int rc;
unsigned char oldval, newval;
if (mask == 0xff) {
newval = value;
} else {
if (ov->bclass == BCL_OV518)
rc = ov518_i2c_read_internal(ov, reg);
else
rc = ov511_i2c_read_internal(ov, reg);
if (rc < 0)
return rc;
oldval = (unsigned char) rc;
oldval &= (~mask); /* Clear the masked bits */
value &= mask; /* Enforce mask on value */
newval = oldval | value; /* Set the desired bits */
}
if (ov->bclass == BCL_OV518)
return (ov518_i2c_write_internal(ov, reg, newval));
else
return (ov511_i2c_write_internal(ov, reg, newval));
}
/* Writes bits at positions specified by mask to an I2C reg. Bits that are in
* the same position as 1's in "mask" are cleared and set to "value". Bits
* that are in the same position as 0's in "mask" are preserved, regardless
* of their respective state in "value".
*/
static int
i2c_w_mask(struct usb_ov511 *ov,
unsigned char reg,
unsigned char value,
unsigned char mask)
{
int rc;
mutex_lock(&ov->i2c_lock);
rc = ov51x_i2c_write_mask_internal(ov, reg, value, mask);
mutex_unlock(&ov->i2c_lock);
return rc;
}
/* Set the read and write slave IDs. The "slave" argument is the write slave,
* and the read slave will be set to (slave + 1). ov->i2c_lock should be held
* when calling this. This should not be called from outside the i2c I/O
* functions.
*/
static int
i2c_set_slave_internal(struct usb_ov511 *ov, unsigned char slave)
{
int rc;
rc = reg_w(ov, R51x_I2C_W_SID, slave);
if (rc < 0)
return rc;
rc = reg_w(ov, R51x_I2C_R_SID, slave + 1);
if (rc < 0)
return rc;
return 0;
}
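/* Example: OmniVision sensors conventionally answer on an even write
* address and the next odd address for reads, so a write slave of 0x42
* (OV76xx family) implies a read slave of 0x43. */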
/* Write to a specific I2C slave ID and register, using the specified mask */
static int
i2c_w_slave(struct usb_ov511 *ov,
unsigned char slave,
unsigned char reg,
unsigned char value,
unsigned char mask)
{
int rc = 0;
mutex_lock(&ov->i2c_lock);
/* Set new slave IDs */
rc = i2c_set_slave_internal(ov, slave);
if (rc < 0)
goto out;
rc = ov51x_i2c_write_mask_internal(ov, reg, value, mask);
out:
/* Restore primary IDs */
if (i2c_set_slave_internal(ov, ov->primary_i2c_slave) < 0)
err("Couldn't restore primary I2C slave");
mutex_unlock(&ov->i2c_lock);
return rc;
}
/* Read from a specific I2C slave ID and register */
static int
i2c_r_slave(struct usb_ov511 *ov,
unsigned char slave,
unsigned char reg)
{
int rc;
mutex_lock(&ov->i2c_lock);
/* Set new slave IDs */
rc = i2c_set_slave_internal(ov, slave);
if (rc < 0)
goto out;
if (ov->bclass == BCL_OV518)
rc = ov518_i2c_read_internal(ov, reg);
else
rc = ov511_i2c_read_internal(ov, reg);
out:
/* Restore primary IDs */
if (i2c_set_slave_internal(ov, ov->primary_i2c_slave) < 0)
err("Couldn't restore primary I2C slave");
mutex_unlock(&ov->i2c_lock);
return rc;
}
/* Sets I2C read and write slave IDs. Returns <0 for error */
static int
ov51x_set_slave_ids(struct usb_ov511 *ov, unsigned char sid)
{
int rc;
mutex_lock(&ov->i2c_lock);
rc = i2c_set_slave_internal(ov, sid);
if (rc < 0)
goto out;
// FIXME: Is this actually necessary?
rc = ov51x_reset(ov, OV511_RESET_NOREGS);
out:
mutex_unlock(&ov->i2c_lock);
return rc;
}
static int
write_regvals(struct usb_ov511 *ov, struct ov511_regvals * pRegvals)
{
int rc;
while (pRegvals->bus != OV511_DONE_BUS) {
if (pRegvals->bus == OV511_REG_BUS) {
if ((rc = reg_w(ov, pRegvals->reg, pRegvals->val)) < 0)
return rc;
} else if (pRegvals->bus == OV511_I2C_BUS) {
if ((rc = i2c_w(ov, pRegvals->reg, pRegvals->val)) < 0)
return rc;
} else {
err("Bad regval array");
return -1;
}
pRegvals++;
}
return 0;
}
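/* A regvals table mixes bridge and sensor writes and must end with an
* OV511_DONE_BUS entry. A minimal illustrative table (register values
* made up for the example, not device-accurate):
*
*	static struct ov511_regvals example_regvals[] = {
*		{ OV511_REG_BUS, 0x20, 0x01 },	<- bridge register write
*		{ OV511_I2C_BUS, 0x12, 0x80 },	<- sensor register write
*		{ OV511_DONE_BUS, 0x00, 0x00 },	<- terminator
*	};
*/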
#ifdef OV511_DEBUG
static void
dump_i2c_range(struct usb_ov511 *ov, int reg1, int regn)
{
int i, rc;
for (i = reg1; i <= regn; i++) {
rc = i2c_r(ov, i);
dev_info(&ov->dev->dev, "Sensor[0x%02X] = 0x%02X\n", i, rc);
}
}
static void
dump_i2c_regs(struct usb_ov511 *ov)
{
dev_info(&ov->dev->dev, "I2C REGS\n");
dump_i2c_range(ov, 0x00, 0x7C);
}
static void
dump_reg_range(struct usb_ov511 *ov, int reg1, int regn)
{
int i, rc;
for (i = reg1; i <= regn; i++) {
rc = reg_r(ov, i);
dev_info(&ov->dev->dev, "OV511[0x%02X] = 0x%02X\n", i, rc);
}
}
static void
ov511_dump_regs(struct usb_ov511 *ov)
{
dev_info(&ov->dev->dev, "CAMERA INTERFACE REGS\n");
dump_reg_range(ov, 0x10, 0x1f);
dev_info(&ov->dev->dev, "DRAM INTERFACE REGS\n");
dump_reg_range(ov, 0x20, 0x23);
dev_info(&ov->dev->dev, "ISO FIFO REGS\n");
dump_reg_range(ov, 0x30, 0x31);
dev_info(&ov->dev->dev, "PIO REGS\n");
dump_reg_range(ov, 0x38, 0x39);
dump_reg_range(ov, 0x3e, 0x3e);
dev_info(&ov->dev->dev, "I2C REGS\n");
dump_reg_range(ov, 0x40, 0x49);
dev_info(&ov->dev->dev, "SYSTEM CONTROL REGS\n");
dump_reg_range(ov, 0x50, 0x55);
dump_reg_range(ov, 0x5e, 0x5f);
dev_info(&ov->dev->dev, "OmniCE REGS\n");
dump_reg_range(ov, 0x70, 0x79);
/* NOTE: Quantization tables are not readable. You will get the value
* in reg. 0x79 for every table register */
dump_reg_range(ov, 0x80, 0x9f);
dump_reg_range(ov, 0xa0, 0xbf);
}
static void
ov518_dump_regs(struct usb_ov511 *ov)
{
dev_info(&ov->dev->dev, "VIDEO MODE REGS\n");
dump_reg_range(ov, 0x20, 0x2f);
dev_info(&ov->dev->dev, "DATA PUMP AND SNAPSHOT REGS\n");
dump_reg_range(ov, 0x30, 0x3f);
dev_info(&ov->dev->dev, "I2C REGS\n");
dump_reg_range(ov, 0x40, 0x4f);
dev_info(&ov->dev->dev, "SYSTEM CONTROL AND VENDOR REGS\n");
dump_reg_range(ov, 0x50, 0x5f);
dev_info(&ov->dev->dev, "60 - 6F\n");
dump_reg_range(ov, 0x60, 0x6f);
dev_info(&ov->dev->dev, "70 - 7F\n");
dump_reg_range(ov, 0x70, 0x7f);
dev_info(&ov->dev->dev, "Y QUANTIZATION TABLE\n");
dump_reg_range(ov, 0x80, 0x8f);
dev_info(&ov->dev->dev, "UV QUANTIZATION TABLE\n");
dump_reg_range(ov, 0x90, 0x9f);
dev_info(&ov->dev->dev, "A0 - BF\n");
dump_reg_range(ov, 0xa0, 0xbf);
dev_info(&ov->dev->dev, "CBR\n");
dump_reg_range(ov, 0xc0, 0xcf);
}
#endif
/*****************************************************************************/
/* Temporarily stops OV511 from functioning. Must do this before changing
* registers while the camera is streaming */
static inline int
ov51x_stop(struct usb_ov511 *ov)
{
PDEBUG(4, "stopping");
ov->stopped = 1;
if (ov->bclass == BCL_OV518)
return (reg_w_mask(ov, R51x_SYS_RESET, 0x3a, 0x3a));
else
return (reg_w(ov, R51x_SYS_RESET, 0x3d));
}
/* Restarts the OV511 after ov51x_stop() is called. Has no effect if it is
* not actually stopped (for performance). */
static inline int
ov51x_restart(struct usb_ov511 *ov)
{
if (ov->stopped) {
PDEBUG(4, "restarting");
ov->stopped = 0;
/* Reinitialize the stream */
if (ov->bclass == BCL_OV518)
reg_w(ov, 0x2f, 0x80);
return (reg_w(ov, R51x_SYS_RESET, 0x00));
}
return 0;
}
/* Sleeps until no frames are active. Returns nonzero if interrupted by a signal */
static int
ov51x_wait_frames_inactive(struct usb_ov511 *ov)
{
return wait_event_interruptible(ov->wq, ov->curframe < 0);
}
/* Resets the hardware snapshot button */
static void
ov51x_clear_snapshot(struct usb_ov511 *ov)
{
if (ov->bclass == BCL_OV511) {
reg_w(ov, R51x_SYS_SNAP, 0x00);
reg_w(ov, R51x_SYS_SNAP, 0x02);
reg_w(ov, R51x_SYS_SNAP, 0x00);
} else if (ov->bclass == BCL_OV518) {
dev_warn(&ov->dev->dev,
"snapshot reset not supported yet on OV518(+)\n");
} else {
dev_err(&ov->dev->dev, "clear snap: invalid bridge type\n");
}
}
#if 0
/* Checks the status of the snapshot button. Returns 1 if it was pressed since
* it was last cleared, and zero in all other cases (including errors) */
static int
ov51x_check_snapshot(struct usb_ov511 *ov)
{
int ret, status = 0;
if (ov->bclass == BCL_OV511) {
ret = reg_r(ov, R51x_SYS_SNAP);
if (ret < 0) {
dev_err(&ov->dev->dev,
"Error checking snspshot status (%d)\n", ret);
} else if (ret & 0x08) {
status = 1;
}
} else if (ov->bclass == BCL_OV518) {
dev_warn(&ov->dev->dev,
"snapshot check not supported yet on OV518(+)\n");
} else {
dev_err(&ov->dev->dev, "clear snap: invalid bridge type\n");
}
return status;
}
#endif
/* This does an initial reset of an OmniVision sensor and ensures that I2C
* is synchronized. Returns <0 for failure.
*/
static int
init_ov_sensor(struct usb_ov511 *ov)
{
int i, success;
/* Reset the sensor */
if (i2c_w(ov, 0x12, 0x80) < 0)
return -EIO;
/* Wait for it to initialize */
msleep(150);
for (i = 0, success = 0; i < i2c_detect_tries && !success; i++) {
if ((i2c_r(ov, OV7610_REG_ID_HIGH) == 0x7F) &&
(i2c_r(ov, OV7610_REG_ID_LOW) == 0xA2)) {
success = 1;
continue;
}
/* Reset the sensor */
if (i2c_w(ov, 0x12, 0x80) < 0)
return -EIO;
/* Wait for it to initialize */
msleep(150);
/* Dummy read to sync I2C */
if (i2c_r(ov, 0x00) < 0)
return -EIO;
}
if (!success)
return -EIO;
PDEBUG(1, "I2C synced in %d attempt(s)", i);
return 0;
}
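/* The ID check above matches OV7610_REG_ID_HIGH/LOW against 0x7F/0xA2,
* the OmniVision manufacturer ID; reading it back correctly after a reset
* confirms both that the sensor is present and that the I2C bus is in
* sync. */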
static int
ov511_set_packet_size(struct usb_ov511 *ov, int size)
{
int alt, mult;
if (ov51x_stop(ov) < 0)
return -EIO;
mult = size >> 5;
if (ov->bridge == BRG_OV511) {
if (size == 0)
alt = OV511_ALT_SIZE_0;
else if (size == 257)
alt = OV511_ALT_SIZE_257;
else if (size == 513)
alt = OV511_ALT_SIZE_513;
else if (size == 769)
alt = OV511_ALT_SIZE_769;
else if (size == 993)
alt = OV511_ALT_SIZE_993;
else {
err("Set packet size: invalid size (%d)", size);
return -EINVAL;
}
} else if (ov->bridge == BRG_OV511PLUS) {
if (size == 0)
alt = OV511PLUS_ALT_SIZE_0;
else if (size == 33)
alt = OV511PLUS_ALT_SIZE_33;
else if (size == 129)
alt = OV511PLUS_ALT_SIZE_129;
else if (size == 257)
alt = OV511PLUS_ALT_SIZE_257;
else if (size == 385)
alt = OV511PLUS_ALT_SIZE_385;
else if (size == 513)
alt = OV511PLUS_ALT_SIZE_513;
else if (size == 769)
alt = OV511PLUS_ALT_SIZE_769;
else if (size == 961)
alt = OV511PLUS_ALT_SIZE_961;
else {
err("Set packet size: invalid size (%d)", size);
return -EINVAL;
}
} else {
err("Set packet size: Invalid bridge type");
return -EINVAL;
}
PDEBUG(3, "%d, mult=%d, alt=%d", size, mult, alt);
if (reg_w(ov, R51x_FIFO_PSIZE, mult) < 0)
return -EIO;
if (usb_set_interface(ov->dev, ov->iface, alt) < 0) {
err("Set packet size: set interface error");
return -EBUSY;
}
if (ov51x_reset(ov, OV511_RESET_NOREGS) < 0)
return -EIO;
ov->packet_size = size;
if (ov51x_restart(ov) < 0)
return -EIO;
return 0;
}
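/* The FIFO size register works in 32-byte units, hence mult = size >> 5;
* e.g. a 993-byte packet programs mult = 993 >> 5 = 31. */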
/* Note: Unlike the OV511/OV511+, the size argument does NOT include the
* optional packet number byte. The actual size *is* stored in ov->packet_size,
* though. */
static int
ov518_set_packet_size(struct usb_ov511 *ov, int size)
{
int alt;
if (ov51x_stop(ov) < 0)
return -EIO;
if (ov->bclass == BCL_OV518) {
if (size == 0)
alt = OV518_ALT_SIZE_0;
else if (size == 128)
alt = OV518_ALT_SIZE_128;
else if (size == 256)
alt = OV518_ALT_SIZE_256;
else if (size == 384)
alt = OV518_ALT_SIZE_384;
else if (size == 512)
alt = OV518_ALT_SIZE_512;
else if (size == 640)
alt = OV518_ALT_SIZE_640;
else if (size == 768)
alt = OV518_ALT_SIZE_768;
else if (size == 896)
alt = OV518_ALT_SIZE_896;
else {
err("Set packet size: invalid size (%d)", size);
return -EINVAL;
}
} else {
err("Set packet size: Invalid bridge type");
return -EINVAL;
}
PDEBUG(3, "%d, alt=%d", size, alt);
ov->packet_size = size;
if (size > 0) {
/* Program ISO FIFO size reg (packet number isn't included) */
ov518_reg_w32(ov, 0x30, size, 2);
if (ov->packet_numbering)
++ov->packet_size;
}
if (usb_set_interface(ov->dev, ov->iface, alt) < 0) {
err("Set packet size: set interface error");
return -EBUSY;
}
/* Initialize the stream */
if (reg_w(ov, 0x2f, 0x80) < 0)
return -EIO;
if (ov51x_restart(ov) < 0)
return -EIO;
if (ov51x_reset(ov, OV511_RESET_NOREGS) < 0)
return -EIO;
return 0;
}
/* Upload compression params and quantization tables. Returns 0 for success. */
static int
ov511_init_compression(struct usb_ov511 *ov)
{
int rc = 0;
if (!ov->compress_inited) {
reg_w(ov, 0x70, phy);
reg_w(ov, 0x71, phuv);
reg_w(ov, 0x72, pvy);
reg_w(ov, 0x73, pvuv);
reg_w(ov, 0x74, qhy);
reg_w(ov, 0x75, qhuv);
reg_w(ov, 0x76, qvy);
reg_w(ov, 0x77, qvuv);
if (ov511_upload_quan_tables(ov) < 0) {
err("Error uploading quantization tables");
rc = -EIO;
goto out;
}
}
ov->compress_inited = 1;
out:
return rc;
}
/* Upload compression params and quantization tables. Returns 0 for success. */
static int
ov518_init_compression(struct usb_ov511 *ov)
{
int rc = 0;
if (!ov->compress_inited) {
if (ov518_upload_quan_tables(ov) < 0) {
err("Error uploading quantization tables");
rc = -EIO;
goto out;
}
}
ov->compress_inited = 1;
out:
return rc;
}
/* -------------------------------------------------------------------------- */
/* Sets sensor's contrast setting to "val" */
static int
sensor_set_contrast(struct usb_ov511 *ov, unsigned short val)
{
int rc;
PDEBUG(3, "%d", val);
if (ov->stop_during_set)
if (ov51x_stop(ov) < 0)
return -EIO;
switch (ov->sensor) {
case SEN_OV7610:
case SEN_OV6620:
{
rc = i2c_w(ov, OV7610_REG_CNT, val >> 8);
if (rc < 0)
goto out;
break;
}
case SEN_OV6630:
{
rc = i2c_w_mask(ov, OV7610_REG_CNT, val >> 12, 0x0f);
if (rc < 0)
goto out;
break;
}
case SEN_OV7620:
{
unsigned char ctab[] = {
0x01, 0x05, 0x09, 0x11, 0x15, 0x35, 0x37, 0x57,
0x5b, 0xa5, 0xa7, 0xc7, 0xc9, 0xcf, 0xef, 0xff
};
/* Use Y gamma control instead. Bit 0 enables it. */
rc = i2c_w(ov, 0x64, ctab[val>>12]);
if (rc < 0)
goto out;
break;
}
case SEN_SAA7111A:
{
rc = i2c_w(ov, 0x0b, val >> 9);
if (rc < 0)
goto out;
break;
}
default:
{
PDEBUG(3, "Unsupported with this sensor");
rc = -EPERM;
goto out;
}
}
rc = 0; /* Success */
ov->contrast = val;
out:
if (ov51x_restart(ov) < 0)
return -EIO;
return rc;
}
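/* In the OV7620 branch above, the top nibble of the 16-bit V4L contrast
* value indexes the 16-entry gamma table: e.g. val = 0x8000 selects
* ctab[8] = 0x5b. Every table entry has bit 0 set, keeping Y gamma
* enabled. */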
/* Gets sensor's contrast setting */
static int
sensor_get_contrast(struct usb_ov511 *ov, unsigned short *val)
{
int rc;
switch (ov->sensor) {
case SEN_OV7610:
case SEN_OV6620:
rc = i2c_r(ov, OV7610_REG_CNT);
if (rc < 0)
return rc;
else
*val = rc << 8;
break;
case SEN_OV6630:
rc = i2c_r(ov, OV7610_REG_CNT);
if (rc < 0)
return rc;
else
*val = rc << 12;
break;
case SEN_OV7620:
/* Use Y gamma reg instead. Bit 0 is the enable bit. */
rc = i2c_r(ov, 0x64);
if (rc < 0)
return rc;
else
*val = (rc & 0xfe) << 8;
break;
case SEN_SAA7111A:
*val = ov->contrast;
break;
default:
PDEBUG(3, "Unsupported with this sensor");
return -EPERM;
}
PDEBUG(3, "%d", *val);
ov->contrast = *val;
return 0;
}
/* -------------------------------------------------------------------------- */
/* Sets sensor's brightness setting to "val" */
static int
sensor_set_brightness(struct usb_ov511 *ov, unsigned short val)
{
int rc;
PDEBUG(4, "%d", val);
if (ov->stop_during_set)
if (ov51x_stop(ov) < 0)
return -EIO;
switch (ov->sensor) {
case SEN_OV7610:
case SEN_OV76BE:
case SEN_OV6620:
case SEN_OV6630:
rc = i2c_w(ov, OV7610_REG_BRT, val >> 8);
if (rc < 0)
goto out;
break;
case SEN_OV7620:
/* 7620 doesn't like manual changes when in auto mode */
if (!ov->auto_brt) {
rc = i2c_w(ov, OV7610_REG_BRT, val >> 8);
if (rc < 0)
goto out;
}
break;
case SEN_SAA7111A:
rc = i2c_w(ov, 0x0a, val >> 8);
if (rc < 0)
goto out;
break;
default:
PDEBUG(3, "Unsupported with this sensor");
rc = -EPERM;
goto out;
}
rc = 0; /* Success */
ov->brightness = val;
out:
if (ov51x_restart(ov) < 0)
return -EIO;
return rc;
}
/* Gets sensor's brightness setting */
static int
sensor_get_brightness(struct usb_ov511 *ov, unsigned short *val)
{
int rc;
switch (ov->sensor) {
case SEN_OV7610:
case SEN_OV76BE:
case SEN_OV7620:
case SEN_OV6620:
case SEN_OV6630:
rc = i2c_r(ov, OV7610_REG_BRT);
if (rc < 0)
return rc;
else
*val = rc << 8;
break;
case SEN_SAA7111A:
*val = ov->brightness;
break;
default:
PDEBUG(3, "Unsupported with this sensor");
return -EPERM;
}
PDEBUG(3, "%d", *val);
ov->brightness = *val;
return 0;
}
/* -------------------------------------------------------------------------- */
/* Sets sensor's saturation (color intensity) setting to "val" */
static int
sensor_set_saturation(struct usb_ov511 *ov, unsigned short val)
{
int rc;
PDEBUG(3, "%d", val);
if (ov->stop_during_set)
if (ov51x_stop(ov) < 0)
return -EIO;
switch (ov->sensor) {
case SEN_OV7610:
case SEN_OV76BE:
case SEN_OV6620:
case SEN_OV6630:
rc = i2c_w(ov, OV7610_REG_SAT, val >> 8);
if (rc < 0)
goto out;
break;
case SEN_OV7620:
// /* Use UV gamma control instead. Bits 0 & 7 are reserved. */
// rc = ov_i2c_write(ov->dev, 0x62, (val >> 9) & 0x7e);
// if (rc < 0)
// goto out;
rc = i2c_w(ov, OV7610_REG_SAT, val >> 8);
if (rc < 0)
goto out;
break;
case SEN_SAA7111A:
rc = i2c_w(ov, 0x0c, val >> 9);
if (rc < 0)
goto out;
break;
default:
PDEBUG(3, "Unsupported with this sensor");
rc = -EPERM;
goto out;
}
rc = 0; /* Success */
ov->colour = val;
out:
if (ov51x_restart(ov) < 0)
return -EIO;
return rc;
}
/* Gets sensor's saturation (color intensity) setting */
static int
sensor_get_saturation(struct usb_ov511 *ov, unsigned short *val)
{
int rc;
switch (ov->sensor) {
case SEN_OV7610:
case SEN_OV76BE:
case SEN_OV6620:
case SEN_OV6630:
rc = i2c_r(ov, OV7610_REG_SAT);
if (rc < 0)
return rc;
else
*val = rc << 8;
break;
case SEN_OV7620:
// /* Use UV gamma reg instead. Bits 0 & 7 are reserved. */
// rc = i2c_r(ov, 0x62);
// if (rc < 0)
// return rc;
// else
// *val = (rc & 0x7e) << 9;
rc = i2c_r(ov, OV7610_REG_SAT);
if (rc < 0)
return rc;
else
*val = rc << 8;
break;
case SEN_SAA7111A:
*val = ov->colour;
break;
default:
PDEBUG(3, "Unsupported with this sensor");
return -EPERM;
}
PDEBUG(3, "%d", *val);
ov->colour = *val;
return 0;
}
/* -------------------------------------------------------------------------- */
/* Sets sensor's hue (red/blue balance) setting to "val" */
static int
sensor_set_hue(struct usb_ov511 *ov, unsigned short val)
{
int rc;
PDEBUG(3, "%d", val);
if (ov->stop_during_set)
if (ov51x_stop(ov) < 0)
return -EIO;
switch (ov->sensor) {
case SEN_OV7610:
case SEN_OV6620:
case SEN_OV6630:
rc = i2c_w(ov, OV7610_REG_RED, 0xFF - (val >> 8));
if (rc < 0)
goto out;
rc = i2c_w(ov, OV7610_REG_BLUE, val >> 8);
if (rc < 0)
goto out;
break;
case SEN_OV7620:
// Hue control is causing problems. I will enable it once it's fixed.
#if 0
rc = i2c_w(ov, 0x7a, (unsigned char)(val >> 8) + 0xb);
if (rc < 0)
goto out;
rc = i2c_w(ov, 0x79, (unsigned char)(val >> 8) + 0xb);
if (rc < 0)
goto out;
#endif
break;
case SEN_SAA7111A:
rc = i2c_w(ov, 0x0d, (val + 32768) >> 8);
if (rc < 0)
goto out;
break;
default:
PDEBUG(3, "Unsupported with this sensor");
rc = -EPERM;
goto out;
}
rc = 0; /* Success */
ov->hue = val;
out:
if (ov51x_restart(ov) < 0)
return -EIO;
return rc;
}
/* Gets sensor's hue (red/blue balance) setting */
static int
sensor_get_hue(struct usb_ov511 *ov, unsigned short *val)
{
int rc;
switch (ov->sensor) {
case SEN_OV7610:
case SEN_OV6620:
case SEN_OV6630:
rc = i2c_r(ov, OV7610_REG_BLUE);
if (rc < 0)
return rc;
else
*val = rc << 8;
break;
case SEN_OV7620:
rc = i2c_r(ov, 0x7a);
if (rc < 0)
return rc;
else
*val = rc << 8;
break;
case SEN_SAA7111A:
*val = ov->hue;
break;
default:
PDEBUG(3, "Unsupported with this sensor");
return -EPERM;
}
PDEBUG(3, "%d", *val);
ov->hue = *val;
return 0;
}
/* -------------------------------------------------------------------------- */
static int
sensor_set_picture(struct usb_ov511 *ov, struct video_picture *p)
{
int rc;
PDEBUG(4, "sensor_set_picture");
ov->whiteness = p->whiteness;
/* Don't return an error if a setting is unsupported, or the rest of the
* settings would not be applied */
rc = sensor_set_contrast(ov, p->contrast);
if (FATAL_ERROR(rc))
return rc;
rc = sensor_set_brightness(ov, p->brightness);
if (FATAL_ERROR(rc))
return rc;
rc = sensor_set_saturation(ov, p->colour);
if (FATAL_ERROR(rc))
return rc;
rc = sensor_set_hue(ov, p->hue);
if (FATAL_ERROR(rc))
return rc;
return 0;
}
static int
sensor_get_picture(struct usb_ov511 *ov, struct video_picture *p)
{
int rc;
PDEBUG(4, "sensor_get_picture");
/* Don't return an error if a setting is unsupported, or the rest of the
* settings would not be applied */
rc = sensor_get_contrast(ov, &(p->contrast));
if (FATAL_ERROR(rc))
return rc;
rc = sensor_get_brightness(ov, &(p->brightness));
if (FATAL_ERROR(rc))
return rc;
rc = sensor_get_saturation(ov, &(p->colour));
if (FATAL_ERROR(rc))
return rc;
rc = sensor_get_hue(ov, &(p->hue));
if (FATAL_ERROR(rc))
return rc;
p->whiteness = 105 << 8;
return 0;
}
#if 0
// FIXME: Exposure range is only 0x00-0x7f in interlace mode
/* Sets current exposure for sensor. This only has an effect if auto-exposure
* is off */
static inline int
sensor_set_exposure(struct usb_ov511 *ov, unsigned char val)
{
int rc;
PDEBUG(3, "%d", val);
if (ov->stop_during_set)
if (ov51x_stop(ov) < 0)
return -EIO;
switch (ov->sensor) {
case SEN_OV6620:
case SEN_OV6630:
case SEN_OV7610:
case SEN_OV7620:
case SEN_OV76BE:
case SEN_OV8600:
rc = i2c_w(ov, 0x10, val);
if (rc < 0)
goto out;
break;
case SEN_KS0127:
case SEN_KS0127B:
case SEN_SAA7111A:
PDEBUG(3, "Unsupported with this sensor");
return -EPERM;
default:
err("Sensor not supported for set_exposure");
return -EINVAL;
}
rc = 0; /* Success */
ov->exposure = val;
out:
if (ov51x_restart(ov) < 0)
return -EIO;
return rc;
}
#endif
/* Gets current exposure level from sensor, regardless of whether it is under
* manual control. */
static int
sensor_get_exposure(struct usb_ov511 *ov, unsigned char *val)
{
int rc;
switch (ov->sensor) {
case SEN_OV7610:
case SEN_OV6620:
case SEN_OV6630:
case SEN_OV7620:
case SEN_OV76BE:
case SEN_OV8600:
rc = i2c_r(ov, 0x10);
if (rc < 0)
return rc;
else
*val = rc;
break;
case SEN_KS0127:
case SEN_KS0127B:
case SEN_SAA7111A:
PDEBUG(3, "Unsupported with this sensor");
return -EPERM;
default:
err("Sensor not supported for get_exposure");
return -EINVAL;
}
PDEBUG(3, "%d", *val);
ov->exposure = *val;
return 0;
}
/* Turns on or off the LED. Only has an effect with OV511+/OV518(+) */
static void
ov51x_led_control(struct usb_ov511 *ov, int enable)
{
PDEBUG(4, " (%s)", enable ? "turn on" : "turn off");
if (ov->bridge == BRG_OV511PLUS)
reg_w(ov, R511_SYS_LED_CTL, enable ? 1 : 0);
else if (ov->bclass == BCL_OV518)
reg_w_mask(ov, R518_GPIO_OUT, enable ? 0x02 : 0x00, 0x02);
return;
}
/* Matches the sensor's internal frame rate to the lighting frequency.
* Valid frequencies are:
* 50 - 50Hz, for European and Asian lighting
* 60 - 60Hz, for American lighting
*
* Tested with: OV7610, OV7620, OV76BE, OV6620
* Unsupported: KS0127, KS0127B, SAA7111A
* Returns: 0 for success
*/
static int
sensor_set_light_freq(struct usb_ov511 *ov, int freq)
{
int sixty;
PDEBUG(4, "%d Hz", freq);
if (freq == 60)
sixty = 1;
else if (freq == 50)
sixty = 0;
else {
err("Invalid light freq (%d Hz)", freq);
return -EINVAL;
}
switch (ov->sensor) {
case SEN_OV7610:
i2c_w_mask(ov, 0x2a, sixty?0x00:0x80, 0x80);
i2c_w(ov, 0x2b, sixty?0x00:0xac);
i2c_w_mask(ov, 0x13, 0x10, 0x10);
i2c_w_mask(ov, 0x13, 0x00, 0x10);
break;
case SEN_OV7620:
case SEN_OV76BE:
case SEN_OV8600:
i2c_w_mask(ov, 0x2a, sixty?0x00:0x80, 0x80);
i2c_w(ov, 0x2b, sixty?0x00:0xac);
i2c_w_mask(ov, 0x76, 0x01, 0x01);
break;
case SEN_OV6620:
case SEN_OV6630:
i2c_w(ov, 0x2b, sixty?0xa8:0x28);
i2c_w(ov, 0x2a, sixty?0x84:0xa4);
break;
case SEN_KS0127:
case SEN_KS0127B:
case SEN_SAA7111A:
PDEBUG(5, "Unsupported with this sensor");
return -EPERM;
default:
err("Sensor not supported for set_light_freq");
return -EINVAL;
}
ov->lightfreq = freq;
return 0;
}
/* If enable is true, turn on the sensor's banding filter, otherwise turn it
* off. This filter tries to reduce the pattern of horizontal light/dark bands
* caused by some (usually fluorescent) lighting. The light frequency must
* also be set, either before or after enabling the filter, with
* sensor_set_light_freq().
*
* Tested with: OV7610, OV7620, OV76BE, OV6620.
* Unsupported: KS0127, KS0127B, SAA7111A
* Returns: 0 for success
*/
static int
sensor_set_banding_filter(struct usb_ov511 *ov, int enable)
{
int rc;
PDEBUG(4, " (%s)", enable ? "turn on" : "turn off");
if (ov->sensor == SEN_KS0127 || ov->sensor == SEN_KS0127B
|| ov->sensor == SEN_SAA7111A) {
PDEBUG(5, "Unsupported with this sensor");
return -EPERM;
}
rc = i2c_w_mask(ov, 0x2d, enable?0x04:0x00, 0x04);
if (rc < 0)
return rc;
ov->bandfilt = enable;
return 0;
}
/* If enable is true, turn on the sensor's auto brightness control, otherwise
* turn it off.
*
* Unsupported: KS0127, KS0127B, SAA7111A
* Returns: 0 for success
*/
static int
sensor_set_auto_brightness(struct usb_ov511 *ov, int enable)
{
int rc;
PDEBUG(4, " (%s)", enable ? "turn on" : "turn off");
if (ov->sensor == SEN_KS0127 || ov->sensor == SEN_KS0127B
|| ov->sensor == SEN_SAA7111A) {
PDEBUG(5, "Unsupported with this sensor");
return -EPERM;
}
rc = i2c_w_mask(ov, 0x2d, enable?0x10:0x00, 0x10);
if (rc < 0)
return rc;
ov->auto_brt = enable;
return 0;
}
/* If enable is true, turn on the sensor's auto exposure control, otherwise
* turn it off.
*
* Unsupported: KS0127, KS0127B, SAA7111A
* Returns: 0 for success
*/
static int
sensor_set_auto_exposure(struct usb_ov511 *ov, int enable)
{
PDEBUG(4, " (%s)", enable ? "turn on" : "turn off");
switch (ov->sensor) {
case SEN_OV7610:
i2c_w_mask(ov, 0x29, enable?0x00:0x80, 0x80);
break;
case SEN_OV6620:
case SEN_OV7620:
case SEN_OV76BE:
case SEN_OV8600:
i2c_w_mask(ov, 0x13, enable?0x01:0x00, 0x01);
break;
case SEN_OV6630:
i2c_w_mask(ov, 0x28, enable?0x00:0x10, 0x10);
break;
case SEN_KS0127:
case SEN_KS0127B:
case SEN_SAA7111A:
PDEBUG(5, "Unsupported with this sensor");
return -EPERM;
default:
err("Sensor not supported for set_auto_exposure");
return -EINVAL;
}
ov->auto_exp = enable;
return 0;
}
/* Modifies the sensor's exposure algorithm to allow proper exposure of objects
* that are illuminated from behind.
*
* Tested with: OV6620, OV7620
* Unsupported: OV7610, OV76BE, KS0127, KS0127B, SAA7111A
* Returns: 0 for success
*/
static int
sensor_set_backlight(struct usb_ov511 *ov, int enable)
{
PDEBUG(4, " (%s)", enable ? "turn on" : "turn off");
switch (ov->sensor) {
case SEN_OV7620:
case SEN_OV8600:
i2c_w_mask(ov, 0x68, enable?0xe0:0xc0, 0xe0);
i2c_w_mask(ov, 0x29, enable?0x08:0x00, 0x08);
i2c_w_mask(ov, 0x28, enable?0x02:0x00, 0x02);
break;
case SEN_OV6620:
i2c_w_mask(ov, 0x4e, enable?0xe0:0xc0, 0xe0);
i2c_w_mask(ov, 0x29, enable?0x08:0x00, 0x08);
i2c_w_mask(ov, 0x0e, enable?0x80:0x00, 0x80);
break;
case SEN_OV6630:
i2c_w_mask(ov, 0x4e, enable?0x80:0x60, 0xe0);
i2c_w_mask(ov, 0x29, enable?0x08:0x00, 0x08);
i2c_w_mask(ov, 0x28, enable?0x02:0x00, 0x02);
break;
case SEN_OV7610:
case SEN_OV76BE:
case SEN_KS0127:
case SEN_KS0127B:
case SEN_SAA7111A:
PDEBUG(5, "Unsupported with this sensor");
return -EPERM;
default:
err("Sensor not supported for set_backlight");
return -EINVAL;
}
ov->backlight = enable;
return 0;
}
static int
sensor_set_mirror(struct usb_ov511 *ov, int enable)
{
PDEBUG(4, " (%s)", enable ? "turn on" : "turn off");
switch (ov->sensor) {
case SEN_OV6620:
case SEN_OV6630:
case SEN_OV7610:
case SEN_OV7620:
case SEN_OV76BE:
case SEN_OV8600:
i2c_w_mask(ov, 0x12, enable?0x40:0x00, 0x40);
break;
case SEN_KS0127:
case SEN_KS0127B:
case SEN_SAA7111A:
PDEBUG(5, "Unsupported with this sensor");
return -EPERM;
default:
err("Sensor not supported for set_mirror");
return -EINVAL;
}
ov->mirror = enable;
return 0;
}
/* Returns number of bits per pixel (regardless of where they are located;
* planar or not), or zero for unsupported format.
*/
static inline int
get_depth(int palette)
{
switch (palette) {
case VIDEO_PALETTE_GREY: return 8;
case VIDEO_PALETTE_YUV420: return 12;
case VIDEO_PALETTE_YUV420P: return 12; /* Planar */
default: return 0; /* Invalid format */
}
}
/* Bytes per frame. Used by read(). Return of 0 indicates error */
static inline long int
get_frame_length(struct ov511_frame *frame)
{
if (!frame)
return 0;
else
return ((frame->width * frame->height
* get_depth(frame->format)) >> 3);
}
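/* Worked example: YUV420 is 12 bits per pixel because each 2x2 pixel
* block shares one U and one V sample (4*8 + 8 + 8 = 48 bits over 4
* pixels), so a 640x480 YUV420 frame is 640 * 480 * 12 / 8 = 460800
* bytes. */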
static int
mode_init_ov_sensor_regs(struct usb_ov511 *ov, int width, int height,
int mode, int sub_flag, int qvga)
{
int clock;
/******** Mode (VGA/QVGA) and sensor specific regs ********/
switch (ov->sensor) {
case SEN_OV7610:
i2c_w(ov, 0x14, qvga?0x24:0x04);
// FIXME: Does this improve the image quality or frame rate?
#if 0
i2c_w_mask(ov, 0x28, qvga?0x00:0x20, 0x20);
i2c_w(ov, 0x24, 0x10);
i2c_w(ov, 0x25, qvga?0x40:0x8a);
i2c_w(ov, 0x2f, qvga?0x30:0xb0);
i2c_w(ov, 0x35, qvga?0x1c:0x9c);
#endif
break;
case SEN_OV7620:
// i2c_w(ov, 0x2b, 0x00);
i2c_w(ov, 0x14, qvga?0xa4:0x84);
i2c_w_mask(ov, 0x28, qvga?0x00:0x20, 0x20);
i2c_w(ov, 0x24, qvga?0x20:0x3a);
i2c_w(ov, 0x25, qvga?0x30:0x60);
i2c_w_mask(ov, 0x2d, qvga?0x40:0x00, 0x40);
i2c_w_mask(ov, 0x67, qvga?0xf0:0x90, 0xf0);
i2c_w_mask(ov, 0x74, qvga?0x20:0x00, 0x20);
break;
case SEN_OV76BE:
// i2c_w(ov, 0x2b, 0x00);
i2c_w(ov, 0x14, qvga?0xa4:0x84);
// FIXME: Enable this once 7620AE uses 7620 initial settings
#if 0
i2c_w_mask(ov, 0x28, qvga?0x00:0x20, 0x20);
i2c_w(ov, 0x24, qvga?0x20:0x3a);
i2c_w(ov, 0x25, qvga?0x30:0x60);
i2c_w_mask(ov, 0x2d, qvga?0x40:0x00, 0x40);
i2c_w_mask(ov, 0x67, qvga?0xb0:0x90, 0xf0);
i2c_w_mask(ov, 0x74, qvga?0x20:0x00, 0x20);
#endif
break;
case SEN_OV6620:
i2c_w(ov, 0x14, qvga?0x24:0x04);
break;
case SEN_OV6630:
i2c_w(ov, 0x14, qvga?0xa0:0x80);
break;
default:
err("Invalid sensor");
return -EINVAL;
}
/******** Palette-specific regs ********/
if (mode == VIDEO_PALETTE_GREY) {
if (ov->sensor == SEN_OV7610 || ov->sensor == SEN_OV76BE) {
/* these aren't valid on the OV6620/OV7620/6630? */
i2c_w_mask(ov, 0x0e, 0x40, 0x40);
}
if (ov->sensor == SEN_OV6630 && ov->bridge == BRG_OV518
&& ov518_color) {
i2c_w_mask(ov, 0x12, 0x00, 0x10);
i2c_w_mask(ov, 0x13, 0x00, 0x20);
} else {
i2c_w_mask(ov, 0x13, 0x20, 0x20);
}
} else {
if (ov->sensor == SEN_OV7610 || ov->sensor == SEN_OV76BE) {
/* not valid on the OV6620/OV7620/6630? */
i2c_w_mask(ov, 0x0e, 0x00, 0x40);
}
/* The OV518 needs special treatment. Although both the OV518
* and the OV6630 support a 16-bit video bus, only the 8 bit Y
* bus is actually used. The UV bus is tied to ground.
* Therefore, the OV6630 needs to be in 8-bit multiplexed
* output mode */
if (ov->sensor == SEN_OV6630 && ov->bridge == BRG_OV518
&& ov518_color) {
i2c_w_mask(ov, 0x12, 0x10, 0x10);
i2c_w_mask(ov, 0x13, 0x20, 0x20);
} else {
i2c_w_mask(ov, 0x13, 0x00, 0x20);
}
}
/******** Clock programming ********/
/* The OV6620 and OV6630 need special handling. This prevents the
* severe banding that normally occurs */
if (ov->sensor == SEN_OV6620 || ov->sensor == SEN_OV6630)
{
/* Clock down */
i2c_w(ov, 0x2a, 0x04);
if (ov->compress) {
// clock = 0; /* This ensures the highest frame rate */
clock = 3;
} else if (clockdiv == -1) { /* If user didn't override it */
clock = 3; /* Gives better exposure time */
} else {
clock = clockdiv;
}
PDEBUG(4, "Setting clock divisor to %d", clock);
i2c_w(ov, 0x11, clock);
i2c_w(ov, 0x2a, 0x84);
/* This next setting is critical. It seems to improve
* the gain or the contrast. The "reserved" bits seem
* to have some effect in this case. */
i2c_w(ov, 0x2d, 0x85);
}
else
{
if (ov->compress) {
clock = 1; /* This ensures the highest frame rate */
} else if (clockdiv == -1) { /* If user didn't override it */
/* Calculate and set the clock divisor */
clock = ((sub_flag ? ov->subw * ov->subh
: width * height)
* (mode == VIDEO_PALETTE_GREY ? 2 : 3) / 2)
/ 66000;
} else {
clock = clockdiv;
}
PDEBUG(4, "Setting clock divisor to %d", clock);
i2c_w(ov, 0x11, clock);
}
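/* Illustrative divisor math for the branch above: an uncompressed 640x480
* YUV420 mode with no user override gives
* clock = (640 * 480 * 3 / 2) / 66000 = 460800 / 66000 = 6 (integer
* division). */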
/******** Special Features ********/
if (framedrop >= 0)
i2c_w(ov, 0x16, framedrop);
/* Test Pattern */
i2c_w_mask(ov, 0x12, (testpat?0x02:0x00), 0x02);
/* Enable auto white balance */
i2c_w_mask(ov, 0x12, 0x04, 0x04);
// This will go away as soon as ov51x_mode_init_sensor_regs()
// is fully tested.
/* 7620/6620/6630? don't have register 0x35, so play it safe */
if (ov->sensor == SEN_OV7610 || ov->sensor == SEN_OV76BE) {
if (width == 640 && height == 480)
i2c_w(ov, 0x35, 0x9e);
else
i2c_w(ov, 0x35, 0x1e);
}
return 0;
}
static int
set_ov_sensor_window(struct usb_ov511 *ov, int width, int height, int mode,
int sub_flag)
{
int ret;
int hwsbase, hwebase, vwsbase, vwebase, hwsize, vwsize;
int hoffset, voffset, hwscale = 0, vwscale = 0;
/* The different sensor ICs handle setting up of window differently.
* IF YOU SET IT WRONG, YOU WILL GET ALL ZERO ISOC DATA FROM OV51x!!! */
switch (ov->sensor) {
case SEN_OV7610:
case SEN_OV76BE:
hwsbase = 0x38;
hwebase = 0x3a;
vwsbase = vwebase = 0x05;
break;
case SEN_OV6620:
case SEN_OV6630:
hwsbase = 0x38;
hwebase = 0x3a;
vwsbase = 0x05;
vwebase = 0x06;
break;
case SEN_OV7620:
hwsbase = 0x2f; /* From 7620.SET (spec is wrong) */
hwebase = 0x2f;
vwsbase = vwebase = 0x05;
break;
default:
err("Invalid sensor");
return -EINVAL;
}
if (ov->sensor == SEN_OV6620 || ov->sensor == SEN_OV6630) {
/* Note: the OV518(+) does downsampling on its own */
if ((width > 176 && height > 144)
|| ov->bclass == BCL_OV518) { /* CIF */
ret = mode_init_ov_sensor_regs(ov, width, height,
mode, sub_flag, 0);
if (ret < 0)
return ret;
hwscale = 1;
vwscale = 1; /* The datasheet says 0; it's wrong */
hwsize = 352;
vwsize = 288;
} else if (width > 176 || height > 144) {
err("Illegal dimensions");
return -EINVAL;
} else { /* QCIF */
ret = mode_init_ov_sensor_regs(ov, width, height,
mode, sub_flag, 1);
if (ret < 0)
return ret;
hwsize = 176;
vwsize = 144;
}
} else {
if (width > 320 && height > 240) { /* VGA */
ret = mode_init_ov_sensor_regs(ov, width, height,
mode, sub_flag, 0);
if (ret < 0)
return ret;
hwscale = 2;
vwscale = 1;
hwsize = 640;
vwsize = 480;
} else if (width > 320 || height > 240) {
err("Illegal dimensions");
return -EINVAL;
} else { /* QVGA */
ret = mode_init_ov_sensor_regs(ov, width, height,
mode, sub_flag, 1);
if (ret < 0)
return ret;
hwscale = 1;
hwsize = 320;
vwsize = 240;
}
}
/* Center the window */
hoffset = ((hwsize - width) / 2) >> hwscale;
voffset = ((vwsize - height) / 2) >> vwscale;
/* FIXME! - This needs to be changed to support 160x120 and 6620!!! */
if (sub_flag) {
i2c_w(ov, 0x17, hwsbase+(ov->subx>>hwscale));
i2c_w(ov, 0x18, hwebase+((ov->subx+ov->subw)>>hwscale));
i2c_w(ov, 0x19, vwsbase+(ov->suby>>vwscale));
i2c_w(ov, 0x1a, vwebase+((ov->suby+ov->subh)>>vwscale));
} else {
i2c_w(ov, 0x17, hwsbase + hoffset);
i2c_w(ov, 0x18, hwebase + hoffset + (hwsize>>hwscale));
i2c_w(ov, 0x19, vwsbase + voffset);
i2c_w(ov, 0x1a, vwebase + voffset + (vwsize>>vwscale));
}
#ifdef OV511_DEBUG
if (dump_sensor)
dump_i2c_regs(ov);
#endif
return 0;
}
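/* Worked example of the centering math (hypothetical request): a 160x120
* capture on an OV7620 takes the QVGA path (hwsize 320, vwsize 240,
* hwscale 1, vwscale 0), so hoffset = ((320 - 160) / 2) >> 1 = 40 and
* voffset = ((240 - 120) / 2) >> 0 = 60; register 0x17 then gets
* hwsbase + hoffset = 0x2f + 40 = 0x57. */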
/* Set up the OV511/OV511+ with the given image parameters.
*
* Do not put any sensor-specific code in here (including I2C I/O functions)
*/
static int
ov511_mode_init_regs(struct usb_ov511 *ov,
int width, int height, int mode, int sub_flag)
{
int hsegs, vsegs;
if (sub_flag) {
width = ov->subw;
height = ov->subh;
}
PDEBUG(3, "width:%d, height:%d, mode:%d, sub:%d",
width, height, mode, sub_flag);
// FIXME: This should be moved to a 7111a-specific function once
// subcapture is dealt with properly
if (ov->sensor == SEN_SAA7111A) {
if (width == 320 && height == 240) {
/* No need to do anything special */
} else if (width == 640 && height == 480) {
/* Set the OV511 up as 320x480, but keep the
* V4L resolution as 640x480 */
width = 320;
} else {
err("SAA7111A only allows 320x240 or 640x480");
return -EINVAL;
}
}
/* Make sure width and height are a multiple of 8 */
if (width % 8 || height % 8) {
err("Invalid size (%d, %d) (mode = %d)", width, height, mode);
return -EINVAL;
}
if (width < ov->minwidth || height < ov->minheight) {
err("Requested dimensions are too small");
return -EINVAL;
}
if (ov51x_stop(ov) < 0)
return -EIO;
if (mode == VIDEO_PALETTE_GREY) {
reg_w(ov, R511_CAM_UV_EN, 0x00);
reg_w(ov, R511_SNAP_UV_EN, 0x00);
reg_w(ov, R511_SNAP_OPTS, 0x01);
} else {
reg_w(ov, R511_CAM_UV_EN, 0x01);
reg_w(ov, R511_SNAP_UV_EN, 0x01);
reg_w(ov, R511_SNAP_OPTS, 0x03);
}
/* Here I'm assuming that snapshot size == image size.
* I hope that's always true. --claudio
*/
hsegs = (width >> 3) - 1;
vsegs = (height >> 3) - 1;
reg_w(ov, R511_CAM_PXCNT, hsegs);
reg_w(ov, R511_CAM_LNCNT, vsegs);
reg_w(ov, R511_CAM_PXDIV, 0x00);
reg_w(ov, R511_CAM_LNDIV, 0x00);
/* YUV420, low pass filter on */
reg_w(ov, R511_CAM_OPTS, 0x03);
/* Snapshot additions */
reg_w(ov, R511_SNAP_PXCNT, hsegs);
reg_w(ov, R511_SNAP_LNCNT, vsegs);
reg_w(ov, R511_SNAP_PXDIV, 0x00);
reg_w(ov, R511_SNAP_LNDIV, 0x00);
if (ov->compress) {
/* Enable Y and UV quantization and compression */
reg_w(ov, R511_COMP_EN, 0x07);
reg_w(ov, R511_COMP_LUT_EN, 0x03);
ov51x_reset(ov, OV511_RESET_OMNICE);
}
if (ov51x_restart(ov) < 0)
return -EIO;
return 0;
}
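/* Example: a 352x288 request programs hsegs = (352 >> 3) - 1 = 43 and
* vsegs = (288 >> 3) - 1 = 35, i.e. the counters hold (pixels / 8) - 1. */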
/* Sets up the OV518/OV518+ with the given image parameters
*
* OV518 needs a completely different approach, until we can figure out what
* the individual registers do. Also, only 15 FPS is supported now.
*
* Do not put any sensor-specific code in here (including I2C I/O functions)
*/
static int
ov518_mode_init_regs(struct usb_ov511 *ov,
int width, int height, int mode, int sub_flag)
{
int hsegs, vsegs, hi_res;
if (sub_flag) {
width = ov->subw;
height = ov->subh;
}
PDEBUG(3, "width:%d, height:%d, mode:%d, sub:%d",
width, height, mode, sub_flag);
if (width % 16 || height % 8) {
err("Invalid size (%d, %d)", width, height);
return -EINVAL;
}
if (width < ov->minwidth || height < ov->minheight) {
err("Requested dimensions are too small");
return -EINVAL;
}
if (width >= 320 && height >= 240) {
hi_res = 1;
} else if (width >= 320 || height >= 240) {
err("Invalid width/height combination (%d, %d)", width, height);
return -EINVAL;
} else {
hi_res = 0;
}
if (ov51x_stop(ov) < 0)
return -EIO;
/******** Set the mode ********/
reg_w(ov, 0x2b, 0);
reg_w(ov, 0x2c, 0);
reg_w(ov, 0x2d, 0);
reg_w(ov, 0x2e, 0);
reg_w(ov, 0x3b, 0);
reg_w(ov, 0x3c, 0);
reg_w(ov, 0x3d, 0);
reg_w(ov, 0x3e, 0);
if (ov->bridge == BRG_OV518 && ov518_color) {
/* OV518 needs U and V swapped */
i2c_w_mask(ov, 0x15, 0x00, 0x01);
if (mode == VIDEO_PALETTE_GREY) {
/* Set 16-bit input format (UV data are ignored) */
reg_w_mask(ov, 0x20, 0x00, 0x08);
/* Set 8-bit (4:0:0) output format */
reg_w_mask(ov, 0x28, 0x00, 0xf0);
reg_w_mask(ov, 0x38, 0x00, 0xf0);
} else {
/* Set 8-bit (YVYU) input format */
reg_w_mask(ov, 0x20, 0x08, 0x08);
/* Set 12-bit (4:2:0) output format */
reg_w_mask(ov, 0x28, 0x80, 0xf0);
reg_w_mask(ov, 0x38, 0x80, 0xf0);
}
} else {
reg_w(ov, 0x28, (mode == VIDEO_PALETTE_GREY) ? 0x00:0x80);
reg_w(ov, 0x38, (mode == VIDEO_PALETTE_GREY) ? 0x00:0x80);
}
hsegs = width / 16;
vsegs = height / 4;
reg_w(ov, 0x29, hsegs);
reg_w(ov, 0x2a, vsegs);
reg_w(ov, 0x39, hsegs);
reg_w(ov, 0x3a, vsegs);
/* Windows driver does this here; who knows why */
reg_w(ov, 0x2f, 0x80);
/******** Set the framerate (to 15 FPS) ********/
/* Mode independent, but framerate dependent, regs */
reg_w(ov, 0x51, 0x02); /* Clock divider; lower==faster */
reg_w(ov, 0x22, 0x18);
reg_w(ov, 0x23, 0xff);
if (ov->bridge == BRG_OV518PLUS)
reg_w(ov, 0x21, 0x19);
else
reg_w(ov, 0x71, 0x19); /* Compression-related? */
// FIXME: Sensor-specific
/* Bit 5 is what matters here. Of course, it is "reserved" */
i2c_w(ov, 0x54, 0x23);
reg_w(ov, 0x2f, 0x80);
if (ov->bridge == BRG_OV518PLUS) {
reg_w(ov, 0x24, 0x94);
reg_w(ov, 0x25, 0x90);
ov518_reg_w32(ov, 0xc4, 400, 2); /* 190h */
ov518_reg_w32(ov, 0xc6, 540, 2); /* 21ch */
ov518_reg_w32(ov, 0xc7, 540, 2); /* 21ch */
ov518_reg_w32(ov, 0xc8, 108, 2); /* 6ch */
ov518_reg_w32(ov, 0xca, 131098, 3); /* 2001ah */
ov518_reg_w32(ov, 0xcb, 532, 2); /* 214h */
ov518_reg_w32(ov, 0xcc, 2400, 2); /* 960h */
ov518_reg_w32(ov, 0xcd, 32, 2); /* 20h */
ov518_reg_w32(ov, 0xce, 608, 2); /* 260h */
} else {
reg_w(ov, 0x24, 0x9f);
reg_w(ov, 0x25, 0x90);
ov518_reg_w32(ov, 0xc4, 400, 2); /* 190h */
ov518_reg_w32(ov, 0xc6, 500, 2); /* 1f4h */
ov518_reg_w32(ov, 0xc7, 500, 2); /* 1f4h */
ov518_reg_w32(ov, 0xc8, 142, 2); /* 8eh */
ov518_reg_w32(ov, 0xca, 131098, 3); /* 2001ah */
ov518_reg_w32(ov, 0xcb, 532, 2); /* 214h */
ov518_reg_w32(ov, 0xcc, 2000, 2); /* 7d0h */
ov518_reg_w32(ov, 0xcd, 32, 2); /* 20h */
ov518_reg_w32(ov, 0xce, 608, 2); /* 260h */
}
reg_w(ov, 0x2f, 0x80);
if (ov51x_restart(ov) < 0)
return -EIO;
/* Reset it just for good measure */
if (ov51x_reset(ov, OV511_RESET_NOREGS) < 0)
return -EIO;
return 0;
}
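/* Example: a 352x288 request on the OV518 programs hsegs = 352 / 16 = 22
* and vsegs = 288 / 4 = 72; note the divisors differ from the OV511's
* (width / 8 - 1 and height / 8 - 1). */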
/* This is a wrapper around the OV511, OV518, and sensor specific functions */
static int
mode_init_regs(struct usb_ov511 *ov,
int width, int height, int mode, int sub_flag)
{
int rc = 0;
if (!ov || !ov->dev)
return -EFAULT;
if (ov->bclass == BCL_OV518) {
rc = ov518_mode_init_regs(ov, width, height, mode, sub_flag);
} else {
rc = ov511_mode_init_regs(ov, width, height, mode, sub_flag);
}
if (FATAL_ERROR(rc))
return rc;
switch (ov->sensor) {
case SEN_OV7610:
case SEN_OV7620:
case SEN_OV76BE:
case SEN_OV8600:
case SEN_OV6620:
case SEN_OV6630:
rc = set_ov_sensor_window(ov, width, height, mode, sub_flag);
break;
case SEN_KS0127:
case SEN_KS0127B:
err("KS0127-series decoders not supported yet");
rc = -EINVAL;
break;
case SEN_SAA7111A:
// rc = mode_init_saa_sensor_regs(ov, width, height, mode,
// sub_flag);
PDEBUG(1, "SAA status = 0x%02X", i2c_r(ov, 0x1f));
break;
default:
err("Unknown sensor");
rc = -EINVAL;
}
if (FATAL_ERROR(rc))
return rc;
/* Sensor-independent settings */
rc = sensor_set_auto_brightness(ov, ov->auto_brt);
if (FATAL_ERROR(rc))
return rc;
rc = sensor_set_auto_exposure(ov, ov->auto_exp);
if (FATAL_ERROR(rc))
return rc;
rc = sensor_set_banding_filter(ov, bandingfilter);
if (FATAL_ERROR(rc))
return rc;
if (ov->lightfreq) {
rc = sensor_set_light_freq(ov, lightfreq);
if (FATAL_ERROR(rc))
return rc;
}
rc = sensor_set_backlight(ov, ov->backlight);
if (FATAL_ERROR(rc))
return rc;
rc = sensor_set_mirror(ov, ov->mirror);
if (FATAL_ERROR(rc))
return rc;
return 0;
}
/* This sets the default image parameters. This is useful for apps that use
* read() and do not set these.
*/
static int
ov51x_set_default_params(struct usb_ov511 *ov)
{
int i;
/* Set default sizes in case IOCTL (VIDIOCMCAPTURE) is not used
* (using read() instead). */
for (i = 0; i < OV511_NUMFRAMES; i++) {
ov->frame[i].width = ov->maxwidth;
ov->frame[i].height = ov->maxheight;
ov->frame[i].bytes_read = 0;
if (force_palette)
ov->frame[i].format = force_palette;
else
ov->frame[i].format = VIDEO_PALETTE_YUV420;
ov->frame[i].depth = get_depth(ov->frame[i].format);
}
PDEBUG(3, "%dx%d, %s", ov->maxwidth, ov->maxheight,
symbolic(v4l1_plist, ov->frame[0].format));
/* Initialize to max width/height, YUV420 or RGB24 (if supported) */
if (mode_init_regs(ov, ov->maxwidth, ov->maxheight,
ov->frame[0].format, 0) < 0)
return -EINVAL;
return 0;
}
/**********************************************************************
*
* Video decoder stuff
*
**********************************************************************/
/* Set analog input port of decoder */
static int
decoder_set_input(struct usb_ov511 *ov, int input)
{
PDEBUG(4, "port %d", input);
switch (ov->sensor) {
case SEN_SAA7111A:
{
/* Select mode */
i2c_w_mask(ov, 0x02, input, 0x07);
/* Bypass chrominance trap for modes 4..7 */
i2c_w_mask(ov, 0x09, (input > 3) ? 0x80:0x00, 0x80);
break;
}
default:
return -EINVAL;
}
return 0;
}
/* Get ASCII name of video input */
static int
decoder_get_input_name(struct usb_ov511 *ov, int input, char *name)
{
switch (ov->sensor) {
case SEN_SAA7111A:
{
if (input < 0 || input > 7)
return -EINVAL;
else if (input < 4)
sprintf(name, "CVBS-%d", input);
else // if (input < 8)
sprintf(name, "S-Video-%d", input - 4);
break;
}
default:
sprintf(name, "%s", "Camera");
}
return 0;
}
/* Set norm (NTSC, PAL, SECAM, AUTO) */
static int
decoder_set_norm(struct usb_ov511 *ov, int norm)
{
PDEBUG(4, "%d", norm);
switch (ov->sensor) {
case SEN_SAA7111A:
{
int reg_8, reg_e;
if (norm == VIDEO_MODE_NTSC) {
reg_8 = 0x40; /* 60 Hz */
reg_e = 0x00; /* NTSC M / PAL BGHI */
} else if (norm == VIDEO_MODE_PAL) {
reg_8 = 0x00; /* 50 Hz */
reg_e = 0x00; /* NTSC M / PAL BGHI */
} else if (norm == VIDEO_MODE_AUTO) {
reg_8 = 0x80; /* Auto field detect */
reg_e = 0x00; /* NTSC M / PAL BGHI */
} else if (norm == VIDEO_MODE_SECAM) {
reg_8 = 0x00; /* 50 Hz */
reg_e = 0x50; /* SECAM / PAL 4.43 */
} else {
return -EINVAL;
}
i2c_w_mask(ov, 0x08, reg_8, 0xc0);
i2c_w_mask(ov, 0x0e, reg_e, 0x70);
break;
}
default:
return -EINVAL;
}
return 0;
}
/**********************************************************************
*
* Raw data parsing
*
**********************************************************************/
/* Copies a 64-byte segment at pIn to an 8x8 block at pOut. The width of the
* image at pOut is specified by w.
*/
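/* For example, with w = 320 the first eight input bytes land at
* pOut[0..7], the next eight at pOut[320..327], and so on: each run
* of eight consecutive input bytes becomes one row of the 8x8 block.
*/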
static inline void
make_8x8(unsigned char *pIn, unsigned char *pOut, int w)
{
unsigned char *pOut1 = pOut;
int x, y;
for (y = 0; y < 8; y++) {
pOut1 = pOut;
for (x = 0; x < 8; x++) {
*pOut1++ = *pIn++;
}
pOut += w;
}
}
/*
* For RAW BW (YUV 4:0:0) images, data show up in 256 byte segments.
* The segments represent 4 squares of 8x8 pixels as follows:
*
* 0 1 ... 7 64 65 ... 71 ... 192 193 ... 199
* 8 9 ... 15 72 73 ... 79 200 201 ... 207
* ... ... ...
* 56 57 ... 63 120 121 ... 127 248 249 ... 255
*
*/
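/* Worked example: with rawwidth = 32, one 256-byte segment fills an
* entire 32x8 stripe; the four 64-byte squares land at output columns
* 0, 8, 16 and 24 of the same eight rows (pIn advances by 64 and pOut
* by 8 per square, as in the loop below).
*/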
static void
yuv400raw_to_yuv400p(struct ov511_frame *frame,
unsigned char *pIn0, unsigned char *pOut0)
{
int x, y;
unsigned char *pIn, *pOut, *pOutLine;
/* Copy Y */
pIn = pIn0;
pOutLine = pOut0;
for (y = 0; y < frame->rawheight - 1; y += 8) {
pOut = pOutLine;
for (x = 0; x < frame->rawwidth - 1; x += 8) {
make_8x8(pIn, pOut, frame->rawwidth);
pIn += 64;
pOut += 8;
}
pOutLine += 8 * frame->rawwidth;
}
}
/*
* For YUV 4:2:0 images, the data show up in 384 byte segments.
* The first 64 bytes of each segment are U, the next 64 are V. The U and
* V are arranged as follows:
*
* 0 1 ... 7
* 8 9 ... 15
* ...
* 56 57 ... 63
*
* U and V are shipped at half resolution (1 U,V sample -> one 2x2 block).
*
* The next 256 bytes are full resolution Y data and represent 4 squares
* of 8x8 pixels as follows:
*
* 0 1 ... 7 64 65 ... 71 ... 192 193 ... 199
* 8 9 ... 15 72 73 ... 79 200 201 ... 207
* ... ... ...
* 56 57 ... 63 120 121 ... 127 ... 248 249 ... 255
*
* Note that the U and V data in one segment represent a 16 x 16 pixel
* area, but the Y data represent a 32 x 8 pixel area. If the width is not an
* even multiple of 32, the extra 8x8 blocks within a 32x8 block belong to the
* next horizontal stripe.
*
* If dumppix module param is set, _parse_data just dumps the incoming segments,
* verbatim, in order, into the frame. When used with vidcat -f ppm -s 640x480
* this puts the data on the standard output and can be analyzed with the
* parseppm.c utility I wrote. That's a much faster way to figure out how
* these data are scrambled.
*/
/* Converts from raw, uncompressed segments at pIn0 to a YUV420P frame at pOut0.
*
* FIXME: Currently only handles width and height that are multiples of 16
*/
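/* Segment arithmetic: each 384-byte segment carries 64 U + 64 V + 256
* Y bytes, i.e. chroma for a 16x16 pixel area plus luma for a 32x8
* area, so a frame consumes rawwidth * rawheight / 256 segments
* (e.g. 396 segments for 352x288).
*/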
static void
yuv420raw_to_yuv420p(struct ov511_frame *frame,
unsigned char *pIn0, unsigned char *pOut0)
{
int k, x, y;
unsigned char *pIn, *pOut, *pOutLine;
const unsigned int a = frame->rawwidth * frame->rawheight;
const unsigned int w = frame->rawwidth / 2;
/* Copy U and V */
pIn = pIn0;
pOutLine = pOut0 + a;
for (y = 0; y < frame->rawheight - 1; y += 16) {
pOut = pOutLine;
for (x = 0; x < frame->rawwidth - 1; x += 16) {
make_8x8(pIn, pOut, w);
make_8x8(pIn + 64, pOut + a/4, w);
pIn += 384;
pOut += 8;
}
pOutLine += 8 * w;
}
/* Copy Y */
pIn = pIn0 + 128;
pOutLine = pOut0;
k = 0;
for (y = 0; y < frame->rawheight - 1; y += 8) {
pOut = pOutLine;
for (x = 0; x < frame->rawwidth - 1; x += 8) {
make_8x8(pIn, pOut, frame->rawwidth);
pIn += 64;
pOut += 8;
if ((++k) > 3) {
k = 0;
pIn += 128;
}
}
pOutLine += 8 * frame->rawwidth;
}
}
/**********************************************************************
*
* Decompression
*
**********************************************************************/
static int
request_decompressor(struct usb_ov511 *ov)
{
if (ov->bclass == BCL_OV511 || ov->bclass == BCL_OV518) {
err("No decompressor available");
} else {
err("Unknown bridge");
}
return -ENOSYS;
}
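/* With no decompressor compiled in, decompress() is effectively a
* stub: request_decompressor() always fails, so the function returns
* early and the output buffer is left untouched.
*/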
static void
decompress(struct usb_ov511 *ov, struct ov511_frame *frame,
unsigned char *pIn0, unsigned char *pOut0)
{
if (!ov->decomp_ops)
if (request_decompressor(ov))
return;
}
/**********************************************************************
*
* Format conversion
*
**********************************************************************/
/* Fuses even and odd fields together, and doubles width.
* INPUT: an odd field followed by an even field at pIn0, in YUV planar format
* OUTPUT: a normal YUV planar image, with correct aspect ratio
*/
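/* The arithmetic: each raw field is rawwidth x (rawheight / 2); every
* field pixel is written twice horizontally and odd/even rows are
* interleaved, giving (2 * rawwidth) x rawheight output. This is
* exactly what the width/height sanity checks below require.
*/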
static void
deinterlace(struct ov511_frame *frame, int rawformat,
unsigned char *pIn0, unsigned char *pOut0)
{
const int fieldheight = frame->rawheight / 2;
const int fieldpix = fieldheight * frame->rawwidth;
const int w = frame->width;
int x, y;
unsigned char *pInEven, *pInOdd, *pOut;
PDEBUG(5, "fieldheight=%d", fieldheight);
if (frame->rawheight != frame->height) {
err("invalid height");
return;
}
if ((frame->rawwidth * 2) != frame->width) {
err("invalid width");
return;
}
/* Y */
pInOdd = pIn0;
pInEven = pInOdd + fieldpix;
pOut = pOut0;
for (y = 0; y < fieldheight; y++) {
for (x = 0; x < frame->rawwidth; x++) {
*pOut = *pInEven;
*(pOut+1) = *pInEven++;
*(pOut+w) = *pInOdd;
*(pOut+w+1) = *pInOdd++;
pOut += 2;
}
pOut += w;
}
if (rawformat == RAWFMT_YUV420) {
/* U */
pInOdd = pIn0 + fieldpix * 2;
pInEven = pInOdd + fieldpix / 4;
for (y = 0; y < fieldheight / 2; y++) {
for (x = 0; x < frame->rawwidth / 2; x++) {
*pOut = *pInEven;
*(pOut+1) = *pInEven++;
*(pOut+w/2) = *pInOdd;
*(pOut+w/2+1) = *pInOdd++;
pOut += 2;
}
pOut += w/2;
}
/* V */
pInOdd = pIn0 + fieldpix * 2 + fieldpix / 2;
pInEven = pInOdd + fieldpix / 4;
for (y = 0; y < fieldheight / 2; y++) {
for (x = 0; x < frame->rawwidth / 2; x++) {
*pOut = *pInEven;
*(pOut+1) = *pInEven++;
*(pOut+w/2) = *pInOdd;
*(pOut+w/2+1) = *pInOdd++;
pOut += 2;
}
pOut += w/2;
}
}
}
static void
ov51x_postprocess_grey(struct usb_ov511 *ov, struct ov511_frame *frame)
{
/* Deinterlace frame, if necessary */
if (ov->sensor == SEN_SAA7111A && frame->rawheight >= 480) {
if (frame->compressed)
decompress(ov, frame, frame->rawdata,
frame->tempdata);
else
yuv400raw_to_yuv400p(frame, frame->rawdata,
frame->tempdata);
deinterlace(frame, RAWFMT_YUV400, frame->tempdata,
frame->data);
} else {
if (frame->compressed)
decompress(ov, frame, frame->rawdata,
frame->data);
else
yuv400raw_to_yuv400p(frame, frame->rawdata,
frame->data);
}
}
/* Process raw YUV420 data into standard YUV420P */
static void
ov51x_postprocess_yuv420(struct usb_ov511 *ov, struct ov511_frame *frame)
{
/* Deinterlace frame, if necessary */
if (ov->sensor == SEN_SAA7111A && frame->rawheight >= 480) {
if (frame->compressed)
decompress(ov, frame, frame->rawdata, frame->tempdata);
else
yuv420raw_to_yuv420p(frame, frame->rawdata,
frame->tempdata);
deinterlace(frame, RAWFMT_YUV420, frame->tempdata,
frame->data);
} else {
if (frame->compressed)
decompress(ov, frame, frame->rawdata, frame->data);
else
yuv420raw_to_yuv420p(frame, frame->rawdata,
frame->data);
}
}
/* Post-processes the specified frame. This consists of:
* 1. Decompress frame, if necessary
* 2. Deinterlace frame and scale to proper size, if necessary
* 3. Convert from YUV planar to destination format, if necessary
* 4. Fix the RGB offset, if necessary
*/
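/* In practice only the GREY and YUV420(P) palettes are handled below;
* any other destination format is rejected, so the conversion and
* RGB-offset steps are no-ops for the supported palettes.
*/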
static void
ov51x_postprocess(struct usb_ov511 *ov, struct ov511_frame *frame)
{
if (dumppix) {
memset(frame->data, 0,
MAX_DATA_SIZE(ov->maxwidth, ov->maxheight));
PDEBUG(4, "Dumping %d bytes", frame->bytes_recvd);
memcpy(frame->data, frame->rawdata, frame->bytes_recvd);
} else {
switch (frame->format) {
case VIDEO_PALETTE_GREY:
ov51x_postprocess_grey(ov, frame);
break;
case VIDEO_PALETTE_YUV420:
case VIDEO_PALETTE_YUV420P:
ov51x_postprocess_yuv420(ov, frame);
break;
default:
err("Cannot convert data to %s",
symbolic(v4l1_plist, frame->format));
}
}
}
/**********************************************************************
*
* OV51x data transfer, IRQ handler
*
**********************************************************************/
static inline void
ov511_move_data(struct usb_ov511 *ov, unsigned char *in, int n)
{
int num, offset;
int pnum = in[ov->packet_size - 1]; /* Get packet number */
int max_raw = MAX_RAW_DATA_SIZE(ov->maxwidth, ov->maxheight);
struct ov511_frame *frame = &ov->frame[ov->curframe];
struct timeval *ts;
/* SOF/EOF packets have 1st to 8th bytes zeroed and the 9th
* byte non-zero. The EOF packet has image width/height in the
* 10th and 11th bytes. The 9th byte is given as follows:
*
* bit 7: EOF
* 6: compression enabled
* 5: 422/420/400 modes
* 4: 422/420/400 modes
* 3: 1
* 2: snapshot button on
* 1: snapshot frame
* 0: even/odd field
*/
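/* The check below follows from this layout: a header packet has bytes
* 0-7 all zero and bit 3 of byte 8 set, so (in[0] | ... | in[7]) must
* be zero and (~in[8] & 0x08) must also be zero; anything else is
* treated as frame data.
*/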
if (printph) {
dev_info(&ov->dev->dev,
"ph(%3d): %2x %2x %2x %2x %2x %2x %2x %2x %2x %2x %2x %2x\n",
pnum, in[0], in[1], in[2], in[3], in[4], in[5], in[6],
in[7], in[8], in[9], in[10], in[11]);
}
/* Check for SOF/EOF packet */
if ((in[0] | in[1] | in[2] | in[3] | in[4] | in[5] | in[6] | in[7]) ||
(~in[8] & 0x08))
goto check_middle;
/* Frame end */
if (in[8] & 0x80) {
ts = (struct timeval *)(frame->data
+ MAX_FRAME_SIZE(ov->maxwidth, ov->maxheight));
do_gettimeofday(ts);
/* Get the actual frame size from the EOF header */
frame->rawwidth = ((int)(in[9]) + 1) * 8;
frame->rawheight = ((int)(in[10]) + 1) * 8;
PDEBUG(4, "Frame end, frame=%d, pnum=%d, w=%d, h=%d, recvd=%d",
ov->curframe, pnum, frame->rawwidth, frame->rawheight,
frame->bytes_recvd);
/* Validate the header data */
RESTRICT_TO_RANGE(frame->rawwidth, ov->minwidth, ov->maxwidth);
RESTRICT_TO_RANGE(frame->rawheight, ov->minheight,
ov->maxheight);
/* Don't allow byte count to exceed buffer size */
RESTRICT_TO_RANGE(frame->bytes_recvd, 8, max_raw);
if (frame->scanstate == STATE_LINES) {
int nextf;
frame->grabstate = FRAME_DONE;
wake_up_interruptible(&frame->wq);
/* If next frame is ready or grabbing,
* point to it */
nextf = (ov->curframe + 1) % OV511_NUMFRAMES;
if (ov->frame[nextf].grabstate == FRAME_READY
|| ov->frame[nextf].grabstate == FRAME_GRABBING) {
ov->curframe = nextf;
ov->frame[nextf].scanstate = STATE_SCANNING;
} else {
if (frame->grabstate == FRAME_DONE) {
PDEBUG(4, "** Frame done **");
} else {
PDEBUG(4, "Frame not ready? state = %d",
ov->frame[nextf].grabstate);
}
ov->curframe = -1;
}
} else {
PDEBUG(5, "Frame done, but not scanning");
}
/* Image corruption caused by misplaced frame->segment = 0
* fixed by carlosf@conectiva.com.br
*/
} else {
/* Frame start */
PDEBUG(4, "Frame start, framenum = %d", ov->curframe);
/* Check to see if it's a snapshot frame */
/* FIXME?? Should the snapshot reset go here? Performance? */
if (in[8] & 0x02) {
frame->snapshot = 1;
PDEBUG(3, "snapshot detected");
}
frame->scanstate = STATE_LINES;
frame->bytes_recvd = 0;
frame->compressed = in[8] & 0x40;
}
check_middle:
/* Are we in a frame? */
if (frame->scanstate != STATE_LINES) {
PDEBUG(5, "Not in a frame; packet skipped");
return;
}
/* If frame start, skip header */
if (frame->bytes_recvd == 0)
offset = 9;
else
offset = 0;
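/* num excludes the 9-byte SOF header (present only in the first
* packet of a frame) and the trailing packet-number byte the bridge
* appends to every packet.
*/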
num = n - offset - 1;
/* Dump all data exactly as received */
if (dumppix == 2) {
frame->bytes_recvd += n - 1;
if (frame->bytes_recvd <= max_raw)
memcpy(frame->rawdata + frame->bytes_recvd - (n - 1),
in, n - 1);
else
PDEBUG(3, "Raw data buffer overrun!! (%d)",
frame->bytes_recvd - max_raw);
} else if (!frame->compressed && !remove_zeros) {
frame->bytes_recvd += num;
if (frame->bytes_recvd <= max_raw)
memcpy(frame->rawdata + frame->bytes_recvd - num,
in + offset, num);
else
PDEBUG(3, "Raw data buffer overrun!! (%d)",
frame->bytes_recvd - max_raw);
} else { /* Remove all-zero FIFO lines (aligned 32-byte blocks) */
int b, read = 0, allzero, copied = 0;
if (offset) {
frame->bytes_recvd += 32 - offset; // Bytes out
memcpy(frame->rawdata, in + offset, 32 - offset);
read += 32;
}
while (read < n - 1) {
allzero = 1;
for (b = 0; b < 32; b++) {
if (in[read + b]) {
allzero = 0;
break;
}
}
if (allzero) {
/* Don't copy it */
} else {
if (frame->bytes_recvd + copied + 32 <= max_raw)
{
memcpy(frame->rawdata
+ frame->bytes_recvd + copied,
in + read, 32);
copied += 32;
} else {
PDEBUG(3, "Raw data buffer overrun!!");
}
}
read += 32;
}
frame->bytes_recvd += copied;
}
}
static inline void
ov518_move_data(struct usb_ov511 *ov, unsigned char *in, int n)
{
int max_raw = MAX_RAW_DATA_SIZE(ov->maxwidth, ov->maxheight);
struct ov511_frame *frame = &ov->frame[ov->curframe];
struct timeval *ts;
/* Don't copy the packet number byte */
if (ov->packet_numbering)
--n;
/* A false positive here is likely, until OVT gives me
* the definitive SOF/EOF format */
if ((!(in[0] | in[1] | in[2] | in[3] | in[5])) && in[6]) {
if (printph) {
dev_info(&ov->dev->dev,
"ph: %2x %2x %2x %2x %2x %2x %2x %2x\n",
in[0], in[1], in[2], in[3], in[4], in[5],
in[6], in[7]);
}
if (frame->scanstate == STATE_LINES) {
PDEBUG(4, "Detected frame end/start");
goto eof;
} else { //scanstate == STATE_SCANNING
/* Frame start */
PDEBUG(4, "Frame start, framenum = %d", ov->curframe);
goto sof;
}
} else {
goto check_middle;
}
eof:
ts = (struct timeval *)(frame->data
+ MAX_FRAME_SIZE(ov->maxwidth, ov->maxheight));
do_gettimeofday(ts);
PDEBUG(4, "Frame end, curframe = %d, hw=%d, vw=%d, recvd=%d",
ov->curframe,
(int)(in[9]), (int)(in[10]), frame->bytes_recvd);
// FIXME: Since we don't know the header formats yet,
// there is no way to know what the actual image size is
frame->rawwidth = frame->width;
frame->rawheight = frame->height;
/* Validate the header data */
RESTRICT_TO_RANGE(frame->rawwidth, ov->minwidth, ov->maxwidth);
RESTRICT_TO_RANGE(frame->rawheight, ov->minheight, ov->maxheight);
/* Don't allow byte count to exceed buffer size */
RESTRICT_TO_RANGE(frame->bytes_recvd, 8, max_raw);
if (frame->scanstate == STATE_LINES) {
int nextf;
frame->grabstate = FRAME_DONE;
wake_up_interruptible(&frame->wq);
/* If next frame is ready or grabbing,
* point to it */
nextf = (ov->curframe + 1) % OV511_NUMFRAMES;
if (ov->frame[nextf].grabstate == FRAME_READY
|| ov->frame[nextf].grabstate == FRAME_GRABBING) {
ov->curframe = nextf;
ov->frame[nextf].scanstate = STATE_SCANNING;
frame = &ov->frame[nextf];
} else {
if (frame->grabstate == FRAME_DONE) {
PDEBUG(4, "** Frame done **");
} else {
PDEBUG(4, "Frame not ready? state = %d",
ov->frame[nextf].grabstate);
}
ov->curframe = -1;
PDEBUG(4, "SOF dropped (no active frame)");
return; /* Nowhere to store this frame */
}
}
sof:
PDEBUG(4, "Starting capture on frame %d", frame->framenum);
// Snapshot not reverse-engineered yet.
#if 0
/* Check to see if it's a snapshot frame */
/* FIXME?? Should the snapshot reset go here? Performance? */
if (in[8] & 0x02) {
frame->snapshot = 1;
PDEBUG(3, "snapshot detected");
}
#endif
frame->scanstate = STATE_LINES;
frame->bytes_recvd = 0;
frame->compressed = 1;
check_middle:
/* Are we in a frame? */
if (frame->scanstate != STATE_LINES) {
PDEBUG(4, "scanstate: no SOF yet");
return;
}
/* Dump all data exactly as received */
if (dumppix == 2) {
frame->bytes_recvd += n;
if (frame->bytes_recvd <= max_raw)
memcpy(frame->rawdata + frame->bytes_recvd - n, in, n);
else
PDEBUG(3, "Raw data buffer overrun!! (%d)",
frame->bytes_recvd - max_raw);
} else {
/* All incoming data are divided into 8-byte segments. If the
* segment contains all zero bytes, it must be skipped. These
* zero-segments allow the OV518 to maintain a constant data rate
* regardless of the effectiveness of the compression. Segments
* are aligned relative to the beginning of each isochronous
* packet. The first segment in each image is a header (the
* decompressor skips it later).
*/
int b, read = 0, allzero, copied = 0;
while (read < n) {
allzero = 1;
for (b = 0; b < 8; b++) {
if (in[read + b]) {
allzero = 0;
break;
}
}
if (allzero) {
/* Don't copy it */
} else {
if (frame->bytes_recvd + copied + 8 <= max_raw)
{
memcpy(frame->rawdata
+ frame->bytes_recvd + copied,
in + read, 8);
copied += 8;
} else {
PDEBUG(3, "Raw data buffer overrun!!");
}
}
read += 8;
}
frame->bytes_recvd += copied;
}
}
static void
ov51x_isoc_irq(struct urb *urb)
{
int i;
struct usb_ov511 *ov;
struct ov511_sbuf *sbuf;
if (!urb->context) {
PDEBUG(4, "no context");
return;
}
sbuf = urb->context;
ov = sbuf->ov;
if (!ov || !ov->dev || !ov->user) {
PDEBUG(4, "no device, or not open");
return;
}
if (!ov->streaming) {
PDEBUG(4, "hmmm... not streaming, but got interrupt");
return;
}
if (urb->status == -ENOENT || urb->status == -ECONNRESET) {
PDEBUG(4, "URB unlinked");
return;
}
if (urb->status != -EINPROGRESS && urb->status != 0) {
err("ERROR: urb->status=%d: %s", urb->status,
symbolic(urb_errlist, urb->status));
}
/* Copy the data received into our frame buffer */
PDEBUG(5, "sbuf[%d]: Moving %d packets", sbuf->n,
urb->number_of_packets);
for (i = 0; i < urb->number_of_packets; i++) {
/* Warning: Don't call *_move_data() if no frame active! */
if (ov->curframe >= 0) {
int n = urb->iso_frame_desc[i].actual_length;
int st = urb->iso_frame_desc[i].status;
unsigned char *cdata;
urb->iso_frame_desc[i].actual_length = 0;
urb->iso_frame_desc[i].status = 0;
cdata = urb->transfer_buffer
+ urb->iso_frame_desc[i].offset;
if (!n) {
PDEBUG(4, "Zero-length packet");
continue;
}
if (st)
PDEBUG(2, "data error: [%d] len=%d, status=%d",
i, n, st);
if (ov->bclass == BCL_OV511)
ov511_move_data(ov, cdata, n);
else if (ov->bclass == BCL_OV518)
ov518_move_data(ov, cdata, n);
else
err("Unknown bridge device (%d)", ov->bridge);
} else if (waitqueue_active(&ov->wq)) {
wake_up_interruptible(&ov->wq);
}
}
/* Resubmit this URB */
urb->dev = ov->dev;
if ((i = usb_submit_urb(urb, GFP_ATOMIC)) != 0)
err("usb_submit_urb() ret %d", i);
return;
}
/****************************************************************************
*
* Stream initialization and termination
*
***************************************************************************/
static int
ov51x_init_isoc(struct usb_ov511 *ov)
{
struct urb *urb;
int fx, err, n, i, size;
PDEBUG(3, "*** Initializing capture ***");
ov->curframe = -1;
if (ov->bridge == BRG_OV511) {
if (cams == 1)
size = 993;
else if (cams == 2)
size = 513;
else if (cams == 3 || cams == 4)
size = 257;
else {
err("\"cams\" parameter too high!");
return -1;
}
} else if (ov->bridge == BRG_OV511PLUS) {
if (cams == 1)
size = 961;
else if (cams == 2)
size = 513;
else if (cams == 3 || cams == 4)
size = 257;
else if (cams >= 5 && cams <= 8)
size = 129;
else if (cams >= 9 && cams <= 31)
size = 33;
else {
err("\"cams\" parameter too high!");
return -1;
}
} else if (ov->bclass == BCL_OV518) {
if (cams == 1)
size = 896;
else if (cams == 2)
size = 512;
else if (cams == 3 || cams == 4)
size = 256;
else if (cams >= 5 && cams <= 8)
size = 128;
else {
err("\"cams\" parameter too high!");
return -1;
}
} else {
err("invalid bridge type");
return -1;
}
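/* These sizes correspond to the isochronous packet sizes of the
* bridge's USB altsettings; smaller packets reserve less bus
* bandwidth, which is how the "cams" parameter trades per-camera
* throughput for the number of cameras sharing one USB 1.1 bus.
*/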
// FIXME: OV518 is hardcoded to 15 FPS (alternate 5) for now
if (ov->bclass == BCL_OV518) {
if (packetsize == -1) {
ov518_set_packet_size(ov, 640);
} else {
dev_info(&ov->dev->dev, "Forcing packet size to %d\n",
packetsize);
ov518_set_packet_size(ov, packetsize);
}
} else {
if (packetsize == -1) {
ov511_set_packet_size(ov, size);
} else {
dev_info(&ov->dev->dev, "Forcing packet size to %d\n",
packetsize);
ov511_set_packet_size(ov, packetsize);
}
}
for (n = 0; n < OV511_NUMSBUF; n++) {
urb = usb_alloc_urb(FRAMES_PER_DESC, GFP_KERNEL);
if (!urb) {
err("init isoc: usb_alloc_urb ret. NULL");
for (i = 0; i < n; i++)
usb_free_urb(ov->sbuf[i].urb);
return -ENOMEM;
}
ov->sbuf[n].urb = urb;
urb->dev = ov->dev;
urb->context = &ov->sbuf[n];
urb->pipe = usb_rcvisocpipe(ov->dev, OV511_ENDPOINT_ADDRESS);
urb->transfer_flags = URB_ISO_ASAP;
urb->transfer_buffer = ov->sbuf[n].data;
urb->complete = ov51x_isoc_irq;
urb->number_of_packets = FRAMES_PER_DESC;
urb->transfer_buffer_length = ov->packet_size * FRAMES_PER_DESC;
urb->interval = 1;
for (fx = 0; fx < FRAMES_PER_DESC; fx++) {
urb->iso_frame_desc[fx].offset = ov->packet_size * fx;
urb->iso_frame_desc[fx].length = ov->packet_size;
}
}
ov->streaming = 1;
for (n = 0; n < OV511_NUMSBUF; n++) {
ov->sbuf[n].urb->dev = ov->dev;
err = usb_submit_urb(ov->sbuf[n].urb, GFP_KERNEL);
if (err) {
err("init isoc: usb_submit_urb(%d) ret %d", n, err);
return err;
}
}
return 0;
}
static void
ov51x_unlink_isoc(struct usb_ov511 *ov)
{
int n;
/* Unschedule all of the iso td's */
for (n = OV511_NUMSBUF - 1; n >= 0; n--) {
if (ov->sbuf[n].urb) {
usb_kill_urb(ov->sbuf[n].urb);
usb_free_urb(ov->sbuf[n].urb);
ov->sbuf[n].urb = NULL;
}
}
}
static void
ov51x_stop_isoc(struct usb_ov511 *ov)
{
if (!ov->streaming || !ov->dev)
return;
PDEBUG(3, "*** Stopping capture ***");
if (ov->bclass == BCL_OV518)
ov518_set_packet_size(ov, 0);
else
ov511_set_packet_size(ov, 0);
ov->streaming = 0;
ov51x_unlink_isoc(ov);
}
static int
ov51x_new_frame(struct usb_ov511 *ov, int framenum)
{
struct ov511_frame *frame;
int newnum;
PDEBUG(4, "ov->curframe = %d, framenum = %d", ov->curframe, framenum);
if (!ov->dev)
return -1;
/* If we're not grabbing a frame right now and the other frame is */
/* ready to be grabbed into, then use it instead */
if (ov->curframe == -1) {
newnum = (framenum - 1 + OV511_NUMFRAMES) % OV511_NUMFRAMES;
if (ov->frame[newnum].grabstate == FRAME_READY)
framenum = newnum;
} else
return 0;
frame = &ov->frame[framenum];
PDEBUG(4, "framenum = %d, width = %d, height = %d", framenum,
frame->width, frame->height);
frame->grabstate = FRAME_GRABBING;
frame->scanstate = STATE_SCANNING;
frame->snapshot = 0;
ov->curframe = framenum;
/* Make sure it's not too big */
if (frame->width > ov->maxwidth)
frame->width = ov->maxwidth;
frame->width &= ~7L; /* Multiple of 8 */
if (frame->height > ov->maxheight)
frame->height = ov->maxheight;
frame->height &= ~3L; /* Multiple of 4 */
return 0;
}
/****************************************************************************
*
* Buffer management
*
***************************************************************************/
/*
* - You must acquire buf_lock before entering this function.
* - Because this code will free any non-null pointer, you must be sure to null
* them if you explicitly free them somewhere else!
*/
static void
ov51x_do_dealloc(struct usb_ov511 *ov)
{
int i;
PDEBUG(4, "entered");
if (ov->fbuf) {
rvfree(ov->fbuf, OV511_NUMFRAMES
* MAX_DATA_SIZE(ov->maxwidth, ov->maxheight));
ov->fbuf = NULL;
}
vfree(ov->rawfbuf);
ov->rawfbuf = NULL;
vfree(ov->tempfbuf);
ov->tempfbuf = NULL;
for (i = 0; i < OV511_NUMSBUF; i++) {
kfree(ov->sbuf[i].data);
ov->sbuf[i].data = NULL;
}
for (i = 0; i < OV511_NUMFRAMES; i++) {
ov->frame[i].data = NULL;
ov->frame[i].rawdata = NULL;
ov->frame[i].tempdata = NULL;
if (ov->frame[i].compbuf) {
free_page((unsigned long) ov->frame[i].compbuf);
ov->frame[i].compbuf = NULL;
}
}
PDEBUG(4, "buffer memory deallocated");
ov->buf_state = BUF_NOT_ALLOCATED;
PDEBUG(4, "leaving");
}
static int
ov51x_alloc(struct usb_ov511 *ov)
{
int i;
const int w = ov->maxwidth;
const int h = ov->maxheight;
const int data_bufsize = OV511_NUMFRAMES * MAX_DATA_SIZE(w, h);
const int raw_bufsize = OV511_NUMFRAMES * MAX_RAW_DATA_SIZE(w, h);
PDEBUG(4, "entered");
mutex_lock(&ov->buf_lock);
if (ov->buf_state == BUF_ALLOCATED)
goto out;
ov->fbuf = rvmalloc(data_bufsize);
if (!ov->fbuf)
goto error;
ov->rawfbuf = vmalloc(raw_bufsize);
if (!ov->rawfbuf)
goto error;
memset(ov->rawfbuf, 0, raw_bufsize);
ov->tempfbuf = vmalloc(raw_bufsize);
if (!ov->tempfbuf)
goto error;
memset(ov->tempfbuf, 0, raw_bufsize);
for (i = 0; i < OV511_NUMSBUF; i++) {
ov->sbuf[i].data = kmalloc(FRAMES_PER_DESC *
MAX_FRAME_SIZE_PER_DESC, GFP_KERNEL);
if (!ov->sbuf[i].data)
goto error;
PDEBUG(4, "sbuf[%d] @ %p", i, ov->sbuf[i].data);
}
for (i = 0; i < OV511_NUMFRAMES; i++) {
ov->frame[i].data = ov->fbuf + i * MAX_DATA_SIZE(w, h);
ov->frame[i].rawdata = ov->rawfbuf
+ i * MAX_RAW_DATA_SIZE(w, h);
ov->frame[i].tempdata = ov->tempfbuf
+ i * MAX_RAW_DATA_SIZE(w, h);
ov->frame[i].compbuf =
(unsigned char *) __get_free_page(GFP_KERNEL);
if (!ov->frame[i].compbuf)
goto error;
PDEBUG(4, "frame[%d] @ %p", i, ov->frame[i].data);
}
ov->buf_state = BUF_ALLOCATED;
out:
mutex_unlock(&ov->buf_lock);
PDEBUG(4, "leaving");
return 0;
error:
ov51x_do_dealloc(ov);
mutex_unlock(&ov->buf_lock);
PDEBUG(4, "errored");
return -ENOMEM;
}
static void
ov51x_dealloc(struct usb_ov511 *ov)
{
PDEBUG(4, "entered");
mutex_lock(&ov->buf_lock);
ov51x_do_dealloc(ov);
mutex_unlock(&ov->buf_lock);
PDEBUG(4, "leaving");
}
/****************************************************************************
*
* V4L 1 API
*
***************************************************************************/
static int
ov51x_v4l1_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
struct usb_ov511 *ov = video_get_drvdata(vdev);
int err, i;
PDEBUG(4, "opening");
mutex_lock(&ov->lock);
err = -EBUSY;
if (ov->user)
goto out;
ov->sub_flag = 0;
/* In case app doesn't set them... */
err = ov51x_set_default_params(ov);
if (err < 0)
goto out;
/* Make sure frames are reset */
for (i = 0; i < OV511_NUMFRAMES; i++) {
ov->frame[i].grabstate = FRAME_UNUSED;
ov->frame[i].bytes_read = 0;
}
/* If compression is on, make sure now that a
* decompressor can be loaded */
if (ov->compress && !ov->decomp_ops) {
err = request_decompressor(ov);
if (err && !dumppix)
goto out;
}
err = ov51x_alloc(ov);
if (err < 0)
goto out;
err = ov51x_init_isoc(ov);
if (err) {
ov51x_dealloc(ov);
goto out;
}
ov->user++;
file->private_data = vdev;
if (ov->led_policy == LED_AUTO)
ov51x_led_control(ov, 1);
out:
mutex_unlock(&ov->lock);
return err;
}
static int
ov51x_v4l1_close(struct file *file)
{
struct video_device *vdev = file->private_data;
struct usb_ov511 *ov = video_get_drvdata(vdev);
PDEBUG(4, "ov511_close");
mutex_lock(&ov->lock);
ov->user--;
ov51x_stop_isoc(ov);
if (ov->led_policy == LED_AUTO)
ov51x_led_control(ov, 0);
if (ov->dev)
ov51x_dealloc(ov);
mutex_unlock(&ov->lock);
/* Device unplugged while open. Only a minimum of unregistration is done
* here; the disconnect callback already did the rest. */
if (!ov->dev) {
mutex_lock(&ov->cbuf_lock);
kfree(ov->cbuf);
ov->cbuf = NULL;
mutex_unlock(&ov->cbuf_lock);
ov51x_dealloc(ov);
kfree(ov);
ov = NULL;
}
file->private_data = NULL;
return 0;
}
/* Do not call this function directly! */
static long
ov51x_v4l1_ioctl_internal(struct file *file, unsigned int cmd, void *arg)
{
struct video_device *vdev = file->private_data;
struct usb_ov511 *ov = video_get_drvdata(vdev);
PDEBUG(5, "IOCtl: 0x%X", cmd);
if (!ov->dev)
return -EIO;
switch (cmd) {
case VIDIOCGCAP:
{
struct video_capability *b = arg;
PDEBUG(4, "VIDIOCGCAP");
memset(b, 0, sizeof(struct video_capability));
sprintf(b->name, "%s USB Camera",
symbolic(brglist, ov->bridge));
b->type = VID_TYPE_CAPTURE | VID_TYPE_SUBCAPTURE;
b->channels = ov->num_inputs;
b->audios = 0;
b->maxwidth = ov->maxwidth;
b->maxheight = ov->maxheight;
b->minwidth = ov->minwidth;
b->minheight = ov->minheight;
return 0;
}
case VIDIOCGCHAN:
{
struct video_channel *v = arg;
PDEBUG(4, "VIDIOCGCHAN");
if ((unsigned)(v->channel) >= ov->num_inputs) {
err("Invalid channel (%d)", v->channel);
return -EINVAL;
}
v->norm = ov->norm;
v->type = VIDEO_TYPE_CAMERA;
v->flags = 0;
// v->flags |= (ov->has_decoder) ? VIDEO_VC_NORM : 0;
v->tuners = 0;
decoder_get_input_name(ov, v->channel, v->name);
return 0;
}
case VIDIOCSCHAN:
{
struct video_channel *v = arg;
int err;
PDEBUG(4, "VIDIOCSCHAN");
/* Make sure it's not a camera */
if (!ov->has_decoder) {
if (v->channel == 0)
return 0;
else
return -EINVAL;
}
if (v->norm != VIDEO_MODE_PAL &&
v->norm != VIDEO_MODE_NTSC &&
v->norm != VIDEO_MODE_SECAM &&
v->norm != VIDEO_MODE_AUTO) {
err("Invalid norm (%d)", v->norm);
return -EINVAL;
}
if ((unsigned)(v->channel) >= ov->num_inputs) {
err("Invalid channel (%d)", v->channel);
return -EINVAL;
}
err = decoder_set_input(ov, v->channel);
if (err)
return err;
err = decoder_set_norm(ov, v->norm);
if (err)
return err;
return 0;
}
case VIDIOCGPICT:
{
struct video_picture *p = arg;
PDEBUG(4, "VIDIOCGPICT");
memset(p, 0, sizeof(struct video_picture));
if (sensor_get_picture(ov, p))
return -EIO;
/* Can we get these from frame[0]? -claudio? */
p->depth = ov->frame[0].depth;
p->palette = ov->frame[0].format;
return 0;
}
case VIDIOCSPICT:
{
struct video_picture *p = arg;
int i, rc;
PDEBUG(4, "VIDIOCSPICT");
if (!get_depth(p->palette))
return -EINVAL;
if (sensor_set_picture(ov, p))
return -EIO;
if (force_palette && p->palette != force_palette) {
dev_info(&ov->dev->dev, "Palette rejected (%s)\n",
symbolic(v4l1_plist, p->palette));
return -EINVAL;
}
// FIXME: Format should be independent of frames
if (p->palette != ov->frame[0].format) {
PDEBUG(4, "Detected format change");
rc = ov51x_wait_frames_inactive(ov);
if (rc)
return rc;
mode_init_regs(ov, ov->frame[0].width,
ov->frame[0].height, p->palette, ov->sub_flag);
}
PDEBUG(4, "Setting depth=%d, palette=%s",
p->depth, symbolic(v4l1_plist, p->palette));
for (i = 0; i < OV511_NUMFRAMES; i++) {
ov->frame[i].depth = p->depth;
ov->frame[i].format = p->palette;
}
return 0;
}
case VIDIOCGCAPTURE:
{
int *vf = arg;
PDEBUG(4, "VIDIOCGCAPTURE");
ov->sub_flag = *vf;
return 0;
}
case VIDIOCSCAPTURE:
{
struct video_capture *vc = arg;
PDEBUG(4, "VIDIOCSCAPTURE");
if (vc->flags)
return -EINVAL;
if (vc->decimation)
return -EINVAL;
vc->x &= ~3L;
vc->y &= ~1L;
vc->y &= ~31L;
if (vc->width == 0)
vc->width = 32;
vc->height /= 16;
vc->height *= 16;
if (vc->height == 0)
vc->height = 16;
ov->subx = vc->x;
ov->suby = vc->y;
ov->subw = vc->width;
ov->subh = vc->height;
return 0;
}
case VIDIOCSWIN:
{
struct video_window *vw = arg;
int i, rc;
PDEBUG(4, "VIDIOCSWIN: %dx%d", vw->width, vw->height);
#if 0
if (vw->flags)
return -EINVAL;
if (vw->clipcount)
return -EINVAL;
if (vw->height != ov->maxheight)
return -EINVAL;
if (vw->width != ov->maxwidth)
return -EINVAL;
#endif
rc = ov51x_wait_frames_inactive(ov);
if (rc)
return rc;
rc = mode_init_regs(ov, vw->width, vw->height,
ov->frame[0].format, ov->sub_flag);
if (rc < 0)
return rc;
for (i = 0; i < OV511_NUMFRAMES; i++) {
ov->frame[i].width = vw->width;
ov->frame[i].height = vw->height;
}
return 0;
}
case VIDIOCGWIN:
{
struct video_window *vw = arg;
memset(vw, 0, sizeof(struct video_window));
vw->x = 0; /* FIXME */
vw->y = 0;
vw->width = ov->frame[0].width;
vw->height = ov->frame[0].height;
vw->flags = 30;
PDEBUG(4, "VIDIOCGWIN: %dx%d", vw->width, vw->height);
return 0;
}
case VIDIOCGMBUF:
{
struct video_mbuf *vm = arg;
int i;
PDEBUG(4, "VIDIOCGMBUF");
memset(vm, 0, sizeof(struct video_mbuf));
vm->size = OV511_NUMFRAMES
* MAX_DATA_SIZE(ov->maxwidth, ov->maxheight);
vm->frames = OV511_NUMFRAMES;
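/* These offsets must mirror the layout set up in ov51x_alloc():
* frame i starts at i * MAX_DATA_SIZE() within the contiguous
* fbuf that ov51x_v4l1_mmap() exposes to userspace.
*/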
vm->offsets[0] = 0;
for (i = 1; i < OV511_NUMFRAMES; i++) {
vm->offsets[i] = vm->offsets[i-1]
+ MAX_DATA_SIZE(ov->maxwidth, ov->maxheight);
}
return 0;
}
case VIDIOCMCAPTURE:
{
struct video_mmap *vm = arg;
int rc, depth;
unsigned int f = vm->frame;
PDEBUG(4, "VIDIOCMCAPTURE: frame: %d, %dx%d, %s", f, vm->width,
vm->height, symbolic(v4l1_plist, vm->format));
depth = get_depth(vm->format);
if (!depth) {
PDEBUG(2, "VIDIOCMCAPTURE: invalid format (%s)",
symbolic(v4l1_plist, vm->format));
return -EINVAL;
}
if (f >= OV511_NUMFRAMES) {
err("VIDIOCMCAPTURE: invalid frame (%d)", f);
return -EINVAL;
}
if (vm->width > ov->maxwidth
|| vm->height > ov->maxheight) {
err("VIDIOCMCAPTURE: requested dimensions too big");
return -EINVAL;
}
if (ov->frame[f].grabstate == FRAME_GRABBING) {
PDEBUG(4, "VIDIOCMCAPTURE: already grabbing");
return -EBUSY;
}
if (force_palette && (vm->format != force_palette)) {
PDEBUG(2, "palette rejected (%s)",
symbolic(v4l1_plist, vm->format));
return -EINVAL;
}
if ((ov->frame[f].width != vm->width) ||
(ov->frame[f].height != vm->height) ||
(ov->frame[f].format != vm->format) ||
(ov->frame[f].sub_flag != ov->sub_flag) ||
(ov->frame[f].depth != depth)) {
PDEBUG(4, "VIDIOCMCAPTURE: change in image parameters");
rc = ov51x_wait_frames_inactive(ov);
if (rc)
return rc;
rc = mode_init_regs(ov, vm->width, vm->height,
vm->format, ov->sub_flag);
#if 0
if (rc < 0) {
PDEBUG(1, "Got error while initializing regs ");
return ret;
}
#endif
ov->frame[f].width = vm->width;
ov->frame[f].height = vm->height;
ov->frame[f].format = vm->format;
ov->frame[f].sub_flag = ov->sub_flag;
ov->frame[f].depth = depth;
}
/* Mark it as ready */
ov->frame[f].grabstate = FRAME_READY;
PDEBUG(4, "VIDIOCMCAPTURE: renewing frame %d", f);
return ov51x_new_frame(ov, f);
}
case VIDIOCSYNC:
{
unsigned int fnum = *((unsigned int *) arg);
struct ov511_frame *frame;
int rc;
if (fnum >= OV511_NUMFRAMES) {
err("VIDIOCSYNC: invalid frame (%d)", fnum);
return -EINVAL;
}
frame = &ov->frame[fnum];
PDEBUG(4, "syncing to frame %d, grabstate = %d", fnum,
frame->grabstate);
switch (frame->grabstate) {
case FRAME_UNUSED:
return -EINVAL;
case FRAME_READY:
case FRAME_GRABBING:
case FRAME_ERROR:
redo:
if (!ov->dev)
return -EIO;
rc = wait_event_interruptible(frame->wq,
(frame->grabstate == FRAME_DONE)
|| (frame->grabstate == FRAME_ERROR));
if (rc)
return rc;
if (frame->grabstate == FRAME_ERROR) {
if ((rc = ov51x_new_frame(ov, fnum)) < 0)
return rc;
goto redo;
}
/* Fall through */
case FRAME_DONE:
if (ov->snap_enabled && !frame->snapshot) {
if ((rc = ov51x_new_frame(ov, fnum)) < 0)
return rc;
goto redo;
}
frame->grabstate = FRAME_UNUSED;
/* Reset the hardware snapshot button */
/* FIXME - Is this the best place for this? */
if ((ov->snap_enabled) && (frame->snapshot)) {
frame->snapshot = 0;
ov51x_clear_snapshot(ov);
}
/* Decompression, format conversion, etc... */
ov51x_postprocess(ov, frame);
break;
} /* end switch */
return 0;
}
case VIDIOCGFBUF:
{
struct video_buffer *vb = arg;
PDEBUG(4, "VIDIOCGFBUF");
memset(vb, 0, sizeof(struct video_buffer));
return 0;
}
case VIDIOCGUNIT:
{
struct video_unit *vu = arg;
PDEBUG(4, "VIDIOCGUNIT");
memset(vu, 0, sizeof(struct video_unit));
vu->video = ov->vdev->minor;
vu->vbi = VIDEO_NO_UNIT;
vu->radio = VIDEO_NO_UNIT;
vu->audio = VIDEO_NO_UNIT;
vu->teletext = VIDEO_NO_UNIT;
return 0;
}
case OV511IOC_WI2C:
{
struct ov511_i2c_struct *w = arg;
return i2c_w_slave(ov, w->slave, w->reg, w->value, w->mask);
}
case OV511IOC_RI2C:
{
struct ov511_i2c_struct *r = arg;
int rc;
rc = i2c_r_slave(ov, r->slave, r->reg);
if (rc < 0)
return rc;
r->value = rc;
return 0;
}
default:
PDEBUG(3, "Unsupported IOCtl: 0x%X", cmd);
return -ENOIOCTLCMD;
} /* end switch */
return 0;
}
static long
ov51x_v4l1_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
struct video_device *vdev = file->private_data;
struct usb_ov511 *ov = video_get_drvdata(vdev);
int rc;
if (mutex_lock_interruptible(&ov->lock))
return -EINTR;
rc = video_usercopy(file, cmd, arg, ov51x_v4l1_ioctl_internal);
mutex_unlock(&ov->lock);
return rc;
}
static ssize_t
ov51x_v4l1_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos)
{
struct video_device *vdev = file->private_data;
int noblock = file->f_flags&O_NONBLOCK;
unsigned long count = cnt;
struct usb_ov511 *ov = video_get_drvdata(vdev);
int i, rc = 0, frmx = -1;
struct ov511_frame *frame;
if (mutex_lock_interruptible(&ov->lock))
return -EINTR;
PDEBUG(4, "%ld bytes, noblock=%d", count, noblock);
if (!vdev || !buf) {
rc = -EFAULT;
goto error;
}
if (!ov->dev) {
rc = -EIO;
goto error;
}
// FIXME: Only supports two frames
/* See if a frame is completed, then use it. */
if (ov->frame[0].grabstate >= FRAME_DONE) /* _DONE or _ERROR */
frmx = 0;
else if (ov->frame[1].grabstate >= FRAME_DONE)/* _DONE or _ERROR */
frmx = 1;
/* If nonblocking we return immediately */
if (noblock && (frmx == -1)) {
rc = -EAGAIN;
goto error;
}
/* If no FRAME_DONE, look for a FRAME_GRABBING state. */
/* See if a frame is in process (grabbing), then use it. */
if (frmx == -1) {
if (ov->frame[0].grabstate == FRAME_GRABBING)
frmx = 0;
else if (ov->frame[1].grabstate == FRAME_GRABBING)
frmx = 1;
}
/* If no frame is active, start one. */
if (frmx == -1) {
if ((rc = ov51x_new_frame(ov, frmx = 0))) {
err("read: ov51x_new_frame error");
goto error;
}
}
frame = &ov->frame[frmx];
restart:
if (!ov->dev) {
rc = -EIO;
goto error;
}
/* Wait while we're grabbing the image */
PDEBUG(4, "Waiting image grabbing");
rc = wait_event_interruptible(frame->wq,
(frame->grabstate == FRAME_DONE)
|| (frame->grabstate == FRAME_ERROR));
if (rc)
goto error;
PDEBUG(4, "Got image, frame->grabstate = %d", frame->grabstate);
PDEBUG(4, "bytes_recvd = %d", frame->bytes_recvd);
if (frame->grabstate == FRAME_ERROR) {
frame->bytes_read = 0;
err("** ick! ** Errored frame %d", ov->curframe);
if (ov51x_new_frame(ov, frmx)) {
err("read: ov51x_new_frame error");
goto error;
}
goto restart;
}
/* Repeat until we get a snapshot frame */
if (ov->snap_enabled)
PDEBUG(4, "Waiting snapshot frame");
if (ov->snap_enabled && !frame->snapshot) {
frame->bytes_read = 0;
if ((rc = ov51x_new_frame(ov, frmx))) {
err("read: ov51x_new_frame error");
goto error;
}
goto restart;
}
/* Clear the snapshot */
if (ov->snap_enabled && frame->snapshot) {
frame->snapshot = 0;
ov51x_clear_snapshot(ov);
}
/* Decompression, format conversion, etc... */
ov51x_postprocess(ov, frame);
PDEBUG(4, "frmx=%d, bytes_read=%ld, length=%ld", frmx,
frame->bytes_read,
get_frame_length(frame));
/* copy bytes to user space; we allow for partial reads */
// if ((count + frame->bytes_read)
// > get_frame_length((struct ov511_frame *)frame))
// count = frame->scanlength - frame->bytes_read;
/* FIXME - count hardwired to be one frame... */
count = get_frame_length(frame);
PDEBUG(4, "Copy to user space: %ld bytes", count);
if ((i = copy_to_user(buf, frame->data + frame->bytes_read, count))) {
PDEBUG(4, "Copy failed! %d bytes not copied", i);
rc = -EFAULT;
goto error;
}
frame->bytes_read += count;
PDEBUG(4, "{copy} count used=%ld, new bytes_read=%ld",
count, frame->bytes_read);
/* If all data have been read... */
if (frame->bytes_read
>= get_frame_length(frame)) {
frame->bytes_read = 0;
// FIXME: Only supports two frames
/* Mark it as available to be used again. */
ov->frame[frmx].grabstate = FRAME_UNUSED;
if ((rc = ov51x_new_frame(ov, !frmx))) {
err("ov51x_new_frame returned error");
goto error;
}
}
PDEBUG(4, "read finished, returning %ld (sweet)", count);
mutex_unlock(&ov->lock);
return count;
error:
mutex_unlock(&ov->lock);
return rc;
}
static int
ov51x_v4l1_mmap(struct file *file, struct vm_area_struct *vma)
{
struct video_device *vdev = file->private_data;
unsigned long start = vma->vm_start;
unsigned long size = vma->vm_end - vma->vm_start;
struct usb_ov511 *ov = video_get_drvdata(vdev);
unsigned long page, pos;
if (ov->dev == NULL)
return -EIO;
PDEBUG(4, "mmap: %ld (%lX) bytes", size, size);
if (size > (((OV511_NUMFRAMES
* MAX_DATA_SIZE(ov->maxwidth, ov->maxheight)
+ PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))))
return -EINVAL;
if (mutex_lock_interruptible(&ov->lock))
return -EINTR;
pos = (unsigned long)ov->fbuf;
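/* fbuf comes from rvmalloc(), so it is only virtually contiguous;
* each page has to be looked up with vmalloc_to_pfn() and remapped
* individually instead of remapping the whole range in one call.
*/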
while (size > 0) {
page = vmalloc_to_pfn((void *)pos);
if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED)) {
mutex_unlock(&ov->lock);
return -EAGAIN;
}
start += PAGE_SIZE;
pos += PAGE_SIZE;
if (size > PAGE_SIZE)
size -= PAGE_SIZE;
else
size = 0;
}
mutex_unlock(&ov->lock);
return 0;
}
static const struct v4l2_file_operations ov511_fops = {
.owner = THIS_MODULE,
.open = ov51x_v4l1_open,
.release = ov51x_v4l1_close,
.read = ov51x_v4l1_read,
.mmap = ov51x_v4l1_mmap,
.ioctl = ov51x_v4l1_ioctl,
};
static struct video_device vdev_template = {
.name = "OV511 USB Camera",
.fops = &ov511_fops,
.release = video_device_release,
};
/****************************************************************************
*
* OV511 and sensor configuration
*
***************************************************************************/
/* This initializes the OV7610, OV7620, or OV76BE sensor. The OV76BE uses
* the same register settings as the OV7610, since they are very similar.
*/
static int
ov7xx0_configure(struct usb_ov511 *ov)
{
int i, success;
int rc;
/* Lawrence Glaister <lg@jfm.bc.ca> reports:
*
* Register 0x0f in the 7610 has the following effects:
*
* 0x85 (AEC method 1): Best overall, good contrast range
* 0x45 (AEC method 2): Very overexposed
* 0xa5 (spec sheet default): Ok, but the black level is
* shifted resulting in loss of contrast
* 0x05 (old driver setting): very overexposed, too much
* contrast
*/
static struct ov511_regvals aRegvalsNorm7610[] = {
{ OV511_I2C_BUS, 0x10, 0xff },
{ OV511_I2C_BUS, 0x16, 0x06 },
{ OV511_I2C_BUS, 0x28, 0x24 },
{ OV511_I2C_BUS, 0x2b, 0xac },
{ OV511_I2C_BUS, 0x12, 0x00 },
{ OV511_I2C_BUS, 0x38, 0x81 },
{ OV511_I2C_BUS, 0x28, 0x24 }, /* 0c */
{ OV511_I2C_BUS, 0x0f, 0x85 }, /* lg's setting */
{ OV511_I2C_BUS, 0x15, 0x01 },
{ OV511_I2C_BUS, 0x20, 0x1c },
{ OV511_I2C_BUS, 0x23, 0x2a },
{ OV511_I2C_BUS, 0x24, 0x10 },
{ OV511_I2C_BUS, 0x25, 0x8a },
{ OV511_I2C_BUS, 0x26, 0xa2 },
{ OV511_I2C_BUS, 0x27, 0xc2 },
{ OV511_I2C_BUS, 0x2a, 0x04 },
{ OV511_I2C_BUS, 0x2c, 0xfe },
{ OV511_I2C_BUS, 0x2d, 0x93 },
{ OV511_I2C_BUS, 0x30, 0x71 },
{ OV511_I2C_BUS, 0x31, 0x60 },
{ OV511_I2C_BUS, 0x32, 0x26 },
{ OV511_I2C_BUS, 0x33, 0x20 },
{ OV511_I2C_BUS, 0x34, 0x48 },
{ OV511_I2C_BUS, 0x12, 0x24 },
{ OV511_I2C_BUS, 0x11, 0x01 },
{ OV511_I2C_BUS, 0x0c, 0x24 },
{ OV511_I2C_BUS, 0x0d, 0x24 },
{ OV511_DONE_BUS, 0x0, 0x00 },
};
static struct ov511_regvals aRegvalsNorm7620[] = {
{ OV511_I2C_BUS, 0x00, 0x00 },
{ OV511_I2C_BUS, 0x01, 0x80 },
{ OV511_I2C_BUS, 0x02, 0x80 },
{ OV511_I2C_BUS, 0x03, 0xc0 },
{ OV511_I2C_BUS, 0x06, 0x60 },
{ OV511_I2C_BUS, 0x07, 0x00 },
{ OV511_I2C_BUS, 0x0c, 0x24 },
{ OV511_I2C_BUS, 0x0c, 0x24 },
{ OV511_I2C_BUS, 0x0d, 0x24 },
{ OV511_I2C_BUS, 0x11, 0x01 },
{ OV511_I2C_BUS, 0x12, 0x24 },
{ OV511_I2C_BUS, 0x13, 0x01 },
{ OV511_I2C_BUS, 0x14, 0x84 },
{ OV511_I2C_BUS, 0x15, 0x01 },
{ OV511_I2C_BUS, 0x16, 0x03 },
{ OV511_I2C_BUS, 0x17, 0x2f },
{ OV511_I2C_BUS, 0x18, 0xcf },
{ OV511_I2C_BUS, 0x19, 0x06 },
{ OV511_I2C_BUS, 0x1a, 0xf5 },
{ OV511_I2C_BUS, 0x1b, 0x00 },
{ OV511_I2C_BUS, 0x20, 0x18 },
{ OV511_I2C_BUS, 0x21, 0x80 },
{ OV511_I2C_BUS, 0x22, 0x80 },
{ OV511_I2C_BUS, 0x23, 0x00 },
{ OV511_I2C_BUS, 0x26, 0xa2 },
{ OV511_I2C_BUS, 0x27, 0xea },
{ OV511_I2C_BUS, 0x28, 0x20 },
{ OV511_I2C_BUS, 0x29, 0x00 },
{ OV511_I2C_BUS, 0x2a, 0x10 },
{ OV511_I2C_BUS, 0x2b, 0x00 },
{ OV511_I2C_BUS, 0x2c, 0x88 },
{ OV511_I2C_BUS, 0x2d, 0x91 },
{ OV511_I2C_BUS, 0x2e, 0x80 },
{ OV511_I2C_BUS, 0x2f, 0x44 },
{ OV511_I2C_BUS, 0x60, 0x27 },
{ OV511_I2C_BUS, 0x61, 0x02 },
{ OV511_I2C_BUS, 0x62, 0x5f },
{ OV511_I2C_BUS, 0x63, 0xd5 },
{ OV511_I2C_BUS, 0x64, 0x57 },
{ OV511_I2C_BUS, 0x65, 0x83 },
{ OV511_I2C_BUS, 0x66, 0x55 },
{ OV511_I2C_BUS, 0x67, 0x92 },
{ OV511_I2C_BUS, 0x68, 0xcf },
{ OV511_I2C_BUS, 0x69, 0x76 },
{ OV511_I2C_BUS, 0x6a, 0x22 },
{ OV511_I2C_BUS, 0x6b, 0x00 },
{ OV511_I2C_BUS, 0x6c, 0x02 },
{ OV511_I2C_BUS, 0x6d, 0x44 },
{ OV511_I2C_BUS, 0x6e, 0x80 },
{ OV511_I2C_BUS, 0x6f, 0x1d },
{ OV511_I2C_BUS, 0x70, 0x8b },
{ OV511_I2C_BUS, 0x71, 0x00 },
{ OV511_I2C_BUS, 0x72, 0x14 },
{ OV511_I2C_BUS, 0x73, 0x54 },
{ OV511_I2C_BUS, 0x74, 0x00 },
{ OV511_I2C_BUS, 0x75, 0x8e },
{ OV511_I2C_BUS, 0x76, 0x00 },
{ OV511_I2C_BUS, 0x77, 0xff },
{ OV511_I2C_BUS, 0x78, 0x80 },
{ OV511_I2C_BUS, 0x79, 0x80 },
{ OV511_I2C_BUS, 0x7a, 0x80 },
{ OV511_I2C_BUS, 0x7b, 0xe2 },
{ OV511_I2C_BUS, 0x7c, 0x00 },
{ OV511_DONE_BUS, 0x0, 0x00 },
};
PDEBUG(4, "starting configuration");
/* This looks redundant, but is necessary for WebCam 3 */
ov->primary_i2c_slave = OV7xx0_SID;
if (ov51x_set_slave_ids(ov, OV7xx0_SID) < 0)
return -1;
if (init_ov_sensor(ov) >= 0) {
PDEBUG(1, "OV7xx0 sensor initalized (method 1)");
} else {
/* Reset the 76xx */
if (i2c_w(ov, 0x12, 0x80) < 0)
return -1;
/* Wait for it to initialize */
msleep(150);
i = 0;
success = 0;
while (i <= i2c_detect_tries) {
if ((i2c_r(ov, OV7610_REG_ID_HIGH) == 0x7F) &&
(i2c_r(ov, OV7610_REG_ID_LOW) == 0xA2)) {
success = 1;
break;
} else {
i++;
}
}
// Was (i == i2c_detect_tries) previously. This obviously used to always report
// success. Whether anyone actually depended on that bug is unknown
if ((i >= i2c_detect_tries) && (success == 0)) {
err("Failed to read sensor ID. You might not have an");
err("OV7610/20, or it may be not responding. Report");
err("this to " EMAIL);
err("This is only a warning. You can attempt to use");
err("your camera anyway");
// Only issue a warning for now
// return -1;
} else {
PDEBUG(1, "OV7xx0 initialized (method 2, %dx)", i+1);
}
}
/* Detect sensor (sub)type */
rc = i2c_r(ov, OV7610_REG_COM_I);
if (rc < 0) {
err("Error detecting sensor type");
return -1;
} else if ((rc & 3) == 3) {
dev_info(&ov->dev->dev, "Sensor is an OV7610\n");
ov->sensor = SEN_OV7610;
} else if ((rc & 3) == 1) {
/* I don't know what's different about the 76BE yet. */
if (i2c_r(ov, 0x15) & 1)
dev_info(&ov->dev->dev, "Sensor is an OV7620AE\n");
else
dev_info(&ov->dev->dev, "Sensor is an OV76BE\n");
/* OV511+ will return all zero isoc data unless we
* configure the sensor as a 7620. Someone needs to
* find the exact reg. setting that causes this. */
if (ov->bridge == BRG_OV511PLUS) {
dev_info(&ov->dev->dev,
"Enabling 511+/7620AE workaround\n");
ov->sensor = SEN_OV7620;
} else {
ov->sensor = SEN_OV76BE;
}
} else if ((rc & 3) == 0) {
dev_info(&ov->dev->dev, "Sensor is an OV7620\n");
ov->sensor = SEN_OV7620;
} else {
err("Unknown image sensor version: %d", rc & 3);
return -1;
}
if (ov->sensor == SEN_OV7620) {
PDEBUG(4, "Writing 7620 registers");
if (write_regvals(ov, aRegvalsNorm7620))
return -1;
} else {
PDEBUG(4, "Writing 7610 registers");
if (write_regvals(ov, aRegvalsNorm7610))
return -1;
}
/* Set sensor-specific vars */
ov->maxwidth = 640;
ov->maxheight = 480;
ov->minwidth = 64;
ov->minheight = 48;
// FIXME: These do not match the actual settings yet
ov->brightness = 0x80 << 8;
ov->contrast = 0x80 << 8;
ov->colour = 0x80 << 8;
ov->hue = 0x80 << 8;
return 0;
}
/* This initializes the OV6620, OV6630, OV6630AE, or OV6630AF sensor. */
static int
ov6xx0_configure(struct usb_ov511 *ov)
{
int rc;
static struct ov511_regvals aRegvalsNorm6x20[] = {
{ OV511_I2C_BUS, 0x12, 0x80 }, /* reset */
{ OV511_I2C_BUS, 0x11, 0x01 },
{ OV511_I2C_BUS, 0x03, 0x60 },
{ OV511_I2C_BUS, 0x05, 0x7f }, /* For when autoadjust is off */
{ OV511_I2C_BUS, 0x07, 0xa8 },
/* The ratio of 0x0c and 0x0d controls the white point */
{ OV511_I2C_BUS, 0x0c, 0x24 },
{ OV511_I2C_BUS, 0x0d, 0x24 },
{ OV511_I2C_BUS, 0x0f, 0x15 }, /* COMS */
{ OV511_I2C_BUS, 0x10, 0x75 }, /* AEC Exposure time */
{ OV511_I2C_BUS, 0x12, 0x24 }, /* Enable AGC */
{ OV511_I2C_BUS, 0x14, 0x04 },
/* 0x16: 0x06 helps frame stability with moving objects */
{ OV511_I2C_BUS, 0x16, 0x06 },
// { OV511_I2C_BUS, 0x20, 0x30 }, /* Aperture correction enable */
{ OV511_I2C_BUS, 0x26, 0xb2 }, /* BLC enable */
/* 0x28: 0x05 Selects RGB format if RGB on */
{ OV511_I2C_BUS, 0x28, 0x05 },
{ OV511_I2C_BUS, 0x2a, 0x04 }, /* Disable framerate adjust */
// { OV511_I2C_BUS, 0x2b, 0xac }, /* Framerate; Set 2a[7] first */
{ OV511_I2C_BUS, 0x2d, 0x99 },
{ OV511_I2C_BUS, 0x33, 0xa0 }, /* Color Processing Parameter */
{ OV511_I2C_BUS, 0x34, 0xd2 }, /* Max A/D range */
{ OV511_I2C_BUS, 0x38, 0x8b },
{ OV511_I2C_BUS, 0x39, 0x40 },
{ OV511_I2C_BUS, 0x3c, 0x39 }, /* Enable AEC mode changing */
{ OV511_I2C_BUS, 0x3c, 0x3c }, /* Change AEC mode */
{ OV511_I2C_BUS, 0x3c, 0x24 }, /* Disable AEC mode changing */
{ OV511_I2C_BUS, 0x3d, 0x80 },
/* These next two registers (0x4a, 0x4b) are undocumented. They
* control the color balance */
{ OV511_I2C_BUS, 0x4a, 0x80 },
{ OV511_I2C_BUS, 0x4b, 0x80 },
{ OV511_I2C_BUS, 0x4d, 0xd2 }, /* This reduces noise a bit */
{ OV511_I2C_BUS, 0x4e, 0xc1 },
{ OV511_I2C_BUS, 0x4f, 0x04 },
// Do 50-53 have any effect?
// Toggle 0x12[2] off and on here?
{ OV511_DONE_BUS, 0x0, 0x00 }, /* END MARKER */
};
static struct ov511_regvals aRegvalsNorm6x30[] = {
/*OK*/ { OV511_I2C_BUS, 0x12, 0x80 }, /* reset */
{ OV511_I2C_BUS, 0x11, 0x00 },
/*OK*/ { OV511_I2C_BUS, 0x03, 0x60 },
/*0A?*/ { OV511_I2C_BUS, 0x05, 0x7f }, /* For when autoadjust is off */
{ OV511_I2C_BUS, 0x07, 0xa8 },
/* The ratio of 0x0c and 0x0d controls the white point */
/*OK*/ { OV511_I2C_BUS, 0x0c, 0x24 },
/*OK*/ { OV511_I2C_BUS, 0x0d, 0x24 },
/*A*/ { OV511_I2C_BUS, 0x0e, 0x20 },
// /*04?*/ { OV511_I2C_BUS, 0x14, 0x80 },
{ OV511_I2C_BUS, 0x16, 0x03 },
// /*OK*/ { OV511_I2C_BUS, 0x20, 0x30 }, /* Aperture correction enable */
// 21 & 22? The suggested values look wrong. Go with default
/*A*/ { OV511_I2C_BUS, 0x23, 0xc0 },
/*A*/ { OV511_I2C_BUS, 0x25, 0x9a }, // Check this against default
// /*OK*/ { OV511_I2C_BUS, 0x26, 0xb2 }, /* BLC enable */
/* 0x28: 0x05 Selects RGB format if RGB on */
// /*04?*/ { OV511_I2C_BUS, 0x28, 0x05 },
// /*04?*/ { OV511_I2C_BUS, 0x28, 0x45 }, // DEBUG: Tristate UV bus
/*OK*/ { OV511_I2C_BUS, 0x2a, 0x04 }, /* Disable framerate adjust */
// /*OK*/ { OV511_I2C_BUS, 0x2b, 0xac }, /* Framerate; Set 2a[7] first */
{ OV511_I2C_BUS, 0x2d, 0x99 },
// /*A*/ { OV511_I2C_BUS, 0x33, 0x26 }, // Reserved bits on 6620
// /*d2?*/ { OV511_I2C_BUS, 0x34, 0x03 }, /* Max A/D range */
// /*8b?*/ { OV511_I2C_BUS, 0x38, 0x83 },
// /*40?*/ { OV511_I2C_BUS, 0x39, 0xc0 }, // 6630 adds bit 7
// { OV511_I2C_BUS, 0x3c, 0x39 }, /* Enable AEC mode changing */
// { OV511_I2C_BUS, 0x3c, 0x3c }, /* Change AEC mode */
// { OV511_I2C_BUS, 0x3c, 0x24 }, /* Disable AEC mode changing */
{ OV511_I2C_BUS, 0x3d, 0x80 },
// /*A*/ { OV511_I2C_BUS, 0x3f, 0x0e },
/* These next two registers (0x4a, 0x4b) are undocumented. They
* control the color balance */
// /*OK?*/ { OV511_I2C_BUS, 0x4a, 0x80 }, // Check these
// /*OK?*/ { OV511_I2C_BUS, 0x4b, 0x80 },
{ OV511_I2C_BUS, 0x4d, 0x10 }, /* U = 0.563u, V = 0.714v */
/*c1?*/ { OV511_I2C_BUS, 0x4e, 0x40 },
/* UV average mode, color killer: strongest */
{ OV511_I2C_BUS, 0x4f, 0x07 },
{ OV511_I2C_BUS, 0x54, 0x23 }, /* Max AGC gain: 18dB */
{ OV511_I2C_BUS, 0x57, 0x81 }, /* (default) */
{ OV511_I2C_BUS, 0x59, 0x01 }, /* AGC dark current comp: +1 */
{ OV511_I2C_BUS, 0x5a, 0x2c }, /* (undocumented) */
{ OV511_I2C_BUS, 0x5b, 0x0f }, /* AWB chrominance levels */
// { OV511_I2C_BUS, 0x5c, 0x10 },
{ OV511_DONE_BUS, 0x0, 0x00 }, /* END MARKER */
};
PDEBUG(4, "starting sensor configuration");
if (init_ov_sensor(ov) < 0) {
err("Failed to read sensor ID. You might not have an OV6xx0,");
err("or it may be not responding. Report this to " EMAIL);
return -1;
} else {
PDEBUG(1, "OV6xx0 sensor detected");
}
/* Detect sensor (sub)type */
rc = i2c_r(ov, OV7610_REG_COM_I);
if (rc < 0) {
err("Error detecting sensor type");
return -1;
}
if ((rc & 3) == 0) {
ov->sensor = SEN_OV6630;
dev_info(&ov->dev->dev, "Sensor is an OV6630\n");
} else if ((rc & 3) == 1) {
ov->sensor = SEN_OV6620;
dev_info(&ov->dev->dev, "Sensor is an OV6620\n");
} else if ((rc & 3) == 2) {
ov->sensor = SEN_OV6630;
dev_info(&ov->dev->dev, "Sensor is an OV6630AE\n");
} else if ((rc & 3) == 3) {
ov->sensor = SEN_OV6630;
dev_info(&ov->dev->dev, "Sensor is an OV6630AF\n");
}
/* Set sensor-specific vars */
ov->maxwidth = 352;
ov->maxheight = 288;
ov->minwidth = 64;
ov->minheight = 48;
// FIXME: These do not match the actual settings yet
ov->brightness = 0x80 << 8;
ov->contrast = 0x80 << 8;
ov->colour = 0x80 << 8;
ov->hue = 0x80 << 8;
if (ov->sensor == SEN_OV6620) {
PDEBUG(4, "Writing 6x20 registers");
if (write_regvals(ov, aRegvalsNorm6x20))
return -1;
} else {
PDEBUG(4, "Writing 6x30 registers");
if (write_regvals(ov, aRegvalsNorm6x30))
return -1;
}
return 0;
}
/* This initializes the KS0127 and KS0127B video decoders. */
static int
ks0127_configure(struct usb_ov511 *ov)
{
int rc;
// FIXME: I don't know how to sync or reset it yet
#if 0
if (ov51x_init_ks_sensor(ov) < 0) {
err("Failed to initialize the KS0127");
return -1;
} else {
PDEBUG(1, "KS012x(B) sensor detected");
}
#endif
/* Detect decoder subtype */
rc = i2c_r(ov, 0x00);
if (rc < 0) {
err("Error detecting sensor type");
return -1;
} else if (rc & 0x08) {
rc = i2c_r(ov, 0x3d);
if (rc < 0) {
err("Error detecting sensor type");
return -1;
} else if ((rc & 0x0f) == 0) {
dev_info(&ov->dev->dev, "Sensor is a KS0127\n");
ov->sensor = SEN_KS0127;
} else if ((rc & 0x0f) == 9) {
dev_info(&ov->dev->dev, "Sensor is a KS0127B Rev. A\n");
ov->sensor = SEN_KS0127B;
}
} else {
err("Error: Sensor is an unsupported KS0122");
return -1;
}
/* Set sensor-specific vars */
ov->maxwidth = 640;
ov->maxheight = 480;
ov->minwidth = 64;
ov->minheight = 48;
// FIXME: These do not match the actual settings yet
ov->brightness = 0x80 << 8;
ov->contrast = 0x80 << 8;
ov->colour = 0x80 << 8;
ov->hue = 0x80 << 8;
/* This device is not supported yet. Bail out now... */
err("This sensor is not supported yet.");
return -1;
}
/* This initializes the SAA7111A video decoder. */
static int
saa7111a_configure(struct usb_ov511 *ov)
{
int rc;
/* Since there is no register reset command, all registers must be
* written; otherwise the decoder gives erratic results */
static struct ov511_regvals aRegvalsNormSAA7111A[] = {
{ OV511_I2C_BUS, 0x06, 0xce },
{ OV511_I2C_BUS, 0x07, 0x00 },
{ OV511_I2C_BUS, 0x10, 0x44 }, /* YUV422, 240/286 lines */
{ OV511_I2C_BUS, 0x0e, 0x01 }, /* NTSC M or PAL BGHI */
{ OV511_I2C_BUS, 0x00, 0x00 },
{ OV511_I2C_BUS, 0x01, 0x00 },
{ OV511_I2C_BUS, 0x03, 0x23 },
{ OV511_I2C_BUS, 0x04, 0x00 },
{ OV511_I2C_BUS, 0x05, 0x00 },
{ OV511_I2C_BUS, 0x08, 0xc8 }, /* Auto field freq */
{ OV511_I2C_BUS, 0x09, 0x01 }, /* Chrom. trap off, APER=0.25 */
{ OV511_I2C_BUS, 0x0a, 0x80 }, /* BRIG=128 */
{ OV511_I2C_BUS, 0x0b, 0x40 }, /* CONT=1.0 */
{ OV511_I2C_BUS, 0x0c, 0x40 }, /* SATN=1.0 */
{ OV511_I2C_BUS, 0x0d, 0x00 }, /* HUE=0 */
{ OV511_I2C_BUS, 0x0f, 0x00 },
{ OV511_I2C_BUS, 0x11, 0x0c },
{ OV511_I2C_BUS, 0x12, 0x00 },
{ OV511_I2C_BUS, 0x13, 0x00 },
{ OV511_I2C_BUS, 0x14, 0x00 },
{ OV511_I2C_BUS, 0x15, 0x00 },
{ OV511_I2C_BUS, 0x16, 0x00 },
{ OV511_I2C_BUS, 0x17, 0x00 },
{ OV511_I2C_BUS, 0x02, 0xc0 }, /* Composite input 0 */
{ OV511_DONE_BUS, 0x0, 0x00 },
};
// FIXME: I don't know how to sync or reset it yet
#if 0
if (ov51x_init_saa_sensor(ov) < 0) {
err("Failed to initialize the SAA7111A");
return -1;
} else {
PDEBUG(1, "SAA7111A sensor detected");
}
#endif
/* 640x480 not supported with PAL */
if (ov->pal) {
ov->maxwidth = 320;
ov->maxheight = 240; /* Even field only */
} else {
ov->maxwidth = 640;
ov->maxheight = 480; /* Even/Odd fields */
}
ov->minwidth = 320;
ov->minheight = 240; /* Even field only */
ov->has_decoder = 1;
ov->num_inputs = 8;
ov->norm = VIDEO_MODE_AUTO;
ov->stop_during_set = 0; /* Decoder guarantees stable image */
/* Decoder doesn't change these values, so we use these instead of
* actually reading the registers (which doesn't work) */
ov->brightness = 0x80 << 8;
ov->contrast = 0x40 << 9;
ov->colour = 0x40 << 9;
ov->hue = 32768;
PDEBUG(4, "Writing SAA7111A registers");
if (write_regvals(ov, aRegvalsNormSAA7111A))
return -1;
/* Detect version of decoder. This must be done after writing the
* initial regs or the decoder will lock up. */
rc = i2c_r(ov, 0x00);
if (rc < 0) {
err("Error detecting sensor version");
return -1;
} else {
dev_info(&ov->dev->dev,
"Sensor is an SAA7111A (version 0x%x)\n", rc);
ov->sensor = SEN_SAA7111A;
}
// FIXME: Fix this for OV518(+)
/* Latch to negative edge of clock. Otherwise, we get incorrect
* colors and jitter in the digital signal. */
if (ov->bclass == BCL_OV511)
reg_w(ov, 0x11, 0x00);
else
dev_warn(&ov->dev->dev,
"SAA7111A not yet supported with OV518/OV518+\n");
return 0;
}
/* This initializes the OV511/OV511+ and the sensor */
static int
ov511_configure(struct usb_ov511 *ov)
{
static struct ov511_regvals aRegvalsInit511[] = {
{ OV511_REG_BUS, R51x_SYS_RESET, 0x7f },
{ OV511_REG_BUS, R51x_SYS_INIT, 0x01 },
{ OV511_REG_BUS, R51x_SYS_RESET, 0x7f },
{ OV511_REG_BUS, R51x_SYS_INIT, 0x01 },
{ OV511_REG_BUS, R51x_SYS_RESET, 0x3f },
{ OV511_REG_BUS, R51x_SYS_INIT, 0x01 },
{ OV511_REG_BUS, R51x_SYS_RESET, 0x3d },
{ OV511_DONE_BUS, 0x0, 0x00},
};
static struct ov511_regvals aRegvalsNorm511[] = {
{ OV511_REG_BUS, R511_DRAM_FLOW_CTL, 0x01 },
{ OV511_REG_BUS, R51x_SYS_SNAP, 0x00 },
{ OV511_REG_BUS, R51x_SYS_SNAP, 0x02 },
{ OV511_REG_BUS, R51x_SYS_SNAP, 0x00 },
{ OV511_REG_BUS, R511_FIFO_OPTS, 0x1f },
{ OV511_REG_BUS, R511_COMP_EN, 0x00 },
{ OV511_REG_BUS, R511_COMP_LUT_EN, 0x03 },
{ OV511_DONE_BUS, 0x0, 0x00 },
};
static struct ov511_regvals aRegvalsNorm511Plus[] = {
{ OV511_REG_BUS, R511_DRAM_FLOW_CTL, 0xff },
{ OV511_REG_BUS, R51x_SYS_SNAP, 0x00 },
{ OV511_REG_BUS, R51x_SYS_SNAP, 0x02 },
{ OV511_REG_BUS, R51x_SYS_SNAP, 0x00 },
{ OV511_REG_BUS, R511_FIFO_OPTS, 0xff },
{ OV511_REG_BUS, R511_COMP_EN, 0x00 },
{ OV511_REG_BUS, R511_COMP_LUT_EN, 0x03 },
{ OV511_DONE_BUS, 0x0, 0x00 },
};
PDEBUG(4, "");
ov->customid = reg_r(ov, R511_SYS_CUST_ID);
if (ov->customid < 0) {
err("Unable to read camera bridge registers");
goto error;
}
PDEBUG (1, "CustomID = %d", ov->customid);
ov->desc = symbolic(camlist, ov->customid);
dev_info(&ov->dev->dev, "model: %s\n", ov->desc);
if (0 == strcmp(ov->desc, NOT_DEFINED_STR)) {
err("Camera type (%d) not recognized", ov->customid);
err("Please notify " EMAIL " of the name,");
err("manufacturer, model, and this number of your camera.");
err("Also include the output of the detection process.");
}
if (ov->customid == 70) /* USB Life TV (PAL/SECAM) */
ov->pal = 1;
if (write_regvals(ov, aRegvalsInit511))
goto error;
if (ov->led_policy == LED_OFF || ov->led_policy == LED_AUTO)
ov51x_led_control(ov, 0);
/* The OV511+ has undocumented bits in the flow control register.
* Setting it to 0xff fixes the corruption with moving objects. */
if (ov->bridge == BRG_OV511) {
if (write_regvals(ov, aRegvalsNorm511))
goto error;
} else if (ov->bridge == BRG_OV511PLUS) {
if (write_regvals(ov, aRegvalsNorm511Plus))
goto error;
} else {
err("Invalid bridge");
}
if (ov511_init_compression(ov))
goto error;
ov->packet_numbering = 1;
ov511_set_packet_size(ov, 0);
ov->snap_enabled = snapshot;
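/* Probe for a sensor by trying each known slave ID in turn: OV7xx0,
* then OV6xx0, OV8xx0, SAA7111A, and finally KS0127 (see the nested
* tests below). A failed I2C write to the test register means nothing
* answered at that address. */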
/* Test for 7xx0 */
PDEBUG(3, "Testing for 0V7xx0");
ov->primary_i2c_slave = OV7xx0_SID;
if (ov51x_set_slave_ids(ov, OV7xx0_SID) < 0)
goto error;
if (i2c_w(ov, 0x12, 0x80) < 0) {
/* Test for 6xx0 */
PDEBUG(3, "Testing for 0V6xx0");
ov->primary_i2c_slave = OV6xx0_SID;
if (ov51x_set_slave_ids(ov, OV6xx0_SID) < 0)
goto error;
if (i2c_w(ov, 0x12, 0x80) < 0) {
/* Test for 8xx0 */
PDEBUG(3, "Testing for 0V8xx0");
ov->primary_i2c_slave = OV8xx0_SID;
if (ov51x_set_slave_ids(ov, OV8xx0_SID) < 0)
goto error;
if (i2c_w(ov, 0x12, 0x80) < 0) {
/* Test for SAA7111A */
PDEBUG(3, "Testing for SAA7111A");
ov->primary_i2c_slave = SAA7111A_SID;
if (ov51x_set_slave_ids(ov, SAA7111A_SID) < 0)
goto error;
if (i2c_w(ov, 0x0d, 0x00) < 0) {
/* Test for KS0127 */
PDEBUG(3, "Testing for KS0127");
ov->primary_i2c_slave = KS0127_SID;
if (ov51x_set_slave_ids(ov, KS0127_SID) < 0)
goto error;
if (i2c_w(ov, 0x10, 0x00) < 0) {
err("Can't determine sensor slave IDs");
goto error;
} else {
if (ks0127_configure(ov) < 0) {
err("Failed to configure KS0127");
goto error;
}
}
} else {
if (saa7111a_configure(ov) < 0) {
err("Failed to configure SAA7111A");
goto error;
}
}
} else {
err("Detected unsupported OV8xx0 sensor");
goto error;
}
} else {
if (ov6xx0_configure(ov) < 0) {
err("Failed to configure OV6xx0");
goto error;
}
}
} else {
if (ov7xx0_configure(ov) < 0) {
err("Failed to configure OV7xx0");
goto error;
}
}
return 0;
error:
err("OV511 Config failed");
return -EBUSY;
}
/* This initializes the OV518/OV518+ and the sensor */
static int
ov518_configure(struct usb_ov511 *ov)
{
/* For 518 and 518+ */
static struct ov511_regvals aRegvalsInit518[] = {
{ OV511_REG_BUS, R51x_SYS_RESET, 0x40 },
{ OV511_REG_BUS, R51x_SYS_INIT, 0xe1 },
{ OV511_REG_BUS, R51x_SYS_RESET, 0x3e },
{ OV511_REG_BUS, R51x_SYS_INIT, 0xe1 },
{ OV511_REG_BUS, R51x_SYS_RESET, 0x00 },
{ OV511_REG_BUS, R51x_SYS_INIT, 0xe1 },
{ OV511_REG_BUS, 0x46, 0x00 },
{ OV511_REG_BUS, 0x5d, 0x03 },
{ OV511_DONE_BUS, 0x0, 0x00},
};
static struct ov511_regvals aRegvalsNorm518[] = {
{ OV511_REG_BUS, R51x_SYS_SNAP, 0x02 }, /* Reset */
{ OV511_REG_BUS, R51x_SYS_SNAP, 0x01 }, /* Enable */
{ OV511_REG_BUS, 0x31, 0x0f },
{ OV511_REG_BUS, 0x5d, 0x03 },
{ OV511_REG_BUS, 0x24, 0x9f },
{ OV511_REG_BUS, 0x25, 0x90 },
{ OV511_REG_BUS, 0x20, 0x00 },
{ OV511_REG_BUS, 0x51, 0x04 },
{ OV511_REG_BUS, 0x71, 0x19 },
{ OV511_DONE_BUS, 0x0, 0x00 },
};
static struct ov511_regvals aRegvalsNorm518Plus[] = {
{ OV511_REG_BUS, R51x_SYS_SNAP, 0x02 }, /* Reset */
{ OV511_REG_BUS, R51x_SYS_SNAP, 0x01 }, /* Enable */
{ OV511_REG_BUS, 0x31, 0x0f },
{ OV511_REG_BUS, 0x5d, 0x03 },
{ OV511_REG_BUS, 0x24, 0x9f },
{ OV511_REG_BUS, 0x25, 0x90 },
{ OV511_REG_BUS, 0x20, 0x60 },
{ OV511_REG_BUS, 0x51, 0x02 },
{ OV511_REG_BUS, 0x71, 0x19 },
{ OV511_REG_BUS, 0x40, 0xff },
{ OV511_REG_BUS, 0x41, 0x42 },
{ OV511_REG_BUS, 0x46, 0x00 },
{ OV511_REG_BUS, 0x33, 0x04 },
{ OV511_REG_BUS, 0x21, 0x19 },
{ OV511_REG_BUS, 0x3f, 0x10 },
{ OV511_DONE_BUS, 0x0, 0x00 },
};
PDEBUG(4, "");
/* First 5 bits of custom ID reg are a revision ID on OV518 */
dev_info(&ov->dev->dev, "Device revision %d\n",
0x1F & reg_r(ov, R511_SYS_CUST_ID));
/* Give it the default description */
ov->desc = symbolic(camlist, 0);
if (write_regvals(ov, aRegvalsInit518))
goto error;
/* Set LED GPIO pin to output mode */
if (reg_w_mask(ov, 0x57, 0x00, 0x02) < 0)
goto error;
/* LED is off by default with OV518; have to explicitly turn it on */
if (ov->led_policy == LED_OFF || ov->led_policy == LED_AUTO)
ov51x_led_control(ov, 0);
else
ov51x_led_control(ov, 1);
/* Compression is required unless dumppix is enabled, since the
* OV518 has no uncompressed mode (a RAM-saving measure). */
if (!dumppix && !ov->compress) {
ov->compress = 1;
dev_warn(&ov->dev->dev,
"Compression required with OV518...enabling\n");
}
if (ov->bridge == BRG_OV518) {
if (write_regvals(ov, aRegvalsNorm518))
goto error;
} else if (ov->bridge == BRG_OV518PLUS) {
if (write_regvals(ov, aRegvalsNorm518Plus))
goto error;
} else {
err("Invalid bridge");
}
if (reg_w(ov, 0x2f, 0x80) < 0)
goto error;
if (ov518_init_compression(ov))
goto error;
if (ov->bridge == BRG_OV518)
{
struct usb_interface *ifp;
struct usb_host_interface *alt;
__u16 mxps = 0;
ifp = usb_ifnum_to_if(ov->dev, 0);
if (ifp) {
alt = usb_altnum_to_altsetting(ifp, 7);
if (alt)
mxps = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
}
/* Some OV518s have packet numbering by default, some don't */
if (mxps == 897)
ov->packet_numbering = 1;
else
ov->packet_numbering = 0;
} else {
/* OV518+ has packet numbering turned on by default */
ov->packet_numbering = 1;
}
ov518_set_packet_size(ov, 0);
ov->snap_enabled = snapshot;
/* Test for 76xx */
ov->primary_i2c_slave = OV7xx0_SID;
if (ov51x_set_slave_ids(ov, OV7xx0_SID) < 0)
goto error;
/* The OV518 must be more aggressive about sensor detection since
* an I2C write will not fail even when the sensor is not present.
* We have to try to initialize the sensor to detect its presence */
if (init_ov_sensor(ov) < 0) {
/* Test for 6xx0 */
ov->primary_i2c_slave = OV6xx0_SID;
if (ov51x_set_slave_ids(ov, OV6xx0_SID) < 0)
goto error;
if (init_ov_sensor(ov) < 0) {
/* Test for 8xx0 */
ov->primary_i2c_slave = OV8xx0_SID;
if (ov51x_set_slave_ids(ov, OV8xx0_SID) < 0)
goto error;
if (init_ov_sensor(ov) < 0) {
err("Can't determine sensor slave IDs");
goto error;
} else {
err("Detected unsupported OV8xx0 sensor");
goto error;
}
} else {
if (ov6xx0_configure(ov) < 0) {
err("Failed to configure OV6xx0");
goto error;
}
}
} else {
if (ov7xx0_configure(ov) < 0) {
err("Failed to configure OV7xx0");
goto error;
}
}
ov->maxwidth = 352;
ov->maxheight = 288;
// The OV518 cannot go as low as the sensor can
ov->minwidth = 160;
ov->minheight = 120;
return 0;
error:
err("OV518 Config failed");
return -EBUSY;
}
/****************************************************************************
* sysfs
***************************************************************************/
static inline struct usb_ov511 *cd_to_ov(struct device *cd)
{
struct video_device *vdev = to_video_device(cd);
return video_get_drvdata(vdev);
}
static ssize_t show_custom_id(struct device *cd,
struct device_attribute *attr, char *buf)
{
struct usb_ov511 *ov = cd_to_ov(cd);
return sprintf(buf, "%d\n", ov->customid);
}
static DEVICE_ATTR(custom_id, S_IRUGO, show_custom_id, NULL);
static ssize_t show_model(struct device *cd,
struct device_attribute *attr, char *buf)
{
struct usb_ov511 *ov = cd_to_ov(cd);
return sprintf(buf, "%s\n", ov->desc);
}
static DEVICE_ATTR(model, S_IRUGO, show_model, NULL);
static ssize_t show_bridge(struct device *cd,
struct device_attribute *attr, char *buf)
{
struct usb_ov511 *ov = cd_to_ov(cd);
return sprintf(buf, "%s\n", symbolic(brglist, ov->bridge));
}
static DEVICE_ATTR(bridge, S_IRUGO, show_bridge, NULL);
static ssize_t show_sensor(struct device *cd,
struct device_attribute *attr, char *buf)
{
struct usb_ov511 *ov = cd_to_ov(cd);
return sprintf(buf, "%s\n", symbolic(senlist, ov->sensor));
}
static DEVICE_ATTR(sensor, S_IRUGO, show_sensor, NULL);
static ssize_t show_brightness(struct device *cd,
struct device_attribute *attr, char *buf)
{
struct usb_ov511 *ov = cd_to_ov(cd);
unsigned short x;
if (!ov->dev)
return -ENODEV;
sensor_get_brightness(ov, &x);
return sprintf(buf, "%d\n", x >> 8);
}
static DEVICE_ATTR(brightness, S_IRUGO, show_brightness, NULL);
static ssize_t show_saturation(struct device *cd,
struct device_attribute *attr, char *buf)
{
struct usb_ov511 *ov = cd_to_ov(cd);
unsigned short x;
if (!ov->dev)
return -ENODEV;
sensor_get_saturation(ov, &x);
return sprintf(buf, "%d\n", x >> 8);
}
static DEVICE_ATTR(saturation, S_IRUGO, show_saturation, NULL);
static ssize_t show_contrast(struct device *cd,
struct device_attribute *attr, char *buf)
{
struct usb_ov511 *ov = cd_to_ov(cd);
unsigned short x;
if (!ov->dev)
return -ENODEV;
sensor_get_contrast(ov, &x);
return sprintf(buf, "%d\n", x >> 8);
}
static DEVICE_ATTR(contrast, S_IRUGO, show_contrast, NULL);
static ssize_t show_hue(struct device *cd,
struct device_attribute *attr, char *buf)
{
struct usb_ov511 *ov = cd_to_ov(cd);
unsigned short x;
if (!ov->dev)
return -ENODEV;
sensor_get_hue(ov, &x);
return sprintf(buf, "%d\n", x >> 8);
}
static DEVICE_ATTR(hue, S_IRUGO, show_hue, NULL);
static ssize_t show_exposure(struct device *cd,
struct device_attribute *attr, char *buf)
{
struct usb_ov511 *ov = cd_to_ov(cd);
unsigned char exp = 0;
if (!ov->dev)
return -ENODEV;
sensor_get_exposure(ov, &exp);
return sprintf(buf, "%d\n", exp);
}
static DEVICE_ATTR(exposure, S_IRUGO, show_exposure, NULL);
static int ov_create_sysfs(struct video_device *vdev)
{
int rc;
rc = device_create_file(&vdev->dev, &dev_attr_custom_id);
if (rc) goto err;
rc = device_create_file(&vdev->dev, &dev_attr_model);
if (rc) goto err_id;
rc = device_create_file(&vdev->dev, &dev_attr_bridge);
if (rc) goto err_model;
rc = device_create_file(&vdev->dev, &dev_attr_sensor);
if (rc) goto err_bridge;
rc = device_create_file(&vdev->dev, &dev_attr_brightness);
if (rc) goto err_sensor;
rc = device_create_file(&vdev->dev, &dev_attr_saturation);
if (rc) goto err_bright;
rc = device_create_file(&vdev->dev, &dev_attr_contrast);
if (rc) goto err_sat;
rc = device_create_file(&vdev->dev, &dev_attr_hue);
if (rc) goto err_contrast;
rc = device_create_file(&vdev->dev, &dev_attr_exposure);
if (rc) goto err_hue;
return 0;
err_hue:
device_remove_file(&vdev->dev, &dev_attr_hue);
err_contrast:
device_remove_file(&vdev->dev, &dev_attr_contrast);
err_sat:
device_remove_file(&vdev->dev, &dev_attr_saturation);
err_bright:
device_remove_file(&vdev->dev, &dev_attr_brightness);
err_sensor:
device_remove_file(&vdev->dev, &dev_attr_sensor);
err_bridge:
device_remove_file(&vdev->dev, &dev_attr_bridge);
err_model:
device_remove_file(&vdev->dev, &dev_attr_model);
err_id:
device_remove_file(&vdev->dev, &dev_attr_custom_id);
err:
return rc;
}
/****************************************************************************
* USB routines
***************************************************************************/
static int
ov51x_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_device *dev = interface_to_usbdev(intf);
struct usb_interface_descriptor *idesc;
struct usb_ov511 *ov;
int i, rc, nr;
PDEBUG(1, "probing for device...");
/* We don't handle multi-config cameras */
if (dev->descriptor.bNumConfigurations != 1)
return -ENODEV;
idesc = &intf->cur_altsetting->desc;
if (idesc->bInterfaceClass != 0xFF)
return -ENODEV;
if (idesc->bInterfaceSubClass != 0x00)
return -ENODEV;
if ((ov = kzalloc(sizeof(*ov), GFP_KERNEL)) == NULL) {
err("couldn't kmalloc ov struct");
goto error_out;
}
ov->dev = dev;
ov->iface = idesc->bInterfaceNumber;
ov->led_policy = led;
ov->compress = compress;
ov->lightfreq = lightfreq;
ov->num_inputs = 1; /* Video decoder init functs. change this */
ov->stop_during_set = !fastset;
ov->backlight = backlight;
ov->mirror = mirror;
ov->auto_brt = autobright;
ov->auto_gain = autogain;
ov->auto_exp = autoexp;
switch (le16_to_cpu(dev->descriptor.idProduct)) {
case PROD_OV511:
ov->bridge = BRG_OV511;
ov->bclass = BCL_OV511;
break;
case PROD_OV511PLUS:
ov->bridge = BRG_OV511PLUS;
ov->bclass = BCL_OV511;
break;
case PROD_OV518:
ov->bridge = BRG_OV518;
ov->bclass = BCL_OV518;
break;
case PROD_OV518PLUS:
ov->bridge = BRG_OV518PLUS;
ov->bclass = BCL_OV518;
break;
case PROD_ME2CAM:
if (le16_to_cpu(dev->descriptor.idVendor) != VEND_MATTEL)
goto error;
ov->bridge = BRG_OV511PLUS;
ov->bclass = BCL_OV511;
break;
default:
err("Unknown product ID 0x%04x", le16_to_cpu(dev->descriptor.idProduct));
goto error;
}
dev_info(&intf->dev, "USB %s video device found\n",
symbolic(brglist, ov->bridge));
init_waitqueue_head(&ov->wq);
mutex_init(&ov->lock); /* to 1 == available */
mutex_init(&ov->buf_lock);
mutex_init(&ov->i2c_lock);
mutex_init(&ov->cbuf_lock);
ov->buf_state = BUF_NOT_ALLOCATED;
if (usb_make_path(dev, ov->usb_path, OV511_USB_PATH_LEN) < 0) {
err("usb_make_path error");
goto error;
}
/* Allocate control transfer buffer. */
/* Must be kmalloc()'ed, for DMA compatibility */
ov->cbuf = kmalloc(OV511_CBUF_SIZE, GFP_KERNEL);
if (!ov->cbuf)
goto error;
if (ov->bclass == BCL_OV518) {
if (ov518_configure(ov) < 0)
goto error;
} else {
if (ov511_configure(ov) < 0)
goto error;
}
for (i = 0; i < OV511_NUMFRAMES; i++) {
ov->frame[i].framenum = i;
init_waitqueue_head(&ov->frame[i].wq);
}
for (i = 0; i < OV511_NUMSBUF; i++) {
ov->sbuf[i].ov = ov;
spin_lock_init(&ov->sbuf[i].lock);
ov->sbuf[i].n = i;
}
/* Unnecessary? (This is done on open(). Need to make sure variables
* are properly initialized without this before removing it, though). */
if (ov51x_set_default_params(ov) < 0)
goto error;
#ifdef OV511_DEBUG
if (dump_bridge) {
if (ov->bclass == BCL_OV511)
ov511_dump_regs(ov);
else
ov518_dump_regs(ov);
}
#endif
ov->vdev = video_device_alloc();
if (!ov->vdev)
goto error;
memcpy(ov->vdev, &vdev_template, sizeof(*ov->vdev));
ov->vdev->parent = &intf->dev;
video_set_drvdata(ov->vdev, ov);
mutex_lock(&ov->lock);
/* Find the next free device number (marked as used below) */
nr = find_first_zero_bit(&ov511_devused, OV511_MAX_UNIT_VIDEO);
/* Register the device */
if (unit_video[nr] != 0)
rc = video_register_device(ov->vdev, VFL_TYPE_GRABBER,
unit_video[nr]);
else
rc = video_register_device(ov->vdev, VFL_TYPE_GRABBER, -1);
if (rc < 0) {
err("video_register_device failed");
mutex_unlock(&ov->lock);
goto error;
}
/* Mark device as used */
ov511_devused |= 1 << nr;
ov->nr = nr;
dev_info(&intf->dev, "Device at %s registered to %s\n",
ov->usb_path, video_device_node_name(ov->vdev));
usb_set_intfdata(intf, ov);
if (ov_create_sysfs(ov->vdev)) {
err("ov_create_sysfs failed");
ov511_devused &= ~(1 << nr);
mutex_unlock(&ov->lock);
goto error;
}
mutex_unlock(&ov->lock);
return 0;
error:
if (ov->vdev) {
if (!video_is_registered(ov->vdev))
video_device_release(ov->vdev);
else
video_unregister_device(ov->vdev);
ov->vdev = NULL;
}
if (ov->cbuf) {
mutex_lock(&ov->cbuf_lock);
kfree(ov->cbuf);
ov->cbuf = NULL;
mutex_unlock(&ov->cbuf_lock);
}
kfree(ov);
ov = NULL;
error_out:
err("Camera initialization failed");
return -EIO;
}
static void
ov51x_disconnect(struct usb_interface *intf)
{
struct usb_ov511 *ov = usb_get_intfdata(intf);
int n;
PDEBUG(3, "");
mutex_lock(&ov->lock);
usb_set_intfdata(intf, NULL);
/* Free device number */
ov511_devused &= ~(1 << ov->nr);
if (ov->vdev)
video_unregister_device(ov->vdev);
for (n = 0; n < OV511_NUMFRAMES; n++)
ov->frame[n].grabstate = FRAME_ERROR;
ov->curframe = -1;
/* This will cause the process to request another frame */
for (n = 0; n < OV511_NUMFRAMES; n++)
wake_up_interruptible(&ov->frame[n].wq);
wake_up_interruptible(&ov->wq);
ov->streaming = 0;
ov51x_unlink_isoc(ov);
mutex_unlock(&ov->lock);
ov->dev = NULL;
/* Free the memory */
if (!ov->user) {
mutex_lock(&ov->cbuf_lock);
kfree(ov->cbuf);
ov->cbuf = NULL;
mutex_unlock(&ov->cbuf_lock);
ov51x_dealloc(ov);
kfree(ov);
ov = NULL;
}
PDEBUG(3, "Disconnect complete");
}
static struct usb_driver ov511_driver = {
.name = "ov511",
.id_table = device_table,
.probe = ov51x_probe,
.disconnect = ov51x_disconnect
};
/****************************************************************************
*
* Module routines
*
***************************************************************************/
static int __init
usb_ov511_init(void)
{
int retval;
retval = usb_register(&ov511_driver);
if (retval)
goto out;
printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
DRIVER_DESC "\n");
out:
return retval;
}
static void __exit
usb_ov511_exit(void)
{
usb_deregister(&ov511_driver);
printk(KERN_INFO KBUILD_MODNAME ": driver deregistered\n");
}
module_init(usb_ov511_init);
module_exit(usb_ov511_exit);
|
gpl-2.0
|
bensonhsu2013/diff_variant_i8160
|
drivers/net/wireless/iwlwifi/iwl-4965.c
|
764
|
67014
|
/******************************************************************************
*
* Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/wireless.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <asm/unaligned.h>
#include "iwl-eeprom.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
#include "iwl-helpers.h"
#include "iwl-calib.h"
#include "iwl-sta.h"
#include "iwl-agn-led.h"
#include "iwl-agn.h"
#include "iwl-agn-debugfs.h"
static int iwl4965_send_tx_power(struct iwl_priv *priv);
static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
/* Highest firmware API version supported */
#define IWL4965_UCODE_API_MAX 2
/* Lowest firmware API version supported */
#define IWL4965_UCODE_API_MIN 2
#define IWL4965_FW_PRE "iwlwifi-4965-"
#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode"
#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api)
/* check contents of special bootstrap uCode SRAM */
static int iwl4965_verify_bsm(struct iwl_priv *priv)
{
__le32 *image = priv->ucode_boot.v_addr;
u32 len = priv->ucode_boot.len;
u32 reg;
u32 val;
IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
/* verify BSM SRAM contents */
val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG);
for (reg = BSM_SRAM_LOWER_BOUND;
reg < BSM_SRAM_LOWER_BOUND + len;
reg += sizeof(u32), image++) {
val = iwl_read_prph(priv, reg);
if (val != le32_to_cpu(*image)) {
IWL_ERR(priv, "BSM uCode verification failed at "
"addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
BSM_SRAM_LOWER_BOUND,
reg - BSM_SRAM_LOWER_BOUND, len,
val, le32_to_cpu(*image));
return -EIO;
}
}
IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");
return 0;
}
/**
* iwl4965_load_bsm - Load bootstrap instructions
*
* BSM operation:
*
* The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
* in special SRAM that does not power down during RFKILL. When powering back
* up after power-saving sleeps (or during initial uCode load), the BSM loads
* the bootstrap program into the on-board processor, and starts it.
*
* The bootstrap program loads (via DMA) instructions and data for a new
* program from host DRAM locations indicated by the host driver in the
* BSM_DRAM_* registers. Once the new program is loaded, it starts
* automatically.
*
* When initializing the NIC, the host driver points the BSM to the
* "initialize" uCode image. This uCode sets up some internal data, then
* notifies host via "initialize alive" that it is complete.
*
* The host then replaces the BSM_DRAM_* pointer values to point to the
* normal runtime uCode instructions and a backup uCode data cache buffer
* (filled initially with starting data values for the on-board processor),
* then triggers the "initialize" uCode to load and launch the runtime uCode,
* which begins normal operation.
*
* When doing a power-save shutdown, runtime uCode saves data SRAM into
* the backup data cache in DRAM before SRAM is powered down.
*
* When powering back up, the BSM loads the bootstrap program. This reloads
* the runtime uCode instructions and the backup data cache into SRAM,
* and re-launches the runtime uCode from where it left off.
*/
static int iwl4965_load_bsm(struct iwl_priv *priv)
{
__le32 *image = priv->ucode_boot.v_addr;
u32 len = priv->ucode_boot.len;
dma_addr_t pinst;
dma_addr_t pdata;
u32 inst_len;
u32 data_len;
int i;
u32 done;
u32 reg_offset;
int ret;
IWL_DEBUG_INFO(priv, "Begin load bsm\n");
priv->ucode_type = UCODE_RT;
/* make sure bootstrap program is no larger than BSM's SRAM size */
if (len > IWL49_MAX_BSM_SIZE)
return -EINVAL;
/* Tell bootstrap uCode where to find the "Initialize" uCode
* in host DRAM ... host DRAM physical address bits 35:4 for 4965.
* NOTE: iwl_init_alive_start() will replace these values,
* after the "initialize" uCode has run, to point to
* runtime/protocol instructions and backup data cache.
*/
pinst = priv->ucode_init.p_addr >> 4;
pdata = priv->ucode_init_data.p_addr >> 4;
inst_len = priv->ucode_init.len;
data_len = priv->ucode_init_data.len;
iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
/* Fill BSM memory with bootstrap instructions */
for (reg_offset = BSM_SRAM_LOWER_BOUND;
reg_offset < BSM_SRAM_LOWER_BOUND + len;
reg_offset += sizeof(u32), image++)
_iwl_write_prph(priv, reg_offset, le32_to_cpu(*image));
ret = iwl4965_verify_bsm(priv);
if (ret)
return ret;
/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
iwl_write_prph(priv, BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND);
iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
/* Load bootstrap code into instruction SRAM now,
* to prepare to load "initialize" uCode */
iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
/* Wait for load of bootstrap uCode to finish */
for (i = 0; i < 100; i++) {
done = iwl_read_prph(priv, BSM_WR_CTRL_REG);
if (!(done & BSM_WR_CTRL_REG_BIT_START))
break;
udelay(10);
}
if (i < 100)
IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
else {
IWL_ERR(priv, "BSM write did not complete!\n");
return -EIO;
}
/* Enable future boot loads whenever power management unit triggers it
* (e.g. when powering back up after power-save shutdown) */
iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
return 0;
}
/**
* iwl4965_set_ucode_ptrs - Set uCode address location
*
* Tell initialization uCode where to find runtime uCode.
*
* BSM registers initially contain pointers to initialization uCode.
* We need to replace them to load runtime uCode inst and data,
* and to save runtime data when powering down.
*/
static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
{
dma_addr_t pinst;
dma_addr_t pdata;
int ret = 0;
/* bits 35:4 for 4965 */
pinst = priv->ucode_code.p_addr >> 4;
pdata = priv->ucode_data_backup.p_addr >> 4;
/* Tell bootstrap uCode where to find image to load */
iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
priv->ucode_data.len);
/* Inst byte count must be last to set up, bit 31 signals uCode
* that all new ptr/size info is in place */
iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
priv->ucode_code.len | BSM_DRAM_INST_LOAD);
IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
return ret;
}
/**
* iwl4965_init_alive_start - Called after REPLY_ALIVE notification received
*
* Called after REPLY_ALIVE notification received from "initialize" uCode.
*
* The 4965 "initialize" ALIVE reply contains calibration data for:
* Voltage, temperature, and MIMO tx gain correction, now stored in priv
* (3945 does not contain this data).
*
* Tell "initialize" uCode to go ahead and load the runtime uCode.
*/
static void iwl4965_init_alive_start(struct iwl_priv *priv)
{
/* Check alive response for "valid" sign from uCode */
if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
/* We had an error bringing up the hardware, so take it
* all the way back down so we can try again */
IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
goto restart;
}
/* Bootstrap uCode has loaded initialize uCode ... verify inst image.
* This is a paranoid check, because we would not have gotten the
* "initialize" alive if code weren't properly loaded. */
if (iwl_verify_ucode(priv)) {
/* Runtime instruction load was bad;
* take it all the way back down so we can try again */
IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
goto restart;
}
/* Calculate temperature */
priv->temperature = iwl4965_hw_get_temperature(priv);
/* Send pointers to protocol/runtime uCode image ... init code will
* load and launch runtime uCode, which will send us another "Alive"
* notification. */
IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
if (iwl4965_set_ucode_ptrs(priv)) {
/* Runtime instruction load won't happen;
* take it all the way back down so we can try again */
IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
goto restart;
}
return;
restart:
queue_work(priv->workqueue, &priv->restart);
}
static bool is_ht40_channel(__le32 rxon_flags)
{
int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK)
>> RXON_FLG_CHANNEL_MODE_POS;
return ((chan_mod == CHANNEL_MODE_PURE_40) ||
(chan_mod == CHANNEL_MODE_MIXED));
}
/*
* EEPROM handlers
*/
static u16 iwl4965_eeprom_calib_version(struct iwl_priv *priv)
{
return iwl_eeprom_query16(priv, EEPROM_4965_CALIB_VERSION_OFFSET);
}
/*
* Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
* must be called under priv->lock and mac access
*/
static void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
{
iwl_write_prph(priv, IWL49_SCD_TXFACT, mask);
}
static void iwl4965_nic_config(struct iwl_priv *priv)
{
unsigned long flags;
u16 radio_cfg;
spin_lock_irqsave(&priv->lock, flags);
radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
/* write radio config values to register */
if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
EEPROM_RF_CFG_DASH_MSK(radio_cfg));
/* set CSR_HW_IF_CONFIG_REG for uCode use */
iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
priv->calib_info = (struct iwl_eeprom_calib_info *)
iwl_eeprom_query_addr(priv, EEPROM_4965_CALIB_TXPOWER_OFFSET);
spin_unlock_irqrestore(&priv->lock, flags);
}
/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
* Called after every association, but this runs only once!
* ... once chain noise is calibrated the first time, it's good forever. */
static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
{
struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
struct iwl_calib_diff_gain_cmd cmd;
memset(&cmd, 0, sizeof(cmd));
cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
cmd.diff_gain_a = 0;
cmd.diff_gain_b = 0;
cmd.diff_gain_c = 0;
if (iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
sizeof(cmd), &cmd))
IWL_ERR(priv,
"Could not send REPLY_PHY_CALIBRATION_CMD\n");
data->state = IWL_CHAIN_NOISE_ACCUMULATE;
IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
}
}
static void iwl4965_gain_computation(struct iwl_priv *priv,
u32 *average_noise,
u16 min_average_noise_antenna_i,
u32 min_average_noise,
u8 default_chain)
{
int i, ret;
struct iwl_chain_noise_data *data = &priv->chain_noise_data;
data->delta_gain_code[min_average_noise_antenna_i] = 0;
for (i = default_chain; i < NUM_RX_CHAINS; i++) {
s32 delta_g = 0;
if (!(data->disconn_array[i]) &&
(data->delta_gain_code[i] ==
CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
delta_g = average_noise[i] - min_average_noise;
data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
data->delta_gain_code[i] =
min(data->delta_gain_code[i],
(u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
data->delta_gain_code[i] =
(data->delta_gain_code[i] | (1 << 2));
} else {
data->delta_gain_code[i] = 0;
}
}
IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n",
data->delta_gain_code[0],
data->delta_gain_code[1],
data->delta_gain_code[2]);
/* Differential gain gets sent to uCode only once */
if (!data->radio_write) {
struct iwl_calib_diff_gain_cmd cmd;
data->radio_write = 1;
memset(&cmd, 0, sizeof(cmd));
cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
cmd.diff_gain_a = data->delta_gain_code[0];
cmd.diff_gain_b = data->delta_gain_code[1];
cmd.diff_gain_c = data->delta_gain_code[2];
ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
sizeof(cmd), &cmd);
if (ret)
IWL_DEBUG_CALIB(priv, "fail sending cmd "
"REPLY_PHY_CALIBRATION_CMD\n");
/* TODO we might want recalculate
* rx_chain in rxon cmd */
/* Mark so we run this algo only once! */
data->state = IWL_CHAIN_NOISE_CALIBRATED;
}
data->chain_noise_a = 0;
data->chain_noise_b = 0;
data->chain_noise_c = 0;
data->chain_signal_a = 0;
data->chain_signal_b = 0;
data->chain_signal_c = 0;
data->beacon_count = 0;
}
static void iwl4965_bg_txpower_work(struct work_struct *work)
{
struct iwl_priv *priv = container_of(work, struct iwl_priv,
txpower_work);
/* If a scan happened to start before we got here
* then just return; the statistics notification will
* kick off another scheduled work to compensate for
* any temperature delta we missed here. */
if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
test_bit(STATUS_SCANNING, &priv->status))
return;
mutex_lock(&priv->mutex);
/* Regardless of whether we are associated, we must reconfigure the
* TX power since frames can be sent on non-radar channels while
* not associated */
iwl4965_send_tx_power(priv);
/* Update last_temperature to keep is_calib_needed from running
* when it isn't needed... */
priv->last_temperature = priv->temperature;
mutex_unlock(&priv->mutex);
}
/*
* Acquire priv->lock before calling this function!
*/
static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
{
iwl_write_direct32(priv, HBUS_TARG_WRPTR,
(index & 0xff) | (txq_id << 8));
iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
}
/**
* iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
* @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
* @scd_retry: (1) Indicates queue will be used in aggregation mode
*
* NOTE: Acquire priv->lock before calling this function !
*/
static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
struct iwl_tx_queue *txq,
int tx_fifo_id, int scd_retry)
{
int txq_id = txq->q.id;
/* Find out whether to activate Tx queue */
int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
/* Set up and activate */
iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
(active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
(tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
(scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
(scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
IWL49_SCD_QUEUE_STTS_REG_MSK);
txq->sched_retry = scd_retry;
IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
active ? "Activate" : "Deactivate",
scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
}
static const s8 default_queue_to_tx_fifo[] = {
IWL_TX_FIFO_VO,
IWL_TX_FIFO_VI,
IWL_TX_FIFO_BE,
IWL_TX_FIFO_BK,
IWL49_CMD_FIFO_NUM,
IWL_TX_FIFO_UNUSED,
IWL_TX_FIFO_UNUSED,
};
static int iwl4965_alive_notify(struct iwl_priv *priv)
{
u32 a;
unsigned long flags;
int i, chan;
u32 reg_val;
spin_lock_irqsave(&priv->lock, flags);
/* Clear 4965's internal Tx Scheduler data base */
priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR);
a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
iwl_write_targ_mem(priv, a, 0);
for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
iwl_write_targ_mem(priv, a, 0);
for (; a < priv->scd_base_addr +
IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
iwl_write_targ_mem(priv, a, 0);
/* Tell 4965 where to find Tx byte count tables */
iwl_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
priv->scd_bc_tbls.dma >> 10);
/* Enable DMA channel */
for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
/* Update FH chicken bits */
reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
/* Disable chain mode for all queues */
iwl_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
/* Initialize each Tx queue (including the command queue) */
for (i = 0; i < priv->hw_params.max_txq_num; i++) {
/* TFD circular buffer read/write indexes */
iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
/* Max Tx Window size for Scheduler-ACK mode */
iwl_write_targ_mem(priv, priv->scd_base_addr +
IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
(SCD_WIN_SIZE <<
IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
/* Frame limit */
iwl_write_targ_mem(priv, priv->scd_base_addr +
IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
sizeof(u32),
(SCD_FRAME_LIMIT <<
IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
}
iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
(1 << priv->hw_params.max_txq_num) - 1);
/* Activate all Tx DMA/FIFO channels */
priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 6));
iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
/* make sure all queues are not stopped */
memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
for (i = 0; i < 4; i++)
atomic_set(&priv->queue_stop_count[i], 0);
/* reset to 0 first to enable all queues */
priv->txq_ctx_active_msk = 0;
/* Map each Tx/cmd queue to its corresponding fifo */
BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
int ac = default_queue_to_tx_fifo[i];
iwl_txq_ctx_activate(priv, i);
if (ac == IWL_TX_FIFO_UNUSED)
continue;
iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
}
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
.min_nrg_cck = 97,
.max_nrg_cck = 0, /* not used, set to 0 */
.auto_corr_min_ofdm = 85,
.auto_corr_min_ofdm_mrc = 170,
.auto_corr_min_ofdm_x1 = 105,
.auto_corr_min_ofdm_mrc_x1 = 220,
.auto_corr_max_ofdm = 120,
.auto_corr_max_ofdm_mrc = 210,
.auto_corr_max_ofdm_x1 = 140,
.auto_corr_max_ofdm_mrc_x1 = 270,
.auto_corr_min_cck = 125,
.auto_corr_max_cck = 200,
.auto_corr_min_cck_mrc = 200,
.auto_corr_max_cck_mrc = 400,
.nrg_th_cck = 100,
.nrg_th_ofdm = 100,
.barker_corr_th_min = 190,
.barker_corr_th_min_mrc = 390,
.nrg_th_cca = 62,
};
static void iwl4965_set_ct_threshold(struct iwl_priv *priv)
{
/* want Kelvin */
priv->hw_params.ct_kill_threshold =
CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
}
/**
* iwl4965_hw_set_hw_params
*
* Called when initializing driver
*/
static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
{
if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
priv->cfg->mod_params->num_of_queues <= IWL49_NUM_QUEUES)
priv->cfg->num_of_queues =
priv->cfg->mod_params->num_of_queues;
priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
priv->hw_params.scd_bc_tbls_size =
priv->cfg->num_of_queues *
sizeof(struct iwl4965_scd_bc_tbl);
priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
priv->hw_params.max_stations = IWL4965_STATION_COUNT;
priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID;
priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
priv->hw_params.sens = &iwl4965_sensitivity;
return 0;
}
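/* Round num/denom to the nearest integer, half away from zero:
* e.g. (7, 2) -> (7*2 + 2) / (2*2) = 4, and (-7, 2) -> -4. */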
static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
{
s32 sign = 1;
if (num < 0) {
sign = -sign;
num = -num;
}
if (denom < 0) {
sign = -sign;
denom = -denom;
}
*res = ((num * 2 + denom) / (denom * 2)) * sign;
return 1;
}
/**
* iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
*
* Determines power supply voltage compensation for txpower calculations.
* Returns number of 1/2-dB steps to subtract from gain table index,
* to compensate for difference between power supply voltage during
* factory measurements, vs. current power supply voltage.
*
* Voltage indication is higher for lower voltage.
* Lower voltage requires more gain (lower gain table index).
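*
* Example (assuming TX_POWER_IWL_VOLTAGE_CODES_PER_03V == 7, as in the
* 4965 headers): eeprom 300, current 310 -> round(10/7) = 1, doubled to
* 2 since the current indication is higher, within [-2, 2] -> 2 steps.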
*/
static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
s32 current_voltage)
{
s32 comp = 0;
if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
(TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
return 0;
iwl4965_math_div_round(current_voltage - eeprom_voltage,
TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);
if (current_voltage > eeprom_voltage)
comp *= 2;
if ((comp < -2) || (comp > 2))
comp = 0;
return comp;
}
static s32 iwl4965_get_tx_atten_grp(u16 channel)
{
if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
return CALIB_CH_GROUP_5;
if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
return CALIB_CH_GROUP_1;
if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
return CALIB_CH_GROUP_2;
if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
return CALIB_CH_GROUP_3;
if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
return CALIB_CH_GROUP_4;
return -1;
}
static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
{
s32 b = -1;
for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
if (priv->calib_info->band_info[b].ch_from == 0)
continue;
if ((channel >= priv->calib_info->band_info[b].ch_from)
&& (channel <= priv->calib_info->band_info[b].ch_to))
break;
}
return b;
}
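/* Linear interpolation between (x1, y1) and (x2, y2) evaluated at x,
* with rounding: e.g. x = 5 between (1, 10) and (9, 2) gives
* round((9 - 5) * (10 - 2) / (9 - 1)) + 2 = 6. */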
static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
{
s32 val;
if (x2 == x1)
return y1;
else {
iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
return val + y2;
}
}
/**
* iwl4965_interpolate_chan - Interpolate factory measurements for one channel
*
* Interpolates factory measurements from the two sample channels within a
* sub-band, to apply to channel of interest. Interpolation is proportional to
* differences in channel frequencies, which is proportional to differences
* in channel number.
*/
static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
struct iwl_eeprom_calib_ch_info *chan_info)
{
s32 s = -1;
u32 c;
u32 m;
const struct iwl_eeprom_calib_measure *m1;
const struct iwl_eeprom_calib_measure *m2;
struct iwl_eeprom_calib_measure *omeas;
u32 ch_i1;
u32 ch_i2;
s = iwl4965_get_sub_band(priv, channel);
if (s >= EEPROM_TX_POWER_BANDS) {
IWL_ERR(priv, "Tx Power can not find channel %d\n", channel);
return -1;
}
ch_i1 = priv->calib_info->band_info[s].ch1.ch_num;
ch_i2 = priv->calib_info->band_info[s].ch2.ch_num;
chan_info->ch_num = (u8) channel;
IWL_DEBUG_TXPOWER(priv, "channel %d subband %d factory cal ch %d & %d\n",
channel, s, ch_i1, ch_i2);
for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
m1 = &(priv->calib_info->band_info[s].ch1.
measurements[c][m]);
m2 = &(priv->calib_info->band_info[s].ch2.
measurements[c][m]);
omeas = &(chan_info->measurements[c][m]);
omeas->actual_pow =
(u8) iwl4965_interpolate_value(channel, ch_i1,
m1->actual_pow,
ch_i2,
m2->actual_pow);
omeas->gain_idx =
(u8) iwl4965_interpolate_value(channel, ch_i1,
m1->gain_idx, ch_i2,
m2->gain_idx);
omeas->temperature =
(u8) iwl4965_interpolate_value(channel, ch_i1,
m1->temperature,
ch_i2,
m2->temperature);
omeas->pa_det =
(s8) iwl4965_interpolate_value(channel, ch_i1,
m1->pa_det, ch_i2,
m2->pa_det);
IWL_DEBUG_TXPOWER(priv,
"chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
m1->actual_pow, m2->actual_pow, omeas->actual_pow);
IWL_DEBUG_TXPOWER(priv,
"chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
m1->gain_idx, m2->gain_idx, omeas->gain_idx);
IWL_DEBUG_TXPOWER(priv,
"chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
m1->pa_det, m2->pa_det, omeas->pa_det);
IWL_DEBUG_TXPOWER(priv,
"chain %d meas %d T1=%d T2=%d T=%d\n", c, m,
m1->temperature, m2->temperature,
omeas->temperature);
}
}
return 0;
}
/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
* for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
static s32 back_off_table[] = {
10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
10 /* CCK */
};
/* Thermal compensation values for txpower for various frequency ranges ...
* ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
static struct iwl4965_txpower_comp_entry {
s32 degrees_per_05db_a;
s32 degrees_per_05db_a_denom;
} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
{9, 2}, /* group 0 5.2, ch 34-43 */
{4, 1}, /* group 1 5.2, ch 44-70 */
{4, 1}, /* group 2 5.2, ch 71-124 */
{4, 1}, /* group 3 5.2, ch 125-200 */
{3, 1} /* group 4 2.4, ch all */
};
static s32 get_min_power_index(s32 rate_power_index, u32 band)
{
if (!band) {
if ((rate_power_index & 7) <= 4)
return MIN_TX_GAIN_INDEX_52GHZ_EXT;
}
return MIN_TX_GAIN_INDEX;
}
struct gain_entry {
u8 dsp;
u8 radio;
};
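/* Power gain tables, one entry per half-dB txpower step, highest power
* first (see the half-dB indexing note in iwl4965_fill_txpower_tbl()
* below): dsp is the DSP pre-distortion attenuation, radio the radio
* gain code. */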
static const struct gain_entry gain_table[2][108] = {
/* 5.2GHz power gain index table */
{
{123, 0x3F}, /* highest txpower */
{117, 0x3F},
{110, 0x3F},
{104, 0x3F},
{98, 0x3F},
{110, 0x3E},
{104, 0x3E},
{98, 0x3E},
{110, 0x3D},
{104, 0x3D},
{98, 0x3D},
{110, 0x3C},
{104, 0x3C},
{98, 0x3C},
{110, 0x3B},
{104, 0x3B},
{98, 0x3B},
{110, 0x3A},
{104, 0x3A},
{98, 0x3A},
{110, 0x39},
{104, 0x39},
{98, 0x39},
{110, 0x38},
{104, 0x38},
{98, 0x38},
{110, 0x37},
{104, 0x37},
{98, 0x37},
{110, 0x36},
{104, 0x36},
{98, 0x36},
{110, 0x35},
{104, 0x35},
{98, 0x35},
{110, 0x34},
{104, 0x34},
{98, 0x34},
{110, 0x33},
{104, 0x33},
{98, 0x33},
{110, 0x32},
{104, 0x32},
{98, 0x32},
{110, 0x31},
{104, 0x31},
{98, 0x31},
{110, 0x30},
{104, 0x30},
{98, 0x30},
{110, 0x25},
{104, 0x25},
{98, 0x25},
{110, 0x24},
{104, 0x24},
{98, 0x24},
{110, 0x23},
{104, 0x23},
{98, 0x23},
{110, 0x22},
{104, 0x18},
{98, 0x18},
{110, 0x17},
{104, 0x17},
{98, 0x17},
{110, 0x16},
{104, 0x16},
{98, 0x16},
{110, 0x15},
{104, 0x15},
{98, 0x15},
{110, 0x14},
{104, 0x14},
{98, 0x14},
{110, 0x13},
{104, 0x13},
{98, 0x13},
{110, 0x12},
{104, 0x08},
{98, 0x08},
{110, 0x07},
{104, 0x07},
{98, 0x07},
{110, 0x06},
{104, 0x06},
{98, 0x06},
{110, 0x05},
{104, 0x05},
{98, 0x05},
{110, 0x04},
{104, 0x04},
{98, 0x04},
{110, 0x03},
{104, 0x03},
{98, 0x03},
{110, 0x02},
{104, 0x02},
{98, 0x02},
{110, 0x01},
{104, 0x01},
{98, 0x01},
{110, 0x00},
{104, 0x00},
{98, 0x00},
{93, 0x00},
{88, 0x00},
{83, 0x00},
{78, 0x00},
},
/* 2.4GHz power gain index table */
{
{110, 0x3f}, /* highest txpower */
{104, 0x3f},
{98, 0x3f},
{110, 0x3e},
{104, 0x3e},
{98, 0x3e},
{110, 0x3d},
{104, 0x3d},
{98, 0x3d},
{110, 0x3c},
{104, 0x3c},
{98, 0x3c},
{110, 0x3b},
{104, 0x3b},
{98, 0x3b},
{110, 0x3a},
{104, 0x3a},
{98, 0x3a},
{110, 0x39},
{104, 0x39},
{98, 0x39},
{110, 0x38},
{104, 0x38},
{98, 0x38},
{110, 0x37},
{104, 0x37},
{98, 0x37},
{110, 0x36},
{104, 0x36},
{98, 0x36},
{110, 0x35},
{104, 0x35},
{98, 0x35},
{110, 0x34},
{104, 0x34},
{98, 0x34},
{110, 0x33},
{104, 0x33},
{98, 0x33},
{110, 0x32},
{104, 0x32},
{98, 0x32},
{110, 0x31},
{104, 0x31},
{98, 0x31},
{110, 0x30},
{104, 0x30},
{98, 0x30},
{110, 0x6},
{104, 0x6},
{98, 0x6},
{110, 0x5},
{104, 0x5},
{98, 0x5},
{110, 0x4},
{104, 0x4},
{98, 0x4},
{110, 0x3},
{104, 0x3},
{98, 0x3},
{110, 0x2},
{104, 0x2},
{98, 0x2},
{110, 0x1},
{104, 0x1},
{98, 0x1},
{110, 0x0},
{104, 0x0},
{98, 0x0},
{97, 0},
{96, 0},
{95, 0},
{94, 0},
{93, 0},
{92, 0},
{91, 0},
{90, 0},
{89, 0},
{88, 0},
{87, 0},
{86, 0},
{85, 0},
{84, 0},
{83, 0},
{82, 0},
{81, 0},
{80, 0},
{79, 0},
{78, 0},
{77, 0},
{76, 0},
{75, 0},
{74, 0},
{73, 0},
{72, 0},
{71, 0},
{70, 0},
{69, 0},
{68, 0},
{67, 0},
{66, 0},
{65, 0},
{64, 0},
{63, 0},
{62, 0},
{61, 0},
{60, 0},
{59, 0},
}
};
static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
u8 is_ht40, u8 ctrl_chan_high,
struct iwl4965_tx_power_db *tx_power_tbl)
{
u8 saturation_power;
s32 target_power;
s32 user_target_power;
s32 power_limit;
s32 current_temp;
s32 reg_limit;
s32 current_regulatory;
s32 txatten_grp = CALIB_CH_GROUP_MAX;
int i;
int c;
const struct iwl_channel_info *ch_info = NULL;
struct iwl_eeprom_calib_ch_info ch_eeprom_info;
const struct iwl_eeprom_calib_measure *measurement;
s16 voltage;
s32 init_voltage;
s32 voltage_compensation;
s32 degrees_per_05db_num;
s32 degrees_per_05db_denom;
s32 factory_temp;
s32 temperature_comp[2];
s32 factory_gain_index[2];
s32 factory_actual_pwr[2];
s32 power_index;
/* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
* are used for indexing into txpower table) */
user_target_power = 2 * priv->tx_power_user_lmt;
/* Get current (RXON) channel, band, width */
IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band,
is_ht40);
ch_info = iwl_get_channel_info(priv, priv->band, channel);
if (!is_channel_valid(ch_info))
return -EINVAL;
/* get txatten group, used to select 1) thermal txpower adjustment
* and 2) mimo txpower balance between Tx chains. */
txatten_grp = iwl4965_get_tx_atten_grp(channel);
if (txatten_grp < 0) {
IWL_ERR(priv, "Can't find txatten group for channel %d.\n",
channel);
return -EINVAL;
}
IWL_DEBUG_TXPOWER(priv, "channel %d belongs to txatten group %d\n",
channel, txatten_grp);
if (is_ht40) {
if (ctrl_chan_high)
channel -= 2;
else
channel += 2;
}
/* hardware txpower limits ...
* saturation (clipping distortion) txpowers are in half-dBm */
if (band)
saturation_power = priv->calib_info->saturation_power24;
else
saturation_power = priv->calib_info->saturation_power52;
if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
saturation_power > IWL_TX_POWER_SATURATION_MAX) {
if (band)
saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
else
saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
}
/* regulatory txpower limits ... reg_limit values are in half-dBm,
* max_power_avg values are in dBm, convert * 2 */
if (is_ht40)
reg_limit = ch_info->ht40_max_power_avg * 2;
else
reg_limit = ch_info->max_power_avg * 2;
if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
(reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
if (band)
reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
else
reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
}
/* Interpolate txpower calibration values for this channel,
* based on factory calibration tests on spaced channels. */
iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
/* calculate tx gain adjustment based on power supply voltage */
voltage = le16_to_cpu(priv->calib_info->voltage);
init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
voltage_compensation =
iwl4965_get_voltage_compensation(voltage, init_voltage);
IWL_DEBUG_TXPOWER(priv, "curr volt %d eeprom volt %d volt comp %d\n",
init_voltage,
voltage, voltage_compensation);
/* get current temperature (Celsius) */
current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
current_temp = min(current_temp, IWL_TX_POWER_TEMPERATURE_MAX);
current_temp = KELVIN_TO_CELSIUS(current_temp);
/* select thermal txpower adjustment params, based on channel group
* (same frequency group used for mimo txatten adjustment) */
degrees_per_05db_num =
tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
degrees_per_05db_denom =
tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
/* get per-chain txpower values from factory measurements */
for (c = 0; c < 2; c++) {
measurement = &ch_eeprom_info.measurements[c][1];
/* txgain adjustment (in half-dB steps) based on difference
* between factory and current temperature */
factory_temp = measurement->temperature;
iwl4965_math_div_round((current_temp - factory_temp) *
degrees_per_05db_denom,
degrees_per_05db_num,
&temperature_comp[c]);
factory_gain_index[c] = measurement->gain_idx;
factory_actual_pwr[c] = measurement->actual_pow;
IWL_DEBUG_TXPOWER(priv, "chain = %d\n", c);
IWL_DEBUG_TXPOWER(priv, "fctry tmp %d, "
"curr tmp %d, comp %d steps\n",
factory_temp, current_temp,
temperature_comp[c]);
IWL_DEBUG_TXPOWER(priv, "fctry idx %d, fctry pwr %d\n",
factory_gain_index[c],
factory_actual_pwr[c]);
}
/* for each of 33 bit-rates (including 1 for CCK) */
for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
u8 is_mimo_rate;
union iwl4965_tx_power_dual_stream tx_power;
/* for mimo, reduce each chain's txpower by half
* (3dB, 6 steps), so total output power is regulatory
* compliant. */
if (i & 0x8) {
current_regulatory = reg_limit -
IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
is_mimo_rate = 1;
} else {
current_regulatory = reg_limit;
is_mimo_rate = 0;
}
/* find txpower limit, either hardware or regulatory */
power_limit = saturation_power - back_off_table[i];
if (power_limit > current_regulatory)
power_limit = current_regulatory;
/* reduce user's txpower request if necessary
* for this rate on this channel */
target_power = user_target_power;
if (target_power > power_limit)
target_power = power_limit;
IWL_DEBUG_TXPOWER(priv, "rate %d sat %d reg %d usr %d tgt %d\n",
i, saturation_power - back_off_table[i],
current_regulatory, user_target_power,
target_power);
/* for each of 2 Tx chains (radio transmitters) */
for (c = 0; c < 2; c++) {
s32 atten_value;
if (is_mimo_rate)
atten_value =
(s32)le32_to_cpu(priv->card_alive_init.
tx_atten[txatten_grp][c]);
else
atten_value = 0;
/* calculate index; higher index means lower txpower */
power_index = (u8) (factory_gain_index[c] -
(target_power -
factory_actual_pwr[c]) -
temperature_comp[c] -
voltage_compensation +
atten_value);
/* IWL_DEBUG_TXPOWER(priv, "calculated txpower index %d\n",
power_index); */
if (power_index < get_min_power_index(i, band))
power_index = get_min_power_index(i, band);
/* adjust 5 GHz index to support negative indexes */
if (!band)
power_index += 9;
/* CCK, rate 32, reduce txpower for CCK */
if (i == POWER_TABLE_CCK_ENTRY)
power_index +=
IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
/* stay within the table! */
if (power_index > 107) {
IWL_WARN(priv, "txpower index %d > 107\n",
power_index);
power_index = 107;
}
if (power_index < 0) {
IWL_WARN(priv, "txpower index %d < 0\n",
power_index);
power_index = 0;
}
/* fill txpower command for this rate/chain */
tx_power.s.radio_tx_gain[c] =
gain_table[band][power_index].radio;
tx_power.s.dsp_predis_atten[c] =
gain_table[band][power_index].dsp;
IWL_DEBUG_TXPOWER(priv, "chain %d mimo %d index %d "
"gain 0x%02x dsp %d\n",
c, atten_value, power_index,
tx_power.s.radio_tx_gain[c],
tx_power.s.dsp_predis_atten[c]);
} /* for each chain */
tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
} /* for each rate */
return 0;
}
/**
* iwl4965_send_tx_power - Configure the TXPOWER level user limit
*
* Uses the active RXON for channel, band, and characteristics (ht40, high)
* The power limit is taken from priv->tx_power_user_lmt.
*/
static int iwl4965_send_tx_power(struct iwl_priv *priv)
{
struct iwl4965_txpowertable_cmd cmd = { 0 };
int ret;
u8 band = 0;
bool is_ht40 = false;
u8 ctrl_chan_high = 0;
if (test_bit(STATUS_SCANNING, &priv->status)) {
/* If this gets hit a lot, switch it to a BUG() and catch
* the stack trace to find out who is calling this during
* a scan. */
IWL_WARN(priv, "TX Power requested while scanning!\n");
return -EAGAIN;
}
band = priv->band == IEEE80211_BAND_2GHZ;
is_ht40 = is_ht40_channel(priv->active_rxon.flags);
if (is_ht40 &&
(priv->active_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
ctrl_chan_high = 1;
cmd.band = band;
cmd.channel = priv->active_rxon.channel;
ret = iwl4965_fill_txpower_tbl(priv, band,
le16_to_cpu(priv->active_rxon.channel),
is_ht40, ctrl_chan_high, &cmd.tx_power);
if (ret)
goto out;
ret = iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
out:
return ret;
}
static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
{
int ret = 0;
struct iwl4965_rxon_assoc_cmd rxon_assoc;
const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
if ((rxon1->flags == rxon2->flags) &&
(rxon1->filter_flags == rxon2->filter_flags) &&
(rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
(rxon1->ofdm_ht_single_stream_basic_rates ==
rxon2->ofdm_ht_single_stream_basic_rates) &&
(rxon1->ofdm_ht_dual_stream_basic_rates ==
rxon2->ofdm_ht_dual_stream_basic_rates) &&
(rxon1->rx_chain == rxon2->rx_chain) &&
(rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
return 0;
}
rxon_assoc.flags = priv->staging_rxon.flags;
rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
rxon_assoc.reserved = 0;
rxon_assoc.ofdm_ht_single_stream_basic_rates =
priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
rxon_assoc.ofdm_ht_dual_stream_basic_rates =
priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
sizeof(rxon_assoc), &rxon_assoc, NULL);
return ret;
}
static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
{
int rc;
u8 band = 0;
bool is_ht40 = false;
u8 ctrl_chan_high = 0;
struct iwl4965_channel_switch_cmd cmd;
const struct iwl_channel_info *ch_info;
band = priv->band == IEEE80211_BAND_2GHZ;
ch_info = iwl_get_channel_info(priv, priv->band, channel);
is_ht40 = is_ht40_channel(priv->staging_rxon.flags);
if (is_ht40 &&
(priv->staging_rxon.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
ctrl_chan_high = 1;
cmd.band = band;
cmd.expect_beacon = 0;
cmd.channel = cpu_to_le16(channel);
cmd.rxon_flags = priv->staging_rxon.flags;
cmd.rxon_filter_flags = priv->staging_rxon.filter_flags;
cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
if (ch_info)
cmd.expect_beacon = is_channel_radar(ch_info);
else {
IWL_ERR(priv, "invalid channel switch from %u to %u\n",
priv->active_rxon.channel, channel);
return -EFAULT;
}
rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_ht40,
ctrl_chan_high, &cmd.tx_power);
if (rc) {
IWL_DEBUG_11H(priv, "error:%d fill txpower_tbl\n", rc);
return rc;
}
priv->switch_rxon.channel = cpu_to_le16(channel);
priv->switch_rxon.switch_in_progress = true;
return iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
}
/**
* iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
*/
static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
struct iwl_tx_queue *txq,
u16 byte_cnt)
{
struct iwl4965_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
int txq_id = txq->q.id;
int write_ptr = txq->q.write_ptr;
int len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
__le16 bc_ent;
WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
bc_ent = cpu_to_le16(len & 0xFFF);
/* Set up byte count within first 256 entries */
scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
/* If within first 64 entries, duplicate at end */
if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
scd_bc_tbl[txq_id].
tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
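/*
* Illustrative note (not part of the original driver): the byte-count table
* mirrors small indices, presumably so the scheduler can read a contiguous
* window that wraps past the end of the 256-entry queue. Assuming
* TFD_QUEUE_SIZE_MAX == 256 and TFD_QUEUE_SIZE_BC_DUP == 64, a write at
* write_ptr == 10 with len == 100 populates both
*
* scd_bc_tbl[txq_id].tfd_offset[10] = cpu_to_le16(100);
* scd_bc_tbl[txq_id].tfd_offset[256 + 10] = cpu_to_le16(100);
*
* while a write at write_ptr == 200 touches only tfd_offset[200].
*/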
/**
* sign_extend - Sign extend a value using specified bit as sign-bit
*
 * Example: sign_extend(9, 3) returns -7: bit 3 of 1001b (the sign bit) is 1,
 * so the value sign-extends to ...1111111111111001b, which is -7.
*
* @param oper value to sign extend
* @param index 0 based bit index (0<=index<32) to sign bit
*/
static s32 sign_extend(u32 oper, int index)
{
u8 shift = 31 - index;
return (s32)(oper << shift) >> shift;
}
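/*
* Worked example (illustrative, not in the original source): the driver
* calls sign_extend(val, 23) for the temperature field, whose sign bit is
* bit 23, so shift == 31 - 23 == 8. With val == 0x00801234 (bit 23 set):
*
* (s32)(0x00801234 << 8) == (s32)0x80123400, a negative value;
* 0x80123400 >> 8 == 0xFF801234 (the arithmetic shift preserves the sign)
*
* i.e. the 24-bit two's-complement quantity is reproduced as a full s32.
*/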
/**
* iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
* @statistics: Provides the temperature reading from the uCode
*
* A return of <0 indicates bogus data in the statistics
*/
static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
{
s32 temperature;
s32 vt;
s32 R1, R2, R3;
u32 R4;
if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
(priv->statistics.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)) {
IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n");
R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
} else {
IWL_DEBUG_TEMP(priv, "Running temperature calibration\n");
R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
}
/*
* Temperature is only 23 bits, so sign extend out to 32.
*
* NOTE If we haven't received a statistics notification yet
* with an updated temperature, use R4 provided to us in the
* "initialize" ALIVE response.
*/
if (!test_bit(STATUS_TEMPERATURE, &priv->status))
vt = sign_extend(R4, 23);
else
vt = sign_extend(
le32_to_cpu(priv->statistics.general.temperature), 23);
IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
if (R3 == R1) {
IWL_ERR(priv, "Calibration conflict R1 == R3\n");
return -1;
}
/* Calculate temperature in Kelvin, scaled to 97%.
* Add offset to center the adjustment around 0 degrees Centigrade. */
temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
temperature /= (R3 - R1);
temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
IWL_DEBUG_TEMP(priv, "Calibrated temperature: %dK, %dC\n",
temperature, KELVIN_TO_CELSIUS(temperature));
return temperature;
}
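/*
* Worked example with hypothetical calibration values (the real R1..R3 and
* vt come from the "initialize" ALIVE response or statistics): with
* R1 = 100, R2 = 200, R3 = 300 and vt = 260, the computation above is
*
* temperature = TEMPERATURE_CALIB_A_VAL * (260 - 200) / (300 - 100);
*
* which is then scaled to 97% and offset by TEMPERATURE_CALIB_KELVIN_OFFSET
* to yield the calibrated reading in Kelvin.
*/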
/* Adjust Txpower only if temperature variance is greater than threshold. */
#define IWL_TEMPERATURE_THRESHOLD 3
/**
* iwl4965_is_temp_calib_needed - determines if new calibration is needed
*
 * If the temperature has changed sufficiently, then a recalibration
* is needed.
*
 * Assumes the caller will replace priv->last_temperature once the
 * calibration has executed.
*/
static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
{
int temp_diff;
if (!test_bit(STATUS_STATISTICS, &priv->status)) {
IWL_DEBUG_TEMP(priv, "Temperature not updated -- no statistics.\n");
return 0;
}
temp_diff = priv->temperature - priv->last_temperature;
/* get absolute value */
if (temp_diff < 0) {
IWL_DEBUG_POWER(priv, "Getting cooler, delta %d\n", temp_diff);
temp_diff = -temp_diff;
} else if (temp_diff == 0)
IWL_DEBUG_POWER(priv, "Temperature unchanged\n");
else
IWL_DEBUG_POWER(priv, "Getting warmer, delta %d\n", temp_diff);
if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
IWL_DEBUG_POWER(priv, " => thermal txpower calib not needed\n");
return 0;
}
IWL_DEBUG_POWER(priv, " => thermal txpower calib needed\n");
return 1;
}
static void iwl4965_temperature_calib(struct iwl_priv *priv)
{
s32 temp;
temp = iwl4965_hw_get_temperature(priv);
if (temp < 0)
return;
if (priv->temperature != temp) {
if (priv->temperature)
IWL_DEBUG_TEMP(priv, "Temperature changed "
"from %dC to %dC\n",
KELVIN_TO_CELSIUS(priv->temperature),
KELVIN_TO_CELSIUS(temp));
else
IWL_DEBUG_TEMP(priv, "Temperature "
"initialized to %dC\n",
KELVIN_TO_CELSIUS(temp));
}
priv->temperature = temp;
iwl_tt_handler(priv);
set_bit(STATUS_TEMPERATURE, &priv->status);
if (!priv->disable_tx_power_cal &&
unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
iwl4965_is_temp_calib_needed(priv))
queue_work(priv->workqueue, &priv->txpower_work);
}
/**
* iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
*/
static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
u16 txq_id)
{
/* Simply stop the queue, but don't change any configuration;
* the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
iwl_write_prph(priv,
IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
(0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
(1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
/**
* iwl4965_txq_agg_disable - Deactivate an aggregation Tx queue
*
* txq_id must be greater than or equal to IWL49_FIRST_AMPDU_QUEUE;
* priv->lock must be held by the caller
*/
static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
u16 ssn_idx, u8 tx_fifo)
{
if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
(IWL49_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
<= txq_id)) {
IWL_WARN(priv,
"queue number out of range: %d, must be %d to %d\n",
txq_id, IWL49_FIRST_AMPDU_QUEUE,
IWL49_FIRST_AMPDU_QUEUE +
priv->cfg->num_of_ampdu_queues - 1);
return -EINVAL;
}
iwl4965_tx_queue_stop_scheduler(priv, txq_id);
iwl_clear_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
/* supposes that ssn_idx is valid (!= 0xFFF) */
iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
iwl_txq_ctx_deactivate(priv, txq_id);
iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
return 0;
}
/**
* iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
*/
static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
u16 txq_id)
{
u32 tbl_dw_addr;
u32 tbl_dw;
u16 scd_q2ratid;
scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
tbl_dw_addr = priv->scd_base_addr +
IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
if (txq_id & 0x1)
tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
else
tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
return 0;
}
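/*
* Illustrative note (not in the original source): the translation table
* packs two 16-bit RA/TID entries per 32-bit word, even-numbered queues in
* the low half-word and odd-numbered queues in the high half-word. For
* example, writing scd_q2ratid == 0x0123 for txq_id == 11 (odd) keeps the
* low 16 bits of the existing word and stores 0x0123 in bits 31:16.
*/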
/**
* iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
*
 * NOTE: txq_id must be greater than or equal to IWL49_FIRST_AMPDU_QUEUE,
* i.e. it must be one of the higher queues used for aggregation
*/
static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
int tx_fifo, int sta_id, int tid, u16 ssn_idx)
{
unsigned long flags;
u16 ra_tid;
if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
(IWL49_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
<= txq_id)) {
IWL_WARN(priv,
"queue number out of range: %d, must be %d to %d\n",
txq_id, IWL49_FIRST_AMPDU_QUEUE,
IWL49_FIRST_AMPDU_QUEUE +
priv->cfg->num_of_ampdu_queues - 1);
return -EINVAL;
}
ra_tid = BUILD_RAxTID(sta_id, tid);
/* Modify device's station table to Tx this TID */
iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
spin_lock_irqsave(&priv->lock, flags);
/* Stop this Tx queue before configuring it */
iwl4965_tx_queue_stop_scheduler(priv, txq_id);
/* Map receiver-address / traffic-ID to this queue */
iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
/* Set this queue as a chain-building queue */
iwl_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
/* Place first TFD at index corresponding to start sequence number.
* Assumes that ssn_idx is valid (!= 0xFFF) */
priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
/* Set up Tx window size and frame limit for this queue */
iwl_write_targ_mem(priv,
priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
(SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
iwl_write_targ_mem(priv, priv->scd_base_addr +
IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
(SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
& IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
{
switch (cmd_id) {
case REPLY_RXON:
return (u16) sizeof(struct iwl4965_rxon_cmd);
default:
return len;
}
}
static u16 iwl4965_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
{
struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
addsta->mode = cmd->mode;
memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
addsta->station_flags = cmd->station_flags;
addsta->station_flags_msk = cmd->station_flags_msk;
addsta->tid_disable_tx = cmd->tid_disable_tx;
addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
addsta->sleep_tx_count = cmd->sleep_tx_count;
addsta->reserved1 = cpu_to_le16(0);
addsta->reserved2 = cpu_to_le16(0);
return (u16)sizeof(struct iwl4965_addsta_cmd);
}
static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
{
return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
}
/**
* iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
*/
static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
struct iwl_ht_agg *agg,
struct iwl4965_tx_resp *tx_resp,
int txq_id, u16 start_idx)
{
u16 status;
struct agg_tx_status *frame_status = tx_resp->u.agg_status;
struct ieee80211_tx_info *info = NULL;
struct ieee80211_hdr *hdr = NULL;
u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
int i, sh, idx;
u16 seq;
if (agg->wait_for_ba)
IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
agg->frame_count = tx_resp->frame_count;
agg->start_idx = start_idx;
agg->rate_n_flags = rate_n_flags;
agg->bitmap = 0;
/* num frames attempted by Tx command */
if (agg->frame_count == 1) {
/* Only one frame was attempted; no block-ack will arrive */
status = le16_to_cpu(frame_status[0].status);
idx = start_idx;
/* FIXME: code repetition */
IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
agg->frame_count, agg->start_idx, idx);
info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
info->status.rates[0].count = tx_resp->failure_frame + 1;
info->flags &= ~IEEE80211_TX_CTL_AMPDU;
info->flags |= iwl_tx_status_to_mac80211(status);
iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info);
/* FIXME: code repetition end */
IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
status & 0xff, tx_resp->failure_frame);
IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
agg->wait_for_ba = 0;
} else {
/* Two or more frames were attempted; expect block-ack */
u64 bitmap = 0;
int start = agg->start_idx;
/* Construct bit-map of pending frames within Tx window */
for (i = 0; i < agg->frame_count; i++) {
u16 sc;
status = le16_to_cpu(frame_status[i].status);
seq = le16_to_cpu(frame_status[i].sequence);
idx = SEQ_TO_INDEX(seq);
txq_id = SEQ_TO_QUEUE(seq);
if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
AGG_TX_STATE_ABORT_MSK))
continue;
IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
agg->frame_count, txq_id, idx);
hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
if (!hdr) {
IWL_ERR(priv,
"BUG_ON idx doesn't point to valid skb"
" idx=%d, txq_id=%d\n", idx, txq_id);
return -1;
}
sc = le16_to_cpu(hdr->seq_ctrl);
if (idx != (SEQ_TO_SN(sc) & 0xff)) {
IWL_ERR(priv,
"BUG_ON idx doesn't match seq control"
" idx=%d, seq_idx=%d, seq=%d\n",
idx, SEQ_TO_SN(sc), hdr->seq_ctrl);
return -1;
}
IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
i, idx, SEQ_TO_SN(sc));
sh = idx - start;
if (sh > 64) {
sh = (start - idx) + 0xff;
bitmap = bitmap << sh;
sh = 0;
start = idx;
} else if (sh < -64)
sh = 0xff - (start - idx);
else if (sh < 0) {
sh = start - idx;
start = idx;
bitmap = bitmap << sh;
sh = 0;
}
bitmap |= 1ULL << sh;
IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
start, (unsigned long long)bitmap);
}
agg->bitmap = bitmap;
agg->start_idx = start;
IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
agg->frame_count, agg->start_idx,
(unsigned long long)agg->bitmap);
if (bitmap)
agg->wait_for_ba = 1;
}
return 0;
}
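/*
* Illustrative bitmap example (hypothetical indices, not from a real
* trace): with start == 10 and surviving frames at indices 10, 11 and 13,
* the loop above computes sh == 0, 1 and 3 and accumulates
*
* bitmap == (1ULL << 0) | (1ULL << 1) | (1ULL << 3) == 0xb
*
* so bit N marks the frame at index start + N that still awaits its
* block-ack.
*/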
static u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
{
int i;
int start = 0;
int ret = IWL_INVALID_STATION;
unsigned long flags;
if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) ||
(priv->iw_mode == NL80211_IFTYPE_AP))
start = IWL_STA_ID;
if (is_broadcast_ether_addr(addr))
return priv->hw_params.bcast_sta_id;
spin_lock_irqsave(&priv->sta_lock, flags);
for (i = start; i < priv->hw_params.max_stations; i++)
if (priv->stations[i].used &&
(!compare_ether_addr(priv->stations[i].sta.sta.addr,
addr))) {
ret = i;
goto out;
}
IWL_DEBUG_ASSOC_LIMIT(priv, "cannot find STA %pM, total %d\n",
addr, priv->num_stations);
out:
/*
* It may be possible that more commands interacting with stations
* arrive before we completed processing the adding of
* station
*/
if (ret != IWL_INVALID_STATION &&
(!(priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) ||
((priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) &&
(priv->stations[ret].used & IWL_STA_UCODE_INPROGRESS)))) {
IWL_ERR(priv, "Requested station info for sta %d before ready.\n",
ret);
ret = IWL_INVALID_STATION;
}
spin_unlock_irqrestore(&priv->sta_lock, flags);
return ret;
}
static int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
{
if (priv->iw_mode == NL80211_IFTYPE_STATION) {
return IWL_AP_ID;
} else {
u8 *da = ieee80211_get_DA(hdr);
return iwl_find_station(priv, da);
}
}
/**
* iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
*/
static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
int txq_id = SEQ_TO_QUEUE(sequence);
int index = SEQ_TO_INDEX(sequence);
struct iwl_tx_queue *txq = &priv->txq[txq_id];
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *info;
struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
u32 status = le32_to_cpu(tx_resp->u.status);
int uninitialized_var(tid);
int sta_id;
int freed;
u8 *qc = NULL;
if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
"is out of range [0-%d] %d %d\n", txq_id,
index, txq->q.n_bd, txq->q.write_ptr,
txq->q.read_ptr);
return;
}
info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
memset(&info->status, 0, sizeof(info->status));
hdr = iwl_tx_queue_get_hdr(priv, txq_id, index);
if (ieee80211_is_data_qos(hdr->frame_control)) {
qc = ieee80211_get_qos_ctl(hdr);
tid = qc[0] & 0xf;
}
sta_id = iwl_get_ra_sta_id(priv, hdr);
if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
IWL_ERR(priv, "Station not known\n");
return;
}
if (txq->sched_retry) {
const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
struct iwl_ht_agg *agg = NULL;
WARN_ON(!qc);
agg = &priv->stations[sta_id].tid[tid].agg;
iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
/* check if BAR is needed */
if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
if (txq->q.read_ptr != (scd_ssn & 0xff)) {
index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
"%d index %d\n", scd_ssn , index);
freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
if (qc)
iwl_free_tfds_in_queue(priv, sta_id,
tid, freed);
if (priv->mac80211_registered &&
(iwl_queue_space(&txq->q) > txq->q.low_mark) &&
(agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
if (agg->state == IWL_AGG_OFF)
iwl_wake_queue(priv, txq_id);
else
iwl_wake_queue(priv, txq->swq_id);
}
}
} else {
info->status.rates[0].count = tx_resp->failure_frame + 1;
info->flags |= iwl_tx_status_to_mac80211(status);
iwlagn_hwrate_to_tx_control(priv,
le32_to_cpu(tx_resp->rate_n_flags),
info);
IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) "
"rate_n_flags 0x%x retries %d\n",
txq_id,
iwl_get_tx_fail_reason(status), status,
le32_to_cpu(tx_resp->rate_n_flags),
tx_resp->failure_frame);
freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
if (qc && likely(sta_id != IWL_INVALID_STATION))
iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
else if (sta_id == IWL_INVALID_STATION)
IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
if (priv->mac80211_registered &&
(iwl_queue_space(&txq->q) > txq->q.low_mark))
iwl_wake_queue(priv, txq_id);
}
if (qc && likely(sta_id != IWL_INVALID_STATION))
iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
iwl_check_abort_status(priv, tx_resp->frame_count, status);
}
static int iwl4965_calc_rssi(struct iwl_priv *priv,
struct iwl_rx_phy_res *rx_resp)
{
/* data from PHY/DSP regarding signal strength, etc.,
* contents are always there, not configurable by host. */
struct iwl4965_rx_non_cfg_phy *ncphy =
(struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
>> IWL49_AGC_DB_POS;
u32 valid_antennae =
(le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
>> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
u8 max_rssi = 0;
u32 i;
/* Find max rssi among 3 possible receivers.
* These values are measured by the digital signal processor (DSP).
* They should stay fairly constant even as the signal strength varies,
* if the radio's automatic gain control (AGC) is working right.
* AGC value (see below) will provide the "interesting" info. */
for (i = 0; i < 3; i++)
if (valid_antennae & (1 << i))
max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
max_rssi, agc);
/* dBm = max_rssi dB - agc dB - constant.
* Higher AGC (higher radio gain) means lower signal. */
return max_rssi - agc - IWLAGN_RSSI_OFFSET;
}
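/*
* Worked example with hypothetical readings (real values come from the
* DSP): if the strongest receiver reports max_rssi == 90 dB and the AGC
* gain is agc == 25 dB, the function returns 90 - 25 - IWLAGN_RSSI_OFFSET
* (in dBm). A higher AGC gain, meaning more amplification of a weaker
* signal, lowers the reported level.
*/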
/* Set up 4965-specific Rx frame reply handlers */
static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
{
/* Legacy Rx frames */
priv->rx_handlers[REPLY_RX] = iwlagn_rx_reply_rx;
/* Tx response */
priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
}
static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
{
INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
}
static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
{
cancel_work_sync(&priv->txpower_work);
}
static struct iwl_hcmd_ops iwl4965_hcmd = {
.rxon_assoc = iwl4965_send_rxon_assoc,
.commit_rxon = iwl_commit_rxon,
.set_rxon_chain = iwl_set_rxon_chain,
.send_bt_config = iwl_send_bt_config,
};
static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
.get_hcmd_size = iwl4965_get_hcmd_size,
.build_addsta_hcmd = iwl4965_build_addsta_hcmd,
.chain_noise_reset = iwl4965_chain_noise_reset,
.gain_computation = iwl4965_gain_computation,
.rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
.calc_rssi = iwl4965_calc_rssi,
.request_scan = iwlagn_request_scan,
};
static struct iwl_lib_ops iwl4965_lib = {
.set_hw_params = iwl4965_hw_set_hw_params,
.txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
.txq_set_sched = iwl4965_txq_set_sched,
.txq_agg_enable = iwl4965_txq_agg_enable,
.txq_agg_disable = iwl4965_txq_agg_disable,
.txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
.txq_free_tfd = iwl_hw_txq_free_tfd,
.txq_init = iwl_hw_tx_queue_init,
.rx_handler_setup = iwl4965_rx_handler_setup,
.setup_deferred_work = iwl4965_setup_deferred_work,
.cancel_deferred_work = iwl4965_cancel_deferred_work,
.is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
.alive_notify = iwl4965_alive_notify,
.init_alive_start = iwl4965_init_alive_start,
.load_ucode = iwl4965_load_bsm,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
.dump_fh = iwl_dump_fh,
.set_channel_switch = iwl4965_hw_channel_switch,
.apm_ops = {
.init = iwl_apm_init,
.stop = iwl_apm_stop,
.config = iwl4965_nic_config,
.set_pwr_src = iwl_set_pwr_src,
},
.eeprom_ops = {
.regulatory_bands = {
EEPROM_REGULATORY_BAND_1_CHANNELS,
EEPROM_REGULATORY_BAND_2_CHANNELS,
EEPROM_REGULATORY_BAND_3_CHANNELS,
EEPROM_REGULATORY_BAND_4_CHANNELS,
EEPROM_REGULATORY_BAND_5_CHANNELS,
EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
},
.verify_signature = iwlcore_eeprom_verify_signature,
.acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
.release_semaphore = iwlcore_eeprom_release_semaphore,
.calib_version = iwl4965_eeprom_calib_version,
.query_addr = iwlcore_eeprom_query_addr,
},
.send_tx_power = iwl4965_send_tx_power,
.update_chain_flags = iwl_update_chain_flags,
.post_associate = iwl_post_associate,
.config_ap = iwl_config_ap,
.isr = iwl_isr_legacy,
.temp_ops = {
.temperature = iwl4965_temperature_calib,
.set_ct_kill = iwl4965_set_ct_threshold,
},
.manage_ibss_station = iwlagn_manage_ibss_station,
.debugfs_ops = {
.rx_stats_read = iwl_ucode_rx_stats_read,
.tx_stats_read = iwl_ucode_tx_stats_read,
.general_stats_read = iwl_ucode_general_stats_read,
},
.check_plcp_health = iwl_good_plcp_health,
};
static const struct iwl_ops iwl4965_ops = {
.lib = &iwl4965_lib,
.hcmd = &iwl4965_hcmd,
.utils = &iwl4965_hcmd_utils,
.led = &iwlagn_led_ops,
};
struct iwl_cfg iwl4965_agn_cfg = {
.name = "Intel(R) Wireless WiFi Link 4965AGN",
.fw_name_pre = IWL4965_FW_PRE,
.ucode_api_max = IWL4965_UCODE_API_MAX,
.ucode_api_min = IWL4965_UCODE_API_MIN,
.sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
.eeprom_size = IWL4965_EEPROM_IMG_SIZE,
.eeprom_ver = EEPROM_4965_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
.ops = &iwl4965_ops,
.num_of_queues = IWL49_NUM_QUEUES,
.num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
.mod_params = &iwlagn_mod_params,
.valid_tx_ant = ANT_AB,
.valid_rx_ant = ANT_ABC,
.pll_cfg_val = 0,
.set_l0s = true,
.use_bsm = true,
.use_isr_legacy = true,
.ht_greenfield_support = false,
.broken_powersave = true,
.led_compensation = 61,
.chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
.monitor_recover_period = IWL_MONITORING_PERIOD,
.temperature_kelvin = true,
.max_event_log_size = 512,
.tx_power_by_driver = true,
.ucode_tracing = true,
.sensitivity_calib_by_driver = true,
.chain_noise_calib_by_driver = true,
/*
* Force use of chains B and C for scan RX on 5 GHz band
* because the device has off-channel reception on chain A.
*/
.scan_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
};
/* Module firmware */
MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
|
gpl-2.0
|
lodr/codeaurora_kernel_msm
|
drivers/net/wireless/bcmdhd/aiutils.c
|
1020
|
21942
|
/*
* Misc utility routines for accessing chip-specific features
* of the SiliconBackplane-based Broadcom chips.
*
* Copyright (C) 1999-2013, Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2 (the "GPL"),
* available at http://www.broadcom.com/licenses/GPLv2.php, with the
* following added to such license:
*
* As a special exception, the copyright holders of this software give you
* permission to link this software with independent modules, and to copy and
* distribute the resulting executable under terms of your choice, provided that
* you also meet, for each linked independent module, the terms and conditions of
* the license of that module. An independent module is a module which is not
* derived from this software. The special exception does not apply to any
* modifications of the software.
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a license
* other than the GPL, without Broadcom's express prior written consent.
*
* $Id: aiutils.c 385510 2013-02-15 21:02:07Z $
*/
#include <bcm_cfg.h>
#include <typedefs.h>
#include <bcmdefs.h>
#include <osl.h>
#include <bcmutils.h>
#include <siutils.h>
#include <hndsoc.h>
#include <sbchipc.h>
#include <pcicfg.h>
#include "siutils_priv.h"
#define BCM47162_DMP() (0)
#define BCM5357_DMP() (0)
#define BCM4707_DMP() (0)
#define remap_coreid(sih, coreid) (coreid)
#define remap_corerev(sih, corerev) (corerev)
/* EROM parsing */
static uint32
get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match)
{
uint32 ent;
uint inv = 0, nom = 0;
while (TRUE) {
ent = R_REG(si_osh(sih), *eromptr);
(*eromptr)++;
if (mask == 0)
break;
if ((ent & ER_VALID) == 0) {
inv++;
continue;
}
if (ent == (ER_END | ER_VALID))
break;
if ((ent & mask) == match)
break;
nom++;
}
SI_VMSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent));
if (inv + nom) {
SI_VMSG((" after %d invalid and %d non-matching entries\n", inv, nom));
}
return ent;
}
static uint32
get_asd(si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh,
uint32 *sizel, uint32 *sizeh)
{
uint32 asd, sz, szd;
asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
if (((asd & ER_TAG1) != ER_ADD) ||
(((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
((asd & AD_ST_MASK) != st)) {
/* This is not what we want, "push" it back */
(*eromptr)--;
return 0;
}
*addrl = asd & AD_ADDR_MASK;
if (asd & AD_AG32)
*addrh = get_erom_ent(sih, eromptr, 0, 0);
else
*addrh = 0;
*sizeh = 0;
sz = asd & AD_SZ_MASK;
if (sz == AD_SZ_SZD) {
szd = get_erom_ent(sih, eromptr, 0, 0);
*sizel = szd & SD_SZ_MASK;
if (szd & SD_SG32)
*sizeh = get_erom_ent(sih, eromptr, 0, 0);
} else
*sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n",
sp, ad, st, *sizeh, *sizel, *addrh, *addrl));
return asd;
}
static void
ai_hwfixup(si_info_t *sii)
{
}
/* parse the enumeration rom to identify all cores */
void
ai_scan(si_t *sih, void *regs, uint devid)
{
si_info_t *sii = SI_INFO(sih);
chipcregs_t *cc = (chipcregs_t *)regs;
uint32 erombase, *eromptr, *eromlim;
erombase = R_REG(sii->osh, &cc->eromptr);
switch (BUSTYPE(sih->bustype)) {
case SI_BUS:
eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
break;
case PCI_BUS:
/* Set wrappers address */
sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE);
/* Now point the window at the erom */
OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase);
eromptr = regs;
break;
case SPI_BUS:
case SDIO_BUS:
eromptr = (uint32 *)(uintptr)erombase;
break;
case PCMCIA_BUS:
default:
SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n", sih->bustype));
ASSERT(0);
return;
}
eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n",
regs, erombase, eromptr, eromlim));
while (eromptr < eromlim) {
uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
uint32 mpd, asd, addrl, addrh, sizel, sizeh;
uint i, j, idx;
bool br;
br = FALSE;
/* Grok a component */
cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
if (cia == (ER_END | ER_VALID)) {
SI_VMSG(("Found END of erom after %d cores\n", sii->numcores));
ai_hwfixup(sii);
return;
}
cib = get_erom_ent(sih, &eromptr, 0, 0);
if ((cib & ER_TAG) != ER_CI) {
SI_ERROR(("CIA not followed by CIB\n"));
goto error;
}
cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
#ifdef BCMDBG_SI
SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, "
"nsw = %d, nmp = %d & nsp = %d\n",
mfg, cid, crev, eromptr - 1, nmw, nsw, nmp, nsp));
#else
BCM_REFERENCE(crev);
#endif
if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
continue;
if (nmw + nsw == 0) {
/* A component which is not a core */
if (cid == OOB_ROUTER_CORE_ID) {
asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
&addrl, &addrh, &sizel, &sizeh);
if (asd != 0) {
sii->oob_router = addrl;
}
}
if (cid != GMAC_COMMON_4706_CORE_ID && cid != NS_CCB_CORE_ID)
continue;
}
idx = sii->numcores;
sii->cia[idx] = cia;
sii->cib[idx] = cib;
sii->coreid[idx] = remap_coreid(sih, cid);
for (i = 0; i < nmp; i++) {
mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
if ((mpd & ER_TAG) != ER_MP) {
SI_ERROR(("Not enough MP entries for component 0x%x\n", cid));
goto error;
}
SI_VMSG((" Master port %d, mp: %d id: %d\n", i,
(mpd & MPD_MP_MASK) >> MPD_MP_SHIFT,
(mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT));
}
/* First Slave Address Descriptor should be port 0:
* the main register space for the core
*/
asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
if (asd == 0) {
do {
/* Try again to see if it is a bridge */
asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
&sizel, &sizeh);
if (asd != 0)
br = TRUE;
else {
if (br == TRUE) {
break;
}
else if ((addrh != 0) || (sizeh != 0) ||
(sizel != SI_CORE_SIZE)) {
SI_ERROR(("addrh = 0x%x\t sizeh = 0x%x\t size1 ="
"0x%x\n", addrh, sizeh, sizel));
SI_ERROR(("First Slave ASD for"
"core 0x%04x malformed "
"(0x%08x)\n", cid, asd));
goto error;
}
}
} while (1);
}
sii->coresba[idx] = addrl;
sii->coresba_size[idx] = sizel;
/* Get any more ASDs in port 0 */
j = 1;
do {
asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
&sizel, &sizeh);
if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
sii->coresba2[idx] = addrl;
sii->coresba2_size[idx] = sizel;
}
j++;
} while (asd != 0);
/* Go through the ASDs for other slave ports */
for (i = 1; i < nsp; i++) {
j = 0;
do {
asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
&sizel, &sizeh);
if (asd == 0)
break;
j++;
} while (1);
if (j == 0) {
SI_ERROR((" SP %d has no address descriptors\n", i));
goto error;
}
}
/* Now get master wrappers */
for (i = 0; i < nmw; i++) {
asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh,
&sizel, &sizeh);
if (asd == 0) {
SI_ERROR(("Missing descriptor for MW %d\n", i));
goto error;
}
if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
SI_ERROR(("Master wrapper %d is not 4KB\n", i));
goto error;
}
if (i == 0)
sii->wrapba[idx] = addrl;
}
/* And finally slave wrappers */
for (i = 0; i < nsw; i++) {
uint fwp = (nsp == 1) ? 0 : 1;
asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh,
&sizel, &sizeh);
if (asd == 0) {
SI_ERROR(("Missing descriptor for SW %d\n", i));
goto error;
}
if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
SI_ERROR(("Slave wrapper %d is not 4KB\n", i));
goto error;
}
if ((nmw == 0) && (i == 0))
sii->wrapba[idx] = addrl;
}
/* Don't record bridges */
if (br)
continue;
/* Done with core */
sii->numcores++;
}
SI_ERROR(("Reached end of erom without finding END"));
error:
sii->numcores = 0;
return;
}
/* This function changes the logical "focus" to the indicated core.
* Return the current core's virtual address.
*/
void *
ai_setcoreidx(si_t *sih, uint coreidx)
{
si_info_t *sii = SI_INFO(sih);
uint32 addr, wrap;
void *regs;
if (coreidx >= MIN(sii->numcores, SI_MAXCORES))
return (NULL);
addr = sii->coresba[coreidx];
wrap = sii->wrapba[coreidx];
/*
* If the user has provided an interrupt mask enabled function,
* then assert interrupts are disabled before switching the core.
*/
ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg));
switch (BUSTYPE(sih->bustype)) {
case SI_BUS:
/* map new one */
if (!sii->regs[coreidx]) {
sii->regs[coreidx] = REG_MAP(addr, SI_CORE_SIZE);
ASSERT(GOODREGS(sii->regs[coreidx]));
}
sii->curmap = regs = sii->regs[coreidx];
if (!sii->wrappers[coreidx] && (wrap != 0)) {
sii->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE);
ASSERT(GOODREGS(sii->wrappers[coreidx]));
}
sii->curwrap = sii->wrappers[coreidx];
break;
case SPI_BUS:
case SDIO_BUS:
sii->curmap = regs = (void *)((uintptr)addr);
sii->curwrap = (void *)((uintptr)wrap);
break;
case PCMCIA_BUS:
default:
ASSERT(0);
regs = NULL;
break;
}
sii->curmap = regs;
sii->curidx = coreidx;
return regs;
}
void
ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size)
{
si_info_t *sii = SI_INFO(sih);
chipcregs_t *cc = NULL;
uint32 erombase, *eromptr, *eromlim;
uint i, j, cidx;
uint32 cia, cib, nmp, nsp;
uint32 asd, addrl, addrh, sizel, sizeh;
for (i = 0; i < sii->numcores; i++) {
if (sii->coreid[i] == CC_CORE_ID) {
cc = (chipcregs_t *)sii->regs[i];
break;
}
}
if (cc == NULL)
goto error;
erombase = R_REG(sii->osh, &cc->eromptr);
eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE);
eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32));
cidx = sii->curidx;
cia = sii->cia[cidx];
cib = sii->cib[cidx];
nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
/* scan for cores */
while (eromptr < eromlim) {
if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) &&
(get_erom_ent(sih, &eromptr, 0, 0) == cib)) {
break;
}
}
/* skip master ports */
for (i = 0; i < nmp; i++)
get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
/* Skip ASDs in port 0 */
asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh);
if (asd == 0) {
/* Try again to see if it is a bridge */
asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh,
&sizel, &sizeh);
}
j = 1;
do {
asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh,
&sizel, &sizeh);
j++;
} while (asd != 0);
/* Go through the ASDs for other slave ports */
for (i = 1; i < nsp; i++) {
j = 0;
do {
asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh,
&sizel, &sizeh);
if (asd == 0)
break;
if (!asidx--) {
*addr = addrl;
*size = sizel;
return;
}
j++;
} while (1);
if (j == 0) {
SI_ERROR((" SP %d has no address descriptors\n", i));
break;
}
}
error:
*size = 0;
return;
}
/* Return the number of address spaces in current core */
int
ai_numaddrspaces(si_t *sih)
{
return 2;
}
/* Return the address of the nth address space in the current core */
uint32
ai_addrspace(si_t *sih, uint asidx)
{
si_info_t *sii;
uint cidx;
sii = SI_INFO(sih);
cidx = sii->curidx;
if (asidx == 0)
return sii->coresba[cidx];
else if (asidx == 1)
return sii->coresba2[cidx];
else {
SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
__FUNCTION__, asidx));
return 0;
}
}
/* Return the size of the nth address space in the current core */
uint32
ai_addrspacesize(si_t *sih, uint asidx)
{
si_info_t *sii;
uint cidx;
sii = SI_INFO(sih);
cidx = sii->curidx;
if (asidx == 0)
return sii->coresba_size[cidx];
else if (asidx == 1)
return sii->coresba2_size[cidx];
else {
SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n",
__FUNCTION__, asidx));
return 0;
}
}
uint
ai_flag(si_t *sih)
{
si_info_t *sii;
aidmp_t *ai;
sii = SI_INFO(sih);
if (BCM47162_DMP()) {
SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __FUNCTION__));
return sii->curidx;
}
if (BCM5357_DMP()) {
SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__));
return sii->curidx;
}
if (BCM4707_DMP()) {
SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
__FUNCTION__));
return sii->curidx;
}
ai = sii->curwrap;
return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f);
}
uint
ai_flag_alt(si_t *sih)
{
si_info_t *sii;
aidmp_t *ai;
sii = SI_INFO(sih);
if (BCM47162_DMP()) {
SI_ERROR(("%s: Attempting to read MIPS DMP registers on 47162a0", __FUNCTION__));
return sii->curidx;
}
if (BCM5357_DMP()) {
SI_ERROR(("%s: Attempting to read USB20H DMP registers on 5357b0\n", __FUNCTION__));
return sii->curidx;
}
if (BCM4707_DMP()) {
SI_ERROR(("%s: Attempting to read CHIPCOMMONB DMP registers on 4707\n",
__FUNCTION__));
return sii->curidx;
}
ai = sii->curwrap;
return ((R_REG(sii->osh, &ai->oobselouta30) >> AI_OOBSEL_1_SHIFT) & AI_OOBSEL_MASK);
}
void
ai_setint(si_t *sih, int siflag)
{
}
uint
ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val)
{
si_info_t *sii = SI_INFO(sih);
uint32 *map = (uint32 *) sii->curwrap;
if (mask || val) {
uint32 w = R_REG(sii->osh, map+(offset/4));
w &= ~mask;
w |= val;
W_REG(sii->osh, map+(offset/4), w); /* write the masked/merged value, not the raw 'val' */
}
return (R_REG(sii->osh, map+(offset/4)));
}
uint
ai_corevendor(si_t *sih)
{
si_info_t *sii;
uint32 cia;
sii = SI_INFO(sih);
cia = sii->cia[sii->curidx];
return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT);
}
uint
ai_corerev(si_t *sih)
{
si_info_t *sii;
uint32 cib;
sii = SI_INFO(sih);
cib = sii->cib[sii->curidx];
return remap_corerev(sih, (cib & CIB_REV_MASK) >> CIB_REV_SHIFT);
}
bool
ai_iscoreup(si_t *sih)
{
si_info_t *sii;
aidmp_t *ai;
sii = SI_INFO(sih);
ai = sii->curwrap;
return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) &&
((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0));
}
/*
* Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation,
* switch back to the original core, and return the new value.
*
* When using the silicon backplane, no fiddling with interrupts or core switches is needed.
*
* Also, when using pci/pcie, we can optimize away the core switching for pci registers
* and (on newer pci cores) chipcommon registers.
*/
uint
ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val)
{
uint origidx = 0;
uint32 *r = NULL;
uint w;
uint intr_val = 0;
bool fast = FALSE;
si_info_t *sii;
sii = SI_INFO(sih);
ASSERT(GOODIDX(coreidx));
ASSERT(regoff < SI_CORE_SIZE);
ASSERT((val & ~mask) == 0);
if (coreidx >= SI_MAXCORES)
return 0;
if (BUSTYPE(sih->bustype) == SI_BUS) {
/* If internal bus, we can always get at everything */
fast = TRUE;
/* map if does not exist */
if (!sii->regs[coreidx]) {
sii->regs[coreidx] = REG_MAP(sii->coresba[coreidx],
SI_CORE_SIZE);
ASSERT(GOODREGS(sii->regs[coreidx]));
}
r = (uint32 *)((uchar *)sii->regs[coreidx] + regoff);
} else if (BUSTYPE(sih->bustype) == PCI_BUS) {
/* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */
if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
/* Chipc registers are mapped at 12KB */
fast = TRUE;
r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff);
} else if (sii->pub.buscoreidx == coreidx) {
/* pci registers are either in the last 2KB of an 8KB window
* or, in pcie and pci rev 13, at 8KB
*/
fast = TRUE;
if (SI_FAST(sii))
r = (uint32 *)((char *)sii->curmap +
PCI_16KB0_PCIREGS_OFFSET + regoff);
else
r = (uint32 *)((char *)sii->curmap +
((regoff >= SBCONFIGOFF) ?
PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) +
regoff);
}
}
if (!fast) {
INTR_OFF(sii, intr_val);
/* save current core index */
origidx = si_coreidx(&sii->pub);
/* switch core */
r = (uint32*) ((uchar*) ai_setcoreidx(&sii->pub, coreidx) + regoff);
}
ASSERT(r != NULL);
/* mask and set */
if (mask || val) {
w = (R_REG(sii->osh, r) & ~mask) | val;
W_REG(sii->osh, r, w);
}
/* readback */
w = R_REG(sii->osh, r);
if (!fast) {
/* restore core index */
if (origidx != coreidx)
ai_setcoreidx(&sii->pub, origidx);
INTR_RESTORE(sii, intr_val);
}
return (w);
}
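/*
* Illustrative usage sketch (hypothetical call sites, following the
* mask-and-set convention asserted above):
*
* w = ai_corereg(sih, idx, regoff, 0, 0); read current value only
* ai_corereg(sih, idx, regoff, mask, val); set 'val' bits under 'mask'
*
* Passing mask == 0 and val == 0 skips the write and returns the register
* contents; callers must keep (val & ~mask) == 0.
*/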
void
ai_core_disable(si_t *sih, uint32 bits)
{
si_info_t *sii;
volatile uint32 dummy;
uint32 status;
aidmp_t *ai;
sii = SI_INFO(sih);
ASSERT(GOODREGS(sii->curwrap));
ai = sii->curwrap;
/* if core is already in reset, just return */
if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET)
return;
/* ensure there are no pending backplane operations */
SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300);
/* if pending backplane ops still, try waiting longer */
if (status != 0) {
/* 300usecs was sufficient to allow backplane ops to clear for big hammer */
/* during driver load we may need more time */
SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 10000);
/* if still pending ops, continue on and try disable anyway */
/* this is in big hammer path, so don't call wl_reinit in this case... */
}
W_REG(sii->osh, &ai->resetctrl, AIRC_RESET);
dummy = R_REG(sii->osh, &ai->resetctrl);
BCM_REFERENCE(dummy);
OSL_DELAY(1);
W_REG(sii->osh, &ai->ioctrl, bits);
dummy = R_REG(sii->osh, &ai->ioctrl);
BCM_REFERENCE(dummy);
OSL_DELAY(10);
}
/* reset and re-enable a core
* inputs:
* bits - core specific bits that are set during and after reset sequence
* resetbits - core specific bits that are set only during reset sequence
*/
void
ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits)
{
si_info_t *sii;
aidmp_t *ai;
volatile uint32 dummy;
sii = SI_INFO(sih);
ASSERT(GOODREGS(sii->curwrap));
ai = sii->curwrap;
/*
* Must do the disable sequence first to work for arbitrary current core state.
*/
ai_core_disable(sih, (bits | resetbits));
/*
* Now do the initialization sequence.
*/
W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
dummy = R_REG(sii->osh, &ai->ioctrl);
BCM_REFERENCE(dummy);
W_REG(sii->osh, &ai->resetctrl, 0);
dummy = R_REG(sii->osh, &ai->resetctrl);
BCM_REFERENCE(dummy);
OSL_DELAY(1);
W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN));
dummy = R_REG(sii->osh, &ai->ioctrl);
BCM_REFERENCE(dummy);
OSL_DELAY(1);
}
void
ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val)
{
si_info_t *sii;
aidmp_t *ai;
uint32 w;
sii = SI_INFO(sih);
if (BCM47162_DMP()) {
SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
__FUNCTION__));
return;
}
if (BCM5357_DMP()) {
SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
__FUNCTION__));
return;
}
if (BCM4707_DMP()) {
SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
__FUNCTION__));
return;
}
ASSERT(GOODREGS(sii->curwrap));
ai = sii->curwrap;
ASSERT((val & ~mask) == 0);
if (mask || val) {
w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
W_REG(sii->osh, &ai->ioctrl, w);
}
}
uint32
ai_core_cflags(si_t *sih, uint32 mask, uint32 val)
{
si_info_t *sii;
aidmp_t *ai;
uint32 w;
sii = SI_INFO(sih);
if (BCM47162_DMP()) {
SI_ERROR(("%s: Accessing MIPS DMP register (ioctrl) on 47162a0",
__FUNCTION__));
return 0;
}
if (BCM5357_DMP()) {
SI_ERROR(("%s: Accessing USB20H DMP register (ioctrl) on 5357\n",
__FUNCTION__));
return 0;
}
if (BCM4707_DMP()) {
SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
__FUNCTION__));
return 0;
}
ASSERT(GOODREGS(sii->curwrap));
ai = sii->curwrap;
ASSERT((val & ~mask) == 0);
if (mask || val) {
w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val);
W_REG(sii->osh, &ai->ioctrl, w);
}
return R_REG(sii->osh, &ai->ioctrl);
}
uint32
ai_core_sflags(si_t *sih, uint32 mask, uint32 val)
{
si_info_t *sii;
aidmp_t *ai;
uint32 w;
sii = SI_INFO(sih);
if (BCM47162_DMP()) {
SI_ERROR(("%s: Accessing MIPS DMP register (iostatus) on 47162a0",
__FUNCTION__));
return 0;
}
if (BCM5357_DMP()) {
SI_ERROR(("%s: Accessing USB20H DMP register (iostatus) on 5357\n",
__FUNCTION__));
return 0;
}
if (BCM4707_DMP()) {
SI_ERROR(("%s: Accessing CHIPCOMMONB DMP register (ioctrl) on 4707\n",
__FUNCTION__));
return 0;
}
ASSERT(GOODREGS(sii->curwrap));
ai = sii->curwrap;
ASSERT((val & ~mask) == 0);
ASSERT((mask & ~SISF_CORE_BITS) == 0);
if (mask || val) {
w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val);
W_REG(sii->osh, &ai->iostatus, w);
}
return R_REG(sii->osh, &ai->iostatus);
}
|
gpl-2.0
|
cahdudul/akh8960_cm
|
security/selinux/netnode.c
|
1532
|
8516
|
/*
* Network node table
*
* SELinux must keep a mapping of network nodes to labels/SIDs. This
* mapping is maintained as part of the normal policy but a fast cache is
* needed to reduce the lookup overhead since most of these queries happen on
* a per-packet basis.
*
* Author: Paul Moore <paul@paul-moore.com>
*
* This code is heavily based on the "netif" concept originally developed by
* James Morris <jmorris@redhat.com>
* (see security/selinux/netif.c for more information)
*
*/
/*
* (c) Copyright Hewlett-Packard Development Company, L.P., 2007
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include "netnode.h"
#include "objsec.h"
#define SEL_NETNODE_HASH_SIZE 256
#define SEL_NETNODE_HASH_BKT_LIMIT 16
struct sel_netnode_bkt {
unsigned int size;
struct list_head list;
};
struct sel_netnode {
struct netnode_security_struct nsec;
struct list_head list;
struct rcu_head rcu;
};
/* NOTE: we are using a combined hash table for both IPv4 and IPv6, the reason
* for this is that I suspect most users will not make heavy use of both
* address families at the same time so one table will usually end up wasted,
* if this becomes a problem we can always add a hash table for each address
* family later */
static LIST_HEAD(sel_netnode_list);
static DEFINE_SPINLOCK(sel_netnode_lock);
static struct sel_netnode_bkt sel_netnode_hash[SEL_NETNODE_HASH_SIZE];
/**
* sel_netnode_hashfn_ipv4 - IPv4 hashing function for the node table
* @addr: IPv4 address
*
* Description:
* This is the IPv4 hashing function for the node interface table, it returns
* the bucket number for the given IP address.
*
*/
static unsigned int sel_netnode_hashfn_ipv4(__be32 addr)
{
/* at some point we should determine if the mismatch in byte order
* affects the hash function dramatically */
return (addr & (SEL_NETNODE_HASH_SIZE - 1));
}
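/*
* Illustrative note (not in the original source): with
* SEL_NETNODE_HASH_SIZE == 256 the mask is 0xff, so the bucket is the low
* byte of the __be32 value as seen by the host. Which address octet that
* corresponds to depends on endianness, which is the byte-order caveat
* mentioned above.
*/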
/**
* sel_netnode_hashfn_ipv6 - IPv6 hashing function for the node table
* @addr: IPv6 address
*
* Description:
* This is the IPv6 hashing function for the node interface table, it returns
* the bucket number for the given IP address.
*
*/
static unsigned int sel_netnode_hashfn_ipv6(const struct in6_addr *addr)
{
/* just hash the least significant 32 bits to keep things fast (they
* are the most likely to be different anyway), we can revisit this
* later if needed */
return (addr->s6_addr32[3] & (SEL_NETNODE_HASH_SIZE - 1));
}
/**
* sel_netnode_find - Search for a node record
* @addr: IP address
* @family: address family
*
* Description:
 * Search the network node table and return the record matching @addr. If an
 * entry cannot be found in the table, return NULL.
*
*/
static struct sel_netnode *sel_netnode_find(const void *addr, u16 family)
{
unsigned int idx;
struct sel_netnode *node;
switch (family) {
case PF_INET:
idx = sel_netnode_hashfn_ipv4(*(__be32 *)addr);
break;
case PF_INET6:
idx = sel_netnode_hashfn_ipv6(addr);
break;
default:
BUG();
return NULL;
}
list_for_each_entry_rcu(node, &sel_netnode_hash[idx].list, list)
if (node->nsec.family == family)
switch (family) {
case PF_INET:
if (node->nsec.addr.ipv4 == *(__be32 *)addr)
return node;
break;
case PF_INET6:
if (ipv6_addr_equal(&node->nsec.addr.ipv6,
addr))
return node;
break;
}
return NULL;
}
/**
* sel_netnode_insert - Insert a new node into the table
* @node: the new node record
*
* Description:
* Add a new node record to the network address hash table.
*
*/
static void sel_netnode_insert(struct sel_netnode *node)
{
unsigned int idx;
switch (node->nsec.family) {
case PF_INET:
idx = sel_netnode_hashfn_ipv4(node->nsec.addr.ipv4);
break;
case PF_INET6:
idx = sel_netnode_hashfn_ipv6(&node->nsec.addr.ipv6);
break;
default:
BUG();
}
/* we need to impose a limit on the growth of the hash table so check
* this bucket to make sure it is within the specified bounds */
list_add_rcu(&node->list, &sel_netnode_hash[idx].list);
if (sel_netnode_hash[idx].size == SEL_NETNODE_HASH_BKT_LIMIT) {
struct sel_netnode *tail;
tail = list_entry(
rcu_dereference_protected(sel_netnode_hash[idx].list.prev,
lockdep_is_held(&sel_netnode_lock)),
struct sel_netnode, list);
list_del_rcu(&tail->list);
kfree_rcu(tail, rcu);
} else
sel_netnode_hash[idx].size++;
}
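/*
* Illustrative note (not in the original source): new nodes are added at
* the head of the bucket and, once a bucket holds
* SEL_NETNODE_HASH_BKT_LIMIT entries, the tail entry (the oldest insert)
* is dropped, so under churn each bucket behaves like a small FIFO cache.
*/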
/**
* sel_netnode_sid_slow - Lookup the SID of a network address using the policy
* @addr: the IP address
* @family: the address family
* @sid: node SID
*
* Description:
 * This function determines the SID of a network address by querying the
 * security policy. The result is added to the network address table to
 * speed up future queries. Returns zero on success, negative values on
 * failure.
*
*/
static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
{
int ret = -ENOMEM;
struct sel_netnode *node;
struct sel_netnode *new = NULL;
spin_lock_bh(&sel_netnode_lock);
node = sel_netnode_find(addr, family);
if (node != NULL) {
*sid = node->nsec.sid;
spin_unlock_bh(&sel_netnode_lock);
return 0;
}
new = kzalloc(sizeof(*new), GFP_ATOMIC);
if (new == NULL)
goto out;
switch (family) {
case PF_INET:
ret = security_node_sid(PF_INET,
addr, sizeof(struct in_addr), sid);
new->nsec.addr.ipv4 = *(__be32 *)addr;
break;
case PF_INET6:
ret = security_node_sid(PF_INET6,
addr, sizeof(struct in6_addr), sid);
new->nsec.addr.ipv6 = *(struct in6_addr *)addr;
break;
default:
BUG();
}
if (ret != 0)
goto out;
new->nsec.family = family;
new->nsec.sid = *sid;
sel_netnode_insert(new);
out:
spin_unlock_bh(&sel_netnode_lock);
if (unlikely(ret)) {
printk(KERN_WARNING
"SELinux: failure in sel_netnode_sid_slow(),"
" unable to determine network node label\n");
kfree(new);
}
return ret;
}
/**
* sel_netnode_sid - Lookup the SID of a network address
* @addr: the IP address
* @family: the address family
* @sid: node SID
*
* Description:
* This function determines the SID of a network address using the fastest
* method possible. First the address table is queried, but if an entry
 * can't be found then the policy is queried and the result is added to the
 * table to speed up future queries. Returns zero on success, negative values
* on failure.
*
*/
int sel_netnode_sid(void *addr, u16 family, u32 *sid)
{
struct sel_netnode *node;
rcu_read_lock();
node = sel_netnode_find(addr, family);
if (node != NULL) {
*sid = node->nsec.sid;
rcu_read_unlock();
return 0;
}
rcu_read_unlock();
return sel_netnode_sid_slow(addr, family, sid);
}
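/*
* Illustrative caller sketch (hypothetical, not in this file): a
* per-packet hook would typically do
*
* u32 sid;
* __be32 daddr = ip_hdr(skb)->daddr;
*
* if (sel_netnode_sid(&daddr, PF_INET, &sid))
* return -EACCES;
*
* hitting the RCU-protected fast path on cache hits and falling back to
* the policy lookup (plus cache insert) only on a miss.
*/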
/**
* sel_netnode_flush - Flush the entire network address table
*
* Description:
* Remove all entries from the network address table.
*
*/
static void sel_netnode_flush(void)
{
unsigned int idx;
struct sel_netnode *node, *node_tmp;
spin_lock_bh(&sel_netnode_lock);
for (idx = 0; idx < SEL_NETNODE_HASH_SIZE; idx++) {
list_for_each_entry_safe(node, node_tmp,
&sel_netnode_hash[idx].list, list) {
list_del_rcu(&node->list);
kfree_rcu(node, rcu);
}
sel_netnode_hash[idx].size = 0;
}
spin_unlock_bh(&sel_netnode_lock);
}
static int sel_netnode_avc_callback(u32 event, u32 ssid, u32 tsid,
u16 class, u32 perms, u32 *retained)
{
if (event == AVC_CALLBACK_RESET) {
sel_netnode_flush();
synchronize_net();
}
return 0;
}
static __init int sel_netnode_init(void)
{
int iter;
int ret;
if (!selinux_enabled)
return 0;
for (iter = 0; iter < SEL_NETNODE_HASH_SIZE; iter++) {
INIT_LIST_HEAD(&sel_netnode_hash[iter].list);
sel_netnode_hash[iter].size = 0;
}
ret = avc_add_callback(sel_netnode_avc_callback, AVC_CALLBACK_RESET,
SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0);
if (ret != 0)
panic("avc_add_callback() failed, error %d\n", ret);
return ret;
}
__initcall(sel_netnode_init);
|
gpl-2.0
|
GAXUSXX/G935FGaXusKernel3
|
drivers/mfd/rdc321x-southbridge.c
|
1532
|
3173
|
/*
* RDC321x MFD southbridge driver
*
* Copyright (C) 2007-2010 Florian Fainelli <florian@openwrt.org>
* Copyright (C) 2010 Bernhard Loos <bernhardloos@googlemail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/mfd/core.h>
#include <linux/mfd/rdc321x.h>
static struct rdc321x_wdt_pdata rdc321x_wdt_pdata;
static struct resource rdc321x_wdt_resource[] = {
{
.name = "wdt-reg",
.start = RDC321X_WDT_CTRL,
.end = RDC321X_WDT_CTRL + 0x3,
.flags = IORESOURCE_IO,
}
};
static struct rdc321x_gpio_pdata rdc321x_gpio_pdata = {
.max_gpios = RDC321X_NUM_GPIO,
};
static struct resource rdc321x_gpio_resources[] = {
{
.name = "gpio-reg1",
.start = RDC321X_GPIO_CTRL_REG1,
.end = RDC321X_GPIO_CTRL_REG1 + 0x7,
.flags = IORESOURCE_IO,
}, {
.name = "gpio-reg2",
.start = RDC321X_GPIO_CTRL_REG2,
.end = RDC321X_GPIO_CTRL_REG2 + 0x7,
.flags = IORESOURCE_IO,
}
};
static const struct mfd_cell rdc321x_sb_cells[] = {
{
.name = "rdc321x-wdt",
.resources = rdc321x_wdt_resource,
.num_resources = ARRAY_SIZE(rdc321x_wdt_resource),
.platform_data = &rdc321x_wdt_pdata,
.pdata_size = sizeof(rdc321x_wdt_pdata),
}, {
.name = "rdc321x-gpio",
.resources = rdc321x_gpio_resources,
.num_resources = ARRAY_SIZE(rdc321x_gpio_resources),
.platform_data = &rdc321x_gpio_pdata,
.pdata_size = sizeof(rdc321x_gpio_pdata),
},
};
static int rdc321x_sb_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int err;
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "failed to enable device\n");
return err;
}
rdc321x_gpio_pdata.sb_pdev = pdev;
rdc321x_wdt_pdata.sb_pdev = pdev;
return mfd_add_devices(&pdev->dev, -1,
rdc321x_sb_cells, ARRAY_SIZE(rdc321x_sb_cells),
NULL, 0, NULL);
}
static void rdc321x_sb_remove(struct pci_dev *pdev)
{
mfd_remove_devices(&pdev->dev);
}
static const struct pci_device_id rdc321x_sb_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_RDC, PCI_DEVICE_ID_RDC_R6030) },
{}
};
MODULE_DEVICE_TABLE(pci, rdc321x_sb_table);
static struct pci_driver rdc321x_sb_driver = {
.name = "RDC321x Southbridge",
.id_table = rdc321x_sb_table,
.probe = rdc321x_sb_probe,
.remove = rdc321x_sb_remove,
};
module_pci_driver(rdc321x_sb_driver);
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RDC R-321x MFD southbridge driver");
|
gpl-2.0
|
Nihhaar/android_kernel_xiaomi_mocha
|
drivers/net/wireless/prism54/isl_ioctl.c
|
2300
|
77316
|
/*
* Copyright (C) 2002 Intersil Americas Inc.
* (C) 2003,2004 Aurelien Alleaume <slts@free.fr>
* (C) 2003 Herbert Valerio Riedel <hvr@gnu.org>
* (C) 2003 Luis R. Rodriguez <mcgrof@ruslug.rutgers.edu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <asm/uaccess.h>
#include "prismcompat.h"
#include "isl_ioctl.h"
#include "islpci_mgt.h"
#include "isl_oid.h" /* additional types and defs for isl38xx fw */
#include "oid_mgt.h"
#include <net/iw_handler.h> /* New driver API */
#define KEY_SIZE_WEP104 13 /* 104/128-bit WEP keys */
#define KEY_SIZE_WEP40 5 /* 40/64-bit WEP keys */
/* KEY_SIZE_TKIP should match isl_oid.h, struct obj_key.key[] size */
#define KEY_SIZE_TKIP 32 /* TKIP keys */
static void prism54_wpa_bss_ie_add(islpci_private *priv, u8 *bssid,
u8 *wpa_ie, size_t wpa_ie_len);
static size_t prism54_wpa_bss_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie);
static int prism54_set_wpa(struct net_device *, struct iw_request_info *,
__u32 *, char *);
/* In 500 kbps */
static const unsigned char scan_rate_list[] = { 2, 4, 11, 22,
12, 18, 24, 36,
48, 72, 96, 108 };
/**
* prism54_mib_mode_helper - MIB change mode helper function
 * @priv: the &struct islpci_private device to modify
* @iw_mode: new mode (%IW_MODE_*)
*
* This is a helper function, hence it does not lock. Make sure
* caller deals with locking *if* necessary. This function sets the
* mode-dependent mib values and does the mapping of the Linux
* Wireless API modes to Device firmware modes. It also checks for
* correct valid Linux wireless modes.
*/
static int
prism54_mib_mode_helper(islpci_private *priv, u32 iw_mode)
{
u32 config = INL_CONFIG_MANUALRUN;
u32 mode, bsstype;
/* For now, just catch early the Repeater and Secondary modes here */
if (iw_mode == IW_MODE_REPEAT || iw_mode == IW_MODE_SECOND) {
printk(KERN_DEBUG
"%s(): Sorry, Repeater mode and Secondary mode "
"are not yet supported by this driver.\n", __func__);
return -EINVAL;
}
priv->iw_mode = iw_mode;
switch (iw_mode) {
case IW_MODE_AUTO:
mode = INL_MODE_CLIENT;
bsstype = DOT11_BSSTYPE_ANY;
break;
case IW_MODE_ADHOC:
mode = INL_MODE_CLIENT;
bsstype = DOT11_BSSTYPE_IBSS;
break;
case IW_MODE_INFRA:
mode = INL_MODE_CLIENT;
bsstype = DOT11_BSSTYPE_INFRA;
break;
case IW_MODE_MASTER:
mode = INL_MODE_AP;
bsstype = DOT11_BSSTYPE_INFRA;
break;
case IW_MODE_MONITOR:
mode = INL_MODE_PROMISCUOUS;
bsstype = DOT11_BSSTYPE_ANY;
config |= INL_CONFIG_RXANNEX;
break;
default:
return -EINVAL;
}
if (init_wds)
config |= INL_CONFIG_WDS;
mgt_set(priv, DOT11_OID_BSSTYPE, &bsstype);
mgt_set(priv, OID_INL_CONFIG, &config);
mgt_set(priv, OID_INL_MODE, &mode);
return 0;
}
/**
 * prism54_mib_init - fill MIB cache with defaults
 * @priv: the &struct islpci_private device whose MIB cache to fill
 *
 * This function initializes the device MIB with defaults, many of
 * which are retrieved from the global module parameter variables.
*/
void
prism54_mib_init(islpci_private *priv)
{
u32 channel, authen, wep, filter, dot1x, mlme, conformance, power, mode;
struct obj_buffer psm_buffer = {
.size = PSM_BUFFER_SIZE,
.addr = priv->device_psm_buffer
};
channel = CARD_DEFAULT_CHANNEL;
authen = CARD_DEFAULT_AUTHEN;
wep = CARD_DEFAULT_WEP;
filter = CARD_DEFAULT_FILTER; /* (0) Do not filter un-encrypted data */
dot1x = CARD_DEFAULT_DOT1X;
mlme = CARD_DEFAULT_MLME_MODE;
conformance = CARD_DEFAULT_CONFORMANCE;
power = 127;
mode = CARD_DEFAULT_IW_MODE;
mgt_set(priv, DOT11_OID_CHANNEL, &channel);
mgt_set(priv, DOT11_OID_AUTHENABLE, &authen);
mgt_set(priv, DOT11_OID_PRIVACYINVOKED, &wep);
mgt_set(priv, DOT11_OID_PSMBUFFER, &psm_buffer);
mgt_set(priv, DOT11_OID_EXUNENCRYPTED, &filter);
mgt_set(priv, DOT11_OID_DOT1XENABLE, &dot1x);
mgt_set(priv, DOT11_OID_MLMEAUTOLEVEL, &mlme);
mgt_set(priv, OID_INL_DOT11D_CONFORMANCE, &conformance);
mgt_set(priv, OID_INL_OUTPUTPOWER, &power);
/* This sets all of the mode-dependent values */
prism54_mib_mode_helper(priv, mode);
}
/* this will be executed outside of atomic context thanks to
 * schedule_work(), thus we can as well use sleeping mutex
 * locking */
void
prism54_update_stats(struct work_struct *work)
{
islpci_private *priv = container_of(work, islpci_private, stats_work);
char *data;
int j;
struct obj_bss bss, *bss2;
union oid_res_t r;
mutex_lock(&priv->stats_lock);
/* Noise floor.
* I'm not sure if the unit is dBm.
* Note : If we are not connected, this value seems to be irrelevant. */
mgt_get_request(priv, DOT11_OID_NOISEFLOOR, 0, NULL, &r);
priv->local_iwstatistics.qual.noise = r.u;
/* Get the rssi of the link. To do this we need to retrieve a bss. */
/* First get the MAC address of the AP we are associated with. */
mgt_get_request(priv, DOT11_OID_BSSID, 0, NULL, &r);
data = r.ptr;
/* copy this MAC to the bss */
memcpy(bss.address, data, 6);
kfree(data);
/* now ask for the corresponding bss */
j = mgt_get_request(priv, DOT11_OID_BSSFIND, 0, (void *) &bss, &r);
bss2 = r.ptr;
/* report the rssi and use it to calculate
* link quality through a signal-noise
* ratio */
priv->local_iwstatistics.qual.level = bss2->rssi;
priv->local_iwstatistics.qual.qual =
bss2->rssi - priv->iwstatistics.qual.noise;
kfree(bss2);
/* report that the stats are new */
priv->local_iwstatistics.qual.updated = 0x7;
/* Rx : unable to decrypt the MPDU */
mgt_get_request(priv, DOT11_OID_PRIVRXFAILED, 0, NULL, &r);
priv->local_iwstatistics.discard.code = r.u;
/* Tx : Max MAC retries num reached */
mgt_get_request(priv, DOT11_OID_MPDUTXFAILED, 0, NULL, &r);
priv->local_iwstatistics.discard.retries = r.u;
mutex_unlock(&priv->stats_lock);
}
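/* Return the cached wireless statistics and, at most once per second,
 * schedule prism54_update_stats() to refresh the cache in the
 * background. If an update is in progress, the old data is returned. */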
struct iw_statistics *
prism54_get_wireless_stats(struct net_device *ndev)
{
islpci_private *priv = netdev_priv(ndev);
/* If the stats are being updated (the trylock fails), return old data */
if (mutex_trylock(&priv->stats_lock)) {
memcpy(&priv->iwstatistics, &priv->local_iwstatistics,
sizeof (struct iw_statistics));
/* They won't be marked updated for the next time */
priv->local_iwstatistics.qual.updated = 0;
mutex_unlock(&priv->stats_lock);
} else
priv->iwstatistics.qual.updated = 0;
/* Update our wireless stats, but do not schedule too often
 * (at most once per second) */
if ((priv->stats_timestamp == 0) ||
time_after(jiffies, priv->stats_timestamp + 1 * HZ)) {
schedule_work(&priv->stats_work);
priv->stats_timestamp = jiffies;
}
return &priv->iwstatistics;
}
static int
prism54_commit(struct net_device *ndev, struct iw_request_info *info,
char *cwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
/* simply re-set the last set SSID, this should commit most stuff */
/* Committing in monitor mode is not necessary; setting the ESSID
 * in monitor mode also makes no sense and isn't allowed by this
 * device's firmware */
if (priv->iw_mode != IW_MODE_MONITOR)
return mgt_set_request(priv, DOT11_OID_SSID, 0, NULL);
return 0;
}
static int
prism54_get_name(struct net_device *ndev, struct iw_request_info *info,
char *cwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
char *capabilities;
union oid_res_t r;
int rvalue;
if (islpci_get_state(priv) < PRV_STATE_INIT) {
strncpy(cwrq, "NOT READY!", IFNAMSIZ);
return 0;
}
rvalue = mgt_get_request(priv, OID_INL_PHYCAPABILITIES, 0, NULL, &r);
switch (r.u) {
case INL_PHYCAP_5000MHZ:
capabilities = "IEEE 802.11a/b/g";
break;
case INL_PHYCAP_FAA:
capabilities = "IEEE 802.11b/g - FAA Support";
break;
case INL_PHYCAP_2400MHZ:
default:
capabilities = "IEEE 802.11b/g"; /* Default */
break;
}
strncpy(cwrq, capabilities, IFNAMSIZ);
return rvalue;
}
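/* Set the operating channel (SIOCSIWFREQ). struct iw_freq encodes either
 * a plain channel number (m < 1000) or a frequency m * 10^e Hz; with
 * e == 1, m / 100000 yields MHz, which channel_of_freq() maps back to a
 * channel before DOT11_OID_CHANNEL is set. */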
static int
prism54_set_freq(struct net_device *ndev, struct iw_request_info *info,
struct iw_freq *fwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
int rvalue;
u32 c;
if (fwrq->m < 1000)
/* we have a channel number */
c = fwrq->m;
else
c = (fwrq->e == 1) ? channel_of_freq(fwrq->m / 100000) : 0;
rvalue = c ? mgt_set_request(priv, DOT11_OID_CHANNEL, 0, &c) : -EINVAL;
/* Call commit handler */
return (rvalue ? rvalue : -EINPROGRESS);
}
static int
prism54_get_freq(struct net_device *ndev, struct iw_request_info *info,
struct iw_freq *fwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
union oid_res_t r;
int rvalue;
rvalue = mgt_get_request(priv, DOT11_OID_CHANNEL, 0, NULL, &r);
fwrq->i = r.u;
rvalue |= mgt_get_request(priv, DOT11_OID_FREQUENCY, 0, NULL, &r);
fwrq->m = r.u;
fwrq->e = 3;
return rvalue;
}
static int
prism54_set_mode(struct net_device *ndev, struct iw_request_info *info,
__u32 * uwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
u32 mlmeautolevel = CARD_DEFAULT_MLME_MODE;
/* Let's see if the user passed a valid Linux Wireless mode */
if (*uwrq > IW_MODE_MONITOR || *uwrq < IW_MODE_AUTO) {
printk(KERN_DEBUG
"%s: %s() You passed a non-valid init_mode.\n",
priv->ndev->name, __func__);
return -EINVAL;
}
down_write(&priv->mib_sem);
if (prism54_mib_mode_helper(priv, *uwrq)) {
up_write(&priv->mib_sem);
return -EOPNOTSUPP;
}
/* the ACL code needs an intermediate mlmeautolevel. The wpa stuff an
* extended one.
*/
if ((*uwrq == IW_MODE_MASTER) && (priv->acl.policy != MAC_POLICY_OPEN))
mlmeautolevel = DOT11_MLME_INTERMEDIATE;
if (priv->wpa)
mlmeautolevel = DOT11_MLME_EXTENDED;
mgt_set(priv, DOT11_OID_MLMEAUTOLEVEL, &mlmeautolevel);
if (mgt_commit(priv)) {
up_write(&priv->mib_sem);
return -EIO;
}
priv->ndev->type = (priv->iw_mode == IW_MODE_MONITOR)
? priv->monitor_type : ARPHRD_ETHER;
up_write(&priv->mib_sem);
return 0;
}
/* Use mib cache */
static int
prism54_get_mode(struct net_device *ndev, struct iw_request_info *info,
__u32 * uwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
BUG_ON((priv->iw_mode < IW_MODE_AUTO) || (priv->iw_mode >
IW_MODE_MONITOR));
*uwrq = priv->iw_mode;
return 0;
}
/* we use DOT11_OID_EDTHRESHOLD. From what I guess the card will not try to
* emit data if (sensitivity > rssi - noise) (in dBm).
* prism54_set_sens does not seem to work.
*/
static int
prism54_set_sens(struct net_device *ndev, struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
u32 sens;
/* by default the card sets this to 20. */
sens = vwrq->disabled ? 20 : vwrq->value;
return mgt_set_request(priv, DOT11_OID_EDTHRESHOLD, 0, &sens);
}
static int
prism54_get_sens(struct net_device *ndev, struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
union oid_res_t r;
int rvalue;
rvalue = mgt_get_request(priv, DOT11_OID_EDTHRESHOLD, 0, NULL, &r);
vwrq->value = r.u;
vwrq->disabled = (vwrq->value == 0);
vwrq->fixed = 1;
return rvalue;
}
static int
prism54_get_range(struct net_device *ndev, struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
struct iw_range *range = (struct iw_range *) extra;
islpci_private *priv = netdev_priv(ndev);
u8 *data;
int i, m, rvalue;
struct obj_frequencies *freq;
union oid_res_t r;
memset(range, 0, sizeof (struct iw_range));
dwrq->length = sizeof (struct iw_range);
/* set the wireless extension version number */
range->we_version_source = SUPPORTED_WIRELESS_EXT;
range->we_version_compiled = WIRELESS_EXT;
/* Now the encoding capabilities */
range->num_encoding_sizes = 3;
/* 64(40) bits WEP */
range->encoding_size[0] = 5;
/* 128(104) bits WEP */
range->encoding_size[1] = 13;
/* 256 bits for WPA-PSK */
range->encoding_size[2] = 32;
/* 4 keys are allowed */
range->max_encoding_tokens = 4;
/* we don't know the quality range... */
range->max_qual.level = 0;
range->max_qual.noise = 0;
range->max_qual.qual = 0;
/* these values describe an average quality. Needs more tweaking... */
range->avg_qual.level = -80; /* -80 dBm */
range->avg_qual.noise = 0; /* don't know what to put here */
range->avg_qual.qual = 0;
range->sensitivity = 200;
/* retry limit capabilities */
range->retry_capa = IW_RETRY_LIMIT | IW_RETRY_LIFETIME;
range->retry_flags = IW_RETRY_LIMIT;
range->r_time_flags = IW_RETRY_LIFETIME;
/* I don't know the range. Put stupid things here */
range->min_retry = 1;
range->max_retry = 65535;
range->min_r_time = 1024;
range->max_r_time = 65535 * 1024;
/* txpower is supported in dBm's */
range->txpower_capa = IW_TXPOW_DBM;
/* Event capability (kernel + driver) */
range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
IW_EVENT_CAPA_MASK(SIOCGIWAP));
range->event_capa[1] = IW_EVENT_CAPA_K_1;
range->event_capa[4] = IW_EVENT_CAPA_MASK(IWEVCUSTOM);
range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
IW_ENC_CAPA_CIPHER_TKIP;
if (islpci_get_state(priv) < PRV_STATE_INIT)
return 0;
/* Request the supported frequencies from the device.
 * Not really reliable, since some devices will report 5 GHz band
 * frequencies even if they don't support them.
 */
rvalue =
mgt_get_request(priv, DOT11_OID_SUPPORTEDFREQUENCIES, 0, NULL, &r);
freq = r.ptr;
range->num_channels = freq->nr;
range->num_frequency = freq->nr;
m = min(IW_MAX_FREQUENCIES, (int) freq->nr);
for (i = 0; i < m; i++) {
range->freq[i].m = freq->mhz[i];
range->freq[i].e = 6;
range->freq[i].i = channel_of_freq(freq->mhz[i]);
}
kfree(freq);
rvalue |= mgt_get_request(priv, DOT11_OID_SUPPORTEDRATES, 0, NULL, &r);
data = r.ptr;
/* We got an array of char. It is NUL-terminated. */
i = 0;
while ((i < IW_MAX_BITRATES) && (*data != 0)) {
/* the result must be in bps. The card gives us 500Kbps */
range->bitrate[i] = *data * 500000;
i++;
data++;
}
range->num_bitrates = i;
kfree(r.ptr);
return rvalue;
}
/* Set AP address */
static int
prism54_set_wap(struct net_device *ndev, struct iw_request_info *info,
struct sockaddr *awrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
char bssid[6];
int rvalue;
if (awrq->sa_family != ARPHRD_ETHER)
return -EINVAL;
/* prepare the structure for the set object */
memcpy(&bssid[0], awrq->sa_data, 6);
/* set the bssid -- does this make sense when in AP mode? */
rvalue = mgt_set_request(priv, DOT11_OID_BSSID, 0, &bssid);
return (rvalue ? rvalue : -EINPROGRESS); /* Call commit handler */
}
/* Get AP address */
static int
prism54_get_wap(struct net_device *ndev, struct iw_request_info *info,
struct sockaddr *awrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
union oid_res_t r;
int rvalue;
rvalue = mgt_get_request(priv, DOT11_OID_BSSID, 0, NULL, &r);
memcpy(awrq->sa_data, r.ptr, 6);
awrq->sa_family = ARPHRD_ETHER;
kfree(r.ptr);
return rvalue;
}
static int
prism54_set_scan(struct net_device *dev, struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
{
/* the device does this automagically */
return 0;
}
/* a little helper that will translate our data into a card independent
* format that the Wireless Tools will understand. This was inspired by
* the "Aironet driver for 4500 and 4800 series cards" (GPL)
*/
static char *
prism54_translate_bss(struct net_device *ndev, struct iw_request_info *info,
char *current_ev, char *end_buf, struct obj_bss *bss,
char noise)
{
struct iw_event iwe; /* Temporary buffer */
short cap;
islpci_private *priv = netdev_priv(ndev);
u8 wpa_ie[MAX_WPA_IE_LEN];
size_t wpa_ie_len;
/* The first entry must be the MAC address */
memcpy(iwe.u.ap_addr.sa_data, bss->address, 6);
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
iwe.cmd = SIOCGIWAP;
current_ev = iwe_stream_add_event(info, current_ev, end_buf,
&iwe, IW_EV_ADDR_LEN);
/* The following entries will be displayed in the same order we give them */
/* The ESSID. */
iwe.u.data.length = bss->ssid.length;
iwe.u.data.flags = 1;
iwe.cmd = SIOCGIWESSID;
current_ev = iwe_stream_add_point(info, current_ev, end_buf,
&iwe, bss->ssid.octets);
/* Capabilities */
#define CAP_ESS 0x01
#define CAP_IBSS 0x02
#define CAP_CRYPT 0x10
/* Mode */
cap = bss->capinfo;
iwe.u.mode = 0;
if (cap & CAP_ESS)
iwe.u.mode = IW_MODE_MASTER;
else if (cap & CAP_IBSS)
iwe.u.mode = IW_MODE_ADHOC;
iwe.cmd = SIOCGIWMODE;
if (iwe.u.mode)
current_ev = iwe_stream_add_event(info, current_ev, end_buf,
&iwe, IW_EV_UINT_LEN);
/* Encryption capability */
if (cap & CAP_CRYPT)
iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
else
iwe.u.data.flags = IW_ENCODE_DISABLED;
iwe.u.data.length = 0;
iwe.cmd = SIOCGIWENCODE;
current_ev = iwe_stream_add_point(info, current_ev, end_buf,
&iwe, NULL);
/* Add frequency. (short) bss->channel is the frequency in MHz */
iwe.u.freq.m = bss->channel;
iwe.u.freq.e = 6;
iwe.cmd = SIOCGIWFREQ;
current_ev = iwe_stream_add_event(info, current_ev, end_buf,
&iwe, IW_EV_FREQ_LEN);
/* Add quality statistics */
iwe.u.qual.level = bss->rssi;
iwe.u.qual.noise = noise;
/* do a simple SNR for quality */
iwe.u.qual.qual = bss->rssi - noise;
iwe.cmd = IWEVQUAL;
current_ev = iwe_stream_add_event(info, current_ev, end_buf,
&iwe, IW_EV_QUAL_LEN);
/* Add WPA/RSN Information Element, if any */
wpa_ie_len = prism54_wpa_bss_ie_get(priv, bss->address, wpa_ie);
if (wpa_ie_len > 0) {
iwe.cmd = IWEVGENIE;
iwe.u.data.length = min(wpa_ie_len, (size_t)MAX_WPA_IE_LEN);
current_ev = iwe_stream_add_point(info, current_ev, end_buf,
&iwe, wpa_ie);
}
/* Do the bitrates */
{
char *current_val = current_ev + iwe_stream_lcp_len(info);
int i;
int mask;
iwe.cmd = SIOCGIWRATE;
/* Those two flags are ignored... */
iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
/* Parse the bitmask */
mask = 0x1;
for(i = 0; i < sizeof(scan_rate_list); i++) {
if(bss->rates & mask) {
iwe.u.bitrate.value = (scan_rate_list[i] * 500000);
current_val = iwe_stream_add_value(
info, current_ev, current_val,
end_buf, &iwe, IW_EV_PARAM_LEN);
}
mask <<= 1;
}
/* Check if we added any event */
if ((current_val - current_ev) > iwe_stream_lcp_len(info))
current_ev = current_val;
}
return current_ev;
}
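/* Report scan results (SIOCGIWSCAN): fetch the device's BSS list and
 * translate each entry into wireless-extension events. Returns -E2BIG
 * so user space retries with a larger buffer when the results don't
 * fit. */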
static int
prism54_get_scan(struct net_device *ndev, struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
int i, rvalue;
struct obj_bsslist *bsslist;
u32 noise = 0;
char *current_ev = extra;
union oid_res_t r;
if (islpci_get_state(priv) < PRV_STATE_INIT) {
/* device is not ready, fail gently */
dwrq->length = 0;
return 0;
}
/* first get the noise value. We will use it to report the link quality */
rvalue = mgt_get_request(priv, DOT11_OID_NOISEFLOOR, 0, NULL, &r);
noise = r.u;
/* Ask the device for a list of known bss.
* The old API, using SIOCGIWAPLIST, had a hard limit of IW_MAX_AP=64.
* The new API, using SIOCGIWSCAN, is only limited by the buffer size.
 * From WE-14 to WE-16, the buffer is limited to IW_SCAN_MAX_DATA bytes.
 * Starting with WE-17, the buffer can be as big as needed.
 * But the device won't report anything if you change the value
 * of IWMAX_BSS=24. */
rvalue |= mgt_get_request(priv, DOT11_OID_BSSLIST, 0, NULL, &r);
bsslist = r.ptr;
/* ok now, scan the list and translate its info */
for (i = 0; i < (int) bsslist->nr; i++) {
current_ev = prism54_translate_bss(ndev, info, current_ev,
extra + dwrq->length,
&(bsslist->bsslist[i]),
noise);
/* Check if there is space for one more entry */
if((extra + dwrq->length - current_ev) <= IW_EV_ADDR_LEN) {
/* Ask user space to try again with a bigger buffer */
rvalue = -E2BIG;
break;
}
}
kfree(bsslist);
dwrq->length = (current_ev - extra);
dwrq->flags = 0; /* todo */
return rvalue;
}
static int
prism54_set_essid(struct net_device *ndev, struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
struct obj_ssid essid;
memset(essid.octets, 0, 33);
/* Check if we were asked for `any' */
if (dwrq->flags && dwrq->length) {
if (dwrq->length > 32)
return -E2BIG;
essid.length = dwrq->length;
memcpy(essid.octets, extra, dwrq->length);
} else
essid.length = 0;
if (priv->iw_mode != IW_MODE_MONITOR)
return mgt_set_request(priv, DOT11_OID_SSID, 0, &essid);
/* If in monitor mode, just save to mib */
mgt_set(priv, DOT11_OID_SSID, &essid);
return 0;
}
static int
prism54_get_essid(struct net_device *ndev, struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
struct obj_ssid *essid;
union oid_res_t r;
int rvalue;
rvalue = mgt_get_request(priv, DOT11_OID_SSID, 0, NULL, &r);
essid = r.ptr;
if (essid->length) {
dwrq->flags = 1; /* set ESSID to ON for Wireless Extensions */
/* if it is too big, truncate it */
dwrq->length = min((u8)IW_ESSID_MAX_SIZE, essid->length);
} else {
dwrq->flags = 0;
dwrq->length = 0;
}
essid->octets[dwrq->length] = '\0';
memcpy(extra, essid->octets, dwrq->length);
kfree(essid);
return rvalue;
}
/* Provides no functionality on the device, it just stores the nickname
 * locally. In essence this is a purely cosmetic ioctl.
 */
static int
prism54_set_nick(struct net_device *ndev, struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
if (dwrq->length > IW_ESSID_MAX_SIZE)
return -E2BIG;
down_write(&priv->mib_sem);
memset(priv->nickname, 0, sizeof (priv->nickname));
memcpy(priv->nickname, extra, dwrq->length);
up_write(&priv->mib_sem);
return 0;
}
static int
prism54_get_nick(struct net_device *ndev, struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
dwrq->length = 0;
down_read(&priv->mib_sem);
dwrq->length = strlen(priv->nickname);
memcpy(extra, priv->nickname, dwrq->length);
up_read(&priv->mib_sem);
return 0;
}
/* Set the allowed Bitrates */
static int
prism54_set_rate(struct net_device *ndev,
struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
u32 rate, profile;
char *data;
int ret, i;
union oid_res_t r;
if (vwrq->value == -1) {
/* auto mode. No limit. */
profile = 1;
return mgt_set_request(priv, DOT11_OID_PROFILES, 0, &profile);
}
ret = mgt_get_request(priv, DOT11_OID_SUPPORTEDRATES, 0, NULL, &r);
if (ret) {
kfree(r.ptr);
return ret;
}
rate = (u32) (vwrq->value / 500000);
data = r.ptr;
i = 0;
while (data[i]) {
if (rate && (data[i] == rate)) {
break;
}
if (vwrq->value == i) {
break;
}
data[i] |= 0x80;
i++;
}
if (!data[i]) {
kfree(r.ptr);
return -EINVAL;
}
data[i] |= 0x80;
data[i + 1] = 0;
/* Now, check if we want a fixed or auto value */
if (vwrq->fixed) {
data[0] = data[i];
data[1] = 0;
}
/*
i = 0;
printk("prism54 rate: ");
while(data[i]) {
printk("%u ", data[i]);
i++;
}
printk("0\n");
*/
profile = -1;
ret = mgt_set_request(priv, DOT11_OID_PROFILES, 0, &profile);
ret |= mgt_set_request(priv, DOT11_OID_EXTENDEDRATES, 0, data);
ret |= mgt_set_request(priv, DOT11_OID_RATES, 0, data);
kfree(r.ptr);
return ret;
}
/* Get the current bit rate */
static int
prism54_get_rate(struct net_device *ndev,
struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
int rvalue;
char *data;
union oid_res_t r;
/* Get the current bit rate */
if ((rvalue = mgt_get_request(priv, GEN_OID_LINKSTATE, 0, NULL, &r)))
return rvalue;
vwrq->value = r.u * 500000;
/* request the device for the enabled rates */
rvalue = mgt_get_request(priv, DOT11_OID_RATES, 0, NULL, &r);
if (rvalue) {
kfree(r.ptr);
return rvalue;
}
data = r.ptr;
vwrq->fixed = (data[0] != 0) && (data[1] == 0);
kfree(r.ptr);
return 0;
}
static int
prism54_set_rts(struct net_device *ndev, struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
return mgt_set_request(priv, DOT11_OID_RTSTHRESH, 0, &vwrq->value);
}
static int
prism54_get_rts(struct net_device *ndev, struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
union oid_res_t r;
int rvalue;
/* get the rts threshold */
rvalue = mgt_get_request(priv, DOT11_OID_RTSTHRESH, 0, NULL, &r);
vwrq->value = r.u;
return rvalue;
}
static int
prism54_set_frag(struct net_device *ndev, struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
return mgt_set_request(priv, DOT11_OID_FRAGTHRESH, 0, &vwrq->value);
}
static int
prism54_get_frag(struct net_device *ndev, struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
union oid_res_t r;
int rvalue;
rvalue = mgt_get_request(priv, DOT11_OID_FRAGTHRESH, 0, NULL, &r);
vwrq->value = r.u;
return rvalue;
}
/* Here we have (min,max) = max retries for (small frames, big frames). Where
* big frame <=> bigger than the rts threshold
* small frame <=> smaller than the rts threshold
* This is not really the behavior expected by the wireless tool but it seems
* to be a common behavior in other drivers.
*/
static int
prism54_set_retry(struct net_device *ndev, struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
u32 slimit = 0, llimit = 0; /* short and long limit */
u32 lifetime = 0;
int rvalue = 0;
if (vwrq->disabled)
/* we cannot disable this feature */
return -EINVAL;
if (vwrq->flags & IW_RETRY_LIMIT) {
if (vwrq->flags & IW_RETRY_SHORT)
slimit = vwrq->value;
else if (vwrq->flags & IW_RETRY_LONG)
llimit = vwrq->value;
else {
/* we are asked to set both */
slimit = vwrq->value;
llimit = vwrq->value;
}
}
if (vwrq->flags & IW_RETRY_LIFETIME)
/* Wireless tools use microsecond units while the device uses units of 1024 us */
lifetime = vwrq->value / 1024;
/* now set what is requested */
if (slimit)
rvalue =
mgt_set_request(priv, DOT11_OID_SHORTRETRIES, 0, &slimit);
if (llimit)
rvalue |=
mgt_set_request(priv, DOT11_OID_LONGRETRIES, 0, &llimit);
if (lifetime)
rvalue |=
mgt_set_request(priv, DOT11_OID_MAXTXLIFETIME, 0,
&lifetime);
return rvalue;
}
static int
prism54_get_retry(struct net_device *ndev, struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
union oid_res_t r;
int rvalue = 0;
vwrq->disabled = 0; /* It cannot be disabled */
if ((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
/* we are asked for the life time */
rvalue =
mgt_get_request(priv, DOT11_OID_MAXTXLIFETIME, 0, NULL, &r);
vwrq->value = r.u * 1024;
vwrq->flags = IW_RETRY_LIFETIME;
} else if ((vwrq->flags & IW_RETRY_LONG)) {
/* we are asked for the long retry limit */
rvalue |=
mgt_get_request(priv, DOT11_OID_LONGRETRIES, 0, NULL, &r);
vwrq->value = r.u;
vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
} else {
/* default. get the short retry limit */
rvalue |=
mgt_get_request(priv, DOT11_OID_SHORTRETRIES, 0, NULL, &r);
vwrq->value = r.u;
vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
}
return rvalue;
}
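/* Legacy key handling (SIOCSIWENCODE). The cipher is inferred from the
 * key length (<= 5 bytes WEP40, <= 13 bytes WEP104, otherwise TKIP) and
 * the IW_ENCODE_* flags drive the authentication, privacy-invoked and
 * exclude-unencrypted OIDs. */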
static int
prism54_set_encode(struct net_device *ndev, struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
int rvalue = 0, force = 0;
int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0;
union oid_res_t r;
/* with the new API, it's impossible to get a NULL pointer.
 * New versions of iwconfig set the IW_ENCODE_NOKEY flag
* when no key is given, but older versions don't. */
if (dwrq->length > 0) {
/* we have a key to set */
int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
int current_index;
struct obj_key key = { DOT11_PRIV_WEP, 0, "" };
/* get the current key index */
rvalue = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r);
current_index = r.u;
/* Verify that the key is not marked as invalid */
if (!(dwrq->flags & IW_ENCODE_NOKEY)) {
if (dwrq->length > KEY_SIZE_TKIP) {
/* User-provided key data too big */
return -EINVAL;
}
if (dwrq->length > KEY_SIZE_WEP104) {
/* WPA-PSK TKIP */
key.type = DOT11_PRIV_TKIP;
key.length = KEY_SIZE_TKIP;
} else if (dwrq->length > KEY_SIZE_WEP40) {
/* WEP 104/128 */
key.length = KEY_SIZE_WEP104;
} else {
/* WEP 40/64 */
key.length = KEY_SIZE_WEP40;
}
memset(key.key, 0, sizeof (key.key));
memcpy(key.key, extra, dwrq->length);
if ((index < 0) || (index > 3))
/* no index provided, use the current one */
index = current_index;
/* now send the key to the card */
rvalue |=
mgt_set_request(priv, DOT11_OID_DEFKEYX, index,
&key);
}
/*
* If a valid key is set, encryption should be enabled
* (user may turn it off later).
* This is also how "iwconfig ethX key on" works
*/
if ((index == current_index) && (key.length > 0))
force = 1;
} else {
int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
if ((index >= 0) && (index <= 3)) {
/* we want to set the key index */
rvalue |=
mgt_set_request(priv, DOT11_OID_DEFKEYID, 0,
&index);
} else {
if (!(dwrq->flags & IW_ENCODE_MODE)) {
/* we cannot do anything. Complain. */
return -EINVAL;
}
}
}
/* now read the flags */
if (dwrq->flags & IW_ENCODE_DISABLED) {
/* Encoding disabled,
* authen = DOT11_AUTH_OS;
* invoke = 0;
* exunencrypt = 0; */
}
if (dwrq->flags & IW_ENCODE_OPEN)
/* Encode but accept non-encoded packets. No auth */
invoke = 1;
if ((dwrq->flags & IW_ENCODE_RESTRICTED) || force) {
/* Refuse non-encoded packets. Auth */
authen = DOT11_AUTH_BOTH;
invoke = 1;
exunencrypt = 1;
}
/* do the change if requested */
if ((dwrq->flags & IW_ENCODE_MODE) || force) {
rvalue |=
mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen);
rvalue |=
mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &invoke);
rvalue |=
mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0,
&exunencrypt);
}
return rvalue;
}
static int
prism54_get_encode(struct net_device *ndev, struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
struct obj_key *key;
u32 devindex, index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
u32 authen = 0, invoke = 0, exunencrypt = 0;
int rvalue;
union oid_res_t r;
/* first get the flags */
rvalue = mgt_get_request(priv, DOT11_OID_AUTHENABLE, 0, NULL, &r);
authen = r.u;
rvalue |= mgt_get_request(priv, DOT11_OID_PRIVACYINVOKED, 0, NULL, &r);
invoke = r.u;
rvalue |= mgt_get_request(priv, DOT11_OID_EXUNENCRYPTED, 0, NULL, &r);
exunencrypt = r.u;
if (invoke && (authen == DOT11_AUTH_BOTH) && exunencrypt)
dwrq->flags = IW_ENCODE_RESTRICTED;
else if ((authen == DOT11_AUTH_OS) && !exunencrypt) {
if (invoke)
dwrq->flags = IW_ENCODE_OPEN;
else
dwrq->flags = IW_ENCODE_DISABLED;
} else
/* The card should not work in this state */
dwrq->flags = 0;
/* get the current device key index */
rvalue |= mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r);
devindex = r.u;
/* Now get the key, return it */
if (index == -1 || index > 3)
/* no index provided, use the current one */
index = devindex;
rvalue |= mgt_get_request(priv, DOT11_OID_DEFKEYX, index, NULL, &r);
key = r.ptr;
dwrq->length = key->length;
memcpy(extra, key->key, dwrq->length);
kfree(key);
/* return the used key index */
dwrq->flags |= devindex + 1;
return rvalue;
}
static int
prism54_get_txpower(struct net_device *ndev, struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
union oid_res_t r;
int rvalue;
rvalue = mgt_get_request(priv, OID_INL_OUTPUTPOWER, 0, NULL, &r);
/* intersil firmware operates in 0.25 dBm (1/4 dBm) */
vwrq->value = (s32) r.u / 4;
vwrq->fixed = 1;
/* radio is not turned off
 * btw: how is it possible to turn off only the radio?
 */
vwrq->disabled = 0;
return rvalue;
}
static int
prism54_set_txpower(struct net_device *ndev, struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
s32 u = vwrq->value;
/* intersil firmware operates in 0.25 dBm (1/4) */
u *= 4;
if (vwrq->disabled) {
/* don't know how to disable radio */
printk(KERN_DEBUG
"%s: %s() disabling radio is not yet supported.\n",
priv->ndev->name, __func__);
return -ENOTSUPP;
} else if (vwrq->fixed)
/* currently only fixed value is supported */
return mgt_set_request(priv, OID_INL_OUTPUTPOWER, 0, &u);
else {
printk(KERN_DEBUG
"%s: %s() auto power will be implemented later.\n",
priv->ndev->name, __func__);
return -ENOTSUPP;
}
}
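/* Attach the supplicant-provided WPA IE to outgoing association and
 * reassociation requests via DOT11_OID_ATTACHMENT. */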
static int prism54_set_genie(struct net_device *ndev,
struct iw_request_info *info,
struct iw_point *data, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
int alen, ret = 0;
struct obj_attachment *attach;
if (data->length > MAX_WPA_IE_LEN ||
(data->length && extra == NULL))
return -EINVAL;
memcpy(priv->wpa_ie, extra, data->length);
priv->wpa_ie_len = data->length;
alen = sizeof(*attach) + priv->wpa_ie_len;
attach = kzalloc(alen, GFP_KERNEL);
if (attach == NULL)
return -ENOMEM;
#define WLAN_FC_TYPE_MGMT 0
#define WLAN_FC_STYPE_ASSOC_REQ 0
#define WLAN_FC_STYPE_REASSOC_REQ 2
/* Note: endianness is covered by mgt_set_varlen */
attach->type = (WLAN_FC_TYPE_MGMT << 2) |
(WLAN_FC_STYPE_ASSOC_REQ << 4);
attach->id = -1;
attach->size = priv->wpa_ie_len;
memcpy(attach->data, extra, priv->wpa_ie_len);
ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach,
priv->wpa_ie_len);
if (ret == 0) {
attach->type = (WLAN_FC_TYPE_MGMT << 2) |
(WLAN_FC_STYPE_REASSOC_REQ << 4);
ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach,
priv->wpa_ie_len);
if (ret == 0)
printk(KERN_DEBUG "%s: WPA IE Attachment was set\n",
ndev->name);
}
kfree(attach);
return ret;
}
static int prism54_get_genie(struct net_device *ndev,
struct iw_request_info *info,
struct iw_point *data, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
int len = priv->wpa_ie_len;
if (len <= 0) {
data->length = 0;
return 0;
}
if (data->length < len)
return -E2BIG;
data->length = len;
memcpy(extra, priv->wpa_ie, len);
return 0;
}
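/* SIOCSIWAUTH handler: map the generic IW_AUTH_* parameters onto the
 * firmware's authentication, privacy, 802.1X-filter and MLME-autolevel
 * OIDs. The current values are read first so a single parameter can be
 * changed without clobbering the others. */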
static int prism54_set_auth(struct net_device *ndev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
struct iw_param *param = &wrqu->param;
u32 mlmelevel = 0, authen = 0, dot1x = 0;
u32 exunencrypt = 0, privinvoked = 0, wpa = 0;
u32 old_wpa;
int ret = 0;
union oid_res_t r;
if (islpci_get_state(priv) < PRV_STATE_INIT)
return 0;
/* first get the flags */
down_write(&priv->mib_sem);
wpa = old_wpa = priv->wpa;
up_write(&priv->mib_sem);
ret = mgt_get_request(priv, DOT11_OID_AUTHENABLE, 0, NULL, &r);
authen = r.u;
ret = mgt_get_request(priv, DOT11_OID_PRIVACYINVOKED, 0, NULL, &r);
privinvoked = r.u;
ret = mgt_get_request(priv, DOT11_OID_EXUNENCRYPTED, 0, NULL, &r);
exunencrypt = r.u;
ret = mgt_get_request(priv, DOT11_OID_DOT1XENABLE, 0, NULL, &r);
dot1x = r.u;
ret = mgt_get_request(priv, DOT11_OID_MLMEAUTOLEVEL, 0, NULL, &r);
mlmelevel = r.u;
if (ret < 0)
goto out;
switch (param->flags & IW_AUTH_INDEX) {
case IW_AUTH_CIPHER_PAIRWISE:
case IW_AUTH_CIPHER_GROUP:
case IW_AUTH_KEY_MGMT:
break;
case IW_AUTH_WPA_ENABLED:
/* Do the same thing as IW_AUTH_WPA_VERSION */
if (param->value) {
wpa = 1;
privinvoked = 1; /* For privacy invoked */
exunencrypt = 1; /* Filter out all unencrypted frames */
dot1x = 0x01; /* To enable eap filter */
mlmelevel = DOT11_MLME_EXTENDED;
authen = DOT11_AUTH_OS; /* Only WEP uses _SK and _BOTH */
} else {
wpa = 0;
privinvoked = 0;
exunencrypt = 0; /* Do not filter un-encrypted data */
dot1x = 0;
mlmelevel = DOT11_MLME_AUTO;
}
break;
case IW_AUTH_WPA_VERSION:
if (param->value & IW_AUTH_WPA_VERSION_DISABLED) {
wpa = 0;
privinvoked = 0;
exunencrypt = 0; /* Do not filter un-encrypted data */
dot1x = 0;
mlmelevel = DOT11_MLME_AUTO;
} else {
if (param->value & IW_AUTH_WPA_VERSION_WPA)
wpa = 1;
else if (param->value & IW_AUTH_WPA_VERSION_WPA2)
wpa = 2;
privinvoked = 1; /* For privacy invoked */
exunencrypt = 1; /* Filter out all unencrypted frames */
dot1x = 0x01; /* To enable eap filter */
mlmelevel = DOT11_MLME_EXTENDED;
authen = DOT11_AUTH_OS; /* Only WEP uses _SK and _BOTH */
}
break;
case IW_AUTH_RX_UNENCRYPTED_EAPOL:
/* dot1x should be the opposite of RX_UNENCRYPTED_EAPOL;
* turn off dot1x when allowing receipt of unencrypted EAPOL
* frames, turn on dot1x when receipt should be disallowed
*/
dot1x = param->value ? 0 : 0x01;
break;
case IW_AUTH_PRIVACY_INVOKED:
privinvoked = param->value ? 1 : 0;
break;
case IW_AUTH_DROP_UNENCRYPTED:
exunencrypt = param->value ? 1 : 0;
break;
case IW_AUTH_80211_AUTH_ALG:
if (param->value & IW_AUTH_ALG_SHARED_KEY) {
/* Only WEP uses _SK and _BOTH */
if (wpa > 0) {
ret = -EINVAL;
goto out;
}
authen = DOT11_AUTH_SK;
} else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) {
authen = DOT11_AUTH_OS;
} else {
ret = -EINVAL;
goto out;
}
break;
default:
return -EOPNOTSUPP;
}
/* Set all the values */
down_write(&priv->mib_sem);
priv->wpa = wpa;
up_write(&priv->mib_sem);
mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen);
mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &privinvoked);
mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0, &exunencrypt);
mgt_set_request(priv, DOT11_OID_DOT1XENABLE, 0, &dot1x);
mgt_set_request(priv, DOT11_OID_MLMEAUTOLEVEL, 0, &mlmelevel);
out:
return ret;
}
static int prism54_get_auth(struct net_device *ndev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
struct iw_param *param = &wrqu->param;
u32 wpa = 0;
int ret = 0;
union oid_res_t r;
if (islpci_get_state(priv) < PRV_STATE_INIT)
return 0;
/* first get the flags */
down_write(&priv->mib_sem);
wpa = priv->wpa;
up_write(&priv->mib_sem);
switch (param->flags & IW_AUTH_INDEX) {
case IW_AUTH_CIPHER_PAIRWISE:
case IW_AUTH_CIPHER_GROUP:
case IW_AUTH_KEY_MGMT:
/*
* wpa_supplicant will control these internally
*/
ret = -EOPNOTSUPP;
break;
case IW_AUTH_WPA_VERSION:
switch (wpa) {
case 1:
param->value = IW_AUTH_WPA_VERSION_WPA;
break;
case 2:
param->value = IW_AUTH_WPA_VERSION_WPA2;
break;
case 0:
default:
param->value = IW_AUTH_WPA_VERSION_DISABLED;
break;
}
break;
case IW_AUTH_DROP_UNENCRYPTED:
ret = mgt_get_request(priv, DOT11_OID_EXUNENCRYPTED, 0, NULL, &r);
if (ret >= 0)
param->value = r.u > 0 ? 1 : 0;
break;
case IW_AUTH_80211_AUTH_ALG:
ret = mgt_get_request(priv, DOT11_OID_AUTHENABLE, 0, NULL, &r);
if (ret >= 0) {
switch (r.u) {
case DOT11_AUTH_OS:
param->value = IW_AUTH_ALG_OPEN_SYSTEM;
break;
case DOT11_AUTH_BOTH:
case DOT11_AUTH_SK:
param->value = IW_AUTH_ALG_SHARED_KEY;
break;
case DOT11_AUTH_NONE:
default:
param->value = 0;
break;
}
}
break;
case IW_AUTH_WPA_ENABLED:
param->value = wpa > 0 ? 1 : 0;
break;
case IW_AUTH_RX_UNENCRYPTED_EAPOL:
ret = mgt_get_request(priv, DOT11_OID_DOT1XENABLE, 0, NULL, &r);
if (ret >= 0)
param->value = r.u > 0 ? 1 : 0;
break;
case IW_AUTH_PRIVACY_INVOKED:
ret = mgt_get_request(priv, DOT11_OID_PRIVACYINVOKED, 0, NULL, &r);
if (ret >= 0)
param->value = r.u > 0 ? 1 : 0;
break;
default:
return -EOPNOTSUPP;
}
return ret;
}
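/* Extended key handling (SIOCSIWENCODEEXT, WE-18). Unlike the legacy
 * SIOCSIWENCODE path the cipher is given explicitly in ext->alg instead
 * of being inferred from the key length. */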
static int prism54_set_encodeext(struct net_device *ndev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
islpci_private *priv = netdev_priv(ndev);
struct iw_point *encoding = &wrqu->encoding;
struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
int idx, alg = ext->alg, set_key = 1;
union oid_res_t r;
int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0;
int ret = 0;
if (islpci_get_state(priv) < PRV_STATE_INIT)
return 0;
/* Determine and validate the key index */
idx = (encoding->flags & IW_ENCODE_INDEX) - 1;
if (idx) {
if (idx < 0 || idx > 3)
return -EINVAL;
} else {
ret = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r);
if (ret < 0)
goto out;
idx = r.u;
}
if (encoding->flags & IW_ENCODE_DISABLED)
alg = IW_ENCODE_ALG_NONE;
if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
/* Only set transmit key index here, actual
* key is set below if needed.
*/
ret = mgt_set_request(priv, DOT11_OID_DEFKEYID, 0, &idx);
set_key = ext->key_len > 0 ? 1 : 0;
}
if (set_key) {
struct obj_key key = { DOT11_PRIV_WEP, 0, "" };
switch (alg) {
case IW_ENCODE_ALG_NONE:
break;
case IW_ENCODE_ALG_WEP:
if (ext->key_len > KEY_SIZE_WEP104) {
ret = -EINVAL;
goto out;
}
if (ext->key_len > KEY_SIZE_WEP40)
key.length = KEY_SIZE_WEP104;
else
key.length = KEY_SIZE_WEP40;
break;
case IW_ENCODE_ALG_TKIP:
if (ext->key_len > KEY_SIZE_TKIP) {
ret = -EINVAL;
goto out;
}
key.type = DOT11_PRIV_TKIP;
key.length = KEY_SIZE_TKIP;
break;
default:
return -EINVAL;
}
if (key.length) {
memset(key.key, 0, sizeof(key.key));
memcpy(key.key, ext->key, ext->key_len);
ret = mgt_set_request(priv, DOT11_OID_DEFKEYX, idx,
&key);
if (ret < 0)
goto out;
}
}
/* Read the flags */
if (encoding->flags & IW_ENCODE_DISABLED) {
/* Encoding disabled,
* authen = DOT11_AUTH_OS;
* invoke = 0;
* exunencrypt = 0; */
}
if (encoding->flags & IW_ENCODE_OPEN) {
/* Encode but accept non-encoded packets. No auth */
invoke = 1;
}
if (encoding->flags & IW_ENCODE_RESTRICTED) {
/* Refuse non-encoded packets. Auth */
authen = DOT11_AUTH_BOTH;
invoke = 1;
exunencrypt = 1;
}
/* do the change if requested */
if (encoding->flags & IW_ENCODE_MODE) {
ret = mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0,
&authen);
ret = mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0,
&invoke);
ret = mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0,
&exunencrypt);
}
out:
return ret;
}
static int prism54_get_encodeext(struct net_device *ndev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
islpci_private *priv = netdev_priv(ndev);
struct iw_point *encoding = &wrqu->encoding;
struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
int idx, max_key_len;
union oid_res_t r;
int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0, wpa = 0;
int ret = 0;
if (islpci_get_state(priv) < PRV_STATE_INIT)
return 0;
/* first get the flags */
ret = mgt_get_request(priv, DOT11_OID_AUTHENABLE, 0, NULL, &r);
authen = r.u;
ret = mgt_get_request(priv, DOT11_OID_PRIVACYINVOKED, 0, NULL, &r);
invoke = r.u;
ret = mgt_get_request(priv, DOT11_OID_EXUNENCRYPTED, 0, NULL, &r);
exunencrypt = r.u;
if (ret < 0)
goto out;
max_key_len = encoding->length - sizeof(*ext);
if (max_key_len < 0)
return -EINVAL;
idx = (encoding->flags & IW_ENCODE_INDEX) - 1;
if (idx) {
if (idx < 0 || idx > 3)
return -EINVAL;
} else {
ret = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r);
if (ret < 0)
goto out;
idx = r.u;
}
encoding->flags = idx + 1;
memset(ext, 0, sizeof(*ext));
switch (authen) {
case DOT11_AUTH_BOTH:
case DOT11_AUTH_SK:
wrqu->encoding.flags |= IW_ENCODE_RESTRICTED;
break; /* don't fall through and also flag IW_ENCODE_OPEN */
case DOT11_AUTH_OS:
default:
wrqu->encoding.flags |= IW_ENCODE_OPEN;
break;
}
down_write(&priv->mib_sem);
wpa = priv->wpa;
up_write(&priv->mib_sem);
if (authen == DOT11_AUTH_OS && !exunencrypt && !invoke && !wpa) {
/* No encryption */
ext->alg = IW_ENCODE_ALG_NONE;
ext->key_len = 0;
wrqu->encoding.flags |= IW_ENCODE_DISABLED;
} else {
struct obj_key *key;
ret = mgt_get_request(priv, DOT11_OID_DEFKEYX, idx, NULL, &r);
if (ret < 0)
goto out;
key = r.ptr;
if (max_key_len < key->length) {
ret = -E2BIG;
goto out;
}
memcpy(ext->key, key->key, key->length);
ext->key_len = key->length;
switch (key->type) {
case DOT11_PRIV_TKIP:
ext->alg = IW_ENCODE_ALG_TKIP;
break;
default:
case DOT11_PRIV_WEP:
ext->alg = IW_ENCODE_ALG_WEP;
break;
}
wrqu->encoding.flags |= IW_ENCODE_ENABLED;
}
out:
return ret;
}
static int
prism54_reset(struct net_device *ndev, struct iw_request_info *info,
__u32 * uwrq, char *extra)
{
islpci_reset(netdev_priv(ndev), 0);
return 0;
}
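/* Private-ioctl backends exposing raw OID access to user space; the OID
 * number is passed in dwrq->flags (get_oid, set_raw) or uwrq[0]
 * (set_u32). */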
static int
prism54_get_oid(struct net_device *ndev, struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
union oid_res_t r;
int rvalue;
enum oid_num_t n = dwrq->flags;
rvalue = mgt_get_request(netdev_priv(ndev), n, 0, NULL, &r);
dwrq->length = mgt_response_to_str(n, &r, extra);
if ((isl_oid[n].flags & OID_FLAG_TYPE) != OID_TYPE_U32)
kfree(r.ptr);
return rvalue;
}
static int
prism54_set_u32(struct net_device *ndev, struct iw_request_info *info,
__u32 * uwrq, char *extra)
{
u32 oid = uwrq[0], u = uwrq[1];
return mgt_set_request(netdev_priv(ndev), oid, 0, &u);
}
static int
prism54_set_raw(struct net_device *ndev, struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
u32 oid = dwrq->flags;
return mgt_set_request(netdev_priv(ndev), oid, 0, extra);
}
void
prism54_acl_init(struct islpci_acl *acl)
{
mutex_init(&acl->lock);
INIT_LIST_HEAD(&acl->mac_list);
acl->size = 0;
acl->policy = MAC_POLICY_OPEN;
}
static void
prism54_clear_mac(struct islpci_acl *acl)
{
struct list_head *ptr, *next;
struct mac_entry *entry;
mutex_lock(&acl->lock);
if (acl->size == 0) {
mutex_unlock(&acl->lock);
return;
}
for (ptr = acl->mac_list.next, next = ptr->next;
ptr != &acl->mac_list; ptr = next, next = ptr->next) {
entry = list_entry(ptr, struct mac_entry, _list);
list_del(ptr);
kfree(entry);
}
acl->size = 0;
mutex_unlock(&acl->lock);
}
void
prism54_acl_clean(struct islpci_acl *acl)
{
prism54_clear_mac(acl);
}
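/* ACL list management. The MAC list built here is consulted by
 * prism54_mac_accept() when the firmware asks the driver to accept or
 * reject an authenticating client (see handle_request()). */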
static int
prism54_add_mac(struct net_device *ndev, struct iw_request_info *info,
struct sockaddr *awrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
struct islpci_acl *acl = &priv->acl;
struct mac_entry *entry;
struct sockaddr *addr = (struct sockaddr *) extra;
if (addr->sa_family != ARPHRD_ETHER)
return -EOPNOTSUPP;
entry = kmalloc(sizeof (struct mac_entry), GFP_KERNEL);
if (entry == NULL)
return -ENOMEM;
memcpy(entry->addr, addr->sa_data, ETH_ALEN);
if (mutex_lock_interruptible(&acl->lock)) {
kfree(entry);
return -ERESTARTSYS;
}
list_add_tail(&entry->_list, &acl->mac_list);
acl->size++;
mutex_unlock(&acl->lock);
return 0;
}
static int
prism54_del_mac(struct net_device *ndev, struct iw_request_info *info,
struct sockaddr *awrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
struct islpci_acl *acl = &priv->acl;
struct mac_entry *entry;
struct sockaddr *addr = (struct sockaddr *) extra;
if (addr->sa_family != ARPHRD_ETHER)
return -EOPNOTSUPP;
if (mutex_lock_interruptible(&acl->lock))
return -ERESTARTSYS;
list_for_each_entry(entry, &acl->mac_list, _list) {
if (memcmp(entry->addr, addr->sa_data, ETH_ALEN) == 0) {
list_del(&entry->_list);
acl->size--;
kfree(entry);
mutex_unlock(&acl->lock);
return 0;
}
}
mutex_unlock(&acl->lock);
return -EINVAL;
}
static int
prism54_get_mac(struct net_device *ndev, struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
struct islpci_acl *acl = &priv->acl;
struct mac_entry *entry;
struct sockaddr *dst = (struct sockaddr *) extra;
dwrq->length = 0;
if (mutex_lock_interruptible(&acl->lock))
return -ERESTARTSYS;
list_for_each_entry(entry, &acl->mac_list, _list) {
memcpy(dst->sa_data, entry->addr, ETH_ALEN);
dst->sa_family = ARPHRD_ETHER;
dwrq->length++;
dst++;
}
mutex_unlock(&acl->lock);
return 0;
}
/* Setting policy also clears the MAC acl, even if we don't change the default
* policy
*/
static int
prism54_set_policy(struct net_device *ndev, struct iw_request_info *info,
__u32 * uwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
struct islpci_acl *acl = &priv->acl;
u32 mlmeautolevel;
prism54_clear_mac(acl);
if ((*uwrq < MAC_POLICY_OPEN) || (*uwrq > MAC_POLICY_REJECT))
return -EINVAL;
down_write(&priv->mib_sem);
acl->policy = *uwrq;
/* the ACL code needs an intermediate mlmeautolevel */
if ((priv->iw_mode == IW_MODE_MASTER) &&
(acl->policy != MAC_POLICY_OPEN))
mlmeautolevel = DOT11_MLME_INTERMEDIATE;
else
mlmeautolevel = CARD_DEFAULT_MLME_MODE;
if (priv->wpa)
mlmeautolevel = DOT11_MLME_EXTENDED;
mgt_set(priv, DOT11_OID_MLMEAUTOLEVEL, &mlmeautolevel);
/* restart the card with our new policy */
if (mgt_commit(priv)) {
up_write(&priv->mib_sem);
return -EIO;
}
up_write(&priv->mib_sem);
return 0;
}
static int
prism54_get_policy(struct net_device *ndev, struct iw_request_info *info,
__u32 * uwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
struct islpci_acl *acl = &priv->acl;
*uwrq = acl->policy;
return 0;
}
/* Return 1 only if the client should be accepted; may also return
 * -ERESTARTSYS if interrupted while waiting for the ACL lock. */
static int
prism54_mac_accept(struct islpci_acl *acl, char *mac)
{
struct mac_entry *entry;
int res = 0;
if (mutex_lock_interruptible(&acl->lock))
return -ERESTARTSYS;
if (acl->policy == MAC_POLICY_OPEN) {
mutex_unlock(&acl->lock);
return 1;
}
list_for_each_entry(entry, &acl->mac_list, _list) {
if (memcmp(entry->addr, mac, ETH_ALEN) == 0) {
res = 1;
break;
}
}
res = (acl->policy == MAC_POLICY_ACCEPT) ? !res : res;
mutex_unlock(&acl->lock);
return res;
}
static int
prism54_kick_all(struct net_device *ndev, struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
struct obj_mlme *mlme;
int rvalue;
mlme = kmalloc(sizeof (struct obj_mlme), GFP_KERNEL);
if (mlme == NULL)
return -ENOMEM;
/* Tell the card to kick every client */
mlme->id = 0;
rvalue =
mgt_set_request(netdev_priv(ndev), DOT11_OID_DISASSOCIATE, 0, mlme);
kfree(mlme);
return rvalue;
}
static int
prism54_kick_mac(struct net_device *ndev, struct iw_request_info *info,
struct sockaddr *awrq, char *extra)
{
struct obj_mlme *mlme;
struct sockaddr *addr = (struct sockaddr *) extra;
int rvalue;
if (addr->sa_family != ARPHRD_ETHER)
return -EOPNOTSUPP;
mlme = kmalloc(sizeof (struct obj_mlme), GFP_KERNEL);
if (mlme == NULL)
return -ENOMEM;
/* Tell the card to kick only the matching client */
memcpy(mlme->address, addr->sa_data, ETH_ALEN);
mlme->id = -1;
rvalue =
mgt_set_request(netdev_priv(ndev), DOT11_OID_DISASSOCIATE, 0, mlme);
kfree(mlme);
return rvalue;
}
/* Translate a TRAP oid into a wireless event. Called in islpci_mgt_receive. */
static void
format_event(islpci_private *priv, char *dest, const char *str,
const struct obj_mlme *mlme, u16 *length, int error)
{
int n = snprintf(dest, IW_CUSTOM_MAX,
"%s %s %pM %s (%2.2X)",
str,
((priv->iw_mode == IW_MODE_MASTER) ? "from" : "to"),
mlme->address,
(error ? (mlme->code ? " : REJECTED " : " : ACCEPTED ")
: ""), mlme->code);
BUG_ON(n > IW_CUSTOM_MAX);
*length = n;
}
static void
send_formatted_event(islpci_private *priv, const char *str,
const struct obj_mlme *mlme, int error)
{
union iwreq_data wrqu;
char *memptr;
memptr = kmalloc(IW_CUSTOM_MAX, GFP_KERNEL);
if (!memptr)
return;
wrqu.data.pointer = memptr;
wrqu.data.length = 0;
format_event(priv, memptr, str, mlme, &wrqu.data.length,
error);
wireless_send_event(priv->ndev, IWEVCUSTOM, &wrqu, memptr);
kfree(memptr);
}
static void
send_simple_event(islpci_private *priv, const char *str)
{
union iwreq_data wrqu;
char *memptr;
int n = strlen(str);
memptr = kmalloc(IW_CUSTOM_MAX, GFP_KERNEL);
if (!memptr)
return;
BUG_ON(n >= IW_CUSTOM_MAX);
wrqu.data.pointer = memptr;
wrqu.data.length = n;
strcpy(memptr, str);
wireless_send_event(priv->ndev, IWEVCUSTOM, &wrqu, memptr);
kfree(memptr);
}
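/* Toggle the carrier state and emit the matching wireless event: a
 * SIOCGIWAP event carrying the current BSSID in infrastructure mode, a
 * custom "Link established"/"Link lost" string otherwise. */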
static void
link_changed(struct net_device *ndev, u32 bitrate)
{
islpci_private *priv = netdev_priv(ndev);
if (bitrate) {
netif_carrier_on(ndev);
if (priv->iw_mode == IW_MODE_INFRA) {
union iwreq_data uwrq;
prism54_get_wap(ndev, NULL, (struct sockaddr *) &uwrq,
NULL);
wireless_send_event(ndev, SIOCGIWAP, &uwrq, NULL);
} else
send_simple_event(netdev_priv(ndev),
"Link established");
} else {
netif_carrier_off(ndev);
send_simple_event(netdev_priv(ndev), "Link lost");
}
}
/* Beacon/ProbeResp payload header */
struct ieee80211_beacon_phdr {
u8 timestamp[8];
u16 beacon_int;
u16 capab_info;
} __packed;
#define WLAN_EID_GENERIC 0xdd
static u8 wpa_oid[4] = { 0x00, 0x50, 0xf2, 1 };
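/* Cache of WPA IEs seen in beacons/probe responses, keyed by BSSID and
 * kept in most-recently-updated order. The cache is bounded to
 * MAX_BSS_WPA_IE_COUNT entries and entries idle for more than 60
 * seconds are expired. */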
static void
prism54_wpa_bss_ie_add(islpci_private *priv, u8 *bssid,
u8 *wpa_ie, size_t wpa_ie_len)
{
struct list_head *ptr;
struct islpci_bss_wpa_ie *bss = NULL;
if (wpa_ie_len > MAX_WPA_IE_LEN)
wpa_ie_len = MAX_WPA_IE_LEN;
mutex_lock(&priv->wpa_lock);
/* try to use existing entry */
list_for_each(ptr, &priv->bss_wpa_list) {
bss = list_entry(ptr, struct islpci_bss_wpa_ie, list);
if (memcmp(bss->bssid, bssid, ETH_ALEN) == 0) {
list_move(&bss->list, &priv->bss_wpa_list);
break;
}
bss = NULL;
}
if (bss == NULL) {
/* add a new BSS entry; if max number of entries is already
* reached, replace the least recently updated */
if (priv->num_bss_wpa >= MAX_BSS_WPA_IE_COUNT) {
bss = list_entry(priv->bss_wpa_list.prev,
struct islpci_bss_wpa_ie, list);
list_del(&bss->list);
} else {
bss = kzalloc(sizeof (*bss), GFP_ATOMIC);
if (bss != NULL)
priv->num_bss_wpa++;
}
if (bss != NULL) {
memcpy(bss->bssid, bssid, ETH_ALEN);
list_add(&bss->list, &priv->bss_wpa_list);
}
}
if (bss != NULL) {
memcpy(bss->wpa_ie, wpa_ie, wpa_ie_len);
bss->wpa_ie_len = wpa_ie_len;
bss->last_update = jiffies;
} else {
printk(KERN_DEBUG "Failed to add BSS WPA entry for "
"%pM\n", bssid);
}
/* expire old entries from WPA list */
while (priv->num_bss_wpa > 0) {
bss = list_entry(priv->bss_wpa_list.prev,
struct islpci_bss_wpa_ie, list);
if (!time_after(jiffies, bss->last_update + 60 * HZ))
break;
list_del(&bss->list);
priv->num_bss_wpa--;
kfree(bss);
}
mutex_unlock(&priv->wpa_lock);
}
static size_t
prism54_wpa_bss_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie)
{
struct list_head *ptr;
struct islpci_bss_wpa_ie *bss = NULL;
size_t len = 0;
mutex_lock(&priv->wpa_lock);
list_for_each(ptr, &priv->bss_wpa_list) {
bss = list_entry(ptr, struct islpci_bss_wpa_ie, list);
if (memcmp(bss->bssid, bssid, ETH_ALEN) == 0)
break;
bss = NULL;
}
if (bss) {
len = bss->wpa_ie_len;
memcpy(wpa_ie, bss->wpa_ie, len);
}
mutex_unlock(&priv->wpa_lock);
return len;
}
void
prism54_wpa_bss_ie_init(islpci_private *priv)
{
INIT_LIST_HEAD(&priv->bss_wpa_list);
mutex_init(&priv->wpa_lock);
}
void
prism54_wpa_bss_ie_clean(islpci_private *priv)
{
struct islpci_bss_wpa_ie *bss, *n;
list_for_each_entry_safe(bss, n, &priv->bss_wpa_list, list) {
kfree(bss);
}
}
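/* Walk the information elements of a beacon/probe-response payload and
 * stash any WPA IE (vendor-specific element with the 00:50:f2:01 OUI)
 * for later lookup by prism54_translate_bss() and the (re)association
 * confirm paths. */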
static void
prism54_process_bss_data(islpci_private *priv, u32 oid, u8 *addr,
u8 *payload, size_t len)
{
struct ieee80211_beacon_phdr *hdr;
u8 *pos, *end;
if (!priv->wpa)
return;
hdr = (struct ieee80211_beacon_phdr *) payload;
pos = (u8 *) (hdr + 1);
end = payload + len;
while (pos < end) {
if (pos + 2 + pos[1] > end) {
printk(KERN_DEBUG "Parsing Beacon/ProbeResp failed "
"for %pM\n", addr);
return;
}
if (pos[0] == WLAN_EID_GENERIC && pos[1] >= 4 &&
memcmp(pos + 2, wpa_oid, 4) == 0) {
prism54_wpa_bss_ie_add(priv, addr, pos, pos[1] + 2);
return;
}
pos += 2 + pos[1];
}
}
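/* Answer an AUTHING/ASSOCING trap when mgt_mlme_answer() indicates the
 * driver must respond: echo the mlme object back with the code field
 * set from the ACL check. */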
static void
handle_request(islpci_private *priv, struct obj_mlme *mlme, enum oid_num_t oid)
{
if (((mlme->state == DOT11_STATE_AUTHING) ||
(mlme->state == DOT11_STATE_ASSOCING))
&& mgt_mlme_answer(priv)) {
/* Someone is requesting auth and we must respond. Just send back
* the trap with error code set accordingly.
*/
mlme->code = prism54_mac_accept(&priv->acl,
mlme->address) ? 0 : 1;
mgt_set_request(priv, oid, 0, mlme);
}
}
static int
prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid,
char *data)
{
struct obj_mlme *mlme = (struct obj_mlme *) data;
struct obj_mlmeex *mlmeex = (struct obj_mlmeex *) data;
struct obj_mlmeex *confirm;
u8 wpa_ie[MAX_WPA_IE_LEN];
int wpa_ie_len;
size_t len = 0; /* u16, better? */
u8 *payload = NULL, *pos = NULL;
int ret;
/* I think all trappable objects are listed here.
 * Some oids have an EX version. The difference is that they are emitted
* in DOT11_MLME_EXTENDED mode (set with DOT11_OID_MLMEAUTOLEVEL)
* with more info.
* The few events already defined by the wireless tools are not really
* suited. We use the more flexible custom event facility.
*/
if (oid >= DOT11_OID_BEACON) {
len = mlmeex->size;
payload = pos = mlmeex->data;
}
/* I fear prism54_process_bss_data won't work with big endian data */
if ((oid == DOT11_OID_BEACON) || (oid == DOT11_OID_PROBE))
prism54_process_bss_data(priv, oid, mlmeex->address,
payload, len);
mgt_le_to_cpu(isl_oid[oid].flags & OID_FLAG_TYPE, (void *) mlme);
switch (oid) {
case GEN_OID_LINKSTATE:
link_changed(priv->ndev, (u32) *data);
break;
case DOT11_OID_MICFAILURE:
send_simple_event(priv, "Mic failure");
break;
case DOT11_OID_DEAUTHENTICATE:
send_formatted_event(priv, "DeAuthenticate request", mlme, 0);
break;
case DOT11_OID_AUTHENTICATE:
handle_request(priv, mlme, oid);
send_formatted_event(priv, "Authenticate request", mlme, 1);
break;
case DOT11_OID_DISASSOCIATE:
send_formatted_event(priv, "Disassociate request", mlme, 0);
break;
case DOT11_OID_ASSOCIATE:
handle_request(priv, mlme, oid);
send_formatted_event(priv, "Associate request", mlme, 1);
break;
case DOT11_OID_REASSOCIATE:
handle_request(priv, mlme, oid);
send_formatted_event(priv, "ReAssociate request", mlme, 1);
break;
case DOT11_OID_BEACON:
send_formatted_event(priv,
"Received a beacon from an unknown AP",
mlme, 0);
break;
case DOT11_OID_PROBE:
/* we received a probe from a client. */
send_formatted_event(priv, "Received a probe from client", mlme,
0);
break;
/* Note : "mlme" is actually a "struct obj_mlmeex *" here, but this
* is backward compatible layout-wise with "struct obj_mlme".
*/
case DOT11_OID_DEAUTHENTICATEEX:
send_formatted_event(priv, "DeAuthenticate request", mlme, 0);
break;
case DOT11_OID_AUTHENTICATEEX:
handle_request(priv, mlme, oid);
send_formatted_event(priv, "Authenticate request (ex)", mlme, 1);
if (priv->iw_mode != IW_MODE_MASTER
&& mlmeex->state != DOT11_STATE_AUTHING)
break;
confirm = kmalloc(sizeof(struct obj_mlmeex) + 6, GFP_ATOMIC);
if (!confirm)
break;
memcpy(&confirm->address, mlmeex->address, ETH_ALEN);
printk(KERN_DEBUG "Authenticate from: address:\t%pM\n",
mlmeex->address);
confirm->id = -1; /* or mlmeex->id ? */
confirm->state = 0; /* not used */
confirm->code = 0;
confirm->size = 6;
confirm->data[0] = 0x00;
confirm->data[1] = 0x00;
confirm->data[2] = 0x02;
confirm->data[3] = 0x00;
confirm->data[4] = 0x00;
confirm->data[5] = 0x00;
ret = mgt_set_varlen(priv, DOT11_OID_ASSOCIATEEX, confirm, 6);
kfree(confirm);
if (ret)
return ret;
break;
case DOT11_OID_DISASSOCIATEEX:
send_formatted_event(priv, "Disassociate request (ex)", mlme, 0);
break;
case DOT11_OID_ASSOCIATEEX:
handle_request(priv, mlme, oid);
send_formatted_event(priv, "Associate request (ex)", mlme, 1);
if (priv->iw_mode != IW_MODE_MASTER
&& mlmeex->state != DOT11_STATE_ASSOCING)
break;
confirm = kmalloc(sizeof(struct obj_mlmeex), GFP_ATOMIC);
if (!confirm)
break;
memcpy(&confirm->address, mlmeex->address, ETH_ALEN);
confirm->id = ((struct obj_mlmeex *)mlme)->id;
confirm->state = 0; /* not used */
confirm->code = 0;
wpa_ie_len = prism54_wpa_bss_ie_get(priv, mlmeex->address, wpa_ie);
if (!wpa_ie_len) {
printk(KERN_DEBUG "No WPA IE found from address:\t%pM\n",
mlmeex->address);
kfree(confirm);
break;
}
confirm->size = wpa_ie_len;
memcpy(&confirm->data, wpa_ie, wpa_ie_len);
mgt_set_varlen(priv, oid, confirm, wpa_ie_len);
kfree(confirm);
break;
case DOT11_OID_REASSOCIATEEX:
handle_request(priv, mlme, oid);
send_formatted_event(priv, "Reassociate request (ex)", mlme, 1);
if (priv->iw_mode != IW_MODE_MASTER
&& mlmeex->state != DOT11_STATE_ASSOCING)
break;
confirm = kmalloc(sizeof(struct obj_mlmeex), GFP_ATOMIC);
if (!confirm)
break;
memcpy(&confirm->address, mlmeex->address, ETH_ALEN);
confirm->id = mlmeex->id;
confirm->state = 0; /* not used */
confirm->code = 0;
wpa_ie_len = prism54_wpa_bss_ie_get(priv, mlmeex->address, wpa_ie);
if (!wpa_ie_len) {
printk(KERN_DEBUG "No WPA IE found from address:\t%pM\n",
mlmeex->address);
kfree(confirm);
break;
}
confirm->size = wpa_ie_len;
memcpy(&confirm->data, wpa_ie, wpa_ie_len);
mgt_set_varlen(priv, oid, confirm, wpa_ie_len);
kfree(confirm);
break;
default:
return -EINVAL;
}
return 0;
}
/*
* Process a device trap. This is called via schedule_work(), outside of
* interrupt context, no locks held.
*/
void
prism54_process_trap(struct work_struct *work)
{
struct islpci_mgmtframe *frame =
container_of(work, struct islpci_mgmtframe, ws);
struct net_device *ndev = frame->ndev;
enum oid_num_t n = mgt_oidtonum(frame->header->oid);
if (n != OID_NUM_LAST)
prism54_process_trap_helper(netdev_priv(ndev), n, frame->data);
islpci_mgt_release(frame);
}
int
prism54_set_mac_address(struct net_device *ndev, void *addr)
{
islpci_private *priv = netdev_priv(ndev);
int ret;
if (ndev->addr_len != ETH_ALEN)
return -EINVAL;
ret = mgt_set_request(priv, GEN_OID_MACADDRESS, 0,
&((struct sockaddr *) addr)->sa_data);
if (!ret)
memcpy(priv->ndev->dev_addr,
&((struct sockaddr *) addr)->sa_data, ETH_ALEN);
return ret;
}
#define PRISM54_SET_WPA SIOCIWFIRSTPRIV+12
static int
prism54_set_wpa(struct net_device *ndev, struct iw_request_info *info,
__u32 * uwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
u32 mlme, authen, dot1x, filter, wep;
if (islpci_get_state(priv) < PRV_STATE_INIT)
return 0;
wep = 1; /* For privacy invoked */
filter = 1; /* Filter out all unencrypted frames */
dot1x = 0x01; /* To enable eap filter */
mlme = DOT11_MLME_EXTENDED;
authen = DOT11_AUTH_OS; /* Only WEP uses _SK and _BOTH */
down_write(&priv->mib_sem);
priv->wpa = *uwrq;
switch (priv->wpa) {
default:
case 0: /* Clears/disables WPA and friends */
wep = 0;
filter = 0; /* Do not filter un-encrypted data */
dot1x = 0;
mlme = DOT11_MLME_AUTO;
printk("%s: Disabling WPA\n", ndev->name);
break;
case 2:
case 1: /* WPA */
printk("%s: Enabling WPA\n", ndev->name);
break;
}
up_write(&priv->mib_sem);
mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen);
mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &wep);
mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0, &filter);
mgt_set_request(priv, DOT11_OID_DOT1XENABLE, 0, &dot1x);
mgt_set_request(priv, DOT11_OID_MLMEAUTOLEVEL, 0, &mlme);
return 0;
}
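/*
* Illustrative mapping of the private set_wpa argument handled
* above: "iwpriv eth0 set_wpa 0" drops back to open, unfiltered
* operation, while 1 or 2 invokes privacy, unencrypted-frame
* filtering, dot1x and the extended MLME level needed for WPA.
*/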
static int
prism54_get_wpa(struct net_device *ndev, struct iw_request_info *info,
__u32 * uwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
*uwrq = priv->wpa;
return 0;
}
static int
prism54_set_prismhdr(struct net_device *ndev, struct iw_request_info *info,
__u32 * uwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
priv->monitor_type =
(*uwrq ? ARPHRD_IEEE80211_PRISM : ARPHRD_IEEE80211);
if (priv->iw_mode == IW_MODE_MONITOR)
priv->ndev->type = priv->monitor_type;
return 0;
}
static int
prism54_get_prismhdr(struct net_device *ndev, struct iw_request_info *info,
__u32 * uwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
*uwrq = (priv->monitor_type == ARPHRD_IEEE80211_PRISM);
return 0;
}
static int
prism54_debug_oid(struct net_device *ndev, struct iw_request_info *info,
__u32 * uwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
priv->priv_oid = *uwrq;
printk("%s: oid 0x%08X\n", ndev->name, *uwrq);
return 0;
}
static int
prism54_debug_get_oid(struct net_device *ndev, struct iw_request_info *info,
struct iw_point *data, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
struct islpci_mgmtframe *response;
int ret = -EIO;
printk("%s: get_oid 0x%08X\n", ndev->name, priv->priv_oid);
data->length = 0;
if (islpci_get_state(priv) >= PRV_STATE_INIT) {
ret =
islpci_mgt_transaction(priv->ndev, PIMFOR_OP_GET,
priv->priv_oid, extra, 256,
&response);
printk("%s: ret: %i\n", ndev->name, ret);
if (ret || !response
|| response->header->operation == PIMFOR_OP_ERROR) {
if (response) {
islpci_mgt_release(response);
}
printk("%s: EIO\n", ndev->name);
ret = -EIO;
}
if (!ret) {
data->length = response->header->length;
memcpy(extra, response->data, data->length);
islpci_mgt_release(response);
printk("%s: len: %i\n", ndev->name, data->length);
}
}
return ret;
}
static int
prism54_debug_set_oid(struct net_device *ndev, struct iw_request_info *info,
struct iw_point *data, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
struct islpci_mgmtframe *response;
int ret = 0, response_op = PIMFOR_OP_ERROR;
printk("%s: set_oid 0x%08X\tlen: %d\n", ndev->name, priv->priv_oid,
data->length);
if (islpci_get_state(priv) >= PRV_STATE_INIT) {
ret =
islpci_mgt_transaction(priv->ndev, PIMFOR_OP_SET,
priv->priv_oid, extra, data->length,
&response);
printk("%s: ret: %i\n", ndev->name, ret);
if (ret || !response
|| response->header->operation == PIMFOR_OP_ERROR) {
if (response) {
islpci_mgt_release(response);
}
printk("%s: EIO\n", ndev->name);
ret = -EIO;
}
if (!ret) {
response_op = response->header->operation;
printk("%s: response_op: %i\n", ndev->name,
response_op);
islpci_mgt_release(response);
}
}
return (ret ? ret : -EINPROGRESS);
}
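/*
* Illustrative use of the three debug hooks above (command values
* are hypothetical, not taken from the driver): select the raw OID
* with dbg_oid, then read or write its payload from userspace:
*
* iwpriv eth0 dbg_oid 0x10000000
* iwpriv eth0 dbg_get_oid
*
* Note from the return statement above that a successful
* dbg_set_oid reports -EINPROGRESS rather than 0.
*/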
static int
prism54_set_spy(struct net_device *ndev,
struct iw_request_info *info,
union iwreq_data *uwrq, char *extra)
{
islpci_private *priv = netdev_priv(ndev);
u32 u;
enum oid_num_t oid = OID_INL_CONFIG;
down_write(&priv->mib_sem);
mgt_get(priv, OID_INL_CONFIG, &u);
if ((uwrq->data.length == 0) && (priv->spy_data.spy_number > 0))
/* disable spy */
u &= ~INL_CONFIG_RXANNEX;
else if ((uwrq->data.length > 0) && (priv->spy_data.spy_number == 0))
/* enable spy */
u |= INL_CONFIG_RXANNEX;
mgt_set(priv, OID_INL_CONFIG, &u);
mgt_commit_list(priv, &oid, 1);
up_write(&priv->mib_sem);
return iw_handler_set_spy(ndev, info, uwrq, extra);
}
static const iw_handler prism54_handler[] = {
(iw_handler) prism54_commit, /* SIOCSIWCOMMIT */
(iw_handler) prism54_get_name, /* SIOCGIWNAME */
(iw_handler) NULL, /* SIOCSIWNWID */
(iw_handler) NULL, /* SIOCGIWNWID */
(iw_handler) prism54_set_freq, /* SIOCSIWFREQ */
(iw_handler) prism54_get_freq, /* SIOCGIWFREQ */
(iw_handler) prism54_set_mode, /* SIOCSIWMODE */
(iw_handler) prism54_get_mode, /* SIOCGIWMODE */
(iw_handler) prism54_set_sens, /* SIOCSIWSENS */
(iw_handler) prism54_get_sens, /* SIOCGIWSENS */
(iw_handler) NULL, /* SIOCSIWRANGE */
(iw_handler) prism54_get_range, /* SIOCGIWRANGE */
(iw_handler) NULL, /* SIOCSIWPRIV */
(iw_handler) NULL, /* SIOCGIWPRIV */
(iw_handler) NULL, /* SIOCSIWSTATS */
(iw_handler) NULL, /* SIOCGIWSTATS */
prism54_set_spy, /* SIOCSIWSPY */
iw_handler_get_spy, /* SIOCGIWSPY */
iw_handler_set_thrspy, /* SIOCSIWTHRSPY */
iw_handler_get_thrspy, /* SIOCGIWTHRSPY */
(iw_handler) prism54_set_wap, /* SIOCSIWAP */
(iw_handler) prism54_get_wap, /* SIOCGIWAP */
(iw_handler) NULL, /* -- hole -- */
(iw_handler) NULL, /* SIOCGIWAPLIST deprecated */
(iw_handler) prism54_set_scan, /* SIOCSIWSCAN */
(iw_handler) prism54_get_scan, /* SIOCGIWSCAN */
(iw_handler) prism54_set_essid, /* SIOCSIWESSID */
(iw_handler) prism54_get_essid, /* SIOCGIWESSID */
(iw_handler) prism54_set_nick, /* SIOCSIWNICKN */
(iw_handler) prism54_get_nick, /* SIOCGIWNICKN */
(iw_handler) NULL, /* -- hole -- */
(iw_handler) NULL, /* -- hole -- */
(iw_handler) prism54_set_rate, /* SIOCSIWRATE */
(iw_handler) prism54_get_rate, /* SIOCGIWRATE */
(iw_handler) prism54_set_rts, /* SIOCSIWRTS */
(iw_handler) prism54_get_rts, /* SIOCGIWRTS */
(iw_handler) prism54_set_frag, /* SIOCSIWFRAG */
(iw_handler) prism54_get_frag, /* SIOCGIWFRAG */
(iw_handler) prism54_set_txpower, /* SIOCSIWTXPOW */
(iw_handler) prism54_get_txpower, /* SIOCGIWTXPOW */
(iw_handler) prism54_set_retry, /* SIOCSIWRETRY */
(iw_handler) prism54_get_retry, /* SIOCGIWRETRY */
(iw_handler) prism54_set_encode, /* SIOCSIWENCODE */
(iw_handler) prism54_get_encode, /* SIOCGIWENCODE */
(iw_handler) NULL, /* SIOCSIWPOWER */
(iw_handler) NULL, /* SIOCGIWPOWER */
NULL, /* -- hole -- */
NULL, /* -- hole -- */
(iw_handler) prism54_set_genie, /* SIOCSIWGENIE */
(iw_handler) prism54_get_genie, /* SIOCGIWGENIE */
(iw_handler) prism54_set_auth, /* SIOCSIWAUTH */
(iw_handler) prism54_get_auth, /* SIOCGIWAUTH */
(iw_handler) prism54_set_encodeext, /* SIOCSIWENCODEEXT */
(iw_handler) prism54_get_encodeext, /* SIOCGIWENCODEEXT */
NULL, /* SIOCSIWPMKSA */
};
/* The low order bit identifies a SET (0) or a GET (1) ioctl. */
#define PRISM54_RESET SIOCIWFIRSTPRIV
#define PRISM54_GET_POLICY SIOCIWFIRSTPRIV+1
#define PRISM54_SET_POLICY SIOCIWFIRSTPRIV+2
#define PRISM54_GET_MAC SIOCIWFIRSTPRIV+3
#define PRISM54_ADD_MAC SIOCIWFIRSTPRIV+4
#define PRISM54_DEL_MAC SIOCIWFIRSTPRIV+6
#define PRISM54_KICK_MAC SIOCIWFIRSTPRIV+8
#define PRISM54_KICK_ALL SIOCIWFIRSTPRIV+10
#define PRISM54_GET_WPA SIOCIWFIRSTPRIV+11
#define PRISM54_SET_WPA SIOCIWFIRSTPRIV+12
#define PRISM54_DBG_OID SIOCIWFIRSTPRIV+14
#define PRISM54_DBG_GET_OID SIOCIWFIRSTPRIV+15
#define PRISM54_DBG_SET_OID SIOCIWFIRSTPRIV+16
#define PRISM54_GET_OID SIOCIWFIRSTPRIV+17
#define PRISM54_SET_OID_U32 SIOCIWFIRSTPRIV+18
#define PRISM54_SET_OID_STR SIOCIWFIRSTPRIV+20
#define PRISM54_SET_OID_ADDR SIOCIWFIRSTPRIV+22
#define PRISM54_GET_PRISMHDR SIOCIWFIRSTPRIV+23
#define PRISM54_SET_PRISMHDR SIOCIWFIRSTPRIV+24
#define IWPRIV_SET_U32(n,x) { n, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "s_"x }
#define IWPRIV_SET_SSID(n,x) { n, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 1, 0, "s_"x }
#define IWPRIV_SET_ADDR(n,x) { n, IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "s_"x }
#define IWPRIV_GET(n,x) { n, 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | PRIV_STR_SIZE, "g_"x }
#define IWPRIV_U32(n,x) IWPRIV_SET_U32(n,x), IWPRIV_GET(n,x)
#define IWPRIV_SSID(n,x) IWPRIV_SET_SSID(n,x), IWPRIV_GET(n,x)
#define IWPRIV_ADDR(n,x) IWPRIV_SET_ADDR(n,x), IWPRIV_GET(n,x)
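/*
* For illustration (the expansion follows from the macros above):
* a single IWPRIV_U32(DOT11_OID_PSM, "psm") entry in the table
* below produces one setter and one getter row:
*
* { DOT11_OID_PSM, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
* 0, "s_psm" },
* { DOT11_OID_PSM, 0,
* IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | PRIV_STR_SIZE, "g_psm" },
*
* so userspace can issue "iwpriv eth0 s_psm <n>" and
* "iwpriv eth0 g_psm".
*/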
/* Note : limited to 128 private ioctls (wireless tools 26) */
static const struct iw_priv_args prism54_private_args[] = {
/*{ cmd, set_args, get_args, name } */
{PRISM54_RESET, 0, 0, "reset"},
{PRISM54_GET_PRISMHDR, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
"get_prismhdr"},
{PRISM54_SET_PRISMHDR, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0,
"set_prismhdr"},
{PRISM54_GET_POLICY, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
"getPolicy"},
{PRISM54_SET_POLICY, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0,
"setPolicy"},
{PRISM54_GET_MAC, 0, IW_PRIV_TYPE_ADDR | 64, "getMac"},
{PRISM54_ADD_MAC, IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0,
"addMac"},
{PRISM54_DEL_MAC, IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0,
"delMac"},
{PRISM54_KICK_MAC, IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0,
"kickMac"},
{PRISM54_KICK_ALL, 0, 0, "kickAll"},
{PRISM54_GET_WPA, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
"get_wpa"},
{PRISM54_SET_WPA, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0,
"set_wpa"},
{PRISM54_DBG_OID, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0,
"dbg_oid"},
{PRISM54_DBG_GET_OID, 0, IW_PRIV_TYPE_BYTE | 256, "dbg_get_oid"},
{PRISM54_DBG_SET_OID, IW_PRIV_TYPE_BYTE | 256, 0, "dbg_set_oid"},
/* --- sub-ioctls handlers --- */
{PRISM54_GET_OID,
0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | PRIV_STR_SIZE, ""},
{PRISM54_SET_OID_U32,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, ""},
{PRISM54_SET_OID_STR,
IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | 1, 0, ""},
{PRISM54_SET_OID_ADDR,
IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, ""},
/* --- sub-ioctls definitions --- */
IWPRIV_ADDR(GEN_OID_MACADDRESS, "addr"),
IWPRIV_GET(GEN_OID_LINKSTATE, "linkstate"),
IWPRIV_U32(DOT11_OID_BSSTYPE, "bsstype"),
IWPRIV_ADDR(DOT11_OID_BSSID, "bssid"),
IWPRIV_U32(DOT11_OID_STATE, "state"),
IWPRIV_U32(DOT11_OID_AID, "aid"),
IWPRIV_SSID(DOT11_OID_SSIDOVERRIDE, "ssidoverride"),
IWPRIV_U32(DOT11_OID_MEDIUMLIMIT, "medlimit"),
IWPRIV_U32(DOT11_OID_BEACONPERIOD, "beacon"),
IWPRIV_U32(DOT11_OID_DTIMPERIOD, "dtimperiod"),
IWPRIV_U32(DOT11_OID_AUTHENABLE, "authenable"),
IWPRIV_U32(DOT11_OID_PRIVACYINVOKED, "privinvok"),
IWPRIV_U32(DOT11_OID_EXUNENCRYPTED, "exunencrypt"),
IWPRIV_U32(DOT11_OID_REKEYTHRESHOLD, "rekeythresh"),
IWPRIV_U32(DOT11_OID_MAXTXLIFETIME, "maxtxlife"),
IWPRIV_U32(DOT11_OID_MAXRXLIFETIME, "maxrxlife"),
IWPRIV_U32(DOT11_OID_ALOFT_FIXEDRATE, "fixedrate"),
IWPRIV_U32(DOT11_OID_MAXFRAMEBURST, "frameburst"),
IWPRIV_U32(DOT11_OID_PSM, "psm"),
IWPRIV_U32(DOT11_OID_BRIDGELOCAL, "bridge"),
IWPRIV_U32(DOT11_OID_CLIENTS, "clients"),
IWPRIV_U32(DOT11_OID_CLIENTSASSOCIATED, "clientassoc"),
IWPRIV_U32(DOT11_OID_DOT1XENABLE, "dot1xenable"),
IWPRIV_U32(DOT11_OID_ANTENNARX, "rxant"),
IWPRIV_U32(DOT11_OID_ANTENNATX, "txant"),
IWPRIV_U32(DOT11_OID_ANTENNADIVERSITY, "antdivers"),
IWPRIV_U32(DOT11_OID_EDTHRESHOLD, "edthresh"),
IWPRIV_U32(DOT11_OID_PREAMBLESETTINGS, "preamble"),
IWPRIV_GET(DOT11_OID_RATES, "rates"),
IWPRIV_U32(DOT11_OID_OUTPUTPOWER, ".11outpower"),
IWPRIV_GET(DOT11_OID_SUPPORTEDRATES, "supprates"),
IWPRIV_GET(DOT11_OID_SUPPORTEDFREQUENCIES, "suppfreq"),
IWPRIV_U32(DOT11_OID_NOISEFLOOR, "noisefloor"),
IWPRIV_GET(DOT11_OID_FREQUENCYACTIVITY, "freqactivity"),
IWPRIV_U32(DOT11_OID_NONERPPROTECTION, "nonerpprotec"),
IWPRIV_U32(DOT11_OID_PROFILES, "profile"),
IWPRIV_GET(DOT11_OID_EXTENDEDRATES, "extrates"),
IWPRIV_U32(DOT11_OID_MLMEAUTOLEVEL, "mlmelevel"),
IWPRIV_GET(DOT11_OID_BSSS, "bsss"),
IWPRIV_GET(DOT11_OID_BSSLIST, "bsslist"),
IWPRIV_U32(OID_INL_MODE, "mode"),
IWPRIV_U32(OID_INL_CONFIG, "config"),
IWPRIV_U32(OID_INL_DOT11D_CONFORMANCE, ".11dconform"),
IWPRIV_GET(OID_INL_PHYCAPABILITIES, "phycapa"),
IWPRIV_U32(OID_INL_OUTPUTPOWER, "outpower"),
};
static const iw_handler prism54_private_handler[] = {
(iw_handler) prism54_reset,
(iw_handler) prism54_get_policy,
(iw_handler) prism54_set_policy,
(iw_handler) prism54_get_mac,
(iw_handler) prism54_add_mac,
(iw_handler) NULL,
(iw_handler) prism54_del_mac,
(iw_handler) NULL,
(iw_handler) prism54_kick_mac,
(iw_handler) NULL,
(iw_handler) prism54_kick_all,
(iw_handler) prism54_get_wpa,
(iw_handler) prism54_set_wpa,
(iw_handler) NULL,
(iw_handler) prism54_debug_oid,
(iw_handler) prism54_debug_get_oid,
(iw_handler) prism54_debug_set_oid,
(iw_handler) prism54_get_oid,
(iw_handler) prism54_set_u32,
(iw_handler) NULL,
(iw_handler) prism54_set_raw,
(iw_handler) NULL,
(iw_handler) prism54_set_raw,
(iw_handler) prism54_get_prismhdr,
(iw_handler) prism54_set_prismhdr,
};
const struct iw_handler_def prism54_handler_def = {
.num_standard = ARRAY_SIZE(prism54_handler),
.num_private = ARRAY_SIZE(prism54_private_handler),
.num_private_args = ARRAY_SIZE(prism54_private_args),
.standard = (iw_handler *) prism54_handler,
.private = (iw_handler *) prism54_private_handler,
.private_args = (struct iw_priv_args *) prism54_private_args,
.get_wireless_stats = prism54_get_wireless_stats,
};
|
gpl-2.0
|
RaYmunDooo/RaYmunDooo
|
arch/arm/mach-pxa/pcm990-baseboard.c
|
2812
|
13622
|
/*
* arch/arm/mach-pxa/pcm990-baseboard.c
* Support for the Phytec phyCORE-PXA270 Development Platform (PCM-990).
*
* Refer to
* http://www.phytec.com/products/rdk/ARM-XScale/phyCORE-XScale-PXA270.html
* for additional hardware info
*
* Author: Juergen Kilb
* Created: April 05, 2005
* Copyright: Phytec Messtechnik GmbH
* e-Mail: armlinux@phytec.de
*
* based on Intel Mainstone Board
*
* Copyright 2007 Juergen Beisert @ Pengutronix (j.beisert@pengutronix.de)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/i2c/pxa-i2c.h>
#include <linux/pwm_backlight.h>
#include <media/soc_camera.h>
#include <asm/gpio.h>
#include <mach/camera.h>
#include <asm/mach/map.h>
#include <mach/pxa27x.h>
#include <mach/audio.h>
#include <mach/mmc.h>
#include <mach/ohci.h>
#include <mach/pcm990_baseboard.h>
#include <mach/pxafb.h>
#include "devices.h"
#include "generic.h"
static unsigned long pcm990_pin_config[] __initdata = {
/* MMC */
GPIO32_MMC_CLK,
GPIO112_MMC_CMD,
GPIO92_MMC_DAT_0,
GPIO109_MMC_DAT_1,
GPIO110_MMC_DAT_2,
GPIO111_MMC_DAT_3,
/* USB */
GPIO88_USBH1_PWR,
GPIO89_USBH1_PEN,
/* PWM0 */
GPIO16_PWM0_OUT,
/* I2C */
GPIO117_I2C_SCL,
GPIO118_I2C_SDA,
/* AC97 */
GPIO28_AC97_BITCLK,
GPIO29_AC97_SDATA_IN_0,
GPIO30_AC97_SDATA_OUT,
GPIO31_AC97_SYNC,
};
/*
* pcm990_lcd_power - control power supply to the LCD
* @on: 0 = switch off, 1 = switch on
*
* Called by the pxafb driver
*/
#ifndef CONFIG_PCM990_DISPLAY_NONE
static void pcm990_lcd_power(int on, struct fb_var_screeninfo *var)
{
if (on) {
/* enable LCD-Latches
* power on LCD
*/
__PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG3) =
PCM990_CTRL_LCDPWR + PCM990_CTRL_LCDON;
} else {
/* disable LCD-Latches
* power off LCD
*/
__PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG3) = 0x00;
}
}
#endif
#if defined(CONFIG_PCM990_DISPLAY_SHARP)
static struct pxafb_mode_info fb_info_sharp_lq084v1dg21 = {
.pixclock = 28000,
.xres = 640,
.yres = 480,
.bpp = 16,
.hsync_len = 20,
.left_margin = 103,
.right_margin = 47,
.vsync_len = 6,
.upper_margin = 28,
.lower_margin = 5,
.sync = 0,
.cmap_greyscale = 0,
};
static struct pxafb_mach_info pcm990_fbinfo __initdata = {
.modes = &fb_info_sharp_lq084v1dg21,
.num_modes = 1,
.lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL,
.pxafb_lcd_power = pcm990_lcd_power,
};
#elif defined(CONFIG_PCM990_DISPLAY_NEC)
struct pxafb_mode_info fb_info_nec_nl6448bc20_18d = {
.pixclock = 39720,
.xres = 640,
.yres = 480,
.bpp = 16,
.hsync_len = 32,
.left_margin = 16,
.right_margin = 48,
.vsync_len = 2,
.upper_margin = 12,
.lower_margin = 17,
.sync = 0,
.cmap_greyscale = 0,
};
static struct pxafb_mach_info pcm990_fbinfo __initdata = {
.modes = &fb_info_nec_nl6448bc20_18d,
.num_modes = 1,
.lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL,
.pxafb_lcd_power = pcm990_lcd_power,
};
#endif
static struct platform_pwm_backlight_data pcm990_backlight_data = {
.pwm_id = 0,
.max_brightness = 1023,
.dft_brightness = 1023,
.pwm_period_ns = 78770,
};
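/*
* Sanity check on the values above (illustrative): a period of
* 78770 ns corresponds to a PWM frequency of roughly
* 1e9 / 78770 ~= 12.7 kHz, well above the visible flicker range,
* with 1024 brightness steps (0..1023).
*/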
static struct platform_device pcm990_backlight_device = {
.name = "pwm-backlight",
.dev = {
.parent = &pxa27x_device_pwm0.dev,
.platform_data = &pcm990_backlight_data,
},
};
/*
* The PCM-990 development baseboard uses PCM-027's hardware in the
* following way:
*
* - LCD support is in use
* - GPIO16 is output for back light on/off with PWM
* - GPIO58 ... GPIO73 are outputs for display data
* - GPIO74 is output for LCDFCLK
* - GPIO75 is output for LCDLCLK
* - GPIO76 is output for LCDPCLK
* - GPIO77 is output for LCDBIAS
* - MMC support is in use
* - GPIO32 is output for MMCCLK
* - GPIO92 is MMDAT0
* - GPIO109 is MMDAT1
* - GPIO110 is MMCS0
* - GPIO111 is MMCS1
* - GPIO112 is MMCMD
* - IDE/CF card is in use
* - GPIO48 is output /POE
* - GPIO49 is output /PWE
* - GPIO50 is output /PIOR
* - GPIO51 is output /PIOW
* - GPIO54 is output /PCE2
* - GPIO55 is output /PREG
* - GPIO56 is input /PWAIT
* - GPIO57 is output /PIOS16
* - GPIO79 is output PSKTSEL
* - GPIO85 is output /PCE1
* - FFUART is in use
* - GPIO34 is input FFRXD
* - GPIO35 is input FFCTS
* - GPIO36 is input FFDCD
* - GPIO37 is input FFDSR
* - GPIO38 is input FFRI
* - GPIO39 is output FFTXD
* - GPIO40 is output FFDTR
* - GPIO41 is output FFRTS
* - BTUART is in use
* - GPIO42 is input BTRXD
* - GPIO43 is output BTTXD
* - GPIO44 is input BTCTS
* - GPIO45 is output BTRTS
* - IRUART is in use
* - GPIO46 is input STDRXD
* - GPIO47 is output STDTXD
* - AC97 is in use
* - GPIO28 is input AC97CLK
* - GPIO29 is input AC97DatIn
* - GPIO30 is output AC97DatO
* - GPIO31 is output AC97SYNC
* - GPIO113 is output AC97_RESET
* - SSP is in use
* - GPIO23 is output SSPSCLK
* - GPIO24 is output chip select to Max7301
* - GPIO25 is output SSPTXD
* - GPIO26 is input SSPRXD
* - GPIO27 is input for Max7301 IRQ
* - GPIO53 is input SSPSYSCLK
* - SSP3 is in use
* - GPIO81 is output SSPTXD3
* - GPIO82 is input SSPRXD3
* - GPIO83 is output SSPSFRM
* - GPIO84 is output SSPCLK3
*
* Otherwise claimed GPIOs:
* GPIO1 -> IRQ from user switch
* GPIO9 -> IRQ from power management
* GPIO10 -> IRQ from WML9712 AC97 controller
* GPIO11 -> IRQ from IDE controller
* GPIO12 -> IRQ from CF controller
* GPIO13 -> IRQ from CF controller
* GPIO14 -> GPIO free
* GPIO15 -> /CS1 selects baseboard's Control CPLD (U7, 16 bit wide data path)
* GPIO19 -> GPIO free
* GPIO20 -> /SDCS2
* GPIO21 -> /CS3 PC card socket select
* GPIO33 -> /CS5 network controller select
* GPIO78 -> /CS2 (16 bit wide data path)
* GPIO80 -> /CS4 (16 bit wide data path)
* GPIO86 -> GPIO free
* GPIO87 -> GPIO free
* GPIO90 -> LED0 on CPU module
* GPIO91 -> LED1 on CPU module
* GPIO117 -> SCL
* GPIO118 -> SDA
*/
static unsigned long pcm990_irq_enabled;
static void pcm990_mask_ack_irq(struct irq_data *d)
{
int pcm990_irq = (d->irq - PCM027_IRQ(0));
PCM990_INTMSKENA = (pcm990_irq_enabled &= ~(1 << pcm990_irq));
}
static void pcm990_unmask_irq(struct irq_data *d)
{
int pcm990_irq = (d->irq - PCM027_IRQ(0));
/* the irq can be acknowledged only if deasserted, so it's done here */
PCM990_INTSETCLR |= 1 << pcm990_irq;
PCM990_INTMSKENA = (pcm990_irq_enabled |= (1 << pcm990_irq));
}
static struct irq_chip pcm990_irq_chip = {
.irq_mask_ack = pcm990_mask_ack_irq,
.irq_unmask = pcm990_unmask_irq,
};
static void pcm990_irq_handler(unsigned int irq, struct irq_desc *desc)
{
unsigned long pending = (~PCM990_INTSETCLR) & pcm990_irq_enabled;
do {
/* clear our parent IRQ */
desc->irq_data.chip->irq_ack(&desc->irq_data);
if (likely(pending)) {
irq = PCM027_IRQ(0) + __ffs(pending);
generic_handle_irq(irq);
}
pending = (~PCM990_INTSETCLR) & pcm990_irq_enabled;
} while (pending);
}
static void __init pcm990_init_irq(void)
{
int irq;
/* setup extra PCM990 irqs */
for (irq = PCM027_IRQ(0); irq <= PCM027_IRQ(3); irq++) {
irq_set_chip_and_handler(irq, &pcm990_irq_chip,
handle_level_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
PCM990_INTMSKENA = 0x00; /* disable all Interrupts */
PCM990_INTSETCLR = 0xFF;
irq_set_chained_handler(PCM990_CTRL_INT_IRQ, pcm990_irq_handler);
irq_set_irq_type(PCM990_CTRL_INT_IRQ, PCM990_CTRL_INT_IRQ_EDGE);
}
static int pcm990_mci_init(struct device *dev, irq_handler_t mci_detect_int,
void *data)
{
int err;
err = request_irq(PCM027_MMCDET_IRQ, mci_detect_int, IRQF_DISABLED,
"MMC card detect", data);
if (err)
printk(KERN_ERR "pcm990_mci_init: MMC/SD: can't request MMC "
"card detect IRQ\n");
return err;
}
static void pcm990_mci_setpower(struct device *dev, unsigned int vdd)
{
struct pxamci_platform_data *p_d = dev->platform_data;
if ((1 << vdd) & p_d->ocr_mask)
__PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG5) =
PCM990_CTRL_MMC2PWR;
else
__PCM990_CTRL_REG(PCM990_CTRL_PHYS + PCM990_CTRL_REG5) =
~PCM990_CTRL_MMC2PWR;
}
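/*
* Worked example for the power switch above (illustrative): the MMC
* core passes vdd as a bit number within the OCR bitmask, so with
* the ocr_mask used below (MMC_VDD_32_33 | MMC_VDD_33_34, i.e. bits
* 20 and 21) a request for 3.2-3.4 V switches the MMC supply on and
* any other value (including vdd == 0 at power-off) switches it off.
*/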
static void pcm990_mci_exit(struct device *dev, void *data)
{
free_irq(PCM027_MMCDET_IRQ, data);
}
#define MSECS_PER_JIFFY (1000/HZ)
static struct pxamci_platform_data pcm990_mci_platform_data = {
.detect_delay_ms = 250,
.ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
.init = pcm990_mci_init,
.setpower = pcm990_mci_setpower,
.exit = pcm990_mci_exit,
.gpio_card_detect = -1,
.gpio_card_ro = -1,
.gpio_power = -1,
};
static struct pxaohci_platform_data pcm990_ohci_platform_data = {
.port_mode = PMM_PERPORT_MODE,
.flags = ENABLE_PORT1 | POWER_CONTROL_LOW | POWER_SENSE_LOW,
.power_on_delay = 10,
};
/*
* PXA27x Camera specific stuff
*/
#if defined(CONFIG_VIDEO_PXA27x) || defined(CONFIG_VIDEO_PXA27x_MODULE)
static unsigned long pcm990_camera_pin_config[] = {
/* CIF */
GPIO98_CIF_DD_0,
GPIO105_CIF_DD_1,
GPIO104_CIF_DD_2,
GPIO103_CIF_DD_3,
GPIO95_CIF_DD_4,
GPIO94_CIF_DD_5,
GPIO93_CIF_DD_6,
GPIO108_CIF_DD_7,
GPIO107_CIF_DD_8,
GPIO106_CIF_DD_9,
GPIO42_CIF_MCLK,
GPIO45_CIF_PCLK,
GPIO43_CIF_FV,
GPIO44_CIF_LV,
};
/*
* CICR4: PCLK_EN: Pixel clock is supplied by the sensor
* MCLK_EN: Master clock is generated by PXA
* PCP: Data sampled on the falling edge of pixel clock
*/
struct pxacamera_platform_data pcm990_pxacamera_platform_data = {
.flags = PXA_CAMERA_MASTER | PXA_CAMERA_DATAWIDTH_8 | PXA_CAMERA_DATAWIDTH_10 |
PXA_CAMERA_PCLK_EN | PXA_CAMERA_MCLK_EN/* | PXA_CAMERA_PCP*/,
.mclk_10khz = 1000,
};
#include <linux/i2c/pca953x.h>
static struct pca953x_platform_data pca9536_data = {
.gpio_base = NR_BUILTIN_GPIO,
};
static int gpio_bus_switch = -EINVAL;
static int pcm990_camera_set_bus_param(struct soc_camera_link *link,
unsigned long flags)
{
if (gpio_bus_switch < 0) {
if (flags == SOCAM_DATAWIDTH_10)
return 0;
else
return -EINVAL;
}
if (flags & SOCAM_DATAWIDTH_8)
gpio_set_value(gpio_bus_switch, 1);
else
gpio_set_value(gpio_bus_switch, 0);
return 0;
}
static unsigned long pcm990_camera_query_bus_param(struct soc_camera_link *link)
{
int ret;
if (gpio_bus_switch < 0) {
ret = gpio_request(NR_BUILTIN_GPIO, "camera");
if (!ret) {
gpio_bus_switch = NR_BUILTIN_GPIO;
gpio_direction_output(gpio_bus_switch, 0);
}
}
if (gpio_bus_switch >= 0)
return SOCAM_DATAWIDTH_8 | SOCAM_DATAWIDTH_10;
else
return SOCAM_DATAWIDTH_10;
}
static void pcm990_camera_free_bus(struct soc_camera_link *link)
{
if (gpio_bus_switch < 0)
return;
gpio_free(gpio_bus_switch);
gpio_bus_switch = -EINVAL;
}
/* Board I2C devices. */
static struct i2c_board_info __initdata pcm990_i2c_devices[] = {
{
/* Must initialize before the camera(s) */
I2C_BOARD_INFO("pca9536", 0x41),
.platform_data = &pca9536_data,
},
};
static struct i2c_board_info pcm990_camera_i2c[] = {
{
I2C_BOARD_INFO("mt9v022", 0x48),
}, {
I2C_BOARD_INFO("mt9m001", 0x5d),
},
};
static struct soc_camera_link iclink[] = {
{
.bus_id = 0, /* Must match with the camera ID */
.board_info = &pcm990_camera_i2c[0],
.i2c_adapter_id = 0,
.query_bus_param = pcm990_camera_query_bus_param,
.set_bus_param = pcm990_camera_set_bus_param,
.free_bus = pcm990_camera_free_bus,
}, {
.bus_id = 0, /* Must match with the camera ID */
.board_info = &pcm990_camera_i2c[1],
.i2c_adapter_id = 0,
.query_bus_param = pcm990_camera_query_bus_param,
.set_bus_param = pcm990_camera_set_bus_param,
.free_bus = pcm990_camera_free_bus,
},
};
static struct platform_device pcm990_camera[] = {
{
.name = "soc-camera-pdrv",
.id = 0,
.dev = {
.platform_data = &iclink[0],
},
}, {
.name = "soc-camera-pdrv",
.id = 1,
.dev = {
.platform_data = &iclink[1],
},
},
};
#endif /* CONFIG_VIDEO_PXA27x ||CONFIG_VIDEO_PXA27x_MODULE */
/*
* enable generic access to the base board control CPLDs U6 and U7
*/
static struct map_desc pcm990_io_desc[] __initdata = {
{
.virtual = PCM990_CTRL_BASE,
.pfn = __phys_to_pfn(PCM990_CTRL_PHYS),
.length = PCM990_CTRL_SIZE,
.type = MT_DEVICE /* CPLD */
}, {
.virtual = PCM990_CF_PLD_BASE,
.pfn = __phys_to_pfn(PCM990_CF_PLD_PHYS),
.length = PCM990_CF_PLD_SIZE,
.type = MT_DEVICE /* CPLD */
}
};
/*
* system init for baseboard usage. Will be called by pcm027 init.
*
* Add platform devices present on this baseboard and init
* them from CPU side as far as required to use them later on
*/
void __init pcm990_baseboard_init(void)
{
pxa2xx_mfp_config(ARRAY_AND_SIZE(pcm990_pin_config));
/* register CPLD access */
iotable_init(ARRAY_AND_SIZE(pcm990_io_desc));
/* register CPLD's IRQ controller */
pcm990_init_irq();
#ifndef CONFIG_PCM990_DISPLAY_NONE
pxa_set_fb_info(NULL, &pcm990_fbinfo);
#endif
platform_device_register(&pcm990_backlight_device);
/* MMC */
pxa_set_mci_info(&pcm990_mci_platform_data);
/* USB host */
pxa_set_ohci_info(&pcm990_ohci_platform_data);
pxa_set_i2c_info(NULL);
pxa_set_ac97_info(NULL);
#if defined(CONFIG_VIDEO_PXA27x) || defined(CONFIG_VIDEO_PXA27x_MODULE)
pxa2xx_mfp_config(ARRAY_AND_SIZE(pcm990_camera_pin_config));
pxa_set_camera_info(&pcm990_pxacamera_platform_data);
i2c_register_board_info(0, ARRAY_AND_SIZE(pcm990_i2c_devices));
platform_device_register(&pcm990_camera[0]);
platform_device_register(&pcm990_camera[1]);
#endif
printk(KERN_INFO "PCM-990 Evaluation baseboard initialized\n");
}
|
gpl-2.0
|
mparus/android_kernel_huawei_msm8916_g760
|
kernel/debug/kdb/kdb_bp.c
|
2812
|
11107
|
/*
* Kernel Debugger Architecture Independent Breakpoint Handler
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
*/
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kdb.h>
#include <linux/kgdb.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include "kdb_private.h"
/*
* Table of kdb_breakpoints
*/
kdb_bp_t kdb_breakpoints[KDB_MAXBPT];
static void kdb_setsinglestep(struct pt_regs *regs)
{
KDB_STATE_SET(DOING_SS);
}
static char *kdb_rwtypes[] = {
"Instruction(i)",
"Instruction(Register)",
"Data Write",
"I/O",
"Data Access"
};
static char *kdb_bptype(kdb_bp_t *bp)
{
if (bp->bp_type < 0 || bp->bp_type > 4)
return "";
return kdb_rwtypes[bp->bp_type];
}
static int kdb_parsebp(int argc, const char **argv, int *nextargp, kdb_bp_t *bp)
{
int nextarg = *nextargp;
int diag;
bp->bph_length = 1;
if ((argc + 1) != nextarg) {
if (strnicmp(argv[nextarg], "datar", sizeof("datar")) == 0)
bp->bp_type = BP_ACCESS_WATCHPOINT;
else if (strnicmp(argv[nextarg], "dataw", sizeof("dataw")) == 0)
bp->bp_type = BP_WRITE_WATCHPOINT;
else if (strnicmp(argv[nextarg], "inst", sizeof("inst")) == 0)
bp->bp_type = BP_HARDWARE_BREAKPOINT;
else
return KDB_ARGCOUNT;
bp->bph_length = 1;
nextarg++;
if ((argc + 1) != nextarg) {
unsigned long len;
diag = kdbgetularg((char *)argv[nextarg],
&len);
if (diag)
return diag;
if (len > 8)
return KDB_BADLENGTH;
bp->bph_length = len;
nextarg++;
}
if ((argc + 1) != nextarg)
return KDB_ARGCOUNT;
}
*nextargp = nextarg;
return 0;
}
static int _kdb_bp_remove(kdb_bp_t *bp)
{
int ret = 1;
if (!bp->bp_installed)
return ret;
if (!bp->bp_type)
ret = dbg_remove_sw_break(bp->bp_addr);
else
ret = arch_kgdb_ops.remove_hw_breakpoint(bp->bp_addr,
bp->bph_length,
bp->bp_type);
if (ret == 0)
bp->bp_installed = 0;
return ret;
}
static void kdb_handle_bp(struct pt_regs *regs, kdb_bp_t *bp)
{
if (KDB_DEBUG(BP))
kdb_printf("regs->ip = 0x%lx\n", instruction_pointer(regs));
/*
* Setup single step
*/
kdb_setsinglestep(regs);
/*
* Reset delay attribute
*/
bp->bp_delay = 0;
bp->bp_delayed = 1;
}
static int _kdb_bp_install(struct pt_regs *regs, kdb_bp_t *bp)
{
int ret;
/*
* Install the breakpoint, if it is not already installed.
*/
if (KDB_DEBUG(BP))
kdb_printf("%s: bp_installed %d\n",
__func__, bp->bp_installed);
if (!KDB_STATE(SSBPT))
bp->bp_delay = 0;
if (bp->bp_installed)
return 1;
if (bp->bp_delay || (bp->bp_delayed && KDB_STATE(DOING_SS))) {
if (KDB_DEBUG(BP))
kdb_printf("%s: delayed bp\n", __func__);
kdb_handle_bp(regs, bp);
return 0;
}
if (!bp->bp_type)
ret = dbg_set_sw_break(bp->bp_addr);
else
ret = arch_kgdb_ops.set_hw_breakpoint(bp->bp_addr,
bp->bph_length,
bp->bp_type);
if (ret == 0) {
bp->bp_installed = 1;
} else {
kdb_printf("%s: failed to set breakpoint at 0x%lx\n",
__func__, bp->bp_addr);
#ifdef CONFIG_DEBUG_RODATA
if (!bp->bp_type) {
kdb_printf("Software breakpoints are unavailable.\n"
" Change the kernel CONFIG_DEBUG_RODATA=n\n"
" OR use hw breaks: help bph\n");
}
#endif
return 1;
}
return 0;
}
/*
* kdb_bp_install
*
* Install kdb_breakpoints prior to returning from the
* kernel debugger. This allows the kdb_breakpoints to be set
* upon functions that are used internally by kdb, such as
* printk(). This function is only called once per kdb session.
*/
void kdb_bp_install(struct pt_regs *regs)
{
int i;
for (i = 0; i < KDB_MAXBPT; i++) {
kdb_bp_t *bp = &kdb_breakpoints[i];
if (KDB_DEBUG(BP)) {
kdb_printf("%s: bp %d bp_enabled %d\n",
__func__, i, bp->bp_enabled);
}
if (bp->bp_enabled)
_kdb_bp_install(regs, bp);
}
}
/*
* kdb_bp_remove
*
* Remove kdb_breakpoints upon entry to the kernel debugger.
*
* Parameters:
* None.
* Outputs:
* None.
* Returns:
* None.
* Locking:
* None.
* Remarks:
*/
void kdb_bp_remove(void)
{
int i;
for (i = KDB_MAXBPT - 1; i >= 0; i--) {
kdb_bp_t *bp = &kdb_breakpoints[i];
if (KDB_DEBUG(BP)) {
kdb_printf("%s: bp %d bp_enabled %d\n",
__func__, i, bp->bp_enabled);
}
if (bp->bp_enabled)
_kdb_bp_remove(bp);
}
}
/*
* kdb_printbp
*
* Internal function to format and print a breakpoint entry.
*
* Parameters:
* bp Breakpoint table entry to print.
* i Breakpoint number (index into kdb_breakpoints).
* Outputs:
* None.
* Returns:
* None.
* Locking:
* None.
* Remarks:
*/
static void kdb_printbp(kdb_bp_t *bp, int i)
{
kdb_printf("%s ", kdb_bptype(bp));
kdb_printf("BP #%d at ", i);
kdb_symbol_print(bp->bp_addr, NULL, KDB_SP_DEFAULT);
if (bp->bp_enabled)
kdb_printf("\n is enabled");
else
kdb_printf("\n is disabled");
kdb_printf("\taddr at %016lx, hardtype=%d installed=%d\n",
bp->bp_addr, bp->bp_type, bp->bp_installed);
kdb_printf("\n");
}
/*
* kdb_bp
*
* Handle the bp commands.
*
* [bp|bph] <addr-expression> [DATAR|DATAW]
*
* Parameters:
* argc Count of arguments in argv
* argv Space delimited command line arguments
* Outputs:
* None.
* Returns:
* Zero for success, a kdb diagnostic if failure.
* Locking:
* None.
* Remarks:
*
* bp Set breakpoint on all cpus. Only use hardware assist if needed.
* bph Set breakpoint on all cpus. Force use of a hardware register.
*/
static int kdb_bp(int argc, const char **argv)
{
int i, bpno;
kdb_bp_t *bp, *bp_check;
int diag;
char *symname = NULL;
long offset = 0ul;
int nextarg;
kdb_bp_t template = {0};
if (argc == 0) {
/*
* Display breakpoint table
*/
for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT;
bpno++, bp++) {
if (bp->bp_free)
continue;
kdb_printbp(bp, bpno);
}
return 0;
}
nextarg = 1;
diag = kdbgetaddrarg(argc, argv, &nextarg, &template.bp_addr,
&offset, &symname);
if (diag)
return diag;
if (!template.bp_addr)
return KDB_BADINT;
/*
* Find an empty bp structure to allocate
*/
for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) {
if (bp->bp_free)
break;
}
if (bpno == KDB_MAXBPT)
return KDB_TOOMANYBPT;
if (strcmp(argv[0], "bph") == 0) {
template.bp_type = BP_HARDWARE_BREAKPOINT;
diag = kdb_parsebp(argc, argv, &nextarg, &template);
if (diag)
return diag;
} else {
template.bp_type = BP_BREAKPOINT;
}
/*
* Check for clashing breakpoints.
*
* Note, in this design we can't have hardware breakpoints
* enabled for both read and write on the same address.
*/
for (i = 0, bp_check = kdb_breakpoints; i < KDB_MAXBPT;
i++, bp_check++) {
if (!bp_check->bp_free &&
bp_check->bp_addr == template.bp_addr) {
kdb_printf("You already have a breakpoint at "
kdb_bfd_vma_fmt0 "\n", template.bp_addr);
return KDB_DUPBPT;
}
}
template.bp_enabled = 1;
/*
* Actually allocate the breakpoint found earlier
*/
*bp = template;
bp->bp_free = 0;
kdb_printbp(bp, bpno);
return 0;
}
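/*
* Illustrative kdb session for the command above (symbols and
* lengths are hypothetical):
*
* kdb> bp schedule set an instruction breakpoint
* kdb> bph jiffies dataw 4 hw watchpoint on 4-byte writes
* kdb> bp display the breakpoint table
*/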
/*
* kdb_bc
*
* Handles the 'bc', 'be', and 'bd' commands
*
* [bd|bc|be] <breakpoint-number>
* [bd|bc|be] *
*
* Parameters:
* argc Count of arguments in argv
* argv Space delimited command line arguments
* Outputs:
* None.
* Returns:
* Zero for success, a kdb diagnostic for failure
* Locking:
* None.
* Remarks:
*/
static int kdb_bc(int argc, const char **argv)
{
unsigned long addr;
kdb_bp_t *bp = NULL;
int lowbp = KDB_MAXBPT;
int highbp = 0;
int done = 0;
int i;
int diag = 0;
int cmd; /* KDBCMD_B? */
#define KDBCMD_BC 0
#define KDBCMD_BE 1
#define KDBCMD_BD 2
if (strcmp(argv[0], "be") == 0)
cmd = KDBCMD_BE;
else if (strcmp(argv[0], "bd") == 0)
cmd = KDBCMD_BD;
else
cmd = KDBCMD_BC;
if (argc != 1)
return KDB_ARGCOUNT;
if (strcmp(argv[1], "*") == 0) {
lowbp = 0;
highbp = KDB_MAXBPT;
} else {
diag = kdbgetularg(argv[1], &addr);
if (diag)
return diag;
/*
* For addresses less than the maximum breakpoint number,
* assume that the breakpoint number is desired.
*/
if (addr < KDB_MAXBPT) {
bp = &kdb_breakpoints[addr];
lowbp = highbp = addr;
highbp++;
} else {
for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT;
i++, bp++) {
if (bp->bp_addr == addr) {
lowbp = highbp = i;
highbp++;
break;
}
}
}
}
/*
* Now operate on the set of breakpoints matching the input
* criteria (either '*' for all, or an individual breakpoint).
*/
for (bp = &kdb_breakpoints[lowbp], i = lowbp;
i < highbp;
i++, bp++) {
if (bp->bp_free)
continue;
done++;
switch (cmd) {
case KDBCMD_BC:
bp->bp_enabled = 0;
kdb_printf("Breakpoint %d at "
kdb_bfd_vma_fmt " cleared\n",
i, bp->bp_addr);
bp->bp_addr = 0;
bp->bp_free = 1;
break;
case KDBCMD_BE:
bp->bp_enabled = 1;
kdb_printf("Breakpoint %d at "
kdb_bfd_vma_fmt " enabled",
i, bp->bp_addr);
kdb_printf("\n");
break;
case KDBCMD_BD:
if (!bp->bp_enabled)
break;
bp->bp_enabled = 0;
kdb_printf("Breakpoint %d at "
kdb_bfd_vma_fmt " disabled\n",
i, bp->bp_addr);
break;
}
if (bp->bp_delay && (cmd == KDBCMD_BC || cmd == KDBCMD_BD)) {
bp->bp_delay = 0;
KDB_STATE_CLEAR(SSBPT);
}
}
return (!done) ? KDB_BPTNOTFOUND : 0;
}
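/*
* Illustrative use of the clear/enable/disable commands handled
* above (breakpoint numbers are hypothetical):
*
* kdb> bd 0 disable breakpoint 0
* kdb> be 0 re-enable breakpoint 0
* kdb> bc * clear (free) all breakpoints
*/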
/*
* kdb_ss
*
* Process the 'ss' (Single Step) command.
*
* ss
*
* Parameters:
* argc Argument count
* argv Argument vector
* Outputs:
* None.
* Returns:
* KDB_CMD_SS for success, a kdb error if failure.
* Locking:
* None.
* Remarks:
*
* Set the arch specific option to trigger a debug trap after the next
* instruction.
*/
static int kdb_ss(int argc, const char **argv)
{
if (argc != 0)
return KDB_ARGCOUNT;
/*
* Set trace flag and go.
*/
KDB_STATE_SET(DOING_SS);
return KDB_CMD_SS;
}
/* Initialize the breakpoint table and register breakpoint commands. */
void __init kdb_initbptab(void)
{
int i;
kdb_bp_t *bp;
/*
* First time initialization.
*/
memset(&kdb_breakpoints, '\0', sizeof(kdb_breakpoints));
for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++)
bp->bp_free = 1;
kdb_register_repeat("bp", kdb_bp, "[<vaddr>]",
"Set/Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
kdb_register_repeat("bl", kdb_bp, "[<vaddr>]",
"Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT)
kdb_register_repeat("bph", kdb_bp, "[<vaddr>]",
"[datar [length]|dataw [length]] Set hw brk", 0, KDB_REPEAT_NO_ARGS);
kdb_register_repeat("bc", kdb_bc, "<bpnum>",
"Clear Breakpoint", 0, KDB_REPEAT_NONE);
kdb_register_repeat("be", kdb_bc, "<bpnum>",
"Enable Breakpoint", 0, KDB_REPEAT_NONE);
kdb_register_repeat("bd", kdb_bc, "<bpnum>",
"Disable Breakpoint", 0, KDB_REPEAT_NONE);
kdb_register_repeat("ss", kdb_ss, "",
"Single Step", 1, KDB_REPEAT_NO_ARGS);
/*
* Architecture dependent initialization.
*/
}
|
gpl-2.0
|
gurifuxi/gb_kernel_sc05d
|
drivers/net/arcnet/com20020.c
|
3580
|
10271
|
/*
* Linux ARCnet driver - COM20020 chipset support
*
* Written 1997 by David Woodhouse.
* Written 1994-1999 by Avery Pennarun.
* Written 1999 by Martin Mares <mj@ucw.cz>.
* Derived from skeleton.c by Donald Becker.
*
* Special thanks to Contemporary Controls, Inc. (www.ccontrols.com)
* for sponsoring the further development of this driver.
*
* **********************
*
* The original copyright of skeleton.c was as follows:
*
* skeleton.c Written 1993 by Donald Becker.
* Copyright 1993 United States Government as represented by the
* Director, National Security Agency. This software may only be used
* and distributed according to the terms of the GNU General Public License as
* modified by SRC, incorporated herein by reference.
*
* **********************
*
* For more details, see drivers/net/arcnet.c
*
* **********************
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/arcdevice.h>
#include <linux/com20020.h>
#include <asm/io.h>
#define VERSION "arcnet: COM20020 chipset support (by David Woodhouse et al.)\n"
static char *clockrates[] =
{"10 Mb/s", "Reserved", "5 Mb/s",
"2.5 Mb/s", "1.25 Mb/s", "625 Kb/s", "312.5 Kb/s",
"156.25 Kb/s", "Reserved", "Reserved", "Reserved"};
static void com20020_command(struct net_device *dev, int command);
static int com20020_status(struct net_device *dev);
static void com20020_setmask(struct net_device *dev, int mask);
static int com20020_reset(struct net_device *dev, int really_reset);
static void com20020_copy_to_card(struct net_device *dev, int bufnum,
int offset, void *buf, int count);
static void com20020_copy_from_card(struct net_device *dev, int bufnum,
int offset, void *buf, int count);
static void com20020_set_mc_list(struct net_device *dev);
static void com20020_close(struct net_device *);
static void com20020_copy_from_card(struct net_device *dev, int bufnum,
int offset, void *buf, int count)
{
int ioaddr = dev->base_addr, ofs = 512 * bufnum + offset;
/* set up the address register */
outb((ofs >> 8) | RDDATAflag | AUTOINCflag, _ADDR_HI);
outb(ofs & 0xff, _ADDR_LO);
/* copy the data */
TIME("insb", count, insb(_MEMDATA, buf, count));
}
static void com20020_copy_to_card(struct net_device *dev, int bufnum,
int offset, void *buf, int count)
{
int ioaddr = dev->base_addr, ofs = 512 * bufnum + offset;
/* set up the address register */
outb((ofs >> 8) | AUTOINCflag, _ADDR_HI);
outb(ofs & 0xff, _ADDR_LO);
/* copy the data */
TIME("outsb", count, outsb(_MEMDATA, buf, count));
}
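/*
* Worked example for the two copy helpers above (illustrative):
* buffer RAM is addressed in 512-byte pages, so bufnum = 2 with
* offset = 100 gives ofs = 512 * 2 + 100 = 1124 (0x464). _ADDR_HI is
* then loaded with (1124 >> 8) = 4 plus AUTOINCflag (and RDDATAflag
* for reads), _ADDR_LO with 1124 & 0xff = 0x64, and insb()/outsb()
* stream "count" bytes through the auto-incrementing _MEMDATA port.
*/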
/* Reset the card and check some basic stuff during the detection stage. */
int com20020_check(struct net_device *dev)
{
int ioaddr = dev->base_addr, status;
struct arcnet_local *lp = netdev_priv(dev);
ARCRESET0;
mdelay(RESETtime);
lp->setup = lp->clockm ? 0 : (lp->clockp << 1);
lp->setup2 = (lp->clockm << 4) | 8;
/* CHECK: should we do this for SOHARD cards ? */
/* Enable P1Mode for backplane mode */
lp->setup = lp->setup | P1MODE;
SET_SUBADR(SUB_SETUP1);
outb(lp->setup, _XREG);
if (lp->clockm != 0)
{
SET_SUBADR(SUB_SETUP2);
outb(lp->setup2, _XREG);
/* must now write the magic "restart operation" command */
mdelay(1);
outb(0x18, _COMMAND);
}
lp->config = 0x21 | (lp->timeout << 3) | (lp->backplane << 2);
/* set node ID to 0x42 (but transmitter is disabled, so it's okay) */
SETCONF;
outb(0x42, ioaddr + BUS_ALIGN*7);
status = ASTATUS();
if ((status & 0x99) != (NORXflag | TXFREEflag | RESETflag)) {
BUGMSG(D_NORMAL, "status invalid (%Xh).\n", status);
return -ENODEV;
}
BUGMSG(D_INIT_REASONS, "status after reset: %X\n", status);
/* Enable TX */
outb(0x39, _CONFIG);
outb(inb(ioaddr + BUS_ALIGN*8), ioaddr + BUS_ALIGN*7);
ACOMMAND(CFLAGScmd | RESETclear | CONFIGclear);
status = ASTATUS();
BUGMSG(D_INIT_REASONS, "status after reset acknowledged: %X\n",
status);
/* Read first location of memory */
outb(0 | RDDATAflag | AUTOINCflag, _ADDR_HI);
outb(0, _ADDR_LO);
if ((status = inb(_MEMDATA)) != TESTvalue) {
BUGMSG(D_NORMAL, "Signature byte not found (%02Xh != D1h).\n",
status);
return -ENODEV;
}
return 0;
}
const struct net_device_ops com20020_netdev_ops = {
.ndo_open = arcnet_open,
.ndo_stop = arcnet_close,
.ndo_start_xmit = arcnet_send_packet,
.ndo_tx_timeout = arcnet_timeout,
.ndo_set_multicast_list = com20020_set_mc_list,
};
/* Set up the struct net_device associated with this card. Called after
* probing succeeds.
*/
int com20020_found(struct net_device *dev, int shared)
{
struct arcnet_local *lp;
int ioaddr = dev->base_addr;
/* Initialize the rest of the device structure. */
lp = netdev_priv(dev);
lp->hw.owner = THIS_MODULE;
lp->hw.command = com20020_command;
lp->hw.status = com20020_status;
lp->hw.intmask = com20020_setmask;
lp->hw.reset = com20020_reset;
lp->hw.copy_to_card = com20020_copy_to_card;
lp->hw.copy_from_card = com20020_copy_from_card;
lp->hw.close = com20020_close;
if (!dev->dev_addr[0])
dev->dev_addr[0] = inb(ioaddr + BUS_ALIGN*8); /* FIXME: do this some other way! */
SET_SUBADR(SUB_SETUP1);
outb(lp->setup, _XREG);
if (lp->card_flags & ARC_CAN_10MBIT)
{
SET_SUBADR(SUB_SETUP2);
outb(lp->setup2, _XREG);
/* must now write the magic "restart operation" command */
mdelay(1);
outb(0x18, _COMMAND);
}
lp->config = 0x20 | (lp->timeout << 3) | (lp->backplane << 2) | 1;
/* Default 0x38 + register: Node ID */
SETCONF;
outb(dev->dev_addr[0], _XREG);
/* reserve the irq */
if (request_irq(dev->irq, arcnet_interrupt, shared,
"arcnet (COM20020)", dev)) {
BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq);
return -ENODEV;
}
dev->base_addr = ioaddr;
BUGMSG(D_NORMAL, "%s: station %02Xh found at %03lXh, IRQ %d.\n",
lp->card_name, dev->dev_addr[0], dev->base_addr, dev->irq);
if (lp->backplane)
BUGMSG(D_NORMAL, "Using backplane mode.\n");
if (lp->timeout != 3)
BUGMSG(D_NORMAL, "Using extended timeout value of %d.\n", lp->timeout);
BUGMSG(D_NORMAL, "Using CKP %d - data rate %s.\n",
lp->setup >> 1,
clockrates[3 - ((lp->setup2 & 0xF0) >> 4) + ((lp->setup & 0x0F) >> 1)]);
if (register_netdev(dev)) {
free_irq(dev->irq, dev);
return -EIO;
}
return 0;
}
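/*
* Worked example for the data-rate report above (illustrative):
* com20020_check() sets setup2 = (clockm << 4) | 8 and, for
* clockm == 0, setup = clockp << 1 before mode flags are OR'd into
* the upper bits. With clockm = 0 and clockp = 0 the index is
* 3 - ((0x08 & 0xF0) >> 4) + ((0x00 & 0x0F) >> 1) = 3,
* i.e. the standard "2.5 Mb/s" entry of clockrates[].
*/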
/*
* Do a hardware reset on the card, and set up necessary registers.
*
* This should be called as little as possible, because it disrupts the
* token on the network (causes a RECON) and requires a significant delay.
*
* However, it does make sure the card is in a defined state.
*/
static int com20020_reset(struct net_device *dev, int really_reset)
{
struct arcnet_local *lp = netdev_priv(dev);
u_int ioaddr = dev->base_addr;
u_char inbyte;
BUGMSG(D_DEBUG, "%s: %d: %s: dev: %p, lp: %p, dev->name: %s\n",
__FILE__,__LINE__,__func__,dev,lp,dev->name);
BUGMSG(D_INIT, "Resetting %s (status=%02Xh)\n",
dev->name, ASTATUS());
BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
lp->config = TXENcfg | (lp->timeout << 3) | (lp->backplane << 2);
/* power-up defaults */
SETCONF;
BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
if (really_reset) {
/* reset the card */
ARCRESET;
mdelay(RESETtime * 2); /* COM20020 seems to be slower sometimes */
}
/* clear flags & end reset */
BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
ACOMMAND(CFLAGScmd | RESETclear | CONFIGclear);
/* verify that the ARCnet signature byte is present */
BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
com20020_copy_from_card(dev, 0, 0, &inbyte, 1);
BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
if (inbyte != TESTvalue) {
BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
BUGMSG(D_NORMAL, "reset failed: TESTvalue not present.\n");
return 1;
}
/* enable extended (512-byte) packets */
ACOMMAND(CONFIGcmd | EXTconf);
BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__);
/* done! return success. */
return 0;
}
static void com20020_setmask(struct net_device *dev, int mask)
{
u_int ioaddr = dev->base_addr;
BUGMSG(D_DURING, "Setting mask to %x at %x\n",mask,ioaddr);
AINTMASK(mask);
}
static void com20020_command(struct net_device *dev, int cmd)
{
u_int ioaddr = dev->base_addr;
ACOMMAND(cmd);
}
static int com20020_status(struct net_device *dev)
{
u_int ioaddr = dev->base_addr;
return ASTATUS() + (ADIAGSTATUS()<<8);
}
static void com20020_close(struct net_device *dev)
{
struct arcnet_local *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
/* disable transmitter */
lp->config &= ~TXENcfg;
SETCONF;
}
/* Set or clear the multicast filter for this adaptor.
* num_addrs == -1 Promiscuous mode, receive all packets
* num_addrs == 0 Normal mode, clear multicast list
* num_addrs > 0 Multicast mode, receive normal and MC packets, and do
* best-effort filtering.
* FIXME - do multicast stuff, not just promiscuous.
*/
static void com20020_set_mc_list(struct net_device *dev)
{
struct arcnet_local *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
if ((dev->flags & IFF_PROMISC) && (dev->flags & IFF_UP)) { /* Enable promiscuous mode */
if (!(lp->setup & PROMISCset))
BUGMSG(D_NORMAL, "Setting promiscuous flag...\n");
SET_SUBADR(SUB_SETUP1);
lp->setup |= PROMISCset;
outb(lp->setup, _XREG);
} else
/* Disable promiscuous mode, use normal mode */
{
if ((lp->setup & PROMISCset))
BUGMSG(D_NORMAL, "Resetting promiscuous flag...\n");
SET_SUBADR(SUB_SETUP1);
lp->setup &= ~PROMISCset;
outb(lp->setup, _XREG);
}
}
#if defined(CONFIG_ARCNET_COM20020_PCI_MODULE) || \
defined(CONFIG_ARCNET_COM20020_ISA_MODULE) || \
defined(CONFIG_ARCNET_COM20020_CS_MODULE)
EXPORT_SYMBOL(com20020_check);
EXPORT_SYMBOL(com20020_found);
EXPORT_SYMBOL(com20020_netdev_ops);
#endif
MODULE_LICENSE("GPL");
#ifdef MODULE
static int __init com20020_module_init(void)
{
BUGLVL(D_NORMAL) printk(VERSION);
return 0;
}
static void __exit com20020_module_exit(void)
{
}
module_init(com20020_module_init);
module_exit(com20020_module_exit);
#endif /* MODULE */
|
gpl-2.0
|
dimfishr/android_kernel_lge_bullhead
|
arch/mips/pci/fixup-ip32.c
|
4348
|
1484
|
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <asm/ip32/ip32_ints.h>
/*
* O2 has up to 5 PCI devices connected into the MACE bridge. The device
* map looks like this:
*
* 0 aic7xxx 0
* 1 aic7xxx 1
* 2 expansion slot
* 3 N/C
* 4 N/C
*/
#define SCSI0 MACEPCI_SCSI0_IRQ
#define SCSI1 MACEPCI_SCSI1_IRQ
#define INTA0 MACEPCI_SLOT0_IRQ
#define INTA1 MACEPCI_SLOT1_IRQ
#define INTA2 MACEPCI_SLOT2_IRQ
#define INTB MACEPCI_SHARED0_IRQ
#define INTC MACEPCI_SHARED1_IRQ
#define INTD MACEPCI_SHARED2_IRQ
static char irq_tab_mace[][5] __initdata = {
/* Dummy INT#A INT#B INT#C INT#D */
{0, 0, 0, 0, 0}, /* This is a placeholder row - never used */
{0, SCSI0, SCSI0, SCSI0, SCSI0},
{0, SCSI1, SCSI1, SCSI1, SCSI1},
{0, INTA0, INTB, INTC, INTD},
{0, INTA1, INTC, INTD, INTB},
{0, INTA2, INTD, INTB, INTC},
};
/*
* Given a PCI slot number (a la PCI_SLOT(...)) and the interrupt pin of
* the device (1-4 => A-D), tell what irq to use. Note that we don't
* in theory have slots 4 and 5, and we never normally use the shared
* irqs. I suppose a device without a pin A will thank us for doing it
* right if there exists such a broken piece of crap.
*/
int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
return irq_tab_mace[slot][pin];
}
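/*
* A worked lookup (an illustration derived from the table above, not from
* hardware docs): pcibios_map_irq(dev, 3, 1) returns
* irq_tab_mace[3][1] == INTA0 (MACEPCI_SLOT0_IRQ), while pin 2 (INT#B)
* on the same slot yields INTB (MACEPCI_SHARED0_IRQ).
*/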
/* Do platform specific device initialization at pci_enable_device() time */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
return 0;
}
|
gpl-2.0
|
sledges/msm
|
drivers/usb/atm/ueagle-atm.c
|
4860
|
68475
|
/*-
* Copyright (c) 2003, 2004
* Damien Bergamini <damien.bergamini@free.fr>. All rights reserved.
*
* Copyright (c) 2005-2007 Matthieu Castet <castet.matthieu@free.fr>
* Copyright (c) 2005-2007 Stanislaw Gruszka <stf_xl@wp.pl>
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* BSD license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* GPL license :
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
*
* HISTORY : parts of the code were based on the ueagle 1.3 BSD driver;
* Damien Bergamini agreed to put his code under a dual GPL/BSD license.
*
* The rest of the code was rewritten from scratch.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/usb.h>
#include <linux/firmware.h>
#include <linux/ctype.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <asm/unaligned.h>
#include "usbatm.h"
#define EAGLEUSBVERSION "ueagle 1.4"
/*
* Debug macros
*/
#define uea_dbg(usb_dev, format, args...) \
do { \
if (debug >= 1) \
dev_dbg(&(usb_dev)->dev, \
"[ueagle-atm dbg] %s: " format, \
__func__, ##args); \
} while (0)
#define uea_vdbg(usb_dev, format, args...) \
do { \
if (debug >= 2) \
dev_dbg(&(usb_dev)->dev, \
"[ueagle-atm vdbg] " format, ##args); \
} while (0)
#define uea_enters(usb_dev) \
uea_vdbg(usb_dev, "entering %s\n" , __func__)
#define uea_leaves(usb_dev) \
uea_vdbg(usb_dev, "leaving %s\n" , __func__)
#define uea_err(usb_dev, format, args...) \
dev_err(&(usb_dev)->dev , "[UEAGLE-ATM] " format , ##args)
#define uea_warn(usb_dev, format, args...) \
dev_warn(&(usb_dev)->dev , "[Ueagle-atm] " format, ##args)
#define uea_info(usb_dev, format, args...) \
dev_info(&(usb_dev)->dev , "[ueagle-atm] " format, ##args)
struct intr_pkt;
/* CMVs from the firmware */
struct uea_cmvs_v1 {
u32 address;
u16 offset;
u32 data;
} __packed;
struct uea_cmvs_v2 {
u32 group;
u32 address;
u32 offset;
u32 data;
} __packed;
/* information about currently processed cmv */
struct cmv_dsc_e1 {
u8 function;
u16 idx;
u32 address;
u16 offset;
};
struct cmv_dsc_e4 {
u16 function;
u16 offset;
u16 address;
u16 group;
};
union cmv_dsc {
struct cmv_dsc_e1 e1;
struct cmv_dsc_e4 e4;
};
struct uea_softc {
struct usb_device *usb_dev;
struct usbatm_data *usbatm;
int modem_index;
unsigned int driver_info;
int annex;
#define ANNEXA 0
#define ANNEXB 1
int booting;
int reset;
wait_queue_head_t sync_q;
struct task_struct *kthread;
u32 data;
u32 data1;
int cmv_ack;
union cmv_dsc cmv_dsc;
struct work_struct task;
u16 pageno;
u16 ovl;
const struct firmware *dsp_firm;
struct urb *urb_int;
void (*dispatch_cmv) (struct uea_softc *, struct intr_pkt *);
void (*schedule_load_page) (struct uea_softc *, struct intr_pkt *);
int (*stat) (struct uea_softc *);
int (*send_cmvs) (struct uea_softc *);
/* keep in sync with eaglectl */
struct uea_stats {
struct {
u32 state;
u32 flags;
u32 mflags;
u32 vidcpe;
u32 vidco;
u32 dsrate;
u32 usrate;
u32 dsunc;
u32 usunc;
u32 dscorr;
u32 uscorr;
u32 txflow;
u32 rxflow;
u32 usattenuation;
u32 dsattenuation;
u32 dsmargin;
u32 usmargin;
u32 firmid;
} phy;
} stats;
};
/*
* Elsa IDs
*/
#define ELSA_VID 0x05CC
#define ELSA_PID_PSTFIRM 0x3350
#define ELSA_PID_PREFIRM 0x3351
#define ELSA_PID_A_PREFIRM 0x3352
#define ELSA_PID_A_PSTFIRM 0x3353
#define ELSA_PID_B_PREFIRM 0x3362
#define ELSA_PID_B_PSTFIRM 0x3363
/*
* Devolo IDs : POTS if (pid & 0x10)
*/
#define DEVOLO_VID 0x1039
#define DEVOLO_EAGLE_I_A_PID_PSTFIRM 0x2110
#define DEVOLO_EAGLE_I_A_PID_PREFIRM 0x2111
#define DEVOLO_EAGLE_I_B_PID_PSTFIRM 0x2100
#define DEVOLO_EAGLE_I_B_PID_PREFIRM 0x2101
#define DEVOLO_EAGLE_II_A_PID_PSTFIRM 0x2130
#define DEVOLO_EAGLE_II_A_PID_PREFIRM 0x2131
#define DEVOLO_EAGLE_II_B_PID_PSTFIRM 0x2120
#define DEVOLO_EAGLE_II_B_PID_PREFIRM 0x2121
/*
* Reference design USB IDs
*/
#define ANALOG_VID 0x1110
#define ADI930_PID_PREFIRM 0x9001
#define ADI930_PID_PSTFIRM 0x9000
#define EAGLE_I_PID_PREFIRM 0x9010 /* Eagle I */
#define EAGLE_I_PID_PSTFIRM 0x900F /* Eagle I */
#define EAGLE_IIC_PID_PREFIRM 0x9024 /* Eagle IIC */
#define EAGLE_IIC_PID_PSTFIRM 0x9023 /* Eagle IIC */
#define EAGLE_II_PID_PREFIRM 0x9022 /* Eagle II */
#define EAGLE_II_PID_PSTFIRM 0x9021 /* Eagle II */
#define EAGLE_III_PID_PREFIRM 0x9032 /* Eagle III */
#define EAGLE_III_PID_PSTFIRM 0x9031 /* Eagle III */
#define EAGLE_IV_PID_PREFIRM 0x9042 /* Eagle IV */
#define EAGLE_IV_PID_PSTFIRM 0x9041 /* Eagle IV */
/*
* USR USB IDs
*/
#define USR_VID 0x0BAF
#define MILLER_A_PID_PREFIRM 0x00F2
#define MILLER_A_PID_PSTFIRM 0x00F1
#define MILLER_B_PID_PREFIRM 0x00FA
#define MILLER_B_PID_PSTFIRM 0x00F9
#define HEINEKEN_A_PID_PREFIRM 0x00F6
#define HEINEKEN_A_PID_PSTFIRM 0x00F5
#define HEINEKEN_B_PID_PREFIRM 0x00F8
#define HEINEKEN_B_PID_PSTFIRM 0x00F7
#define PREFIRM 0
#define PSTFIRM (1<<7)
#define AUTO_ANNEX_A (1<<8)
#define AUTO_ANNEX_B (1<<9)
enum {
ADI930 = 0,
EAGLE_I,
EAGLE_II,
EAGLE_III,
EAGLE_IV
};
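/*
* Sketch of the driver_info encoding consumed by the macros below (a
* reading of the defines above): bits 3:0 hold the chip version
* (ADI930 .. EAGLE_IV), bit 7 (PSTFIRM) flags a post-firmware device,
* and bits 8/9 request automatic annex A/B selection.
*/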
/* macros for both struct usb_device_id and struct uea_softc */
#define UEA_IS_PREFIRM(x) \
(!((x)->driver_info & PSTFIRM))
#define UEA_CHIP_VERSION(x) \
((x)->driver_info & 0xf)
#define IS_ISDN(x) \
((x)->annex & ANNEXB)
#define INS_TO_USBDEV(ins) (ins->usb_dev)
#define GET_STATUS(data) \
((data >> 8) & 0xf)
#define IS_OPERATIONAL(sc) \
((UEA_CHIP_VERSION(sc) != EAGLE_IV) ? \
(GET_STATUS(sc->stats.phy.state) == 2) : \
(sc->stats.phy.state == 7))
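/*
* Worked example (derived from the macros above): on pre-Eagle-IV chips
* the status lives in bits 11:8 of the state word, so state 0x00000200
* gives GET_STATUS() == 2 and IS_OPERATIONAL() is true; on Eagle IV the
* whole word is compared, and only state == 7 counts as operational.
*/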
/*
* Set of macros to handle unaligned data in the firmware blob.
* The FW_GET_BYTE() macro is provided only for consistency.
*/
#define FW_GET_BYTE(p) (*((__u8 *) (p)))
#define FW_DIR "ueagle-atm/"
#define UEA_FW_NAME_MAX 30
#define NB_MODEM 4
#define BULK_TIMEOUT 300
#define CTRL_TIMEOUT 1000
#define ACK_TIMEOUT msecs_to_jiffies(3000)
#define UEA_INTR_IFACE_NO 0
#define UEA_US_IFACE_NO 1
#define UEA_DS_IFACE_NO 2
#define FASTEST_ISO_INTF 8
#define UEA_BULK_DATA_PIPE 0x02
#define UEA_IDMA_PIPE 0x04
#define UEA_INTR_PIPE 0x04
#define UEA_ISO_DATA_PIPE 0x08
#define UEA_E1_SET_BLOCK 0x0001
#define UEA_E4_SET_BLOCK 0x002c
#define UEA_SET_MODE 0x0003
#define UEA_SET_2183_DATA 0x0004
#define UEA_SET_TIMEOUT 0x0011
#define UEA_LOOPBACK_OFF 0x0002
#define UEA_LOOPBACK_ON 0x0003
#define UEA_BOOT_IDMA 0x0006
#define UEA_START_RESET 0x0007
#define UEA_END_RESET 0x0008
#define UEA_SWAP_MAILBOX (0x3fcd | 0x4000)
#define UEA_MPTX_START (0x3fce | 0x4000)
#define UEA_MPTX_MAILBOX (0x3fd6 | 0x4000)
#define UEA_MPRX_MAILBOX (0x3fdf | 0x4000)
/* block information in eagle4 dsp firmware */
struct block_index {
__le32 PageOffset;
__le32 NotLastBlock;
__le32 dummy;
__le32 PageSize;
__le32 PageAddress;
__le16 dummy1;
__le16 PageNumber;
} __packed;
#define E4_IS_BOOT_PAGE(PageSize) ((le32_to_cpu(PageSize)) & 0x80000000)
#define E4_PAGE_BYTES(PageSize) ((le32_to_cpu(PageSize) & 0x7fffffff) * 4)
#define E4_L1_STRING_HEADER 0x10
#define E4_MAX_PAGE_NUMBER 0x58
#define E4_NO_SWAPPAGE_HEADERS 0x31
/* l1_code is eagle4 dsp firmware format */
struct l1_code {
u8 string_header[E4_L1_STRING_HEADER];
u8 page_number_to_block_index[E4_MAX_PAGE_NUMBER];
struct block_index page_header[E4_NO_SWAPPAGE_HEADERS];
u8 code[0];
} __packed;
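/*
* Layout sketch (assuming the packed structures above): each
* struct block_index is 24 bytes, so code[] starts at offset
* 0x10 + 0x58 + 0x31 * 24 = 1280 bytes into the blob. The top bit of
* PageSize flags a boot page and the low 31 bits give the page size in
* 32-bit words, which is why E4_PAGE_BYTES() multiplies by 4.
*/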
/* structures describing a block within a DSP page */
struct block_info_e1 {
__le16 wHdr;
__le16 wAddress;
__le16 wSize;
__le16 wOvlOffset;
__le16 wOvl; /* overlay */
__le16 wLast;
} __packed;
#define E1_BLOCK_INFO_SIZE 12
struct block_info_e4 {
__be16 wHdr;
__u8 bBootPage;
__u8 bPageNumber;
__be32 dwSize;
__be32 dwAddress;
__be16 wReserved;
} __packed;
#define E4_BLOCK_INFO_SIZE 14
#define UEA_BIHDR 0xabcd
#define UEA_RESERVED 0xffff
/* constants describing cmv type */
#define E1_PREAMBLE 0x535c
#define E1_MODEMTOHOST 0x01
#define E1_HOSTTOMODEM 0x10
#define E1_MEMACCESS 0x1
#define E1_ADSLDIRECTIVE 0x7
#define E1_FUNCTION_TYPE(f) ((f) >> 4)
#define E1_FUNCTION_SUBTYPE(f) ((f) & 0x0f)
#define E4_MEMACCESS 0
#define E4_ADSLDIRECTIVE 0xf
#define E4_FUNCTION_TYPE(f) ((f) >> 8)
#define E4_FUNCTION_SIZE(f) ((f) & 0x0f)
#define E4_FUNCTION_SUBTYPE(f) (((f) >> 4) & 0x0f)
/* for MEMACCESS */
#define E1_REQUESTREAD 0x0
#define E1_REQUESTWRITE 0x1
#define E1_REPLYREAD 0x2
#define E1_REPLYWRITE 0x3
#define E4_REQUESTREAD 0x0
#define E4_REQUESTWRITE 0x4
#define E4_REPLYREAD (E4_REQUESTREAD | 1)
#define E4_REPLYWRITE (E4_REQUESTWRITE | 1)
/* for ADSLDIRECTIVE */
#define E1_KERNELREADY 0x0
#define E1_MODEMREADY 0x1
#define E4_KERNELREADY 0x0
#define E4_MODEMREADY 0x1
#define E1_MAKEFUNCTION(t, s) (((t) & 0xf) << 4 | ((s) & 0xf))
#define E4_MAKEFUNCTION(t, st, s) (((t) & 0xf) << 8 | \
((st) & 0xf) << 4 | ((s) & 0xf))
#define E1_MAKESA(a, b, c, d) \
(((c) & 0xff) << 24 | \
((d) & 0xff) << 16 | \
((a) & 0xff) << 8 | \
((b) & 0xff))
#define E1_GETSA1(a) ((a >> 8) & 0xff)
#define E1_GETSA2(a) (a & 0xff)
#define E1_GETSA3(a) ((a >> 24) & 0xff)
#define E1_GETSA4(a) ((a >> 16) & 0xff)
#define E1_SA_CNTL E1_MAKESA('C', 'N', 'T', 'L')
#define E1_SA_DIAG E1_MAKESA('D', 'I', 'A', 'G')
#define E1_SA_INFO E1_MAKESA('I', 'N', 'F', 'O')
#define E1_SA_OPTN E1_MAKESA('O', 'P', 'T', 'N')
#define E1_SA_RATE E1_MAKESA('R', 'A', 'T', 'E')
#define E1_SA_STAT E1_MAKESA('S', 'T', 'A', 'T')
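/*
* Worked example of the address swizzle (derived from the macros above):
* E1_MAKESA('C', 'N', 'T', 'L') packs the bytes as 0x544C434E, i.e.
* 'T' 'L' in the upper halfword and 'C' 'N' in the lower one; the
* E1_GETSA1()..E1_GETSA4() macros undo exactly this swizzle and return
* 'C', 'N', 'T', 'L' again.
*/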
#define E4_SA_CNTL 1
#define E4_SA_STAT 2
#define E4_SA_INFO 3
#define E4_SA_TEST 4
#define E4_SA_OPTN 5
#define E4_SA_RATE 6
#define E4_SA_DIAG 7
#define E4_SA_CNFG 8
/* structures representing a CMV (Configuration and Management Variable) */
struct cmv_e1 {
__le16 wPreamble;
__u8 bDirection;
__u8 bFunction;
__le16 wIndex;
__le32 dwSymbolicAddress;
__le16 wOffsetAddress;
__le32 dwData;
} __packed;
struct cmv_e4 {
__be16 wGroup;
__be16 wFunction;
__be16 wOffset;
__be16 wAddress;
__be32 dwData[6];
} __packed;
/* structures representing swap information */
struct swap_info_e1 {
__u8 bSwapPageNo;
__u8 bOvl; /* overlay */
} __packed;
struct swap_info_e4 {
__u8 bSwapPageNo;
} __packed;
/* structures representing interrupt data */
#define e1_bSwapPageNo u.e1.s1.swapinfo.bSwapPageNo
#define e1_bOvl u.e1.s1.swapinfo.bOvl
#define e4_bSwapPageNo u.e4.s1.swapinfo.bSwapPageNo
#define INT_LOADSWAPPAGE 0x0001
#define INT_INCOMINGCMV 0x0002
union intr_data_e1 {
struct {
struct swap_info_e1 swapinfo;
__le16 wDataSize;
} __packed s1;
struct {
struct cmv_e1 cmv;
__le16 wDataSize;
} __packed s2;
} __packed;
union intr_data_e4 {
struct {
struct swap_info_e4 swapinfo;
__le16 wDataSize;
} __packed s1;
struct {
struct cmv_e4 cmv;
__le16 wDataSize;
} __packed s2;
} __packed;
struct intr_pkt {
__u8 bType;
__u8 bNotification;
__le16 wValue;
__le16 wIndex;
__le16 wLength;
__le16 wInterrupt;
union {
union intr_data_e1 e1;
union intr_data_e4 e4;
} u;
} __packed;
#define E1_INTR_PKT_SIZE 28
#define E4_INTR_PKT_SIZE 64
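/*
* Size check (assuming packed layout): the common header is 10 bytes
* and the e1 payload is a 16-byte cmv_e1 plus a 2-byte wDataSize,
* which adds up to E1_INTR_PKT_SIZE == 28. The e4 payload only needs
* 34 bytes, so E4_INTR_PKT_SIZE == 64 presumably rounds the packet up
* to the interrupt endpoint's size.
*/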
static struct usb_driver uea_driver;
static DEFINE_MUTEX(uea_mutex);
static const char * const chip_name[] = {
"ADI930", "Eagle I", "Eagle II", "Eagle III", "Eagle IV"};
static int modem_index;
static unsigned int debug;
static unsigned int altsetting[NB_MODEM] = {
[0 ... (NB_MODEM - 1)] = FASTEST_ISO_INTF};
static bool sync_wait[NB_MODEM];
static char *cmv_file[NB_MODEM];
static int annex[NB_MODEM];
module_param(debug, uint, 0644);
MODULE_PARM_DESC(debug, "module debug level (0=off,1=on,2=verbose)");
module_param_array(altsetting, uint, NULL, 0644);
MODULE_PARM_DESC(altsetting, "alternate setting for incoming traffic: 0=bulk, "
"1=isoc slowest, ... , 8=isoc fastest (default)");
module_param_array(sync_wait, bool, NULL, 0644);
MODULE_PARM_DESC(sync_wait, "wait for synchronisation before starting ATM");
module_param_array(cmv_file, charp, NULL, 0644);
MODULE_PARM_DESC(cmv_file,
"file name with configuration and management variables");
module_param_array(annex, uint, NULL, 0644);
MODULE_PARM_DESC(annex,
"manually set annex a/b (0=auto, 1=annex a, 2=annex b)");
#define uea_wait(sc, cond, timeo) \
({ \
int _r = wait_event_interruptible_timeout(sc->sync_q, \
(cond) || kthread_should_stop(), timeo); \
if (kthread_should_stop()) \
_r = -ENODEV; \
_r; \
})
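/*
* A minimal usage sketch (hypothetical caller): uea_wait() keeps the
* wait_event_interruptible_timeout() convention - remaining jiffies on
* success, 0 on timeout, -ERESTARTSYS on signal - and folds a kthread
* stop request into -ENODEV:
*
*	ret = uea_wait(sc, sc->cmv_ack, ACK_TIMEOUT);
*	if (ret < 0)
*		return ret;		// stopping or interrupted
*	if (ret == 0)
*		return -ETIMEDOUT;	// timed out
*/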
#define UPDATE_ATM_STAT(type, val) \
do { \
if (sc->usbatm->atm_dev) \
sc->usbatm->atm_dev->type = val; \
} while (0)
#define UPDATE_ATM_SIGNAL(val) \
do { \
if (sc->usbatm->atm_dev) \
atm_dev_signal_change(sc->usbatm->atm_dev, val); \
} while (0)
/* Firmware loading */
#define LOAD_INTERNAL 0xA0
#define F8051_USBCS 0x7f92
/**
* uea_send_modem_cmd - Send a command for pre-firmware devices.
*/
static int uea_send_modem_cmd(struct usb_device *usb,
u16 addr, u16 size, const u8 *buff)
{
int ret = -ENOMEM;
u8 *xfer_buff;
xfer_buff = kmemdup(buff, size, GFP_KERNEL);
if (xfer_buff) {
ret = usb_control_msg(usb,
usb_sndctrlpipe(usb, 0),
LOAD_INTERNAL,
USB_DIR_OUT | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, addr, 0, xfer_buff,
size, CTRL_TIMEOUT);
kfree(xfer_buff);
}
if (ret < 0)
return ret;
return (ret == size) ? 0 : -EIO;
}
static void uea_upload_pre_firmware(const struct firmware *fw_entry,
void *context)
{
struct usb_device *usb = context;
const u8 *pfw;
u8 value;
u32 crc = 0;
int ret, size;
uea_enters(usb);
if (!fw_entry) {
uea_err(usb, "firmware is not available\n");
goto err;
}
pfw = fw_entry->data;
size = fw_entry->size;
if (size < 4)
goto err_fw_corrupted;
crc = get_unaligned_le32(pfw);
pfw += 4;
size -= 4;
if (crc32_be(0, pfw, size) != crc)
goto err_fw_corrupted;
/*
* Start to upload firmware : send reset
*/
value = 1;
ret = uea_send_modem_cmd(usb, F8051_USBCS, sizeof(value), &value);
if (ret < 0) {
uea_err(usb, "modem reset failed with error %d\n", ret);
goto err;
}
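/*
* The remaining image is a sequence of records, each laid out as
* [len:u8][addr:le16][len bytes of payload] (as parsed below); every
* record is pushed to the device with one vendor control request.
*/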
while (size > 3) {
u8 len = FW_GET_BYTE(pfw);
u16 add = get_unaligned_le16(pfw + 1);
size -= len + 3;
if (size < 0)
goto err_fw_corrupted;
ret = uea_send_modem_cmd(usb, add, len, pfw + 3);
if (ret < 0) {
uea_err(usb, "uploading firmware data failed "
"with error %d\n", ret);
goto err;
}
pfw += len + 3;
}
if (size != 0)
goto err_fw_corrupted;
/*
* Tell the modem we have finished : de-assert reset
*/
value = 0;
ret = uea_send_modem_cmd(usb, F8051_USBCS, 1, &value);
if (ret < 0)
uea_err(usb, "modem de-assert failed with error %d\n", ret);
else
uea_info(usb, "firmware uploaded\n");
goto err;
err_fw_corrupted:
uea_err(usb, "firmware is corrupted\n");
err:
release_firmware(fw_entry);
uea_leaves(usb);
}
/**
* uea_load_firmware - Load usb firmware for pre-firmware devices.
*/
static int uea_load_firmware(struct usb_device *usb, unsigned int ver)
{
int ret;
char *fw_name = FW_DIR "eagle.fw";
uea_enters(usb);
uea_info(usb, "pre-firmware device, uploading firmware\n");
switch (ver) {
case ADI930:
fw_name = FW_DIR "adi930.fw";
break;
case EAGLE_I:
fw_name = FW_DIR "eagleI.fw";
break;
case EAGLE_II:
fw_name = FW_DIR "eagleII.fw";
break;
case EAGLE_III:
fw_name = FW_DIR "eagleIII.fw";
break;
case EAGLE_IV:
fw_name = FW_DIR "eagleIV.fw";
break;
}
ret = request_firmware_nowait(THIS_MODULE, 1, fw_name, &usb->dev,
GFP_KERNEL, usb,
uea_upload_pre_firmware);
if (ret)
uea_err(usb, "firmware %s is not available\n", fw_name);
else
uea_info(usb, "loading firmware %s\n", fw_name);
uea_leaves(usb);
return ret;
}
/* modem management : DSP firmware, send/read CMVs, statistics monitoring
*/
/*
* Make sure that the DSP code provided is safe to use.
*/
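/*
* Expected e1 DSP image layout (as walked below): a page count byte,
* then <pagecount> little-endian 32-bit page offsets (0 meaning the
* page is absent); each page starts with a block count byte followed
* by blocks of [addr:le16][size:le16][size bytes of data].
*/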
static int check_dsp_e1(const u8 *dsp, unsigned int len)
{
u8 pagecount, blockcount;
u16 blocksize;
u32 pageoffset;
unsigned int i, j, p, pp;
pagecount = FW_GET_BYTE(dsp);
p = 1;
/* enough space for page offsets? */
if (p + 4 * pagecount > len)
return 1;
for (i = 0; i < pagecount; i++) {
pageoffset = get_unaligned_le32(dsp + p);
p += 4;
if (pageoffset == 0)
continue;
/* enough space for blockcount? */
if (pageoffset >= len)
return 1;
pp = pageoffset;
blockcount = FW_GET_BYTE(dsp + pp);
pp += 1;
for (j = 0; j < blockcount; j++) {
/* enough space for block header? */
if (pp + 4 > len)
return 1;
pp += 2; /* skip blockaddr */
blocksize = get_unaligned_le16(dsp + pp);
pp += 2;
/* enough space for block data? */
if (pp + blocksize > len)
return 1;
pp += blocksize;
}
}
return 0;
}
static int check_dsp_e4(const u8 *dsp, int len)
{
int i;
struct l1_code *p = (struct l1_code *) dsp;
unsigned int sum = p->code - dsp;
if (len < sum)
return 1;
if (strcmp("STRATIPHY ANEXA", p->string_header) != 0 &&
strcmp("STRATIPHY ANEXB", p->string_header) != 0)
return 1;
for (i = 0; i < E4_MAX_PAGE_NUMBER; i++) {
struct block_index *blockidx;
u8 blockno = p->page_number_to_block_index[i];
if (blockno >= E4_NO_SWAPPAGE_HEADERS)
continue;
do {
u64 l;
if (blockno >= E4_NO_SWAPPAGE_HEADERS)
return 1;
blockidx = &p->page_header[blockno++];
if ((u8 *)(blockidx + 1) - dsp >= len)
return 1;
if (le16_to_cpu(blockidx->PageNumber) != i)
return 1;
l = E4_PAGE_BYTES(blockidx->PageSize);
sum += l;
l += le32_to_cpu(blockidx->PageOffset);
if (l > len)
return 1;
/* zero is zero regardless of endianness */
} while (blockidx->NotLastBlock);
}
return (sum == len) ? 0 : 1;
}
/*
* send data to the IDMA pipe
*/
static int uea_idma_write(struct uea_softc *sc, const void *data, u32 size)
{
int ret = -ENOMEM;
u8 *xfer_buff;
int bytes_read;
xfer_buff = kmemdup(data, size, GFP_KERNEL);
if (!xfer_buff) {
uea_err(INS_TO_USBDEV(sc), "can't allocate xfer_buff\n");
return ret;
}
ret = usb_bulk_msg(sc->usb_dev,
usb_sndbulkpipe(sc->usb_dev, UEA_IDMA_PIPE),
xfer_buff, size, &bytes_read, BULK_TIMEOUT);
kfree(xfer_buff);
if (ret < 0)
return ret;
if (size != bytes_read) {
uea_err(INS_TO_USBDEV(sc), "size != bytes_read %d %d\n", size,
bytes_read);
return -EIO;
}
return 0;
}
static int request_dsp(struct uea_softc *sc)
{
int ret;
char *dsp_name;
if (UEA_CHIP_VERSION(sc) == EAGLE_IV) {
if (IS_ISDN(sc))
dsp_name = FW_DIR "DSP4i.bin";
else
dsp_name = FW_DIR "DSP4p.bin";
} else if (UEA_CHIP_VERSION(sc) == ADI930) {
if (IS_ISDN(sc))
dsp_name = FW_DIR "DSP9i.bin";
else
dsp_name = FW_DIR "DSP9p.bin";
} else {
if (IS_ISDN(sc))
dsp_name = FW_DIR "DSPei.bin";
else
dsp_name = FW_DIR "DSPep.bin";
}
ret = request_firmware(&sc->dsp_firm, dsp_name, &sc->usb_dev->dev);
if (ret < 0) {
uea_err(INS_TO_USBDEV(sc),
"requesting firmware %s failed with error %d\n",
dsp_name, ret);
return ret;
}
if (UEA_CHIP_VERSION(sc) == EAGLE_IV)
ret = check_dsp_e4(sc->dsp_firm->data, sc->dsp_firm->size);
else
ret = check_dsp_e1(sc->dsp_firm->data, sc->dsp_firm->size);
if (ret) {
uea_err(INS_TO_USBDEV(sc), "firmware %s is corrupted\n",
dsp_name);
release_firmware(sc->dsp_firm);
sc->dsp_firm = NULL;
return -EILSEQ;
}
return 0;
}
/*
* The uea_load_page_e1/e4() work functions must be called within a
* process context
*/
static void uea_load_page_e1(struct work_struct *work)
{
struct uea_softc *sc = container_of(work, struct uea_softc, task);
u16 pageno = sc->pageno;
u16 ovl = sc->ovl;
struct block_info_e1 bi;
const u8 *p;
u8 pagecount, blockcount;
u16 blockaddr, blocksize;
u32 pageoffset;
int i;
/* reload the firmware if a reboot starts while it is already loaded */
if (ovl == 0 && pageno == 0 && sc->dsp_firm) {
release_firmware(sc->dsp_firm);
sc->dsp_firm = NULL;
}
if (sc->dsp_firm == NULL && request_dsp(sc) < 0)
return;
p = sc->dsp_firm->data;
pagecount = FW_GET_BYTE(p);
p += 1;
if (pageno >= pagecount)
goto bad1;
p += 4 * pageno;
pageoffset = get_unaligned_le32(p);
if (pageoffset == 0)
goto bad1;
p = sc->dsp_firm->data + pageoffset;
blockcount = FW_GET_BYTE(p);
p += 1;
uea_dbg(INS_TO_USBDEV(sc),
"sending %u blocks for DSP page %u\n", blockcount, pageno);
bi.wHdr = cpu_to_le16(UEA_BIHDR);
bi.wOvl = cpu_to_le16(ovl);
bi.wOvlOffset = cpu_to_le16(ovl | 0x8000);
for (i = 0; i < blockcount; i++) {
blockaddr = get_unaligned_le16(p);
p += 2;
blocksize = get_unaligned_le16(p);
p += 2;
bi.wSize = cpu_to_le16(blocksize);
bi.wAddress = cpu_to_le16(blockaddr);
bi.wLast = cpu_to_le16((i == blockcount - 1) ? 1 : 0);
/* send block info through the IDMA pipe */
if (uea_idma_write(sc, &bi, E1_BLOCK_INFO_SIZE))
goto bad2;
/* send block data through the IDMA pipe */
if (uea_idma_write(sc, p, blocksize))
goto bad2;
p += blocksize;
}
return;
bad2:
uea_err(INS_TO_USBDEV(sc), "sending DSP block %u failed\n", i);
return;
bad1:
uea_err(INS_TO_USBDEV(sc), "invalid DSP page %u requested\n", pageno);
}
static void __uea_load_page_e4(struct uea_softc *sc, u8 pageno, int boot)
{
struct block_info_e4 bi;
struct block_index *blockidx;
struct l1_code *p = (struct l1_code *) sc->dsp_firm->data;
u8 blockno = p->page_number_to_block_index[pageno];
bi.wHdr = cpu_to_be16(UEA_BIHDR);
bi.bBootPage = boot;
bi.bPageNumber = pageno;
bi.wReserved = cpu_to_be16(UEA_RESERVED);
do {
const u8 *blockoffset;
unsigned int blocksize;
blockidx = &p->page_header[blockno];
blocksize = E4_PAGE_BYTES(blockidx->PageSize);
blockoffset = sc->dsp_firm->data + le32_to_cpu(
blockidx->PageOffset);
bi.dwSize = cpu_to_be32(blocksize);
bi.dwAddress = cpu_to_be32(le32_to_cpu(blockidx->PageAddress));
uea_dbg(INS_TO_USBDEV(sc),
"sending block %u for DSP page "
"%u size %u address %x\n",
blockno, pageno, blocksize,
le32_to_cpu(blockidx->PageAddress));
/* send block info through the IDMA pipe */
if (uea_idma_write(sc, &bi, E4_BLOCK_INFO_SIZE))
goto bad;
/* send block data through the IDMA pipe */
if (uea_idma_write(sc, blockoffset, blocksize))
goto bad;
blockno++;
} while (blockidx->NotLastBlock);
return;
bad:
uea_err(INS_TO_USBDEV(sc), "sending DSP block %u failed\n", blockno);
return;
}
static void uea_load_page_e4(struct work_struct *work)
{
struct uea_softc *sc = container_of(work, struct uea_softc, task);
u8 pageno = sc->pageno;
int i;
struct block_info_e4 bi;
struct l1_code *p;
uea_dbg(INS_TO_USBDEV(sc), "sending DSP page %u\n", pageno);
/* reload the firmware if a reboot starts while it is already loaded */
if (pageno == 0 && sc->dsp_firm) {
release_firmware(sc->dsp_firm);
sc->dsp_firm = NULL;
}
if (sc->dsp_firm == NULL && request_dsp(sc) < 0)
return;
p = (struct l1_code *) sc->dsp_firm->data;
if (pageno >= le16_to_cpu(p->page_header[0].PageNumber)) {
uea_err(INS_TO_USBDEV(sc), "invalid DSP "
"page %u requested\n", pageno);
return;
}
if (pageno != 0) {
__uea_load_page_e4(sc, pageno, 0);
return;
}
uea_dbg(INS_TO_USBDEV(sc), "sending Main DSP page %u\n",
le16_to_cpu(p->page_header[0].PageNumber));
for (i = 0; i < le16_to_cpu(p->page_header[0].PageNumber); i++) {
if (E4_IS_BOOT_PAGE(p->page_header[i].PageSize))
__uea_load_page_e4(sc, i, 1);
}
uea_dbg(INS_TO_USBDEV(sc) , "sending start bi\n");
bi.wHdr = cpu_to_be16(UEA_BIHDR);
bi.bBootPage = 0;
bi.bPageNumber = 0xff;
bi.wReserved = cpu_to_be16(UEA_RESERVED);
bi.dwSize = cpu_to_be32(E4_PAGE_BYTES(p->page_header[0].PageSize));
bi.dwAddress = cpu_to_be32(le32_to_cpu(p->page_header[0].PageAddress));
/* send block info through the IDMA pipe */
if (uea_idma_write(sc, &bi, E4_BLOCK_INFO_SIZE))
uea_err(INS_TO_USBDEV(sc), "sending DSP start bi failed\n");
}
static inline void wake_up_cmv_ack(struct uea_softc *sc)
{
BUG_ON(sc->cmv_ack);
sc->cmv_ack = 1;
wake_up(&sc->sync_q);
}
static inline int wait_cmv_ack(struct uea_softc *sc)
{
int ret = uea_wait(sc, sc->cmv_ack , ACK_TIMEOUT);
sc->cmv_ack = 0;
uea_dbg(INS_TO_USBDEV(sc), "wait_event_timeout : %d ms\n",
jiffies_to_msecs(ret));
if (ret < 0)
return ret;
return (ret == 0) ? -ETIMEDOUT : 0;
}
#define UCDC_SEND_ENCAPSULATED_COMMAND 0x00
static int uea_request(struct uea_softc *sc,
u16 value, u16 index, u16 size, const void *data)
{
u8 *xfer_buff;
int ret = -ENOMEM;
xfer_buff = kmemdup(data, size, GFP_KERNEL);
if (!xfer_buff) {
uea_err(INS_TO_USBDEV(sc), "can't allocate xfer_buff\n");
return ret;
}
ret = usb_control_msg(sc->usb_dev, usb_sndctrlpipe(sc->usb_dev, 0),
UCDC_SEND_ENCAPSULATED_COMMAND,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, index, xfer_buff, size, CTRL_TIMEOUT);
kfree(xfer_buff);
if (ret < 0) {
uea_err(INS_TO_USBDEV(sc), "usb_control_msg error %d\n", ret);
return ret;
}
if (ret != size) {
uea_err(INS_TO_USBDEV(sc),
"usb_control_msg send only %d bytes (instead of %d)\n",
ret, size);
return -EIO;
}
return 0;
}
static int uea_cmv_e1(struct uea_softc *sc,
u8 function, u32 address, u16 offset, u32 data)
{
struct cmv_e1 cmv;
int ret;
uea_enters(INS_TO_USBDEV(sc));
uea_vdbg(INS_TO_USBDEV(sc), "Function : %d-%d, Address : %c%c%c%c, "
"offset : 0x%04x, data : 0x%08x\n",
E1_FUNCTION_TYPE(function),
E1_FUNCTION_SUBTYPE(function),
E1_GETSA1(address), E1_GETSA2(address),
E1_GETSA3(address),
E1_GETSA4(address), offset, data);
/* we send a request, but we expect a reply */
sc->cmv_dsc.e1.function = function | 0x2;
sc->cmv_dsc.e1.idx++;
sc->cmv_dsc.e1.address = address;
sc->cmv_dsc.e1.offset = offset;
cmv.wPreamble = cpu_to_le16(E1_PREAMBLE);
cmv.bDirection = E1_HOSTTOMODEM;
cmv.bFunction = function;
cmv.wIndex = cpu_to_le16(sc->cmv_dsc.e1.idx);
put_unaligned_le32(address, &cmv.dwSymbolicAddress);
cmv.wOffsetAddress = cpu_to_le16(offset);
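/* e1 CMVs carry dwData with its 16-bit halves swapped; the reply path
* in uea_dispatch_cmv_e1() swaps them back */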
put_unaligned_le32(data >> 16 | data << 16, &cmv.dwData);
ret = uea_request(sc, UEA_E1_SET_BLOCK, UEA_MPTX_START,
sizeof(cmv), &cmv);
if (ret < 0)
return ret;
ret = wait_cmv_ack(sc);
uea_leaves(INS_TO_USBDEV(sc));
return ret;
}
static int uea_cmv_e4(struct uea_softc *sc,
u16 function, u16 group, u16 address, u16 offset, u32 data)
{
struct cmv_e4 cmv;
int ret;
uea_enters(INS_TO_USBDEV(sc));
memset(&cmv, 0, sizeof(cmv));
uea_vdbg(INS_TO_USBDEV(sc), "Function : %d-%d, Group : 0x%04x, "
"Address : 0x%04x, offset : 0x%04x, data : 0x%08x\n",
E4_FUNCTION_TYPE(function), E4_FUNCTION_SUBTYPE(function),
group, address, offset, data);
/* we send a request, but we expect a reply */
sc->cmv_dsc.e4.function = function | (0x1 << 4);
sc->cmv_dsc.e4.offset = offset;
sc->cmv_dsc.e4.address = address;
sc->cmv_dsc.e4.group = group;
cmv.wFunction = cpu_to_be16(function);
cmv.wGroup = cpu_to_be16(group);
cmv.wAddress = cpu_to_be16(address);
cmv.wOffset = cpu_to_be16(offset);
cmv.dwData[0] = cpu_to_be32(data);
ret = uea_request(sc, UEA_E4_SET_BLOCK, UEA_MPTX_START,
sizeof(cmv), &cmv);
if (ret < 0)
return ret;
ret = wait_cmv_ack(sc);
uea_leaves(INS_TO_USBDEV(sc));
return ret;
}
static inline int uea_read_cmv_e1(struct uea_softc *sc,
u32 address, u16 offset, u32 *data)
{
int ret = uea_cmv_e1(sc, E1_MAKEFUNCTION(E1_MEMACCESS, E1_REQUESTREAD),
address, offset, 0);
if (ret < 0)
uea_err(INS_TO_USBDEV(sc),
"reading cmv failed with error %d\n", ret);
else
*data = sc->data;
return ret;
}
static inline int uea_read_cmv_e4(struct uea_softc *sc,
u8 size, u16 group, u16 address, u16 offset, u32 *data)
{
int ret = uea_cmv_e4(sc, E4_MAKEFUNCTION(E4_MEMACCESS,
E4_REQUESTREAD, size),
group, address, offset, 0);
if (ret < 0)
uea_err(INS_TO_USBDEV(sc),
"reading cmv failed with error %d\n", ret);
else {
*data = sc->data;
/* size is in 16-bit word quantities */
if (size > 2)
*(data + 1) = sc->data1;
}
return ret;
}
static inline int uea_write_cmv_e1(struct uea_softc *sc,
u32 address, u16 offset, u32 data)
{
int ret = uea_cmv_e1(sc, E1_MAKEFUNCTION(E1_MEMACCESS, E1_REQUESTWRITE),
address, offset, data);
if (ret < 0)
uea_err(INS_TO_USBDEV(sc),
"writing cmv failed with error %d\n", ret);
return ret;
}
static inline int uea_write_cmv_e4(struct uea_softc *sc,
u8 size, u16 group, u16 address, u16 offset, u32 data)
{
int ret = uea_cmv_e4(sc, E4_MAKEFUNCTION(E4_MEMACCESS,
E4_REQUESTWRITE, size),
group, address, offset, data);
if (ret < 0)
uea_err(INS_TO_USBDEV(sc),
"writing cmv failed with error %d\n", ret);
return ret;
}
static void uea_set_bulk_timeout(struct uea_softc *sc, u32 dsrate)
{
int ret;
u16 timeout;
/* in bulk mode the modem has problems with high rates;
* changing the internal timing could improve things, but the
* value is mysterious.
* The ADI930 doesn't support it (-EPIPE error).
*/
if (UEA_CHIP_VERSION(sc) == ADI930 ||
altsetting[sc->modem_index] > 0 ||
sc->stats.phy.dsrate == dsrate)
return;
/* Original timing (1Mbit/s) from ADI (used in the windows driver) */
timeout = (dsrate <= 1024*1024) ? 0 : 1;
ret = uea_request(sc, UEA_SET_TIMEOUT, timeout, 0, NULL);
uea_info(INS_TO_USBDEV(sc), "setting new timeout %d%s\n",
timeout, ret < 0 ? " failed" : "");
}
/*
* Monitor the modem and update the stats.
* Return 0 if everything is ok,
* return < 0 if an error occurs (-EAGAIN means a reboot is needed).
*/
static int uea_stat_e1(struct uea_softc *sc)
{
u32 data;
int ret;
uea_enters(INS_TO_USBDEV(sc));
data = sc->stats.phy.state;
ret = uea_read_cmv_e1(sc, E1_SA_STAT, 0, &sc->stats.phy.state);
if (ret < 0)
return ret;
switch (GET_STATUS(sc->stats.phy.state)) {
case 0: /* not yet synchronized */
uea_dbg(INS_TO_USBDEV(sc),
"modem not yet synchronized\n");
return 0;
case 1: /* initialization */
uea_dbg(INS_TO_USBDEV(sc), "modem initializing\n");
return 0;
case 2: /* operational */
uea_vdbg(INS_TO_USBDEV(sc), "modem operational\n");
break;
case 3: /* fail ... */
uea_info(INS_TO_USBDEV(sc), "modem synchronization failed"
" (maybe try another cmv/dsp)\n");
return -EAGAIN;
case 4 ... 6: /* test state */
uea_warn(INS_TO_USBDEV(sc),
"modem in test mode - not supported\n");
return -EAGAIN;
case 7: /* fast-retain ... */
uea_info(INS_TO_USBDEV(sc), "modem in fast-retain mode\n");
return 0;
default:
uea_err(INS_TO_USBDEV(sc), "modem invalid SW mode %d\n",
GET_STATUS(sc->stats.phy.state));
return -EAGAIN;
}
if (GET_STATUS(data) != 2) {
uea_request(sc, UEA_SET_MODE, UEA_LOOPBACK_OFF, 0, NULL);
uea_info(INS_TO_USBDEV(sc), "modem operational\n");
/* release the dsp firmware as it is not needed until
* the next failure
*/
if (sc->dsp_firm) {
release_firmware(sc->dsp_firm);
sc->dsp_firm = NULL;
}
}
/* always update it, as the ATM layer may not have been initialized
* when we switched to the operational state
*/
UPDATE_ATM_SIGNAL(ATM_PHY_SIG_FOUND);
/* wake up processes waiting for synchronization */
wake_up(&sc->sync_q);
ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 2, &sc->stats.phy.flags);
if (ret < 0)
return ret;
sc->stats.phy.mflags |= sc->stats.phy.flags;
/* if any flag is set (for example delineation LOSS (& 0x10)),
* we check the status again in order to detect the failure earlier
*/
if (sc->stats.phy.flags) {
uea_dbg(INS_TO_USBDEV(sc), "Stat flag = 0x%x\n",
sc->stats.phy.flags);
return 0;
}
ret = uea_read_cmv_e1(sc, E1_SA_RATE, 0, &data);
if (ret < 0)
return ret;
uea_set_bulk_timeout(sc, (data >> 16) * 32);
sc->stats.phy.dsrate = (data >> 16) * 32;
sc->stats.phy.usrate = (data & 0xffff) * 32;
UPDATE_ATM_STAT(link_rate, sc->stats.phy.dsrate * 1000 / 424);
ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 23, &data);
if (ret < 0)
return ret;
sc->stats.phy.dsattenuation = (data & 0xff) / 2;
ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 47, &data);
if (ret < 0)
return ret;
sc->stats.phy.usattenuation = (data & 0xff) / 2;
ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 25, &sc->stats.phy.dsmargin);
if (ret < 0)
return ret;
ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 49, &sc->stats.phy.usmargin);
if (ret < 0)
return ret;
ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 51, &sc->stats.phy.rxflow);
if (ret < 0)
return ret;
ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 52, &sc->stats.phy.txflow);
if (ret < 0)
return ret;
ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 54, &sc->stats.phy.dsunc);
if (ret < 0)
return ret;
/* only for atu-c */
ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 58, &sc->stats.phy.usunc);
if (ret < 0)
return ret;
ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 53, &sc->stats.phy.dscorr);
if (ret < 0)
return ret;
/* only for atu-c */
ret = uea_read_cmv_e1(sc, E1_SA_DIAG, 57, &sc->stats.phy.uscorr);
if (ret < 0)
return ret;
ret = uea_read_cmv_e1(sc, E1_SA_INFO, 8, &sc->stats.phy.vidco);
if (ret < 0)
return ret;
ret = uea_read_cmv_e1(sc, E1_SA_INFO, 13, &sc->stats.phy.vidcpe);
if (ret < 0)
return ret;
return 0;
}
static int uea_stat_e4(struct uea_softc *sc)
{
u32 data;
u32 tmp_arr[2];
int ret;
uea_enters(INS_TO_USBDEV(sc));
data = sc->stats.phy.state;
/* XXX only needs to be done before reaching the operational state... */
ret = uea_read_cmv_e4(sc, 1, E4_SA_STAT, 0, 0, &sc->stats.phy.state);
if (ret < 0)
return ret;
switch (sc->stats.phy.state) {
case 0x0: /* not yet synchronized */
case 0x1:
case 0x3:
case 0x4:
uea_dbg(INS_TO_USBDEV(sc), "modem not yet "
"synchronized\n");
return 0;
case 0x5: /* initialization */
case 0x6:
case 0x9:
case 0xa:
uea_dbg(INS_TO_USBDEV(sc), "modem initializing\n");
return 0;
case 0x2: /* fail ... */
uea_info(INS_TO_USBDEV(sc), "modem synchronization "
"failed (maybe try another cmv/dsp)\n");
return -EAGAIN;
case 0x7: /* operational */
break;
default:
uea_warn(INS_TO_USBDEV(sc), "unknown state: %x\n",
sc->stats.phy.state);
return 0;
}
if (data != 7) {
uea_request(sc, UEA_SET_MODE, UEA_LOOPBACK_OFF, 0, NULL);
uea_info(INS_TO_USBDEV(sc), "modem operational\n");
/* release the dsp firmware as it is not needed until
* the next failure
*/
if (sc->dsp_firm) {
release_firmware(sc->dsp_firm);
sc->dsp_firm = NULL;
}
}
/* always update it, as the ATM layer may not have been initialized
* when we switched to the operational state
*/
UPDATE_ATM_SIGNAL(ATM_PHY_SIG_FOUND);
/* wake up processes waiting for synchronization */
wake_up(&sc->sync_q);
/* TODO improve this state machine:
* we need more CMV info: what they do and their units;
* we should find the equivalent of the eagle3 CMVs
*/
/* check flags */
ret = uea_read_cmv_e4(sc, 1, E4_SA_DIAG, 0, 0, &sc->stats.phy.flags);
if (ret < 0)
return ret;
sc->stats.phy.mflags |= sc->stats.phy.flags;
/* if any flag is set (for example delineation LOSS (& 0x10)),
* we check the status again in order to detect the failure earlier
*/
if (sc->stats.phy.flags) {
uea_dbg(INS_TO_USBDEV(sc), "Stat flag = 0x%x\n",
sc->stats.phy.flags);
if (sc->stats.phy.flags & 1) /* delineation LOSS */
return -EAGAIN;
if (sc->stats.phy.flags & 0x4000) /* Reset Flag */
return -EAGAIN;
return 0;
}
/* rate data may be in the upper or lower half of the 64-bit word, strange */
ret = uea_read_cmv_e4(sc, 4, E4_SA_RATE, 0, 0, tmp_arr);
if (ret < 0)
return ret;
data = (tmp_arr[0]) ? tmp_arr[0] : tmp_arr[1];
sc->stats.phy.usrate = data / 1000;
ret = uea_read_cmv_e4(sc, 4, E4_SA_RATE, 1, 0, tmp_arr);
if (ret < 0)
return ret;
data = (tmp_arr[0]) ? tmp_arr[0] : tmp_arr[1];
uea_set_bulk_timeout(sc, data / 1000);
sc->stats.phy.dsrate = data / 1000;
UPDATE_ATM_STAT(link_rate, sc->stats.phy.dsrate * 1000 / 424);
ret = uea_read_cmv_e4(sc, 1, E4_SA_INFO, 68, 1, &data);
if (ret < 0)
return ret;
sc->stats.phy.dsattenuation = data / 10;
ret = uea_read_cmv_e4(sc, 1, E4_SA_INFO, 69, 1, &data);
if (ret < 0)
return ret;
sc->stats.phy.usattenuation = data / 10;
ret = uea_read_cmv_e4(sc, 1, E4_SA_INFO, 68, 3, &data);
if (ret < 0)
return ret;
sc->stats.phy.dsmargin = data / 2;
ret = uea_read_cmv_e4(sc, 1, E4_SA_INFO, 69, 3, &data);
if (ret < 0)
return ret;
sc->stats.phy.usmargin = data / 10;
return 0;
}
static void cmvs_file_name(struct uea_softc *sc, char *const cmv_name, int ver)
{
char file_arr[] = "CMVxy.bin";
char *file;
kparam_block_sysfs_write(cmv_file);
/* pick the proper name for the modem version and line type */
if (cmv_file[sc->modem_index] == NULL) {
if (UEA_CHIP_VERSION(sc) == ADI930)
file_arr[3] = '9';
else if (UEA_CHIP_VERSION(sc) == EAGLE_IV)
file_arr[3] = '4';
else
file_arr[3] = 'e';
file_arr[4] = IS_ISDN(sc) ? 'i' : 'p';
file = file_arr;
} else
file = cmv_file[sc->modem_index];
strcpy(cmv_name, FW_DIR);
strlcat(cmv_name, file, UEA_FW_NAME_MAX);
if (ver == 2)
strlcat(cmv_name, ".v2", UEA_FW_NAME_MAX);
kparam_unblock_sysfs_write(cmv_file);
}
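/*
* Examples of the names built above (when no cmv_file= override is
* given): CMV9p.bin for a POTS ADI930, CMV4i.bin for an ISDN Eagle IV,
* CMVep.bin for the other chips on POTS; ".v2" is appended when the v2
* format is requested.
*/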
static int request_cmvs_old(struct uea_softc *sc,
void **cmvs, const struct firmware **fw)
{
int ret, size;
u8 *data;
char cmv_name[UEA_FW_NAME_MAX]; /* 30 bytes stack variable */
cmvs_file_name(sc, cmv_name, 1);
ret = request_firmware(fw, cmv_name, &sc->usb_dev->dev);
if (ret < 0) {
uea_err(INS_TO_USBDEV(sc),
"requesting firmware %s failed with error %d\n",
cmv_name, ret);
return ret;
}
data = (u8 *) (*fw)->data;
size = (*fw)->size;
if (size < 1)
goto err_fw_corrupted;
if (size != *data * sizeof(struct uea_cmvs_v1) + 1)
goto err_fw_corrupted;
*cmvs = (void *)(data + 1);
return *data;
err_fw_corrupted:
uea_err(INS_TO_USBDEV(sc), "firmware %s is corrupted\n", cmv_name);
release_firmware(*fw);
return -EILSEQ;
}
static int request_cmvs(struct uea_softc *sc,
void **cmvs, const struct firmware **fw, int *ver)
{
int ret, size;
u32 crc;
u8 *data;
char cmv_name[UEA_FW_NAME_MAX]; /* 30 bytes stack variable */
cmvs_file_name(sc, cmv_name, 2);
ret = request_firmware(fw, cmv_name, &sc->usb_dev->dev);
if (ret < 0) {
/* if the caller can handle the old version, try to provide it */
if (*ver == 1) {
uea_warn(INS_TO_USBDEV(sc), "requesting "
"firmware %s failed, "
"try to get older cmvs\n", cmv_name);
return request_cmvs_old(sc, cmvs, fw);
}
uea_err(INS_TO_USBDEV(sc),
"requesting firmware %s failed with error %d\n",
cmv_name, ret);
return ret;
}
size = (*fw)->size;
data = (u8 *) (*fw)->data;
if (size < 4 || strncmp(data, "cmv2", 4) != 0) {
if (*ver == 1) {
uea_warn(INS_TO_USBDEV(sc), "firmware %s is corrupted,"
" try to get older cmvs\n", cmv_name);
release_firmware(*fw);
return request_cmvs_old(sc, cmvs, fw);
}
goto err_fw_corrupted;
}
*ver = 2;
data += 4;
size -= 4;
if (size < 5)
goto err_fw_corrupted;
crc = get_unaligned_le32(data);
data += 4;
size -= 4;
if (crc32_be(0, data, size) != crc)
goto err_fw_corrupted;
if (size != *data * sizeof(struct uea_cmvs_v2) + 1)
goto err_fw_corrupted;
*cmvs = (void *) (data + 1);
return *data;
err_fw_corrupted:
uea_err(INS_TO_USBDEV(sc), "firmware %s is corrupted\n", cmv_name);
release_firmware(*fw);
return -EILSEQ;
}
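/*
* CMV file formats as checked above - v1: [count:u8] followed by
* <count> struct uea_cmvs_v1 entries; v2: the magic "cmv2", a CRC32
* (crc32_be, stored little-endian) over the remainder, then [count:u8]
* and <count> struct uea_cmvs_v2 entries.
*/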
static int uea_send_cmvs_e1(struct uea_softc *sc)
{
int i, ret, len;
void *cmvs_ptr;
const struct firmware *cmvs_fw;
int ver = 1; /* we can handle the v1 cmv firmware format */
/* Enter R-IDLE (cmv) until instructed otherwise */
ret = uea_write_cmv_e1(sc, E1_SA_CNTL, 0, 1);
if (ret < 0)
return ret;
/* Dump firmware version */
ret = uea_read_cmv_e1(sc, E1_SA_INFO, 10, &sc->stats.phy.firmid);
if (ret < 0)
return ret;
uea_info(INS_TO_USBDEV(sc), "ATU-R firmware version : %x\n",
sc->stats.phy.firmid);
/* get options */
ret = len = request_cmvs(sc, &cmvs_ptr, &cmvs_fw, &ver);
if (ret < 0)
return ret;
/* send options */
if (ver == 1) {
struct uea_cmvs_v1 *cmvs_v1 = cmvs_ptr;
uea_warn(INS_TO_USBDEV(sc), "using a deprecated cmvs version, "
"please update your firmware\n");
for (i = 0; i < len; i++) {
ret = uea_write_cmv_e1(sc,
get_unaligned_le32(&cmvs_v1[i].address),
get_unaligned_le16(&cmvs_v1[i].offset),
get_unaligned_le32(&cmvs_v1[i].data));
if (ret < 0)
goto out;
}
} else if (ver == 2) {
struct uea_cmvs_v2 *cmvs_v2 = cmvs_ptr;
for (i = 0; i < len; i++) {
ret = uea_write_cmv_e1(sc,
get_unaligned_le32(&cmvs_v2[i].address),
(u16) get_unaligned_le32(&cmvs_v2[i].offset),
get_unaligned_le32(&cmvs_v2[i].data));
if (ret < 0)
goto out;
}
} else {
/* This really should not happen */
uea_err(INS_TO_USBDEV(sc), "bad cmvs version %d\n", ver);
goto out;
}
/* Enter R-ACT-REQ */
ret = uea_write_cmv_e1(sc, E1_SA_CNTL, 0, 2);
uea_vdbg(INS_TO_USBDEV(sc), "Entering R-ACT-REQ state\n");
uea_info(INS_TO_USBDEV(sc), "modem started, waiting for "
"synchronization...\n");
out:
release_firmware(cmvs_fw);
return ret;
}
static int uea_send_cmvs_e4(struct uea_softc *sc)
{
int i, ret, len;
void *cmvs_ptr;
const struct firmware *cmvs_fw;
int ver = 2; /* we can only handle the v2 cmv firmware format */
/* Enter R-IDLE (cmv) until instructed otherwise */
ret = uea_write_cmv_e4(sc, 1, E4_SA_CNTL, 0, 0, 1);
if (ret < 0)
return ret;
/* Dump firmware version */
/* XXX don't read the 3rd byte as it is always 6 */
ret = uea_read_cmv_e4(sc, 2, E4_SA_INFO, 55, 0, &sc->stats.phy.firmid);
if (ret < 0)
return ret;
uea_info(INS_TO_USBDEV(sc), "ATU-R firmware version : %x\n",
sc->stats.phy.firmid);
/* get options */
ret = len = request_cmvs(sc, &cmvs_ptr, &cmvs_fw, &ver);
if (ret < 0)
return ret;
/* send options */
if (ver == 2) {
struct uea_cmvs_v2 *cmvs_v2 = cmvs_ptr;
for (i = 0; i < len; i++) {
ret = uea_write_cmv_e4(sc, 1,
get_unaligned_le32(&cmvs_v2[i].group),
get_unaligned_le32(&cmvs_v2[i].address),
get_unaligned_le32(&cmvs_v2[i].offset),
get_unaligned_le32(&cmvs_v2[i].data));
if (ret < 0)
goto out;
}
} else {
/* This really should not happen */
uea_err(INS_TO_USBDEV(sc), "bad cmvs version %d\n", ver);
goto out;
}
/* Enter R-ACT-REQ */
ret = uea_write_cmv_e4(sc, 1, E4_SA_CNTL, 0, 0, 2);
uea_vdbg(INS_TO_USBDEV(sc), "Entering R-ACT-REQ state\n");
uea_info(INS_TO_USBDEV(sc), "modem started, waiting for "
"synchronization...\n");
out:
release_firmware(cmvs_fw);
return ret;
}
/* Start booting the post-firmware modem:
* - send reset commands through the usb control pipe
* - start the workqueue for DSP loading
* - send the CMV options to the modem
*/
static int uea_start_reset(struct uea_softc *sc)
{
u16 zero = 0; /* ;-) */
int ret;
uea_enters(INS_TO_USBDEV(sc));
uea_info(INS_TO_USBDEV(sc), "(re)booting started\n");
/* mask interrupt */
sc->booting = 1;
/* We need to clear this here: an ack timeout could have occurred, but
* the ack may still arrive before we start the reboot and set this to 1,
* in which case we would fail to wait for the Ready CMV.
*/
sc->cmv_ack = 0;
UPDATE_ATM_SIGNAL(ATM_PHY_SIG_LOST);
/* reset statistics */
memset(&sc->stats, 0, sizeof(struct uea_stats));
/* tell the modem that we want to boot in IDMA mode */
uea_request(sc, UEA_SET_MODE, UEA_LOOPBACK_ON, 0, NULL);
uea_request(sc, UEA_SET_MODE, UEA_BOOT_IDMA, 0, NULL);
/* enter reset mode */
uea_request(sc, UEA_SET_MODE, UEA_START_RESET, 0, NULL);
/* the original driver used 200ms, but the windows driver uses 100ms */
ret = uea_wait(sc, 0, msecs_to_jiffies(100));
if (ret < 0)
return ret;
/* leave reset mode */
uea_request(sc, UEA_SET_MODE, UEA_END_RESET, 0, NULL);
if (UEA_CHIP_VERSION(sc) != EAGLE_IV) {
/* clear tx and rx mailboxes */
uea_request(sc, UEA_SET_2183_DATA, UEA_MPTX_MAILBOX, 2, &zero);
uea_request(sc, UEA_SET_2183_DATA, UEA_MPRX_MAILBOX, 2, &zero);
uea_request(sc, UEA_SET_2183_DATA, UEA_SWAP_MAILBOX, 2, &zero);
}
ret = uea_wait(sc, 0, msecs_to_jiffies(1000));
if (ret < 0)
return ret;
if (UEA_CHIP_VERSION(sc) == EAGLE_IV)
sc->cmv_dsc.e4.function = E4_MAKEFUNCTION(E4_ADSLDIRECTIVE,
E4_MODEMREADY, 1);
else
sc->cmv_dsc.e1.function = E1_MAKEFUNCTION(E1_ADSLDIRECTIVE,
E1_MODEMREADY);
/* unmask interrupt */
sc->booting = 0;
/* start loading DSP */
sc->pageno = 0;
sc->ovl = 0;
schedule_work(&sc->task);
/* wait for modem ready CMV */
ret = wait_cmv_ack(sc);
if (ret < 0)
return ret;
uea_vdbg(INS_TO_USBDEV(sc), "Ready CMV received\n");
ret = sc->send_cmvs(sc);
if (ret < 0)
return ret;
sc->reset = 0;
uea_leaves(INS_TO_USBDEV(sc));
return ret;
}
/*
* In case of an error, wait 1s before rebooting the modem
* unless the modem itself requests a reboot (-EAGAIN).
* Monitor the modem every 1s.
*/
static int uea_kthread(void *data)
{
struct uea_softc *sc = data;
int ret = -EAGAIN;
set_freezable();
uea_enters(INS_TO_USBDEV(sc));
while (!kthread_should_stop()) {
if (ret < 0 || sc->reset)
ret = uea_start_reset(sc);
if (!ret)
ret = sc->stat(sc);
if (ret != -EAGAIN)
uea_wait(sc, 0, msecs_to_jiffies(1000));
try_to_freeze();
}
uea_leaves(INS_TO_USBDEV(sc));
return ret;
}
/* Load second usb firmware for ADI930 chip */
static int load_XILINX_firmware(struct uea_softc *sc)
{
const struct firmware *fw_entry;
int ret, size, u, ln;
const u8 *pfw;
u8 value;
char *fw_name = FW_DIR "930-fpga.bin";
uea_enters(INS_TO_USBDEV(sc));
ret = request_firmware(&fw_entry, fw_name, &sc->usb_dev->dev);
if (ret) {
uea_err(INS_TO_USBDEV(sc), "firmware %s is not available\n",
fw_name);
goto err0;
}
pfw = fw_entry->data;
size = fw_entry->size;
if (size != 0x577B) {
uea_err(INS_TO_USBDEV(sc), "firmware %s is corrupted\n",
fw_name);
ret = -EILSEQ;
goto err1;
}
for (u = 0; u < size; u += ln) {
ln = min(size - u, 64);
ret = uea_request(sc, 0xe, 0, ln, pfw + u);
if (ret < 0) {
uea_err(INS_TO_USBDEV(sc),
"elsa download data failed (%d)\n", ret);
goto err1;
}
}
/* finish sending the fpga */
ret = uea_request(sc, 0xe, 1, 0, NULL);
if (ret < 0) {
uea_err(INS_TO_USBDEV(sc),
"elsa download data failed (%d)\n", ret);
goto err1;
}
/* Tell the modem we have finished : de-assert reset */
value = 0;
ret = uea_send_modem_cmd(sc->usb_dev, 0xe, 1, &value);
if (ret < 0)
uea_err(sc->usb_dev, "elsa de-assert failed with error"
" %d\n", ret);
err1:
release_firmware(fw_entry);
err0:
uea_leaves(INS_TO_USBDEV(sc));
return ret;
}
/* The modem sends us an ack; first we check that it is valid */
static void uea_dispatch_cmv_e1(struct uea_softc *sc, struct intr_pkt *intr)
{
struct cmv_dsc_e1 *dsc = &sc->cmv_dsc.e1;
struct cmv_e1 *cmv = &intr->u.e1.s2.cmv;
uea_enters(INS_TO_USBDEV(sc));
if (le16_to_cpu(cmv->wPreamble) != E1_PREAMBLE)
goto bad1;
if (cmv->bDirection != E1_MODEMTOHOST)
goto bad1;
/* FIXME : the ADI930 replies with a wrong preamble (func = 2, sub = 2)
* to the first MEMACCESS cmv. Ignore it...
*/
if (cmv->bFunction != dsc->function) {
if (UEA_CHIP_VERSION(sc) == ADI930
&& cmv->bFunction == E1_MAKEFUNCTION(2, 2)) {
cmv->wIndex = cpu_to_le16(dsc->idx);
put_unaligned_le32(dsc->address,
&cmv->dwSymbolicAddress);
cmv->wOffsetAddress = cpu_to_le16(dsc->offset);
} else
goto bad2;
}
if (cmv->bFunction == E1_MAKEFUNCTION(E1_ADSLDIRECTIVE,
E1_MODEMREADY)) {
wake_up_cmv_ack(sc);
uea_leaves(INS_TO_USBDEV(sc));
return;
}
/* in case of MEMACCESS */
if (le16_to_cpu(cmv->wIndex) != dsc->idx ||
get_unaligned_le32(&cmv->dwSymbolicAddress) != dsc->address ||
le16_to_cpu(cmv->wOffsetAddress) != dsc->offset)
goto bad2;
sc->data = get_unaligned_le32(&cmv->dwData);
sc->data = sc->data << 16 | sc->data >> 16;
wake_up_cmv_ack(sc);
uea_leaves(INS_TO_USBDEV(sc));
return;
bad2:
uea_err(INS_TO_USBDEV(sc), "unexpected cmv received, "
"Function : %d, Subfunction : %d\n",
E1_FUNCTION_TYPE(cmv->bFunction),
E1_FUNCTION_SUBTYPE(cmv->bFunction));
uea_leaves(INS_TO_USBDEV(sc));
return;
bad1:
uea_err(INS_TO_USBDEV(sc), "invalid cmv received, "
"wPreamble %d, bDirection %d\n",
le16_to_cpu(cmv->wPreamble), cmv->bDirection);
uea_leaves(INS_TO_USBDEV(sc));
}
/* The modem sends us an ack; first we check that it is valid */
static void uea_dispatch_cmv_e4(struct uea_softc *sc, struct intr_pkt *intr)
{
struct cmv_dsc_e4 *dsc = &sc->cmv_dsc.e4;
struct cmv_e4 *cmv = &intr->u.e4.s2.cmv;
uea_enters(INS_TO_USBDEV(sc));
uea_dbg(INS_TO_USBDEV(sc), "cmv %x %x %x %x %x %x\n",
be16_to_cpu(cmv->wGroup), be16_to_cpu(cmv->wFunction),
be16_to_cpu(cmv->wOffset), be16_to_cpu(cmv->wAddress),
be32_to_cpu(cmv->dwData[0]), be32_to_cpu(cmv->dwData[1]));
if (be16_to_cpu(cmv->wFunction) != dsc->function)
goto bad2;
if (be16_to_cpu(cmv->wFunction) == E4_MAKEFUNCTION(E4_ADSLDIRECTIVE,
E4_MODEMREADY, 1)) {
wake_up_cmv_ack(sc);
uea_leaves(INS_TO_USBDEV(sc));
return;
}
/* in case of MEMACCESS */
if (be16_to_cpu(cmv->wOffset) != dsc->offset ||
be16_to_cpu(cmv->wGroup) != dsc->group ||
be16_to_cpu(cmv->wAddress) != dsc->address)
goto bad2;
sc->data = be32_to_cpu(cmv->dwData[0]);
sc->data1 = be32_to_cpu(cmv->dwData[1]);
wake_up_cmv_ack(sc);
uea_leaves(INS_TO_USBDEV(sc));
return;
bad2:
uea_err(INS_TO_USBDEV(sc), "unexpected cmv received, "
"Function : %d, Subfunction : %d\n",
E4_FUNCTION_TYPE(cmv->wFunction),
E4_FUNCTION_SUBTYPE(cmv->wFunction));
uea_leaves(INS_TO_USBDEV(sc));
return;
}
static void uea_schedule_load_page_e1(struct uea_softc *sc,
struct intr_pkt *intr)
{
sc->pageno = intr->e1_bSwapPageNo;
sc->ovl = intr->e1_bOvl >> 4 | intr->e1_bOvl << 4;
schedule_work(&sc->task);
}
static void uea_schedule_load_page_e4(struct uea_softc *sc,
struct intr_pkt *intr)
{
sc->pageno = intr->e4_bSwapPageNo;
schedule_work(&sc->task);
}
/*
* interrupt handler
*/
static void uea_intr(struct urb *urb)
{
struct uea_softc *sc = urb->context;
struct intr_pkt *intr = urb->transfer_buffer;
int status = urb->status;
uea_enters(INS_TO_USBDEV(sc));
if (unlikely(status < 0)) {
uea_err(INS_TO_USBDEV(sc), "uea_intr() failed with %d\n",
status);
return;
}
/* device-to-host interrupt */
if (intr->bType != 0x08 || sc->booting) {
uea_err(INS_TO_USBDEV(sc), "wrong interrupt\n");
goto resubmit;
}
switch (le16_to_cpu(intr->wInterrupt)) {
case INT_LOADSWAPPAGE:
sc->schedule_load_page(sc, intr);
break;
case INT_INCOMINGCMV:
sc->dispatch_cmv(sc, intr);
break;
default:
uea_err(INS_TO_USBDEV(sc), "unknown interrupt %u\n",
le16_to_cpu(intr->wInterrupt));
}
resubmit:
usb_submit_urb(sc->urb_int, GFP_ATOMIC);
}
/*
* Start the modem : init the data and start the kernel thread
*/
static int uea_boot(struct uea_softc *sc)
{
int ret, size;
struct intr_pkt *intr;
uea_enters(INS_TO_USBDEV(sc));
if (UEA_CHIP_VERSION(sc) == EAGLE_IV) {
size = E4_INTR_PKT_SIZE;
sc->dispatch_cmv = uea_dispatch_cmv_e4;
sc->schedule_load_page = uea_schedule_load_page_e4;
sc->stat = uea_stat_e4;
sc->send_cmvs = uea_send_cmvs_e4;
INIT_WORK(&sc->task, uea_load_page_e4);
} else {
size = E1_INTR_PKT_SIZE;
sc->dispatch_cmv = uea_dispatch_cmv_e1;
sc->schedule_load_page = uea_schedule_load_page_e1;
sc->stat = uea_stat_e1;
sc->send_cmvs = uea_send_cmvs_e1;
INIT_WORK(&sc->task, uea_load_page_e1);
}
init_waitqueue_head(&sc->sync_q);
if (UEA_CHIP_VERSION(sc) == ADI930)
load_XILINX_firmware(sc);
intr = kmalloc(size, GFP_KERNEL);
if (!intr) {
uea_err(INS_TO_USBDEV(sc),
"cannot allocate interrupt package\n");
goto err0;
}
sc->urb_int = usb_alloc_urb(0, GFP_KERNEL);
if (!sc->urb_int) {
uea_err(INS_TO_USBDEV(sc), "cannot allocate interrupt URB\n");
goto err1;
}
usb_fill_int_urb(sc->urb_int, sc->usb_dev,
usb_rcvintpipe(sc->usb_dev, UEA_INTR_PIPE),
intr, size, uea_intr, sc,
sc->usb_dev->actconfig->interface[0]->altsetting[0].
endpoint[0].desc.bInterval);
ret = usb_submit_urb(sc->urb_int, GFP_KERNEL);
if (ret < 0) {
uea_err(INS_TO_USBDEV(sc),
"urb submition failed with error %d\n", ret);
goto err1;
}
/* Create worker thread, but don't start it here. Start it after
* all usbatm generic initialization is done.
*/
sc->kthread = kthread_create(uea_kthread, sc, "ueagle-atm");
if (IS_ERR(sc->kthread)) {
uea_err(INS_TO_USBDEV(sc), "failed to create thread\n");
goto err2;
}
uea_leaves(INS_TO_USBDEV(sc));
return 0;
err2:
usb_kill_urb(sc->urb_int);
err1:
usb_free_urb(sc->urb_int);
sc->urb_int = NULL;
kfree(intr);
err0:
uea_leaves(INS_TO_USBDEV(sc));
return -ENOMEM;
}
/*
* Stop the modem : kill kernel thread and free data
*/
static void uea_stop(struct uea_softc *sc)
{
int ret;
uea_enters(INS_TO_USBDEV(sc));
ret = kthread_stop(sc->kthread);
uea_dbg(INS_TO_USBDEV(sc), "kthread finish with status %d\n", ret);
uea_request(sc, UEA_SET_MODE, UEA_LOOPBACK_ON, 0, NULL);
usb_kill_urb(sc->urb_int);
kfree(sc->urb_int->transfer_buffer);
usb_free_urb(sc->urb_int);
/* flush the work item now that no one can schedule it */
flush_work_sync(&sc->task);
if (sc->dsp_firm)
release_firmware(sc->dsp_firm);
uea_leaves(INS_TO_USBDEV(sc));
}
/* sysfs interface */
static struct uea_softc *dev_to_uea(struct device *dev)
{
struct usb_interface *intf;
struct usbatm_data *usbatm;
intf = to_usb_interface(dev);
if (!intf)
return NULL;
usbatm = usb_get_intfdata(intf);
if (!usbatm)
return NULL;
return usbatm->driver_data;
}
static ssize_t read_status(struct device *dev, struct device_attribute *attr,
char *buf)
{
int ret = -ENODEV;
struct uea_softc *sc;
mutex_lock(&uea_mutex);
sc = dev_to_uea(dev);
if (!sc)
goto out;
ret = snprintf(buf, 10, "%08x\n", sc->stats.phy.state);
out:
mutex_unlock(&uea_mutex);
return ret;
}
static ssize_t reboot(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int ret = -ENODEV;
struct uea_softc *sc;
mutex_lock(&uea_mutex);
sc = dev_to_uea(dev);
if (!sc)
goto out;
sc->reset = 1;
ret = count;
out:
mutex_unlock(&uea_mutex);
return ret;
}
static DEVICE_ATTR(stat_status, S_IWUSR | S_IRUGO, read_status, reboot);
static ssize_t read_human_status(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret = -ENODEV;
int modem_state;
struct uea_softc *sc;
mutex_lock(&uea_mutex);
sc = dev_to_uea(dev);
if (!sc)
goto out;
if (UEA_CHIP_VERSION(sc) == EAGLE_IV) {
switch (sc->stats.phy.state) {
case 0x0: /* not yet synchronized */
case 0x1:
case 0x3:
case 0x4:
modem_state = 0;
break;
case 0x5: /* initialization */
case 0x6:
case 0x9:
case 0xa:
modem_state = 1;
break;
case 0x7: /* operational */
modem_state = 2;
break;
case 0x2: /* fail ... */
modem_state = 3;
break;
default: /* unknown */
modem_state = 4;
break;
}
} else
modem_state = GET_STATUS(sc->stats.phy.state);
switch (modem_state) {
case 0:
ret = sprintf(buf, "Modem is booting\n");
break;
case 1:
ret = sprintf(buf, "Modem is initializing\n");
break;
case 2:
ret = sprintf(buf, "Modem is operational\n");
break;
case 3:
ret = sprintf(buf, "Modem synchronization failed\n");
break;
default:
ret = sprintf(buf, "Modem state is unknown\n");
break;
}
out:
mutex_unlock(&uea_mutex);
return ret;
}
static DEVICE_ATTR(stat_human_status, S_IRUGO, read_human_status, NULL);
static ssize_t read_delin(struct device *dev, struct device_attribute *attr,
char *buf)
{
int ret = -ENODEV;
struct uea_softc *sc;
char *delin = "GOOD";
mutex_lock(&uea_mutex);
sc = dev_to_uea(dev);
if (!sc)
goto out;
if (UEA_CHIP_VERSION(sc) == EAGLE_IV) {
if (sc->stats.phy.flags & 0x4000)
delin = "RESET";
else if (sc->stats.phy.flags & 0x0001)
delin = "LOSS";
} else {
if (sc->stats.phy.flags & 0x0C00)
delin = "ERROR";
else if (sc->stats.phy.flags & 0x0030)
delin = "LOSS";
}
ret = sprintf(buf, "%s\n", delin);
out:
mutex_unlock(&uea_mutex);
return ret;
}
static DEVICE_ATTR(stat_delin, S_IRUGO, read_delin, NULL);
#define UEA_ATTR(name, reset) \
\
static ssize_t read_##name(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
int ret = -ENODEV; \
struct uea_softc *sc; \
\
mutex_lock(&uea_mutex); \
sc = dev_to_uea(dev); \
if (!sc) \
goto out; \
ret = snprintf(buf, 10, "%08x\n", sc->stats.phy.name); \
if (reset) \
sc->stats.phy.name = 0; \
out: \
mutex_unlock(&uea_mutex); \
return ret; \
} \
\
static DEVICE_ATTR(stat_##name, S_IRUGO, read_##name, NULL)
UEA_ATTR(mflags, 1);
UEA_ATTR(vidcpe, 0);
UEA_ATTR(usrate, 0);
UEA_ATTR(dsrate, 0);
UEA_ATTR(usattenuation, 0);
UEA_ATTR(dsattenuation, 0);
UEA_ATTR(usmargin, 0);
UEA_ATTR(dsmargin, 0);
UEA_ATTR(txflow, 0);
UEA_ATTR(rxflow, 0);
UEA_ATTR(uscorr, 0);
UEA_ATTR(dscorr, 0);
UEA_ATTR(usunc, 0);
UEA_ATTR(dsunc, 0);
UEA_ATTR(firmid, 0);
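/*
 * Illustrative sketch (hypothetical macro expansion, not part of the
 * original source): UEA_ATTR(mflags, 1) above expands roughly to
 *
 *   static ssize_t read_mflags(struct device *dev,
 *                              struct device_attribute *attr, char *buf)
 *   {
 *           ...
 *           ret = snprintf(buf, 10, "%08x\n", sc->stats.phy.mflags);
 *           sc->stats.phy.mflags = 0;  // clear-on-read since reset == 1
 *           ...
 *   }
 *   static DEVICE_ATTR(stat_mflags, S_IRUGO, read_mflags, NULL);
 *
 * i.e. one read-only sysfs file per PHY counter, with optional
 * clear-on-read behaviour controlled by the second macro argument.
 */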
/* Retrieve the device End System Identifier (MAC) */
static int uea_getesi(struct uea_softc *sc, u_char * esi)
{
unsigned char mac_str[2 * ETH_ALEN + 1];
int i;
if (usb_string(sc->usb_dev, sc->usb_dev->descriptor.iSerialNumber,
	       mac_str, sizeof(mac_str)) != 2 * ETH_ALEN)
return 1;
for (i = 0; i < ETH_ALEN; i++)
esi[i] = hex_to_bin(mac_str[2 * i]) * 16 +
hex_to_bin(mac_str[2 * i + 1]);
return 0;
}
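/*
 * Worked example (assumed serial string, for illustration only): an
 * iSerialNumber of "0090D0123456" yields esi[] = {0x00, 0x90, 0xd0,
 * 0x12, 0x34, 0x56}; any serial that is not exactly 12 hex digits
 * makes uea_getesi() return 1 and the ATM open fails.
 */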
/* ATM stuff */
static int uea_atm_open(struct usbatm_data *usbatm, struct atm_dev *atm_dev)
{
struct uea_softc *sc = usbatm->driver_data;
return uea_getesi(sc, atm_dev->esi);
}
static int uea_heavy(struct usbatm_data *usbatm, struct usb_interface *intf)
{
struct uea_softc *sc = usbatm->driver_data;
wait_event_interruptible(sc->sync_q, IS_OPERATIONAL(sc));
return 0;
}
static int claim_interface(struct usb_device *usb_dev,
struct usbatm_data *usbatm, int ifnum)
{
int ret;
struct usb_interface *intf = usb_ifnum_to_if(usb_dev, ifnum);
if (!intf) {
uea_err(usb_dev, "interface %d not found\n", ifnum);
return -ENODEV;
}
ret = usb_driver_claim_interface(&uea_driver, intf, usbatm);
if (ret != 0)
uea_err(usb_dev, "can't claim interface %d, error %d\n", ifnum,
ret);
return ret;
}
static struct attribute *attrs[] = {
&dev_attr_stat_status.attr,
&dev_attr_stat_mflags.attr,
&dev_attr_stat_human_status.attr,
&dev_attr_stat_delin.attr,
&dev_attr_stat_vidcpe.attr,
&dev_attr_stat_usrate.attr,
&dev_attr_stat_dsrate.attr,
&dev_attr_stat_usattenuation.attr,
&dev_attr_stat_dsattenuation.attr,
&dev_attr_stat_usmargin.attr,
&dev_attr_stat_dsmargin.attr,
&dev_attr_stat_txflow.attr,
&dev_attr_stat_rxflow.attr,
&dev_attr_stat_uscorr.attr,
&dev_attr_stat_dscorr.attr,
&dev_attr_stat_usunc.attr,
&dev_attr_stat_dsunc.attr,
&dev_attr_stat_firmid.attr,
NULL,
};
static struct attribute_group attr_grp = {
.attrs = attrs,
};
static int uea_bind(struct usbatm_data *usbatm, struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *usb = interface_to_usbdev(intf);
struct uea_softc *sc;
int ret, ifnum = intf->altsetting->desc.bInterfaceNumber;
unsigned int alt;
uea_enters(usb);
/* interface 0 is for firmware/monitoring */
if (ifnum != UEA_INTR_IFACE_NO)
return -ENODEV;
usbatm->flags = (sync_wait[modem_index] ? 0 : UDSL_SKIP_HEAVY_INIT);
/* interface 1 is for outbound traffic */
ret = claim_interface(usb, usbatm, UEA_US_IFACE_NO);
if (ret < 0)
return ret;
/* ADI930 has only 2 interfaces and inbound traffic is on interface 1 */
if (UEA_CHIP_VERSION(id) != ADI930) {
/* interface 2 is for inbound traffic */
ret = claim_interface(usb, usbatm, UEA_DS_IFACE_NO);
if (ret < 0)
return ret;
}
sc = kzalloc(sizeof(struct uea_softc), GFP_KERNEL);
if (!sc) {
uea_err(usb, "uea_init: not enough memory !\n");
return -ENOMEM;
}
sc->usb_dev = usb;
usbatm->driver_data = sc;
sc->usbatm = usbatm;
sc->modem_index = (modem_index < NB_MODEM) ? modem_index++ : 0;
sc->driver_info = id->driver_info;
/* first try to use module parameter */
if (annex[sc->modem_index] == 1)
sc->annex = ANNEXA;
else if (annex[sc->modem_index] == 2)
sc->annex = ANNEXB;
/* try to autodetect annex */
else if (sc->driver_info & AUTO_ANNEX_A)
sc->annex = ANNEXA;
else if (sc->driver_info & AUTO_ANNEX_B)
sc->annex = ANNEXB;
else
sc->annex = (le16_to_cpu
(sc->usb_dev->descriptor.bcdDevice) & 0x80) ? ANNEXB : ANNEXA;
alt = altsetting[sc->modem_index];
/* ADI930 doesn't support iso */
if (UEA_CHIP_VERSION(id) != ADI930 && alt > 0) {
if (alt <= 8 &&
usb_set_interface(usb, UEA_DS_IFACE_NO, alt) == 0) {
uea_dbg(usb, "set alternate %u for 2 interface\n", alt);
uea_info(usb, "using iso mode\n");
usbatm->flags |= UDSL_USE_ISOC | UDSL_IGNORE_EILSEQ;
} else {
uea_err(usb, "setting alternate %u failed for "
"2 interface, using bulk mode\n", alt);
}
}
ret = sysfs_create_group(&intf->dev.kobj, &attr_grp);
if (ret < 0)
goto error;
ret = uea_boot(sc);
if (ret < 0)
goto error_rm_grp;
return 0;
error_rm_grp:
sysfs_remove_group(&intf->dev.kobj, &attr_grp);
error:
kfree(sc);
return ret;
}
static void uea_unbind(struct usbatm_data *usbatm, struct usb_interface *intf)
{
struct uea_softc *sc = usbatm->driver_data;
sysfs_remove_group(&intf->dev.kobj, &attr_grp);
uea_stop(sc);
kfree(sc);
}
static struct usbatm_driver uea_usbatm_driver = {
.driver_name = "ueagle-atm",
.bind = uea_bind,
.atm_start = uea_atm_open,
.unbind = uea_unbind,
.heavy_init = uea_heavy,
.bulk_in = UEA_BULK_DATA_PIPE,
.bulk_out = UEA_BULK_DATA_PIPE,
.isoc_in = UEA_ISO_DATA_PIPE,
};
static int uea_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_device *usb = interface_to_usbdev(intf);
int ret;
uea_enters(usb);
uea_info(usb, "ADSL device founded vid (%#X) pid (%#X) Rev (%#X): %s\n",
le16_to_cpu(usb->descriptor.idVendor),
le16_to_cpu(usb->descriptor.idProduct),
le16_to_cpu(usb->descriptor.bcdDevice),
chip_name[UEA_CHIP_VERSION(id)]);
usb_reset_device(usb);
if (UEA_IS_PREFIRM(id))
return uea_load_firmware(usb, UEA_CHIP_VERSION(id));
ret = usbatm_usb_probe(intf, id, &uea_usbatm_driver);
if (ret == 0) {
struct usbatm_data *usbatm = usb_get_intfdata(intf);
struct uea_softc *sc = usbatm->driver_data;
/* Ensure carrier is initialized to off as early as possible */
UPDATE_ATM_SIGNAL(ATM_PHY_SIG_LOST);
/* Only start the worker thread when all init is done */
wake_up_process(sc->kthread);
}
return ret;
}
static void uea_disconnect(struct usb_interface *intf)
{
struct usb_device *usb = interface_to_usbdev(intf);
int ifnum = intf->altsetting->desc.bInterfaceNumber;
uea_enters(usb);
/* ADI930 has 2 interfaces and the Eagle has 3.
 * A pre-firmware device has only one interface.
 */
if (usb->config->desc.bNumInterfaces != 1 && ifnum == 0) {
mutex_lock(&uea_mutex);
usbatm_usb_disconnect(intf);
mutex_unlock(&uea_mutex);
uea_info(usb, "ADSL device removed\n");
}
uea_leaves(usb);
}
/*
* List of supported VID/PID
*/
static const struct usb_device_id uea_ids[] = {
{USB_DEVICE(ANALOG_VID, ADI930_PID_PREFIRM),
.driver_info = ADI930 | PREFIRM},
{USB_DEVICE(ANALOG_VID, ADI930_PID_PSTFIRM),
.driver_info = ADI930 | PSTFIRM},
{USB_DEVICE(ANALOG_VID, EAGLE_I_PID_PREFIRM),
.driver_info = EAGLE_I | PREFIRM},
{USB_DEVICE(ANALOG_VID, EAGLE_I_PID_PSTFIRM),
.driver_info = EAGLE_I | PSTFIRM},
{USB_DEVICE(ANALOG_VID, EAGLE_II_PID_PREFIRM),
.driver_info = EAGLE_II | PREFIRM},
{USB_DEVICE(ANALOG_VID, EAGLE_II_PID_PSTFIRM),
.driver_info = EAGLE_II | PSTFIRM},
{USB_DEVICE(ANALOG_VID, EAGLE_IIC_PID_PREFIRM),
.driver_info = EAGLE_II | PREFIRM},
{USB_DEVICE(ANALOG_VID, EAGLE_IIC_PID_PSTFIRM),
.driver_info = EAGLE_II | PSTFIRM},
{USB_DEVICE(ANALOG_VID, EAGLE_III_PID_PREFIRM),
.driver_info = EAGLE_III | PREFIRM},
{USB_DEVICE(ANALOG_VID, EAGLE_III_PID_PSTFIRM),
.driver_info = EAGLE_III | PSTFIRM},
{USB_DEVICE(ANALOG_VID, EAGLE_IV_PID_PREFIRM),
.driver_info = EAGLE_IV | PREFIRM},
{USB_DEVICE(ANALOG_VID, EAGLE_IV_PID_PSTFIRM),
.driver_info = EAGLE_IV | PSTFIRM},
{USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_A_PID_PREFIRM),
.driver_info = EAGLE_I | PREFIRM},
{USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_A_PID_PSTFIRM),
.driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A},
{USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_B_PID_PREFIRM),
.driver_info = EAGLE_I | PREFIRM},
{USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_I_B_PID_PSTFIRM),
.driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B},
{USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_A_PID_PREFIRM),
.driver_info = EAGLE_II | PREFIRM},
{USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_A_PID_PSTFIRM),
.driver_info = EAGLE_II | PSTFIRM | AUTO_ANNEX_A},
{USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_B_PID_PREFIRM),
.driver_info = EAGLE_II | PREFIRM},
{USB_DEVICE(DEVOLO_VID, DEVOLO_EAGLE_II_B_PID_PSTFIRM),
.driver_info = EAGLE_II | PSTFIRM | AUTO_ANNEX_B},
{USB_DEVICE(ELSA_VID, ELSA_PID_PREFIRM),
.driver_info = ADI930 | PREFIRM},
{USB_DEVICE(ELSA_VID, ELSA_PID_PSTFIRM),
.driver_info = ADI930 | PSTFIRM},
{USB_DEVICE(ELSA_VID, ELSA_PID_A_PREFIRM),
.driver_info = ADI930 | PREFIRM},
{USB_DEVICE(ELSA_VID, ELSA_PID_A_PSTFIRM),
.driver_info = ADI930 | PSTFIRM | AUTO_ANNEX_A},
{USB_DEVICE(ELSA_VID, ELSA_PID_B_PREFIRM),
.driver_info = ADI930 | PREFIRM},
{USB_DEVICE(ELSA_VID, ELSA_PID_B_PSTFIRM),
.driver_info = ADI930 | PSTFIRM | AUTO_ANNEX_B},
{USB_DEVICE(USR_VID, MILLER_A_PID_PREFIRM),
.driver_info = EAGLE_I | PREFIRM},
{USB_DEVICE(USR_VID, MILLER_A_PID_PSTFIRM),
.driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A},
{USB_DEVICE(USR_VID, MILLER_B_PID_PREFIRM),
.driver_info = EAGLE_I | PREFIRM},
{USB_DEVICE(USR_VID, MILLER_B_PID_PSTFIRM),
.driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B},
{USB_DEVICE(USR_VID, HEINEKEN_A_PID_PREFIRM),
.driver_info = EAGLE_I | PREFIRM},
{USB_DEVICE(USR_VID, HEINEKEN_A_PID_PSTFIRM),
.driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_A},
{USB_DEVICE(USR_VID, HEINEKEN_B_PID_PREFIRM),
.driver_info = EAGLE_I | PREFIRM},
{USB_DEVICE(USR_VID, HEINEKEN_B_PID_PSTFIRM),
.driver_info = EAGLE_I | PSTFIRM | AUTO_ANNEX_B},
{}
};
/*
* USB driver descriptor
*/
static struct usb_driver uea_driver = {
.name = "ueagle-atm",
.id_table = uea_ids,
.probe = uea_probe,
.disconnect = uea_disconnect,
};
MODULE_DEVICE_TABLE(usb, uea_ids);
module_usb_driver(uea_driver);
MODULE_AUTHOR("Damien Bergamini/Matthieu Castet/Stanislaw W. Gruszka");
MODULE_DESCRIPTION("ADI 930/Eagle USB ADSL Modem driver");
MODULE_LICENSE("Dual BSD/GPL");
|
gpl-2.0
|
TheEdge-/Leaping_kernel
|
drivers/usb/atm/speedtch.c
|
4860
|
28906
|
/******************************************************************************
* speedtch.c - Alcatel SpeedTouch USB xDSL modem driver
*
* Copyright (C) 2001, Alcatel
* Copyright (C) 2003, Duncan Sands
* Copyright (C) 2004, David Woodhouse
*
* Based on "modem_run.c", copyright (C) 2001, Benoit Papillault
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <asm/page.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/usb/ch9.h>
#include <linux/workqueue.h>
#include "usbatm.h"
#define DRIVER_AUTHOR "Johan Verrept, Duncan Sands <duncan.sands@free.fr>"
#define DRIVER_VERSION "1.10"
#define DRIVER_DESC "Alcatel SpeedTouch USB driver version " DRIVER_VERSION
static const char speedtch_driver_name[] = "speedtch";
#define CTRL_TIMEOUT 2000 /* milliseconds */
#define DATA_TIMEOUT 2000 /* milliseconds */
#define OFFSET_7 0 /* size 1 */
#define OFFSET_b 1 /* size 8 */
#define OFFSET_d 9 /* size 4 */
#define OFFSET_e 13 /* size 1 */
#define OFFSET_f 14 /* size 1 */
#define SIZE_7 1
#define SIZE_b 8
#define SIZE_d 4
#define SIZE_e 1
#define SIZE_f 1
#define MIN_POLL_DELAY 5000 /* milliseconds */
#define MAX_POLL_DELAY 60000 /* milliseconds */
#define RESUBMIT_DELAY 1000 /* milliseconds */
#define DEFAULT_BULK_ALTSETTING 1
#define DEFAULT_ISOC_ALTSETTING 3
#define DEFAULT_DL_512_FIRST 0
#define DEFAULT_ENABLE_ISOC 0
#define DEFAULT_SW_BUFFERING 0
static unsigned int altsetting = 0; /* zero means: use the default */
static bool dl_512_first = DEFAULT_DL_512_FIRST;
static bool enable_isoc = DEFAULT_ENABLE_ISOC;
static bool sw_buffering = DEFAULT_SW_BUFFERING;
#define DEFAULT_B_MAX_DSL 8128
#define DEFAULT_MODEM_MODE 11
#define MODEM_OPTION_LENGTH 16
static const unsigned char DEFAULT_MODEM_OPTION[MODEM_OPTION_LENGTH] = {
0x10, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
static unsigned int BMaxDSL = DEFAULT_B_MAX_DSL;
static unsigned char ModemMode = DEFAULT_MODEM_MODE;
static unsigned char ModemOption[MODEM_OPTION_LENGTH];
static unsigned int num_ModemOption;
module_param(altsetting, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(altsetting,
"Alternative setting for data interface (bulk_default: "
__MODULE_STRING(DEFAULT_BULK_ALTSETTING) "; isoc_default: "
__MODULE_STRING(DEFAULT_ISOC_ALTSETTING) ")");
module_param(dl_512_first, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dl_512_first,
"Read 512 bytes before sending firmware (default: "
__MODULE_STRING(DEFAULT_DL_512_FIRST) ")");
module_param(enable_isoc, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_isoc,
"Use isochronous transfers if available (default: "
__MODULE_STRING(DEFAULT_ENABLE_ISOC) ")");
module_param(sw_buffering, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(sw_buffering,
"Enable software buffering (default: "
__MODULE_STRING(DEFAULT_SW_BUFFERING) ")");
module_param(BMaxDSL, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(BMaxDSL,
"default: " __MODULE_STRING(DEFAULT_B_MAX_DSL));
module_param(ModemMode, byte, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ModemMode,
"default: " __MODULE_STRING(DEFAULT_MODEM_MODE));
module_param_array(ModemOption, byte, &num_ModemOption, S_IRUGO);
MODULE_PARM_DESC(ModemOption, "default: 0x10,0x00,0x00,0x00,0x20");
#define INTERFACE_DATA 1
#define ENDPOINT_INT 0x81
#define ENDPOINT_BULK_DATA 0x07
#define ENDPOINT_ISOC_DATA 0x07
#define ENDPOINT_FIRMWARE 0x05
struct speedtch_params {
unsigned int altsetting;
unsigned int BMaxDSL;
unsigned char ModemMode;
unsigned char ModemOption[MODEM_OPTION_LENGTH];
};
struct speedtch_instance_data {
struct usbatm_data *usbatm;
struct speedtch_params params; /* set in probe, constant afterwards */
struct timer_list status_check_timer;
struct work_struct status_check_work;
unsigned char last_status;
int poll_delay; /* milliseconds */
struct timer_list resubmit_timer;
struct urb *int_urb;
unsigned char int_data[16];
unsigned char scratch_buffer[16];
};
/***************
** firmware **
***************/
static void speedtch_set_swbuff(struct speedtch_instance_data *instance, int state)
{
struct usbatm_data *usbatm = instance->usbatm;
struct usb_device *usb_dev = usbatm->usb_dev;
int ret;
ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
0x32, 0x40, state ? 0x01 : 0x00, 0x00, NULL, 0, CTRL_TIMEOUT);
if (ret < 0)
usb_warn(usbatm,
"%sabling SW buffering: usb_control_msg returned %d\n",
state ? "En" : "Dis", ret);
else
dbg("speedtch_set_swbuff: %sbled SW buffering", state ? "En" : "Dis");
}
static void speedtch_test_sequence(struct speedtch_instance_data *instance)
{
struct usbatm_data *usbatm = instance->usbatm;
struct usb_device *usb_dev = usbatm->usb_dev;
unsigned char *buf = instance->scratch_buffer;
int ret;
/* URB 147 */
buf[0] = 0x1c;
buf[1] = 0x50;
ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
0x01, 0x40, 0x0b, 0x00, buf, 2, CTRL_TIMEOUT);
if (ret < 0)
usb_warn(usbatm, "%s failed on URB147: %d\n", __func__, ret);
/* URB 148 */
buf[0] = 0x32;
buf[1] = 0x00;
ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
0x01, 0x40, 0x02, 0x00, buf, 2, CTRL_TIMEOUT);
if (ret < 0)
usb_warn(usbatm, "%s failed on URB148: %d\n", __func__, ret);
/* URB 149 */
buf[0] = 0x01;
buf[1] = 0x00;
buf[2] = 0x01;
ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
0x01, 0x40, 0x03, 0x00, buf, 3, CTRL_TIMEOUT);
if (ret < 0)
usb_warn(usbatm, "%s failed on URB149: %d\n", __func__, ret);
/* URB 150 */
buf[0] = 0x01;
buf[1] = 0x00;
buf[2] = 0x01;
ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
0x01, 0x40, 0x04, 0x00, buf, 3, CTRL_TIMEOUT);
if (ret < 0)
usb_warn(usbatm, "%s failed on URB150: %d\n", __func__, ret);
/* Extra initialisation in recent drivers - gives higher speeds */
/* URBext1 */
buf[0] = instance->params.ModemMode;
ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
0x01, 0x40, 0x11, 0x00, buf, 1, CTRL_TIMEOUT);
if (ret < 0)
usb_warn(usbatm, "%s failed on URBext1: %d\n", __func__, ret);
/* URBext2 */
/* This seems to be the one which actually triggers the higher sync
   rate -- it requires the new firmware to take effect, although it
   is harmless with older firmware */
ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
0x01, 0x40, 0x14, 0x00,
instance->params.ModemOption,
MODEM_OPTION_LENGTH, CTRL_TIMEOUT);
if (ret < 0)
usb_warn(usbatm, "%s failed on URBext2: %d\n", __func__, ret);
/* URBext3 */
buf[0] = instance->params.BMaxDSL & 0xff;
buf[1] = instance->params.BMaxDSL >> 8;
ret = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
0x01, 0x40, 0x12, 0x00, buf, 2, CTRL_TIMEOUT);
if (ret < 0)
usb_warn(usbatm, "%s failed on URBext3: %d\n", __func__, ret);
}
static int speedtch_upload_firmware(struct speedtch_instance_data *instance,
const struct firmware *fw1,
const struct firmware *fw2)
{
unsigned char *buffer;
struct usbatm_data *usbatm = instance->usbatm;
struct usb_device *usb_dev = usbatm->usb_dev;
int actual_length;
int ret = 0;
int offset;
usb_dbg(usbatm, "%s entered\n", __func__);
if (!(buffer = (unsigned char *)__get_free_page(GFP_KERNEL))) {
ret = -ENOMEM;
usb_dbg(usbatm, "%s: no memory for buffer!\n", __func__);
goto out;
}
if (!usb_ifnum_to_if(usb_dev, 2)) {
ret = -ENODEV;
usb_dbg(usbatm, "%s: interface not found!\n", __func__);
goto out_free;
}
/* URB 7 */
if (dl_512_first) { /* some modems need a read before writing the firmware */
ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, ENDPOINT_FIRMWARE),
buffer, 0x200, &actual_length, 2000);
if (ret < 0 && ret != -ETIMEDOUT)
usb_warn(usbatm, "%s: read BLOCK0 from modem failed (%d)!\n", __func__, ret);
else
usb_dbg(usbatm, "%s: BLOCK0 downloaded (%d bytes)\n", __func__, ret);
}
/* URB 8 : both leds are static green */
for (offset = 0; offset < fw1->size; offset += PAGE_SIZE) {
int thislen = min_t(int, PAGE_SIZE, fw1->size - offset);
memcpy(buffer, fw1->data + offset, thislen);
ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, ENDPOINT_FIRMWARE),
buffer, thislen, &actual_length, DATA_TIMEOUT);
if (ret < 0) {
usb_err(usbatm, "%s: write BLOCK1 to modem failed (%d)!\n", __func__, ret);
goto out_free;
}
usb_dbg(usbatm, "%s: BLOCK1 uploaded (%zu bytes)\n", __func__, fw1->size);
}
/* USB led blinking green, ADSL led off */
/* URB 11 */
ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, ENDPOINT_FIRMWARE),
buffer, 0x200, &actual_length, DATA_TIMEOUT);
if (ret < 0) {
usb_err(usbatm, "%s: read BLOCK2 from modem failed (%d)!\n", __func__, ret);
goto out_free;
}
usb_dbg(usbatm, "%s: BLOCK2 downloaded (%d bytes)\n", __func__, actual_length);
/* URBs 12 to 139 - USB led blinking green, ADSL led off */
for (offset = 0; offset < fw2->size; offset += PAGE_SIZE) {
int thislen = min_t(int, PAGE_SIZE, fw2->size - offset);
memcpy(buffer, fw2->data + offset, thislen);
ret = usb_bulk_msg(usb_dev, usb_sndbulkpipe(usb_dev, ENDPOINT_FIRMWARE),
buffer, thislen, &actual_length, DATA_TIMEOUT);
if (ret < 0) {
usb_err(usbatm, "%s: write BLOCK3 to modem failed (%d)!\n", __func__, ret);
goto out_free;
}
}
usb_dbg(usbatm, "%s: BLOCK3 uploaded (%zu bytes)\n", __func__, fw2->size);
/* USB led static green, ADSL led static red */
/* URB 142 */
ret = usb_bulk_msg(usb_dev, usb_rcvbulkpipe(usb_dev, ENDPOINT_FIRMWARE),
buffer, 0x200, &actual_length, DATA_TIMEOUT);
if (ret < 0) {
usb_err(usbatm, "%s: read BLOCK4 from modem failed (%d)!\n", __func__, ret);
goto out_free;
}
/* success */
usb_dbg(usbatm, "%s: BLOCK4 downloaded (%d bytes)\n", __func__, actual_length);
/* Delay to allow firmware to start up. We can do this here
because we're in our own kernel thread anyway. */
msleep_interruptible(1000);
if ((ret = usb_set_interface(usb_dev, INTERFACE_DATA, instance->params.altsetting)) < 0) {
usb_err(usbatm, "%s: setting interface to %d failed (%d)!\n", __func__, instance->params.altsetting, ret);
goto out_free;
}
/* Enable software buffering, if requested */
if (sw_buffering)
speedtch_set_swbuff(instance, 1);
/* Magic spell; don't ask us what this does */
speedtch_test_sequence(instance);
ret = 0;
out_free:
free_page((unsigned long)buffer);
out:
return ret;
}
static int speedtch_find_firmware(struct usbatm_data *usbatm, struct usb_interface *intf,
int phase, const struct firmware **fw_p)
{
struct device *dev = &intf->dev;
const u16 bcdDevice = le16_to_cpu(interface_to_usbdev(intf)->descriptor.bcdDevice);
const u8 major_revision = bcdDevice >> 8;
const u8 minor_revision = bcdDevice & 0xff;
char buf[24];
sprintf(buf, "speedtch-%d.bin.%x.%02x", phase, major_revision, minor_revision);
usb_dbg(usbatm, "%s: looking for %s\n", __func__, buf);
if (request_firmware(fw_p, buf, dev)) {
sprintf(buf, "speedtch-%d.bin.%x", phase, major_revision);
usb_dbg(usbatm, "%s: looking for %s\n", __func__, buf);
if (request_firmware(fw_p, buf, dev)) {
sprintf(buf, "speedtch-%d.bin", phase);
usb_dbg(usbatm, "%s: looking for %s\n", __func__, buf);
if (request_firmware(fw_p, buf, dev)) {
usb_err(usbatm, "%s: no stage %d firmware found!\n", __func__, phase);
return -ENOENT;
}
}
}
usb_info(usbatm, "found stage %d firmware %s\n", phase, buf);
return 0;
}
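/*
 * Worked example (hypothetical device revision, for illustration):
 * with bcdDevice == 0x0400 and phase == 1 the lookup order above is
 *   speedtch-1.bin.4.00  ->  speedtch-1.bin.4  ->  speedtch-1.bin
 * i.e. the most specific revision match first, the generic name last.
 */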
static int speedtch_heavy_init(struct usbatm_data *usbatm, struct usb_interface *intf)
{
const struct firmware *fw1, *fw2;
struct speedtch_instance_data *instance = usbatm->driver_data;
int ret;
if ((ret = speedtch_find_firmware(usbatm, intf, 1, &fw1)) < 0)
return ret;
if ((ret = speedtch_find_firmware(usbatm, intf, 2, &fw2)) < 0) {
release_firmware(fw1);
return ret;
}
if ((ret = speedtch_upload_firmware(instance, fw1, fw2)) < 0)
usb_err(usbatm, "%s: firmware upload failed (%d)!\n", __func__, ret);
release_firmware(fw2);
release_firmware(fw1);
return ret;
}
/**********
** ATM **
**********/
static int speedtch_read_status(struct speedtch_instance_data *instance)
{
struct usbatm_data *usbatm = instance->usbatm;
struct usb_device *usb_dev = usbatm->usb_dev;
unsigned char *buf = instance->scratch_buffer;
int ret;
memset(buf, 0, 16);
ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
0x12, 0xc0, 0x07, 0x00, buf + OFFSET_7, SIZE_7,
CTRL_TIMEOUT);
if (ret < 0) {
atm_dbg(usbatm, "%s: MSG 7 failed\n", __func__);
return ret;
}
ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
0x12, 0xc0, 0x0b, 0x00, buf + OFFSET_b, SIZE_b,
CTRL_TIMEOUT);
if (ret < 0) {
atm_dbg(usbatm, "%s: MSG B failed\n", __func__);
return ret;
}
ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
0x12, 0xc0, 0x0d, 0x00, buf + OFFSET_d, SIZE_d,
CTRL_TIMEOUT);
if (ret < 0) {
atm_dbg(usbatm, "%s: MSG D failed\n", __func__);
return ret;
}
ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
0x01, 0xc0, 0x0e, 0x00, buf + OFFSET_e, SIZE_e,
CTRL_TIMEOUT);
if (ret < 0) {
atm_dbg(usbatm, "%s: MSG E failed\n", __func__);
return ret;
}
ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
0x01, 0xc0, 0x0f, 0x00, buf + OFFSET_f, SIZE_f,
CTRL_TIMEOUT);
if (ret < 0) {
atm_dbg(usbatm, "%s: MSG F failed\n", __func__);
return ret;
}
return 0;
}
static int speedtch_start_synchro(struct speedtch_instance_data *instance)
{
struct usbatm_data *usbatm = instance->usbatm;
struct usb_device *usb_dev = usbatm->usb_dev;
unsigned char *buf = instance->scratch_buffer;
int ret;
atm_dbg(usbatm, "%s entered\n", __func__);
memset(buf, 0, 2);
ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
0x12, 0xc0, 0x04, 0x00,
buf, 2, CTRL_TIMEOUT);
if (ret < 0)
atm_warn(usbatm, "failed to start ADSL synchronisation: %d\n", ret);
else
atm_dbg(usbatm, "%s: modem prodded. %d bytes returned: %02x %02x\n",
__func__, ret, buf[0], buf[1]);
return ret;
}
static void speedtch_check_status(struct work_struct *work)
{
struct speedtch_instance_data *instance =
container_of(work, struct speedtch_instance_data,
status_check_work);
struct usbatm_data *usbatm = instance->usbatm;
struct atm_dev *atm_dev = usbatm->atm_dev;
unsigned char *buf = instance->scratch_buffer;
int down_speed, up_speed, ret;
unsigned char status;
#ifdef VERBOSE_DEBUG
atm_dbg(usbatm, "%s entered\n", __func__);
#endif
ret = speedtch_read_status(instance);
if (ret < 0) {
atm_warn(usbatm, "error %d fetching device status\n", ret);
instance->poll_delay = min(2 * instance->poll_delay, MAX_POLL_DELAY);
return;
}
instance->poll_delay = max(instance->poll_delay / 2, MIN_POLL_DELAY);
status = buf[OFFSET_7];
if ((status != instance->last_status) || !status) {
atm_dbg(usbatm, "%s: line state 0x%02x\n", __func__, status);
switch (status) {
case 0:
atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST);
if (instance->last_status)
atm_info(usbatm, "ADSL line is down\n");
/* It may never resync again unless we ask it to... */
ret = speedtch_start_synchro(instance);
break;
case 0x08:
atm_dev_signal_change(atm_dev, ATM_PHY_SIG_UNKNOWN);
atm_info(usbatm, "ADSL line is blocked?\n");
break;
case 0x10:
atm_dev_signal_change(atm_dev, ATM_PHY_SIG_LOST);
atm_info(usbatm, "ADSL line is synchronising\n");
break;
case 0x20:
down_speed = buf[OFFSET_b] | (buf[OFFSET_b + 1] << 8)
| (buf[OFFSET_b + 2] << 16) | (buf[OFFSET_b + 3] << 24);
up_speed = buf[OFFSET_b + 4] | (buf[OFFSET_b + 5] << 8)
| (buf[OFFSET_b + 6] << 16) | (buf[OFFSET_b + 7] << 24);
if (!(down_speed & 0x0000ffff) && !(up_speed & 0x0000ffff)) {
down_speed >>= 16;
up_speed >>= 16;
}
atm_dev->link_rate = down_speed * 1000 / 424;
atm_dev_signal_change(atm_dev, ATM_PHY_SIG_FOUND);
atm_info(usbatm,
"ADSL line is up (%d kb/s down | %d kb/s up)\n",
down_speed, up_speed);
break;
default:
atm_dev_signal_change(atm_dev, ATM_PHY_SIG_UNKNOWN);
atm_info(usbatm, "unknown line state %02x\n", status);
break;
}
instance->last_status = status;
}
}
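/*
 * Note on the link_rate conversion above (editorial, not from the
 * original author): the modem reports speeds in kbit/s while the ATM
 * layer wants cells per second. One ATM cell is 53 bytes = 424 bits,
 * so link_rate = down_speed * 1000 / 424 converts kbit/s to cells/s.
 */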
static void speedtch_status_poll(unsigned long data)
{
struct speedtch_instance_data *instance = (void *)data;
schedule_work(&instance->status_check_work);
/* The following check is racy, but the race is harmless */
if (instance->poll_delay < MAX_POLL_DELAY)
mod_timer(&instance->status_check_timer, jiffies + msecs_to_jiffies(instance->poll_delay));
else
atm_warn(instance->usbatm, "Too many failures - disabling line status polling\n");
}
static void speedtch_resubmit_int(unsigned long data)
{
struct speedtch_instance_data *instance = (void *)data;
struct urb *int_urb = instance->int_urb;
int ret;
atm_dbg(instance->usbatm, "%s entered\n", __func__);
if (int_urb) {
ret = usb_submit_urb(int_urb, GFP_ATOMIC);
if (!ret)
schedule_work(&instance->status_check_work);
else {
atm_dbg(instance->usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY));
}
}
}
static void speedtch_handle_int(struct urb *int_urb)
{
struct speedtch_instance_data *instance = int_urb->context;
struct usbatm_data *usbatm = instance->usbatm;
unsigned int count = int_urb->actual_length;
int status = int_urb->status;
int ret;
/* The magic interrupt for "up state" */
static const unsigned char up_int[6] = { 0xa1, 0x00, 0x01, 0x00, 0x00, 0x00 };
/* The magic interrupt for "down state" */
static const unsigned char down_int[6] = { 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00 };
atm_dbg(usbatm, "%s entered\n", __func__);
if (status < 0) {
atm_dbg(usbatm, "%s: nonzero urb status %d!\n", __func__, status);
goto fail;
}
if ((count == 6) && !memcmp(up_int, instance->int_data, 6)) {
del_timer(&instance->status_check_timer);
atm_info(usbatm, "DSL line goes up\n");
} else if ((count == 6) && !memcmp(down_int, instance->int_data, 6)) {
atm_info(usbatm, "DSL line goes down\n");
} else {
int i;
atm_dbg(usbatm, "%s: unknown interrupt packet of length %d:", __func__, count);
for (i = 0; i < count; i++)
printk(" %02x", instance->int_data[i]);
printk("\n");
goto fail;
}
if ((int_urb = instance->int_urb)) {
ret = usb_submit_urb(int_urb, GFP_ATOMIC);
schedule_work(&instance->status_check_work);
if (ret < 0) {
atm_dbg(usbatm, "%s: usb_submit_urb failed with result %d\n", __func__, ret);
goto fail;
}
}
return;
fail:
if ((int_urb = instance->int_urb))
mod_timer(&instance->resubmit_timer, jiffies + msecs_to_jiffies(RESUBMIT_DELAY));
}
static int speedtch_atm_start(struct usbatm_data *usbatm, struct atm_dev *atm_dev)
{
struct usb_device *usb_dev = usbatm->usb_dev;
struct speedtch_instance_data *instance = usbatm->driver_data;
int i, ret;
unsigned char mac_str[13];
atm_dbg(usbatm, "%s entered\n", __func__);
/* Set MAC address, it is stored in the serial number */
memset(atm_dev->esi, 0, sizeof(atm_dev->esi));
if (usb_string(usb_dev, usb_dev->descriptor.iSerialNumber, mac_str, sizeof(mac_str)) == 12) {
for (i = 0; i < 6; i++)
atm_dev->esi[i] = (hex_to_bin(mac_str[i * 2]) << 4) +
hex_to_bin(mac_str[i * 2 + 1]);
}
/* Start modem synchronisation */
ret = speedtch_start_synchro(instance);
/* Set up interrupt endpoint */
if (instance->int_urb) {
ret = usb_submit_urb(instance->int_urb, GFP_KERNEL);
if (ret < 0) {
/* Doesn't matter; we'll poll anyway */
atm_dbg(usbatm, "%s: submission of interrupt URB failed (%d)!\n", __func__, ret);
usb_free_urb(instance->int_urb);
instance->int_urb = NULL;
}
}
/* Start status polling */
mod_timer(&instance->status_check_timer, jiffies + msecs_to_jiffies(1000));
return 0;
}
static void speedtch_atm_stop(struct usbatm_data *usbatm, struct atm_dev *atm_dev)
{
struct speedtch_instance_data *instance = usbatm->driver_data;
struct urb *int_urb = instance->int_urb;
atm_dbg(usbatm, "%s entered\n", __func__);
del_timer_sync(&instance->status_check_timer);
/*
* Since resubmit_timer and int_urb can schedule themselves and
* each other, shutting them down correctly takes some care
*/
instance->int_urb = NULL; /* signal shutdown */
mb();
usb_kill_urb(int_urb);
del_timer_sync(&instance->resubmit_timer);
/*
* At this point, speedtch_handle_int and speedtch_resubmit_int
* can run or be running, but instance->int_urb == NULL means that
* they will not reschedule
*/
usb_kill_urb(int_urb);
del_timer_sync(&instance->resubmit_timer);
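/*
 * Editorial note: the kill/del pair is deliberately run twice; the
 * first pass stops anything already in flight, and the second pass
 * catches a resubmission or timer re-arm that raced in before both
 * handlers observed instance->int_urb == NULL.
 */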
usb_free_urb(int_urb);
flush_work_sync(&instance->status_check_work);
}
static int speedtch_pre_reset(struct usb_interface *intf)
{
return 0;
}
static int speedtch_post_reset(struct usb_interface *intf)
{
return 0;
}
/**********
** USB **
**********/
static struct usb_device_id speedtch_usb_ids[] = {
{USB_DEVICE(0x06b9, 0x4061)},
{}
};
MODULE_DEVICE_TABLE(usb, speedtch_usb_ids);
static int speedtch_usb_probe(struct usb_interface *, const struct usb_device_id *);
static struct usb_driver speedtch_usb_driver = {
.name = speedtch_driver_name,
.probe = speedtch_usb_probe,
.disconnect = usbatm_usb_disconnect,
.pre_reset = speedtch_pre_reset,
.post_reset = speedtch_post_reset,
.id_table = speedtch_usb_ids
};
static void speedtch_release_interfaces(struct usb_device *usb_dev,
int num_interfaces)
{
struct usb_interface *cur_intf;
int i;
for (i = 0; i < num_interfaces; i++)
if ((cur_intf = usb_ifnum_to_if(usb_dev, i))) {
usb_set_intfdata(cur_intf, NULL);
usb_driver_release_interface(&speedtch_usb_driver, cur_intf);
}
}
static int speedtch_bind(struct usbatm_data *usbatm,
struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *usb_dev = interface_to_usbdev(intf);
struct usb_interface *cur_intf, *data_intf;
struct speedtch_instance_data *instance;
int ifnum = intf->altsetting->desc.bInterfaceNumber;
int num_interfaces = usb_dev->actconfig->desc.bNumInterfaces;
int i, ret;
int use_isoc;
usb_dbg(usbatm, "%s entered\n", __func__);
/* sanity checks */
if (usb_dev->descriptor.bDeviceClass != USB_CLASS_VENDOR_SPEC) {
usb_err(usbatm, "%s: wrong device class %d\n", __func__, usb_dev->descriptor.bDeviceClass);
return -ENODEV;
}
if (!(data_intf = usb_ifnum_to_if(usb_dev, INTERFACE_DATA))) {
usb_err(usbatm, "%s: data interface not found!\n", __func__);
return -ENODEV;
}
/* claim all interfaces */
for (i = 0; i < num_interfaces; i++) {
cur_intf = usb_ifnum_to_if(usb_dev, i);
if ((i != ifnum) && cur_intf) {
ret = usb_driver_claim_interface(&speedtch_usb_driver, cur_intf, usbatm);
if (ret < 0) {
usb_err(usbatm, "%s: failed to claim interface %2d (%d)!\n", __func__, i, ret);
speedtch_release_interfaces(usb_dev, i);
return ret;
}
}
}
instance = kzalloc(sizeof(*instance), GFP_KERNEL);
if (!instance) {
usb_err(usbatm, "%s: no memory for instance data!\n", __func__);
ret = -ENOMEM;
goto fail_release;
}
instance->usbatm = usbatm;
/* module parameters may change at any moment, so take a snapshot */
instance->params.altsetting = altsetting;
instance->params.BMaxDSL = BMaxDSL;
instance->params.ModemMode = ModemMode;
memcpy(instance->params.ModemOption, DEFAULT_MODEM_OPTION, MODEM_OPTION_LENGTH);
memcpy(instance->params.ModemOption, ModemOption, num_ModemOption);
use_isoc = enable_isoc;
if (instance->params.altsetting)
if ((ret = usb_set_interface(usb_dev, INTERFACE_DATA, instance->params.altsetting)) < 0) {
usb_err(usbatm, "%s: setting interface to %2d failed (%d)!\n", __func__, instance->params.altsetting, ret);
instance->params.altsetting = 0; /* fall back to default */
}
if (!instance->params.altsetting && use_isoc)
if ((ret = usb_set_interface(usb_dev, INTERFACE_DATA, DEFAULT_ISOC_ALTSETTING)) < 0) {
usb_dbg(usbatm, "%s: setting interface to %2d failed (%d)!\n", __func__, DEFAULT_ISOC_ALTSETTING, ret);
use_isoc = 0; /* fall back to bulk */
}
if (use_isoc) {
const struct usb_host_interface *desc = data_intf->cur_altsetting;
const __u8 target_address = USB_DIR_IN | usbatm->driver->isoc_in;
use_isoc = 0; /* fall back to bulk if endpoint not found */
for (i = 0; i < desc->desc.bNumEndpoints; i++) {
const struct usb_endpoint_descriptor *endpoint_desc = &desc->endpoint[i].desc;
if (endpoint_desc->bEndpointAddress == target_address) {
use_isoc =
usb_endpoint_xfer_isoc(endpoint_desc);
break;
}
}
if (!use_isoc)
usb_info(usbatm, "isochronous transfer not supported - using bulk\n");
}
if (!use_isoc && !instance->params.altsetting)
if ((ret = usb_set_interface(usb_dev, INTERFACE_DATA, DEFAULT_BULK_ALTSETTING)) < 0) {
usb_err(usbatm, "%s: setting interface to %2d failed (%d)!\n", __func__, DEFAULT_BULK_ALTSETTING, ret);
goto fail_free;
}
if (!instance->params.altsetting)
instance->params.altsetting = use_isoc ? DEFAULT_ISOC_ALTSETTING : DEFAULT_BULK_ALTSETTING;
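/*
 * Editorial summary of the altsetting selection above: an explicit
 * module parameter wins; otherwise, if isochronous transfers were
 * requested, the isoc altsetting could be selected, and an isoc IN
 * endpoint exists, DEFAULT_ISOC_ALTSETTING is used; in every other
 * case the driver falls back to DEFAULT_BULK_ALTSETTING.
 */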
usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0);
INIT_WORK(&instance->status_check_work, speedtch_check_status);
init_timer(&instance->status_check_timer);
instance->status_check_timer.function = speedtch_status_poll;
instance->status_check_timer.data = (unsigned long)instance;
instance->last_status = 0xff;
instance->poll_delay = MIN_POLL_DELAY;
init_timer(&instance->resubmit_timer);
instance->resubmit_timer.function = speedtch_resubmit_int;
instance->resubmit_timer.data = (unsigned long)instance;
instance->int_urb = usb_alloc_urb(0, GFP_KERNEL);
if (instance->int_urb)
usb_fill_int_urb(instance->int_urb, usb_dev,
usb_rcvintpipe(usb_dev, ENDPOINT_INT),
instance->int_data, sizeof(instance->int_data),
speedtch_handle_int, instance, 50);
else
usb_dbg(usbatm, "%s: no memory for interrupt urb!\n", __func__);
/* check whether the modem already seems to be alive */
ret = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
0x12, 0xc0, 0x07, 0x00,
instance->scratch_buffer + OFFSET_7, SIZE_7, 500);
usbatm->flags |= (ret == SIZE_7 ? UDSL_SKIP_HEAVY_INIT : 0);
usb_dbg(usbatm, "%s: firmware %s loaded\n", __func__, usbatm->flags & UDSL_SKIP_HEAVY_INIT ? "already" : "not");
if (!(usbatm->flags & UDSL_SKIP_HEAVY_INIT))
if ((ret = usb_reset_device(usb_dev)) < 0) {
usb_err(usbatm, "%s: device reset failed (%d)!\n", __func__, ret);
goto fail_free;
}
usbatm->driver_data = instance;
return 0;
fail_free:
usb_free_urb(instance->int_urb);
kfree(instance);
fail_release:
speedtch_release_interfaces(usb_dev, num_interfaces);
return ret;
}
static void speedtch_unbind(struct usbatm_data *usbatm, struct usb_interface *intf)
{
struct usb_device *usb_dev = interface_to_usbdev(intf);
struct speedtch_instance_data *instance = usbatm->driver_data;
usb_dbg(usbatm, "%s entered\n", __func__);
speedtch_release_interfaces(usb_dev, usb_dev->actconfig->desc.bNumInterfaces);
usb_free_urb(instance->int_urb);
kfree(instance);
}
/***********
** init **
***********/
static struct usbatm_driver speedtch_usbatm_driver = {
.driver_name = speedtch_driver_name,
.bind = speedtch_bind,
.heavy_init = speedtch_heavy_init,
.unbind = speedtch_unbind,
.atm_start = speedtch_atm_start,
.atm_stop = speedtch_atm_stop,
.bulk_in = ENDPOINT_BULK_DATA,
.bulk_out = ENDPOINT_BULK_DATA,
.isoc_in = ENDPOINT_ISOC_DATA
};
static int speedtch_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
return usbatm_usb_probe(intf, id, &speedtch_usbatm_driver);
}
module_usb_driver(speedtch_usb_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);
|
gpl-2.0
|
AOKP/kernel_samsung_jf
|
drivers/usb/renesas_usbhs/common.c
|
4860
|
14832
|
/*
* Renesas USB driver
*
* Copyright (C) 2011 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include "./common.h"
/*
* image of renesas_usbhs
*
* ex) gadget case
* mod.c
* mod_gadget.c
* mod_host.c pipe.c fifo.c
*
* +-------+ +-----------+
* | pipe0 |------>| fifo pio |
* +------------+ +-------+ +-----------+
* | mod_gadget |=====> | pipe1 |--+
* +------------+ +-------+ | +-----------+
* | pipe2 | | +-| fifo dma0 |
* +------------+ +-------+ | | +-----------+
* | mod_host | | pipe3 |<-|--+
* +------------+ +-------+ | +-----------+
* | .... | +--->| fifo dma1 |
* | .... | +-----------+
*/
#define USBHSF_RUNTIME_PWCTRL (1 << 0)
/* status */
#define usbhsc_flags_init(p) do {(p)->flags = 0; } while (0)
#define usbhsc_flags_set(p, b) ((p)->flags |= (b))
#define usbhsc_flags_clr(p, b) ((p)->flags &= ~(b))
#define usbhsc_flags_has(p, b) ((p)->flags & (b))
/*
 * platform callback
 *
 * The Renesas USB driver supports platform callback functions;
 * the macro below invokes them. If the platform doesn't provide
 * a callback, the macro returns 0 (no error).
 */
#define usbhs_platform_call(priv, func, args...)\
(!(priv) ? -ENODEV : \
!((priv)->pfunc.func) ? 0 : \
(priv)->pfunc.func(args))
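/*
 * Illustrative usage (sketch, not from the original source):
 *
 *   ret = usbhs_platform_call(priv, hardware_init, pdev);
 *
 * evaluates to -ENODEV if priv is NULL, to 0 if the platform did not
 * register a hardware_init callback, and otherwise to the callback's
 * own return value. This is why callers can treat a missing callback
 * as success.
 */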
/*
* common functions
*/
u16 usbhs_read(struct usbhs_priv *priv, u32 reg)
{
return ioread16(priv->base + reg);
}
void usbhs_write(struct usbhs_priv *priv, u32 reg, u16 data)
{
iowrite16(data, priv->base + reg);
}
void usbhs_bset(struct usbhs_priv *priv, u32 reg, u16 mask, u16 data)
{
u16 val = usbhs_read(priv, reg);
val &= ~mask;
val |= data & mask;
usbhs_write(priv, reg, val);
}
struct usbhs_priv *usbhs_pdev_to_priv(struct platform_device *pdev)
{
return dev_get_drvdata(&pdev->dev);
}
/*
* syscfg functions
*/
static void usbhs_sys_clock_ctrl(struct usbhs_priv *priv, int enable)
{
usbhs_bset(priv, SYSCFG, SCKE, enable ? SCKE : 0);
}
void usbhs_sys_host_ctrl(struct usbhs_priv *priv, int enable)
{
u16 mask = DCFM | DRPD | DPRPU | HSE | USBE;
u16 val = DCFM | DRPD | HSE | USBE;
int has_otg = usbhs_get_dparam(priv, has_otg);
if (has_otg)
usbhs_bset(priv, DVSTCTR, (EXTLP | PWEN), (EXTLP | PWEN));
/*
* if enable
*
* - select Host mode
* - D+ Line/D- Line Pull-down
*/
usbhs_bset(priv, SYSCFG, mask, enable ? val : 0);
}
void usbhs_sys_function_ctrl(struct usbhs_priv *priv, int enable)
{
u16 mask = DCFM | DRPD | DPRPU | HSE | USBE;
u16 val = DPRPU | HSE | USBE;
/*
* if enable
*
* - select Function mode
* - D+ Line Pull-up
*/
usbhs_bset(priv, SYSCFG, mask, enable ? val : 0);
}
void usbhs_sys_set_test_mode(struct usbhs_priv *priv, u16 mode)
{
usbhs_write(priv, TESTMODE, mode);
}
/*
* frame functions
*/
int usbhs_frame_get_num(struct usbhs_priv *priv)
{
return usbhs_read(priv, FRMNUM) & FRNM_MASK;
}
/*
* usb request functions
*/
void usbhs_usbreq_get_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req)
{
u16 val;
val = usbhs_read(priv, USBREQ);
req->bRequest = (val >> 8) & 0xFF;
req->bRequestType = (val >> 0) & 0xFF;
req->wValue = usbhs_read(priv, USBVAL);
req->wIndex = usbhs_read(priv, USBINDX);
req->wLength = usbhs_read(priv, USBLENG);
}
void usbhs_usbreq_set_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req)
{
usbhs_write(priv, USBREQ, (req->bRequest << 8) | req->bRequestType);
usbhs_write(priv, USBVAL, req->wValue);
usbhs_write(priv, USBINDX, req->wIndex);
usbhs_write(priv, USBLENG, req->wLength);
usbhs_bset(priv, DCPCTR, SUREQ, SUREQ);
}
/*
* bus/vbus functions
*/
void usbhs_bus_send_sof_enable(struct usbhs_priv *priv)
{
u16 status = usbhs_read(priv, DVSTCTR) & (USBRST | UACT);
if (status != USBRST) {
struct device *dev = usbhs_priv_to_dev(priv);
dev_err(dev, "usbhs should be reset\n");
}
usbhs_bset(priv, DVSTCTR, (USBRST | UACT), UACT);
}
void usbhs_bus_send_reset(struct usbhs_priv *priv)
{
usbhs_bset(priv, DVSTCTR, (USBRST | UACT), USBRST);
}
int usbhs_bus_get_speed(struct usbhs_priv *priv)
{
u16 dvstctr = usbhs_read(priv, DVSTCTR);
switch (RHST & dvstctr) {
case RHST_LOW_SPEED:
return USB_SPEED_LOW;
case RHST_FULL_SPEED:
return USB_SPEED_FULL;
case RHST_HIGH_SPEED:
return USB_SPEED_HIGH;
}
return USB_SPEED_UNKNOWN;
}
int usbhs_vbus_ctrl(struct usbhs_priv *priv, int enable)
{
struct platform_device *pdev = usbhs_priv_to_pdev(priv);
return usbhs_platform_call(priv, set_vbus, pdev, enable);
}
static void usbhsc_bus_init(struct usbhs_priv *priv)
{
usbhs_write(priv, DVSTCTR, 0);
usbhs_vbus_ctrl(priv, 0);
}
/*
* device configuration
*/
int usbhs_set_device_config(struct usbhs_priv *priv, int devnum,
u16 upphub, u16 hubport, u16 speed)
{
struct device *dev = usbhs_priv_to_dev(priv);
u16 usbspd = 0;
u32 reg = DEVADD0 + (2 * devnum);
if (devnum > 10) {
dev_err(dev, "cannot set speed to unknown device %d\n", devnum);
return -EIO;
}
if (upphub > 0xA) {
dev_err(dev, "unsupported hub number %d\n", upphub);
return -EIO;
}
switch (speed) {
case USB_SPEED_LOW:
usbspd = USBSPD_SPEED_LOW;
break;
case USB_SPEED_FULL:
usbspd = USBSPD_SPEED_FULL;
break;
case USB_SPEED_HIGH:
usbspd = USBSPD_SPEED_HIGH;
break;
default:
dev_err(dev, "unsupported speed %d\n", speed);
return -EIO;
}
usbhs_write(priv, reg, UPPHUB(upphub) |
HUBPORT(hubport)|
USBSPD(usbspd));
return 0;
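/*
 * Worked example (hypothetical values, for illustration): devnum == 1
 * with no upstream hub (upphub == 0, hubport == 0) at USB_SPEED_HIGH
 * writes UPPHUB(0) | HUBPORT(0) | USBSPD(USBSPD_SPEED_HIGH) into
 * DEVADD0 + 2, i.e. the DEVADD register for device address 1, since
 * the DEVADD registers are laid out 2 bytes apart.
 */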
}
/*
* local functions
*/
static void usbhsc_set_buswait(struct usbhs_priv *priv)
{
int wait = usbhs_get_dparam(priv, buswait_bwait);
/* set bus wait if the platform provides one */
if (wait)
usbhs_bset(priv, BUSWAIT, 0x000F, wait);
}
/*
* platform default param
*/
static u32 usbhsc_default_pipe_type[] = {
USB_ENDPOINT_XFER_CONTROL,
USB_ENDPOINT_XFER_ISOC,
USB_ENDPOINT_XFER_ISOC,
USB_ENDPOINT_XFER_BULK,
USB_ENDPOINT_XFER_BULK,
USB_ENDPOINT_XFER_BULK,
USB_ENDPOINT_XFER_INT,
USB_ENDPOINT_XFER_INT,
USB_ENDPOINT_XFER_INT,
USB_ENDPOINT_XFER_INT,
};
/*
* power control
*/
static void usbhsc_power_ctrl(struct usbhs_priv *priv, int enable)
{
struct platform_device *pdev = usbhs_priv_to_pdev(priv);
struct device *dev = usbhs_priv_to_dev(priv);
if (enable) {
/* enable PM */
pm_runtime_get_sync(dev);
/* enable platform power */
usbhs_platform_call(priv, power_ctrl, pdev, priv->base, enable);
/* USB on */
usbhs_sys_clock_ctrl(priv, enable);
} else {
/* USB off */
usbhs_sys_clock_ctrl(priv, enable);
/* disable platform power */
usbhs_platform_call(priv, power_ctrl, pdev, priv->base, enable);
/* disable PM */
pm_runtime_put_sync(dev);
}
}
/*
* hotplug
*/
static void usbhsc_hotplug(struct usbhs_priv *priv)
{
struct platform_device *pdev = usbhs_priv_to_pdev(priv);
struct usbhs_mod *mod = usbhs_mod_get_current(priv);
int id;
int enable;
int ret;
/*
* get vbus status from platform
*/
enable = usbhs_platform_call(priv, get_vbus, pdev);
/*
* get id from platform
*/
id = usbhs_platform_call(priv, get_id, pdev);
if (enable && !mod) {
ret = usbhs_mod_change(priv, id);
if (ret < 0)
return;
dev_dbg(&pdev->dev, "%s enable\n", __func__);
/* power on */
if (usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL))
usbhsc_power_ctrl(priv, enable);
/* bus init */
usbhsc_set_buswait(priv);
usbhsc_bus_init(priv);
/* module start */
usbhs_mod_call(priv, start, priv);
} else if (!enable && mod) {
dev_dbg(&pdev->dev, "%s disable\n", __func__);
/* module stop */
usbhs_mod_call(priv, stop, priv);
/* bus init */
usbhsc_bus_init(priv);
/* power off */
if (usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL))
usbhsc_power_ctrl(priv, enable);
usbhs_mod_change(priv, -1);
/* reset phy for next connection */
usbhs_platform_call(priv, phy_reset, pdev);
}
}
/*
* notify hotplug
*/
static void usbhsc_notify_hotplug(struct work_struct *work)
{
struct usbhs_priv *priv = container_of(work,
struct usbhs_priv,
notify_hotplug_work.work);
usbhsc_hotplug(priv);
}
static int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
int delay = usbhs_get_dparam(priv, detection_delay);
/*
 * This function may be called in interrupt context.
 * To guarantee a safe (sleepable) context, defer
 * usbhs_notify_hotplug to a workqueue.
 */
schedule_delayed_work(&priv->notify_hotplug_work,
msecs_to_jiffies(delay));
return 0;
}
/*
* platform functions
*/
static int usbhs_probe(struct platform_device *pdev)
{
struct renesas_usbhs_platform_info *info = pdev->dev.platform_data;
struct renesas_usbhs_driver_callback *dfunc;
struct usbhs_priv *priv;
struct resource *res, *irq_res;
int ret;
/* check platform information */
if (!info ||
!info->platform_callback.get_id) {
dev_err(&pdev->dev, "no platform information\n");
return -EINVAL;
}
/* platform data */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res || !irq_res) {
dev_err(&pdev->dev, "Not enough Renesas USB platform resources.\n");
return -ENODEV;
}
/* usb private data */
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
dev_err(&pdev->dev, "Could not allocate priv\n");
return -ENOMEM;
}
priv->base = ioremap_nocache(res->start, resource_size(res));
if (!priv->base) {
dev_err(&pdev->dev, "ioremap error.\n");
ret = -ENOMEM;
goto probe_end_kfree;
}
/*
* care platform info
*/
memcpy(&priv->pfunc,
&info->platform_callback,
sizeof(struct renesas_usbhs_platform_callback));
memcpy(&priv->dparam,
&info->driver_param,
sizeof(struct renesas_usbhs_driver_param));
/* set driver callback functions for platform */
dfunc = &info->driver_callback;
dfunc->notify_hotplug = usbhsc_drvcllbck_notify_hotplug;
/* set default params if the platform doesn't provide them */
if (!priv->dparam.pipe_type) {
priv->dparam.pipe_type = usbhsc_default_pipe_type;
priv->dparam.pipe_size = ARRAY_SIZE(usbhsc_default_pipe_type);
}
if (!priv->dparam.pio_dma_border)
priv->dparam.pio_dma_border = 64; /* 64 bytes */
/* FIXME */
/* runtime power control ? */
if (priv->pfunc.get_vbus)
usbhsc_flags_set(priv, USBHSF_RUNTIME_PWCTRL);
/*
* priv settings
*/
priv->irq = irq_res->start;
if (irq_res->flags & IORESOURCE_IRQ_SHAREABLE)
priv->irqflags = IRQF_SHARED;
priv->pdev = pdev;
INIT_DELAYED_WORK(&priv->notify_hotplug_work, usbhsc_notify_hotplug);
spin_lock_init(usbhs_priv_to_lock(priv));
/* call pipe and module init */
ret = usbhs_pipe_probe(priv);
if (ret < 0)
goto probe_end_iounmap;
ret = usbhs_fifo_probe(priv);
if (ret < 0)
goto probe_end_pipe_exit;
ret = usbhs_mod_probe(priv);
if (ret < 0)
goto probe_end_fifo_exit;
/* dev_set_drvdata should be called after usbhs_mod_init */
dev_set_drvdata(&pdev->dev, priv);
/*
 * device reset here because the USB controller
 * might have been used by the boot loader.
 */
usbhs_sys_clock_ctrl(priv, 0);
/*
* platform call
*
* USB phy setup might depend on CPU/Board.
* If the platform provides callback functions,
* call them here.
*/
ret = usbhs_platform_call(priv, hardware_init, pdev);
if (ret < 0) {
dev_err(&pdev->dev, "platform prove failed.\n");
goto probe_end_mod_exit;
}
/* reset phy for connection */
usbhs_platform_call(priv, phy_reset, pdev);
/* power control */
pm_runtime_enable(&pdev->dev);
if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL)) {
usbhsc_power_ctrl(priv, 1);
usbhs_mod_autonomy_mode(priv);
}
/*
* manual call notify_hotplug for cold plug
*/
ret = usbhsc_drvcllbck_notify_hotplug(pdev);
if (ret < 0)
goto probe_end_call_remove;
dev_info(&pdev->dev, "probed\n");
return ret;
probe_end_call_remove:
usbhs_platform_call(priv, hardware_exit, pdev);
probe_end_mod_exit:
usbhs_mod_remove(priv);
probe_end_fifo_exit:
usbhs_fifo_remove(priv);
probe_end_pipe_exit:
usbhs_pipe_remove(priv);
probe_end_iounmap:
iounmap(priv->base);
probe_end_kfree:
kfree(priv);
dev_info(&pdev->dev, "probe failed\n");
return ret;
}
static int __devexit usbhs_remove(struct platform_device *pdev)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
struct renesas_usbhs_platform_info *info = pdev->dev.platform_data;
struct renesas_usbhs_driver_callback *dfunc = &info->driver_callback;
dev_dbg(&pdev->dev, "usb remove\n");
dfunc->notify_hotplug = NULL;
/* power off */
if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL))
usbhsc_power_ctrl(priv, 0);
pm_runtime_disable(&pdev->dev);
usbhs_platform_call(priv, hardware_exit, pdev);
usbhs_mod_remove(priv);
usbhs_fifo_remove(priv);
usbhs_pipe_remove(priv);
iounmap(priv->base);
kfree(priv);
return 0;
}
static int usbhsc_suspend(struct device *dev)
{
struct usbhs_priv *priv = dev_get_drvdata(dev);
struct usbhs_mod *mod = usbhs_mod_get_current(priv);
if (mod) {
usbhs_mod_call(priv, stop, priv);
usbhs_mod_change(priv, -1);
}
if (mod || !usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL))
usbhsc_power_ctrl(priv, 0);
return 0;
}
static int usbhsc_resume(struct device *dev)
{
struct usbhs_priv *priv = dev_get_drvdata(dev);
struct platform_device *pdev = usbhs_priv_to_pdev(priv);
usbhs_platform_call(priv, phy_reset, pdev);
if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL))
usbhsc_power_ctrl(priv, 1);
usbhsc_hotplug(priv);
return 0;
}
static int usbhsc_runtime_nop(struct device *dev)
{
/* Runtime PM callback shared between ->runtime_suspend()
* and ->runtime_resume(). Simply returns success.
*
* This driver re-initializes all registers after
* pm_runtime_get_sync() anyway so there is no need
* to save and restore registers here.
*/
return 0;
}
static const struct dev_pm_ops usbhsc_pm_ops = {
.suspend = usbhsc_suspend,
.resume = usbhsc_resume,
.runtime_suspend = usbhsc_runtime_nop,
.runtime_resume = usbhsc_runtime_nop,
};
static struct platform_driver renesas_usbhs_driver = {
.driver = {
.name = "renesas_usbhs",
.pm = &usbhsc_pm_ops,
},
.probe = usbhs_probe,
.remove = __devexit_p(usbhs_remove),
};
module_platform_driver(renesas_usbhs_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Renesas USB driver");
MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
|
gpl-2.0
|
yuanguo8/nubiaz5s_kernel
|
drivers/gpio/gpio-vr41xx.c
|
5116
|
12821
|
/*
* Driver for NEC VR4100 series General-purpose I/O Unit.
*
* Copyright (C) 2002 MontaVista Software Inc.
* Author: Yoichi Yuasa <source@mvista.com>
* Copyright (C) 2003-2009 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/vr41xx/giu.h>
#include <asm/vr41xx/irq.h>
#include <asm/vr41xx/vr41xx.h>
MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
MODULE_DESCRIPTION("NEC VR4100 series General-purpose I/O Unit driver");
MODULE_LICENSE("GPL");
#define GIUIOSELL 0x00
#define GIUIOSELH 0x02
#define GIUPIODL 0x04
#define GIUPIODH 0x06
#define GIUINTSTATL 0x08
#define GIUINTSTATH 0x0a
#define GIUINTENL 0x0c
#define GIUINTENH 0x0e
#define GIUINTTYPL 0x10
#define GIUINTTYPH 0x12
#define GIUINTALSELL 0x14
#define GIUINTALSELH 0x16
#define GIUINTHTSELL 0x18
#define GIUINTHTSELH 0x1a
#define GIUPODATL 0x1c
#define GIUPODATEN 0x1c
#define GIUPODATH 0x1e
#define PIOEN0 0x0100
#define PIOEN1 0x0200
#define GIUPODAT 0x1e
#define GIUFEDGEINHL 0x20
#define GIUFEDGEINHH 0x22
#define GIUREDGEINHL 0x24
#define GIUREDGEINHH 0x26
#define GIUUSEUPDN 0x1e0
#define GIUTERMUPDN 0x1e2
#define GPIO_HAS_PULLUPDOWN_IO 0x0001
#define GPIO_HAS_OUTPUT_ENABLE 0x0002
#define GPIO_HAS_INTERRUPT_EDGE_SELECT 0x0100
enum {
GPIO_INPUT,
GPIO_OUTPUT,
};
static DEFINE_SPINLOCK(giu_lock);
static unsigned long giu_flags;
static void __iomem *giu_base;
#define giu_read(offset) readw(giu_base + (offset))
#define giu_write(offset, value) writew((value), giu_base + (offset))
#define GPIO_PIN_OF_IRQ(irq) ((irq) - GIU_IRQ_BASE)
#define GIUINT_HIGH_OFFSET 16
#define GIUINT_HIGH_MAX 32
static inline u16 giu_set(u16 offset, u16 set)
{
u16 data;
data = giu_read(offset);
data |= set;
giu_write(offset, data);
return data;
}
static inline u16 giu_clear(u16 offset, u16 clear)
{
u16 data;
data = giu_read(offset);
data &= ~clear;
giu_write(offset, data);
return data;
}
static void ack_giuint_low(struct irq_data *d)
{
giu_write(GIUINTSTATL, 1 << GPIO_PIN_OF_IRQ(d->irq));
}
static void mask_giuint_low(struct irq_data *d)
{
giu_clear(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(d->irq));
}
static void mask_ack_giuint_low(struct irq_data *d)
{
unsigned int pin;
pin = GPIO_PIN_OF_IRQ(d->irq);
giu_clear(GIUINTENL, 1 << pin);
giu_write(GIUINTSTATL, 1 << pin);
}
static void unmask_giuint_low(struct irq_data *d)
{
giu_set(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(d->irq));
}
static struct irq_chip giuint_low_irq_chip = {
.name = "GIUINTL",
.irq_ack = ack_giuint_low,
.irq_mask = mask_giuint_low,
.irq_mask_ack = mask_ack_giuint_low,
.irq_unmask = unmask_giuint_low,
};
static void ack_giuint_high(struct irq_data *d)
{
giu_write(GIUINTSTATH,
1 << (GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET));
}
static void mask_giuint_high(struct irq_data *d)
{
giu_clear(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET));
}
static void mask_ack_giuint_high(struct irq_data *d)
{
unsigned int pin;
pin = GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET;
giu_clear(GIUINTENH, 1 << pin);
giu_write(GIUINTSTATH, 1 << pin);
}
static void unmask_giuint_high(struct irq_data *d)
{
giu_set(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(d->irq) - GIUINT_HIGH_OFFSET));
}
static struct irq_chip giuint_high_irq_chip = {
.name = "GIUINTH",
.irq_ack = ack_giuint_high,
.irq_mask = mask_giuint_high,
.irq_mask_ack = mask_ack_giuint_high,
.irq_unmask = unmask_giuint_high,
};
static int giu_get_irq(unsigned int irq)
{
u16 pendl, pendh, maskl, maskh;
int i;
pendl = giu_read(GIUINTSTATL);
pendh = giu_read(GIUINTSTATH);
maskl = giu_read(GIUINTENL);
maskh = giu_read(GIUINTENH);
maskl &= pendl;
maskh &= pendh;
if (maskl) {
for (i = 0; i < 16; i++) {
if (maskl & (1 << i))
return GIU_IRQ(i);
}
} else if (maskh) {
for (i = 0; i < 16; i++) {
if (maskh & (1 << i))
return GIU_IRQ(i + GIUINT_HIGH_OFFSET);
}
}
printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
maskl, pendl, maskh, pendh);
atomic_inc(&irq_err_count);
return -EINVAL;
}
void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger,
irq_signal_t signal)
{
u16 mask;
if (pin < GIUINT_HIGH_OFFSET) {
mask = 1 << pin;
if (trigger != IRQ_TRIGGER_LEVEL) {
giu_set(GIUINTTYPL, mask);
if (signal == IRQ_SIGNAL_HOLD)
giu_set(GIUINTHTSELL, mask);
else
giu_clear(GIUINTHTSELL, mask);
if (giu_flags & GPIO_HAS_INTERRUPT_EDGE_SELECT) {
switch (trigger) {
case IRQ_TRIGGER_EDGE_FALLING:
giu_set(GIUFEDGEINHL, mask);
giu_clear(GIUREDGEINHL, mask);
break;
case IRQ_TRIGGER_EDGE_RISING:
giu_clear(GIUFEDGEINHL, mask);
giu_set(GIUREDGEINHL, mask);
break;
default:
giu_set(GIUFEDGEINHL, mask);
giu_set(GIUREDGEINHL, mask);
break;
}
}
irq_set_chip_and_handler(GIU_IRQ(pin),
&giuint_low_irq_chip,
handle_edge_irq);
} else {
giu_clear(GIUINTTYPL, mask);
giu_clear(GIUINTHTSELL, mask);
irq_set_chip_and_handler(GIU_IRQ(pin),
&giuint_low_irq_chip,
handle_level_irq);
}
giu_write(GIUINTSTATL, mask);
} else if (pin < GIUINT_HIGH_MAX) {
mask = 1 << (pin - GIUINT_HIGH_OFFSET);
if (trigger != IRQ_TRIGGER_LEVEL) {
giu_set(GIUINTTYPH, mask);
if (signal == IRQ_SIGNAL_HOLD)
giu_set(GIUINTHTSELH, mask);
else
giu_clear(GIUINTHTSELH, mask);
if (giu_flags & GPIO_HAS_INTERRUPT_EDGE_SELECT) {
switch (trigger) {
case IRQ_TRIGGER_EDGE_FALLING:
giu_set(GIUFEDGEINHH, mask);
giu_clear(GIUREDGEINHH, mask);
break;
case IRQ_TRIGGER_EDGE_RISING:
giu_clear(GIUFEDGEINHH, mask);
giu_set(GIUREDGEINHH, mask);
break;
default:
giu_set(GIUFEDGEINHH, mask);
giu_set(GIUREDGEINHH, mask);
break;
}
}
irq_set_chip_and_handler(GIU_IRQ(pin),
&giuint_high_irq_chip,
handle_edge_irq);
} else {
giu_clear(GIUINTTYPH, mask);
giu_clear(GIUINTHTSELH, mask);
irq_set_chip_and_handler(GIU_IRQ(pin),
&giuint_high_irq_chip,
handle_level_irq);
}
giu_write(GIUINTSTATH, mask);
}
}
EXPORT_SYMBOL_GPL(vr41xx_set_irq_trigger);
void vr41xx_set_irq_level(unsigned int pin, irq_level_t level)
{
u16 mask;
if (pin < GIUINT_HIGH_OFFSET) {
mask = 1 << pin;
if (level == IRQ_LEVEL_HIGH)
giu_set(GIUINTALSELL, mask);
else
giu_clear(GIUINTALSELL, mask);
giu_write(GIUINTSTATL, mask);
} else if (pin < GIUINT_HIGH_MAX) {
mask = 1 << (pin - GIUINT_HIGH_OFFSET);
if (level == IRQ_LEVEL_HIGH)
giu_set(GIUINTALSELH, mask);
else
giu_clear(GIUINTALSELH, mask);
giu_write(GIUINTSTATH, mask);
}
}
EXPORT_SYMBOL_GPL(vr41xx_set_irq_level);
static int giu_set_direction(struct gpio_chip *chip, unsigned pin, int dir)
{
u16 offset, mask, reg;
unsigned long flags;
if (pin >= chip->ngpio)
return -EINVAL;
if (pin < 16) {
offset = GIUIOSELL;
mask = 1 << pin;
} else if (pin < 32) {
offset = GIUIOSELH;
mask = 1 << (pin - 16);
} else {
if (giu_flags & GPIO_HAS_OUTPUT_ENABLE) {
offset = GIUPODATEN;
mask = 1 << (pin - 32);
} else {
switch (pin) {
case 48:
offset = GIUPODATH;
mask = PIOEN0;
break;
case 49:
offset = GIUPODATH;
mask = PIOEN1;
break;
default:
return -EINVAL;
}
}
}
spin_lock_irqsave(&giu_lock, flags);
reg = giu_read(offset);
if (dir == GPIO_OUTPUT)
reg |= mask;
else
reg &= ~mask;
giu_write(offset, reg);
spin_unlock_irqrestore(&giu_lock, flags);
return 0;
}
int vr41xx_gpio_pullupdown(unsigned int pin, gpio_pull_t pull)
{
u16 reg, mask;
unsigned long flags;
if ((giu_flags & GPIO_HAS_PULLUPDOWN_IO) != GPIO_HAS_PULLUPDOWN_IO)
return -EPERM;
if (pin >= 15)
return -EINVAL;
mask = 1 << pin;
spin_lock_irqsave(&giu_lock, flags);
if (pull == GPIO_PULL_UP || pull == GPIO_PULL_DOWN) {
reg = giu_read(GIUTERMUPDN);
if (pull == GPIO_PULL_UP)
reg |= mask;
else
reg &= ~mask;
giu_write(GIUTERMUPDN, reg);
reg = giu_read(GIUUSEUPDN);
reg |= mask;
giu_write(GIUUSEUPDN, reg);
} else {
reg = giu_read(GIUUSEUPDN);
reg &= ~mask;
giu_write(GIUUSEUPDN, reg);
}
spin_unlock_irqrestore(&giu_lock, flags);
return 0;
}
EXPORT_SYMBOL_GPL(vr41xx_gpio_pullupdown);
static int vr41xx_gpio_get(struct gpio_chip *chip, unsigned pin)
{
u16 reg, mask;
if (pin >= chip->ngpio)
return -EINVAL;
if (pin < 16) {
reg = giu_read(GIUPIODL);
mask = 1 << pin;
} else if (pin < 32) {
reg = giu_read(GIUPIODH);
mask = 1 << (pin - 16);
} else if (pin < 48) {
reg = giu_read(GIUPODATL);
mask = 1 << (pin - 32);
} else {
reg = giu_read(GIUPODATH);
mask = 1 << (pin - 48);
}
if (reg & mask)
return 1;
return 0;
}
static void vr41xx_gpio_set(struct gpio_chip *chip, unsigned pin,
int value)
{
u16 offset, mask, reg;
unsigned long flags;
if (pin >= chip->ngpio)
return;
if (pin < 16) {
offset = GIUPIODL;
mask = 1 << pin;
} else if (pin < 32) {
offset = GIUPIODH;
mask = 1 << (pin - 16);
} else if (pin < 48) {
offset = GIUPODATL;
mask = 1 << (pin - 32);
} else {
offset = GIUPODATH;
mask = 1 << (pin - 48);
}
spin_lock_irqsave(&giu_lock, flags);
reg = giu_read(offset);
if (value)
reg |= mask;
else
reg &= ~mask;
giu_write(offset, reg);
spin_unlock_irqrestore(&giu_lock, flags);
}
static int vr41xx_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
return giu_set_direction(chip, offset, GPIO_INPUT);
}
static int vr41xx_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
int value)
{
vr41xx_gpio_set(chip, offset, value);
return giu_set_direction(chip, offset, GPIO_OUTPUT);
}
static int vr41xx_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
if (offset >= chip->ngpio)
return -EINVAL;
return GIU_IRQ_BASE + offset;
}
static struct gpio_chip vr41xx_gpio_chip = {
.label = "vr41xx",
.owner = THIS_MODULE,
.direction_input = vr41xx_gpio_direction_input,
.get = vr41xx_gpio_get,
.direction_output = vr41xx_gpio_direction_output,
.set = vr41xx_gpio_set,
.to_irq = vr41xx_gpio_to_irq,
};
static int __devinit giu_probe(struct platform_device *pdev)
{
struct resource *res;
unsigned int trigger, i, pin;
struct irq_chip *chip;
int irq, retval;
switch (pdev->id) {
case GPIO_50PINS_PULLUPDOWN:
giu_flags = GPIO_HAS_PULLUPDOWN_IO;
vr41xx_gpio_chip.ngpio = 50;
break;
case GPIO_36PINS:
vr41xx_gpio_chip.ngpio = 36;
break;
case GPIO_48PINS_EDGE_SELECT:
giu_flags = GPIO_HAS_INTERRUPT_EDGE_SELECT;
vr41xx_gpio_chip.ngpio = 48;
break;
default:
dev_err(&pdev->dev, "GIU: unknown ID %d\n", pdev->id);
return -ENODEV;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -EBUSY;
giu_base = ioremap(res->start, resource_size(res));
if (!giu_base)
return -ENOMEM;
vr41xx_gpio_chip.dev = &pdev->dev;
retval = gpiochip_add(&vr41xx_gpio_chip);
if (retval) {
	iounmap(giu_base);
	return retval;
}
giu_write(GIUINTENL, 0);
giu_write(GIUINTENH, 0);
trigger = giu_read(GIUINTTYPH) << 16;
trigger |= giu_read(GIUINTTYPL);
for (i = GIU_IRQ_BASE; i <= GIU_IRQ_LAST; i++) {
pin = GPIO_PIN_OF_IRQ(i);
if (pin < GIUINT_HIGH_OFFSET)
chip = &giuint_low_irq_chip;
else
chip = &giuint_high_irq_chip;
if (trigger & (1 << pin))
irq_set_chip_and_handler(i, chip, handle_edge_irq);
else
irq_set_chip_and_handler(i, chip, handle_level_irq);
}
irq = platform_get_irq(pdev, 0);
if (irq < 0 || irq >= nr_irqs)
return -EBUSY;
return cascade_irq(irq, giu_get_irq);
}
static int __devexit giu_remove(struct platform_device *pdev)
{
if (giu_base) {
iounmap(giu_base);
giu_base = NULL;
}
return 0;
}
static struct platform_driver giu_device_driver = {
.probe = giu_probe,
.remove = __devexit_p(giu_remove),
.driver = {
.name = "GIU",
.owner = THIS_MODULE,
},
};
module_platform_driver(giu_device_driver);
|
gpl-2.0
|
upndwn4par/graviton_s4_kernel
|
drivers/staging/vme/boards/vme_vmivme7805.c
|
5628
|
2963
|
/*
* Support for the VMIVME-7805 board access to the Universe II bridge.
*
* Author: Arthur Benilov <arthur.benilov@iba-group.com>
* Copyright 2010 Ion Beam Application, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/io.h>
#include "vme_vmivme7805.h"
static int __init vmic_init(void);
static int vmic_probe(struct pci_dev *, const struct pci_device_id *);
static void vmic_remove(struct pci_dev *);
static void __exit vmic_exit(void);
/** Base address to access FPGA registers */
static void *vmic_base;
static const char driver_name[] = "vmivme_7805";
static DEFINE_PCI_DEVICE_TABLE(vmic_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_VMIC, PCI_DEVICE_ID_VTIMR) },
{ },
};
static struct pci_driver vmic_driver = {
.name = driver_name,
.id_table = vmic_ids,
.probe = vmic_probe,
.remove = vmic_remove,
};
static int __init vmic_init(void)
{
return pci_register_driver(&vmic_driver);
}
static int vmic_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int retval;
u32 data;
/* Enable the device */
retval = pci_enable_device(pdev);
if (retval) {
dev_err(&pdev->dev, "Unable to enable device\n");
goto err;
}
/* Map Registers */
retval = pci_request_regions(pdev, driver_name);
if (retval) {
dev_err(&pdev->dev, "Unable to reserve resources\n");
goto err_resource;
}
/* Map registers in BAR 0 */
vmic_base = ioremap_nocache(pci_resource_start(pdev, 0), 16);
if (!vmic_base) {
dev_err(&pdev->dev, "Unable to remap CRG region\n");
retval = -EIO;
goto err_remap;
}
/* Clear the FPGA VME IF contents */
iowrite32(0, vmic_base + VME_CONTROL);
/* Clear any initial BERR */
data = ioread32(vmic_base + VME_CONTROL) & 0x00000FFF;
data |= BM_VME_CONTROL_BERRST;
iowrite32(data, vmic_base + VME_CONTROL);
/* Enable the VME interface and byte swapping */
data = ioread32(vmic_base + VME_CONTROL) & 0x00000FFF;
data = data | BM_VME_CONTROL_MASTER_ENDIAN |
BM_VME_CONTROL_SLAVE_ENDIAN |
BM_VME_CONTROL_ABLE |
BM_VME_CONTROL_BERRI |
BM_VME_CONTROL_BPENA |
BM_VME_CONTROL_VBENA;
iowrite32(data, vmic_base + VME_CONTROL);
return 0;
err_remap:
pci_release_regions(pdev);
err_resource:
pci_disable_device(pdev);
err:
return retval;
}
static void vmic_remove(struct pci_dev *pdev)
{
iounmap(vmic_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
}
static void __exit vmic_exit(void)
{
pci_unregister_driver(&vmic_driver);
}
MODULE_DESCRIPTION("VMIVME-7805 board support driver");
MODULE_AUTHOR("Arthur Benilov <arthur.benilov@iba-group.com>");
MODULE_LICENSE("GPL");
module_init(vmic_init);
module_exit(vmic_exit);
|
gpl-2.0
|
sinutech/sinuos-kernel
|
arch/mips/sgi-ip27/ip27-hubio.c
|
7676
|
4995
|
/*
* Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc.
* Copyright (C) 2004 Christoph Hellwig.
* Released under GPL v2.
*
* Support functions for the HUB ASIC - mostly PIO mapping related.
*/
#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/mmzone.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/hub.h>
static int force_fire_and_forget = 1;
/**
* hub_pio_map - establish a HUB PIO mapping
*
* @hub: hub to perform PIO mapping on
* @widget: widget ID to perform PIO mapping for
* @xtalk_addr: xtalk_address that needs to be mapped
* @size: size of the PIO mapping
*
**/
unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget,
unsigned long xtalk_addr, size_t size)
{
nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
unsigned i;
/* use small-window mapping if possible */
if ((xtalk_addr % SWIN_SIZE) + size <= SWIN_SIZE)
return NODE_SWIN_BASE(nasid, widget) + (xtalk_addr % SWIN_SIZE);
if ((xtalk_addr % BWIN_SIZE) + size > BWIN_SIZE) {
printk(KERN_WARNING "PIO mapping at hub %d widget %d addr 0x%lx"
" too big (%ld)\n",
nasid, widget, xtalk_addr, size);
return 0;
}
xtalk_addr &= ~(BWIN_SIZE-1);
for (i = 0; i < HUB_NUM_BIG_WINDOW; i++) {
if (test_and_set_bit(i, hub_data(cnode)->h_bigwin_used))
continue;
/*
* The code below does a PIO write to setup an ITTE entry.
*
* We need to prevent other CPUs from seeing our updated
* memory shadow of the ITTE (in the piomap) until the ITTE
* entry is actually set up; otherwise, another CPU might
* attempt a PIO prematurely.
*
* Also, the only way we can know that an entry has been
* received by the hub and can be used by future PIO reads/
* writes is by reading back the ITTE entry after writing it.
*
* For these two reasons, we PIO read back the ITTE entry
* after we write it.
*/
IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr);
(void) HUB_L(IIO_ITTE_GET(nasid, i));
return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE);
}
printk(KERN_WARNING "unable to establish PIO mapping for at"
" hub %d widget %d addr 0x%lx\n",
nasid, widget, xtalk_addr);
return 0;
}
/*
 * hub_setup_prb(nasid, prbnum, credits)
 *
 * Put a PRB into fire-and-forget mode if force_fire_and_forget is set.
 * Otherwise, put it into conveyor belt mode with the specified number of credits.
*/
static void hub_setup_prb(nasid_t nasid, int prbnum, int credits)
{
iprb_t prb;
int prb_offset;
/*
* Get the current register value.
*/
prb_offset = IIO_IOPRB(prbnum);
prb.iprb_regval = REMOTE_HUB_L(nasid, prb_offset);
/*
* Clear out some fields.
*/
prb.iprb_ovflow = 1;
prb.iprb_bnakctr = 0;
prb.iprb_anakctr = 0;
/*
* Enable or disable fire-and-forget mode.
*/
prb.iprb_ff = force_fire_and_forget ? 1 : 0;
/*
 * Set the appropriate number of PIO credits for the widget.
*/
prb.iprb_xtalkctr = credits;
/*
* Store the new value to the register.
*/
REMOTE_HUB_S(nasid, prb_offset, prb.iprb_regval);
}
/**
* hub_set_piomode - set pio mode for a given hub
*
* @nasid: physical node ID for the hub in question
*
* Put the hub into either "PIO conveyor belt" mode or "fire-and-forget" mode.
* To do this, we have to make absolutely sure that no PIOs are in progress
* so we turn off access to all widgets for the duration of the function.
*
* XXX - This code should really check what kind of widget we're talking
* to. Bridges can only handle three requests, but XG will do more.
* How many can crossbow handle to widget 0? We're assuming 1.
*
* XXX - There is a bug in the crossbow that link reset PIOs do not
* return write responses. The easiest solution to this problem is to
* leave widget 0 (xbow) in fire-and-forget mode at all times. This
* only affects pio's to xbow registers, which should be rare.
**/
static void hub_set_piomode(nasid_t nasid)
{
hubreg_t ii_iowa;
hubii_wcr_t ii_wcr;
unsigned i;
ii_iowa = REMOTE_HUB_L(nasid, IIO_OUTWIDGET_ACCESS);
REMOTE_HUB_S(nasid, IIO_OUTWIDGET_ACCESS, 0);
ii_wcr.wcr_reg_value = REMOTE_HUB_L(nasid, IIO_WCR);
if (ii_wcr.iwcr_dir_con) {
/*
* Assume a bridge here.
*/
hub_setup_prb(nasid, 0, 3);
} else {
/*
* Assume a crossbow here.
*/
hub_setup_prb(nasid, 0, 1);
}
/*
 * XXX - Here's where we should take the widget type into
 * account when assigning credits.
*/
for (i = HUB_WIDGET_ID_MIN; i <= HUB_WIDGET_ID_MAX; i++)
hub_setup_prb(nasid, i, 3);
REMOTE_HUB_S(nasid, IIO_OUTWIDGET_ACCESS, ii_iowa);
}
/*
* hub_pio_init - PIO-related hub initialization
*
* @hub: hubinfo structure for our hub
*/
void hub_pio_init(cnodeid_t cnode)
{
nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
unsigned i;
/* initialize big window piomaps for this hub */
bitmap_zero(hub_data(cnode)->h_bigwin_used, HUB_NUM_BIG_WINDOW);
for (i = 0; i < HUB_NUM_BIG_WINDOW; i++)
IIO_ITTE_DISABLE(nasid, i);
hub_set_piomode(nasid);
}
|
gpl-2.0
|
cbolumar/android_kernel_samsung_a3ulte
|
fs/yaffs2/yaffs_tagscompat.c
|
7932
|
10448
|
/*
* YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
*
* Copyright (C) 2002-2010 Aleph One Ltd.
* for Toby Churchill Ltd and Brightstar Engineering
*
* Created by Charles Manning <charles@aleph1.co.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include "yaffs_guts.h"
#include "yaffs_tagscompat.h"
#include "yaffs_ecc.h"
#include "yaffs_getblockinfo.h"
#include "yaffs_trace.h"
static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk);
/********** Tags ECC calculations *********/
void yaffs_calc_ecc(const u8 * data, struct yaffs_spare *spare)
{
yaffs_ecc_cacl(data, spare->ecc1);
yaffs_ecc_cacl(&data[256], spare->ecc2);
}
void yaffs_calc_tags_ecc(struct yaffs_tags *tags)
{
/* Calculate an ecc */
unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes;
unsigned i, j;
unsigned ecc = 0;
unsigned bit = 0;
tags->ecc = 0;
for (i = 0; i < 8; i++) {
for (j = 1; j & 0xff; j <<= 1) {
bit++;
if (b[i] & j)
ecc ^= bit;
}
}
tags->ecc = ecc;
}
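/*
 * Editorial sketch (not part of the original yaffs source, compiled out):
 * the loop above is a positional parity check; each of the 64 tag bits
 * XORs its 1-based position into the accumulator.  XOR-ing a stored ECC
 * with a freshly recomputed one therefore yields the position of a single
 * flipped bit, which is exactly what yaffs_check_tags_ecc() below uses to
 * repair it.  The helper names here are made up for illustration.
 */
#if 0
static unsigned tags_ecc_of(const unsigned char b[8])
{
	unsigned i, j, bit = 0, ecc = 0;

	for (i = 0; i < 8; i++)
		for (j = 1; j & 0xff; j <<= 1) {
			bit++;
			if (b[i] & j)
				ecc ^= bit;
		}
	return ecc;
}

static void tags_ecc_demo(void)
{
	unsigned char b[8] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x11, 0x22, 0x33 };
	unsigned stored = tags_ecc_of(b);
	unsigned delta;

	b[2] ^= 0x10;			 /* corrupt the bit at 1-based position 21 */
	delta = stored ^ tags_ecc_of(b); /* delta == 21 locates the flipped bit */
	b[(delta - 1) / 8] ^= 1 << ((delta - 1) & 7);	/* repaired */
}
#endif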
int yaffs_check_tags_ecc(struct yaffs_tags *tags)
{
unsigned ecc = tags->ecc;
yaffs_calc_tags_ecc(tags);
ecc ^= tags->ecc;
if (ecc && ecc <= 64) {
/* TODO: Handle the failure better. Retire? */
unsigned char *b = ((union yaffs_tags_union *)tags)->as_bytes;
ecc--;
b[ecc / 8] ^= (1 << (ecc & 7));
/* Now recalc the ecc */
yaffs_calc_tags_ecc(tags);
return 1; /* recovered error */
} else if (ecc) {
/* Weird ECC failure value */
/* TODO: Need to do something here */
return -1; /* unrecovered error */
}
return 0;
}
/********** Tags **********/
static void yaffs_load_tags_to_spare(struct yaffs_spare *spare_ptr,
struct yaffs_tags *tags_ptr)
{
union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr;
yaffs_calc_tags_ecc(tags_ptr);
spare_ptr->tb0 = tu->as_bytes[0];
spare_ptr->tb1 = tu->as_bytes[1];
spare_ptr->tb2 = tu->as_bytes[2];
spare_ptr->tb3 = tu->as_bytes[3];
spare_ptr->tb4 = tu->as_bytes[4];
spare_ptr->tb5 = tu->as_bytes[5];
spare_ptr->tb6 = tu->as_bytes[6];
spare_ptr->tb7 = tu->as_bytes[7];
}
static void yaffs_get_tags_from_spare(struct yaffs_dev *dev,
struct yaffs_spare *spare_ptr,
struct yaffs_tags *tags_ptr)
{
union yaffs_tags_union *tu = (union yaffs_tags_union *)tags_ptr;
int result;
tu->as_bytes[0] = spare_ptr->tb0;
tu->as_bytes[1] = spare_ptr->tb1;
tu->as_bytes[2] = spare_ptr->tb2;
tu->as_bytes[3] = spare_ptr->tb3;
tu->as_bytes[4] = spare_ptr->tb4;
tu->as_bytes[5] = spare_ptr->tb5;
tu->as_bytes[6] = spare_ptr->tb6;
tu->as_bytes[7] = spare_ptr->tb7;
result = yaffs_check_tags_ecc(tags_ptr);
if (result > 0)
dev->n_tags_ecc_fixed++;
else if (result < 0)
dev->n_tags_ecc_unfixed++;
}
static void yaffs_spare_init(struct yaffs_spare *spare)
{
memset(spare, 0xFF, sizeof(struct yaffs_spare));
}
static int yaffs_wr_nand(struct yaffs_dev *dev,
int nand_chunk, const u8 * data,
struct yaffs_spare *spare)
{
if (nand_chunk < dev->param.start_block * dev->param.chunks_per_block) {
yaffs_trace(YAFFS_TRACE_ERROR,
"**>> yaffs chunk %d is not valid",
nand_chunk);
return YAFFS_FAIL;
}
return dev->param.write_chunk_fn(dev, nand_chunk, data, spare);
}
static int yaffs_rd_chunk_nand(struct yaffs_dev *dev,
int nand_chunk,
u8 * data,
struct yaffs_spare *spare,
enum yaffs_ecc_result *ecc_result,
int correct_errors)
{
int ret_val;
struct yaffs_spare local_spare;
if (!spare && data) {
/* If we don't have a real spare, then we use a local one. */
/* Need this for the calculation of the ecc */
spare = &local_spare;
}
if (!dev->param.use_nand_ecc) {
ret_val =
dev->param.read_chunk_fn(dev, nand_chunk, data, spare);
if (data && correct_errors) {
/* Do ECC correction */
/* Todo handle any errors */
int ecc_result1, ecc_result2;
u8 calc_ecc[3];
yaffs_ecc_cacl(data, calc_ecc);
ecc_result1 =
yaffs_ecc_correct(data, spare->ecc1, calc_ecc);
yaffs_ecc_cacl(&data[256], calc_ecc);
ecc_result2 =
yaffs_ecc_correct(&data[256], spare->ecc2,
calc_ecc);
if (ecc_result1 > 0) {
yaffs_trace(YAFFS_TRACE_ERROR,
"**>>yaffs ecc error fix performed on chunk %d:0",
nand_chunk);
dev->n_ecc_fixed++;
} else if (ecc_result1 < 0) {
yaffs_trace(YAFFS_TRACE_ERROR,
"**>>yaffs ecc error unfixed on chunk %d:0",
nand_chunk);
dev->n_ecc_unfixed++;
}
if (ecc_result2 > 0) {
yaffs_trace(YAFFS_TRACE_ERROR,
"**>>yaffs ecc error fix performed on chunk %d:1",
nand_chunk);
dev->n_ecc_fixed++;
} else if (ecc_result2 < 0) {
yaffs_trace(YAFFS_TRACE_ERROR,
"**>>yaffs ecc error unfixed on chunk %d:1",
nand_chunk);
dev->n_ecc_unfixed++;
}
if (ecc_result1 || ecc_result2) {
/* We had a data problem on this page */
yaffs_handle_rd_data_error(dev, nand_chunk);
}
if (ecc_result1 < 0 || ecc_result2 < 0)
*ecc_result = YAFFS_ECC_RESULT_UNFIXED;
else if (ecc_result1 > 0 || ecc_result2 > 0)
*ecc_result = YAFFS_ECC_RESULT_FIXED;
else
*ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
}
} else {
/* Must allocate enough memory for spare+2*sizeof(int) */
/* for ecc results from device. */
struct yaffs_nand_spare nspare;
memset(&nspare, 0, sizeof(nspare));
ret_val = dev->param.read_chunk_fn(dev, nand_chunk, data,
(struct yaffs_spare *)
&nspare);
memcpy(spare, &nspare, sizeof(struct yaffs_spare));
if (data && correct_errors) {
if (nspare.eccres1 > 0) {
yaffs_trace(YAFFS_TRACE_ERROR,
"**>>mtd ecc error fix performed on chunk %d:0",
nand_chunk);
} else if (nspare.eccres1 < 0) {
yaffs_trace(YAFFS_TRACE_ERROR,
"**>>mtd ecc error unfixed on chunk %d:0",
nand_chunk);
}
if (nspare.eccres2 > 0) {
yaffs_trace(YAFFS_TRACE_ERROR,
"**>>mtd ecc error fix performed on chunk %d:1",
nand_chunk);
} else if (nspare.eccres2 < 0) {
yaffs_trace(YAFFS_TRACE_ERROR,
"**>>mtd ecc error unfixed on chunk %d:1",
nand_chunk);
}
if (nspare.eccres1 || nspare.eccres2) {
/* We had a data problem on this page */
yaffs_handle_rd_data_error(dev, nand_chunk);
}
if (nspare.eccres1 < 0 || nspare.eccres2 < 0)
*ecc_result = YAFFS_ECC_RESULT_UNFIXED;
else if (nspare.eccres1 > 0 || nspare.eccres2 > 0)
*ecc_result = YAFFS_ECC_RESULT_FIXED;
else
*ecc_result = YAFFS_ECC_RESULT_NO_ERROR;
}
}
return ret_val;
}
/*
 * Functions for robustness handling
*/
static void yaffs_handle_rd_data_error(struct yaffs_dev *dev, int nand_chunk)
{
int flash_block = nand_chunk / dev->param.chunks_per_block;
/* Mark the block for retirement */
yaffs_get_block_info(dev,
flash_block + dev->block_offset)->needs_retiring =
1;
yaffs_trace(YAFFS_TRACE_ERROR | YAFFS_TRACE_BAD_BLOCKS,
"**>>Block %d marked for retirement",
flash_block);
/* TODO:
* Just do a garbage collection on the affected block
* then retire the block
* NB recursion
*/
}
int yaffs_tags_compat_wr(struct yaffs_dev *dev,
int nand_chunk,
const u8 * data, const struct yaffs_ext_tags *ext_tags)
{
struct yaffs_spare spare;
struct yaffs_tags tags;
yaffs_spare_init(&spare);
if (ext_tags->is_deleted)
spare.page_status = 0;
else {
tags.obj_id = ext_tags->obj_id;
tags.chunk_id = ext_tags->chunk_id;
tags.n_bytes_lsb = ext_tags->n_bytes & 0x3ff;
if (dev->data_bytes_per_chunk >= 1024)
tags.n_bytes_msb = (ext_tags->n_bytes >> 10) & 3;
else
tags.n_bytes_msb = 3;
tags.serial_number = ext_tags->serial_number;
if (!dev->param.use_nand_ecc && data)
yaffs_calc_ecc(data, &spare);
yaffs_load_tags_to_spare(&spare, &tags);
}
return yaffs_wr_nand(dev, nand_chunk, data, &spare);
}
int yaffs_tags_compat_rd(struct yaffs_dev *dev,
int nand_chunk,
u8 * data, struct yaffs_ext_tags *ext_tags)
{
struct yaffs_spare spare;
struct yaffs_tags tags;
enum yaffs_ecc_result ecc_result = YAFFS_ECC_RESULT_UNKNOWN;
static struct yaffs_spare spare_ff;
static int init;
if (!init) {
memset(&spare_ff, 0xFF, sizeof(spare_ff));
init = 1;
}
if (yaffs_rd_chunk_nand(dev, nand_chunk, data, &spare, &ecc_result, 1)) {
/* ext_tags may be NULL */
if (ext_tags) {
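/* page_status is programmed to 0 when a chunk is deleted (see
 * yaffs_tags_compat_wr above); requiring fewer than 7 set bits
 * tolerates a single stuck or flipped bit in the marker either way. */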
int deleted =
(hweight8(spare.page_status) < 7) ? 1 : 0;
ext_tags->is_deleted = deleted;
ext_tags->ecc_result = ecc_result;
ext_tags->block_bad = 0; /* We're reading it */
/* therefore it is not a bad block */
ext_tags->chunk_used =
(memcmp(&spare_ff, &spare, sizeof(spare_ff)) !=
0) ? 1 : 0;
if (ext_tags->chunk_used) {
yaffs_get_tags_from_spare(dev, &spare, &tags);
ext_tags->obj_id = tags.obj_id;
ext_tags->chunk_id = tags.chunk_id;
ext_tags->n_bytes = tags.n_bytes_lsb;
if (dev->data_bytes_per_chunk >= 1024)
ext_tags->n_bytes |=
(((unsigned)tags.
n_bytes_msb) << 10);
ext_tags->serial_number = tags.serial_number;
}
}
return YAFFS_OK;
} else {
return YAFFS_FAIL;
}
}
int yaffs_tags_compat_mark_bad(struct yaffs_dev *dev, int flash_block)
{
struct yaffs_spare spare;
memset(&spare, 0xff, sizeof(struct yaffs_spare));
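/* any value other than all-0xFF in block_status marks the block bad;
 * 'Y' is just a distinctive marker, and yaffs_tags_compat_query_block()
 * below only counts the remaining set bits when reading it back */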
spare.block_status = 'Y';
yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block, NULL,
&spare);
yaffs_wr_nand(dev, flash_block * dev->param.chunks_per_block + 1,
NULL, &spare);
return YAFFS_OK;
}
int yaffs_tags_compat_query_block(struct yaffs_dev *dev,
int block_no,
enum yaffs_block_state *state,
u32 * seq_number)
{
struct yaffs_spare spare0, spare1;
static struct yaffs_spare spare_ff;
static int init;
enum yaffs_ecc_result dummy;
if (!init) {
memset(&spare_ff, 0xFF, sizeof(spare_ff));
init = 1;
}
*seq_number = 0;
yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block, NULL,
&spare0, &dummy, 1);
yaffs_rd_chunk_nand(dev, block_no * dev->param.chunks_per_block + 1,
NULL, &spare1, &dummy, 1);
if (hweight8(spare0.block_status & spare1.block_status) < 7)
*state = YAFFS_BLOCK_STATE_DEAD;
else if (memcmp(&spare_ff, &spare0, sizeof(spare_ff)) == 0)
*state = YAFFS_BLOCK_STATE_EMPTY;
else
*state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
return YAFFS_OK;
}
|
gpl-2.0
|
TREX-ROM/android_kernel_lge_mako
|
net/netfilter/ipvs/ip_vs_proto_ah_esp.c
|
8188
|
4054
|
/*
* ip_vs_proto_ah_esp.c: AH/ESP IPSec load balancing support for IPVS
*
* Authors: Julian Anastasov <ja@ssi.bg>, February 2002
* Wensong Zhang <wensong@linuxvirtualserver.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation;
*
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/ip_vs.h>
/* TODO:
struct isakmp_hdr {
__u8 icookie[8];
__u8 rcookie[8];
__u8 np;
__u8 version;
__u8 xchgtype;
__u8 flags;
__u32 msgid;
__u32 length;
};
*/
#define PORT_ISAKMP 500
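/*
 * Editorial note: AH and ESP headers carry no port numbers, so IPVS
 * cannot key its connection table on them directly.  Both lookup
 * helpers below instead borrow the ISAKMP control association, a
 * pseudo UDP connection on port 500 between the same address pair,
 * as the stand-in key for the IPsec data flows.
 */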
static void
ah_esp_conn_fill_param_proto(struct net *net, int af,
const struct ip_vs_iphdr *iph, int inverse,
struct ip_vs_conn_param *p)
{
if (likely(!inverse))
ip_vs_conn_fill_param(net, af, IPPROTO_UDP,
&iph->saddr, htons(PORT_ISAKMP),
&iph->daddr, htons(PORT_ISAKMP), p);
else
ip_vs_conn_fill_param(net, af, IPPROTO_UDP,
&iph->daddr, htons(PORT_ISAKMP),
&iph->saddr, htons(PORT_ISAKMP), p);
}
static struct ip_vs_conn *
ah_esp_conn_in_get(int af, const struct sk_buff *skb,
const struct ip_vs_iphdr *iph, unsigned int proto_off,
int inverse)
{
struct ip_vs_conn *cp;
struct ip_vs_conn_param p;
struct net *net = skb_net(skb);
ah_esp_conn_fill_param_proto(net, af, iph, inverse, &p);
cp = ip_vs_conn_in_get(&p);
if (!cp) {
/*
* We are not sure if the packet is from our
* service, so our conn_schedule hook should return NF_ACCEPT
*/
IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for outin packet "
"%s%s %s->%s\n",
inverse ? "ICMP+" : "",
ip_vs_proto_get(iph->protocol)->name,
IP_VS_DBG_ADDR(af, &iph->saddr),
IP_VS_DBG_ADDR(af, &iph->daddr));
}
return cp;
}
static struct ip_vs_conn *
ah_esp_conn_out_get(int af, const struct sk_buff *skb,
const struct ip_vs_iphdr *iph,
unsigned int proto_off,
int inverse)
{
struct ip_vs_conn *cp;
struct ip_vs_conn_param p;
struct net *net = skb_net(skb);
ah_esp_conn_fill_param_proto(net, af, iph, inverse, &p);
cp = ip_vs_conn_out_get(&p);
if (!cp) {
IP_VS_DBG_BUF(12, "Unknown ISAKMP entry for inout packet "
"%s%s %s->%s\n",
inverse ? "ICMP+" : "",
ip_vs_proto_get(iph->protocol)->name,
IP_VS_DBG_ADDR(af, &iph->saddr),
IP_VS_DBG_ADDR(af, &iph->daddr));
}
return cp;
}
static int
ah_esp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
int *verdict, struct ip_vs_conn **cpp)
{
/*
 * AH/ESP is only ever handled as related traffic. Pass the packet to the IP stack.
*/
*verdict = NF_ACCEPT;
return 0;
}
#ifdef CONFIG_IP_VS_PROTO_AH
struct ip_vs_protocol ip_vs_protocol_ah = {
.name = "AH",
.protocol = IPPROTO_AH,
.num_states = 1,
.dont_defrag = 1,
.init = NULL,
.exit = NULL,
.conn_schedule = ah_esp_conn_schedule,
.conn_in_get = ah_esp_conn_in_get,
.conn_out_get = ah_esp_conn_out_get,
.snat_handler = NULL,
.dnat_handler = NULL,
.csum_check = NULL,
.state_transition = NULL,
.register_app = NULL,
.unregister_app = NULL,
.app_conn_bind = NULL,
.debug_packet = ip_vs_tcpudp_debug_packet,
.timeout_change = NULL, /* ISAKMP */
};
#endif
#ifdef CONFIG_IP_VS_PROTO_ESP
struct ip_vs_protocol ip_vs_protocol_esp = {
.name = "ESP",
.protocol = IPPROTO_ESP,
.num_states = 1,
.dont_defrag = 1,
.init = NULL,
.exit = NULL,
.conn_schedule = ah_esp_conn_schedule,
.conn_in_get = ah_esp_conn_in_get,
.conn_out_get = ah_esp_conn_out_get,
.snat_handler = NULL,
.dnat_handler = NULL,
.csum_check = NULL,
.state_transition = NULL,
.register_app = NULL,
.unregister_app = NULL,
.app_conn_bind = NULL,
.debug_packet = ip_vs_tcpudp_debug_packet,
.timeout_change = NULL, /* ISAKMP */
};
#endif
|
gpl-2.0
|
javilonas/Ptah-GT-I9300_OLD
|
fs/xfs/linux-2.6/xfs_sysctl.c
|
8188
|
6276
|
/*
* Copyright (c) 2001-2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include "xfs_error.h"
static struct ctl_table_header *xfs_table_header;
#ifdef CONFIG_PROC_FS
STATIC int
xfs_stats_clear_proc_handler(
ctl_table *ctl,
int write,
void __user *buffer,
size_t *lenp,
loff_t *ppos)
{
int c, ret, *valp = ctl->data;
__uint32_t vn_active;
ret = proc_dointvec_minmax(ctl, write, buffer, lenp, ppos);
if (!ret && write && *valp) {
xfs_notice(NULL, "Clearing xfsstats");
for_each_possible_cpu(c) {
preempt_disable();
/* save vn_active, it's a universal truth! */
vn_active = per_cpu(xfsstats, c).vn_active;
memset(&per_cpu(xfsstats, c), 0,
sizeof(struct xfsstats));
per_cpu(xfsstats, c).vn_active = vn_active;
preempt_enable();
}
xfs_stats_clear = 0;
}
return ret;
}
STATIC int
xfs_panic_mask_proc_handler(
ctl_table *ctl,
int write,
void __user *buffer,
size_t *lenp,
loff_t *ppos)
{
int ret, *valp = ctl->data;
ret = proc_dointvec_minmax(ctl, write, buffer, lenp, ppos);
if (!ret && write) {
xfs_panic_mask = *valp;
#ifdef DEBUG
xfs_panic_mask |= (XFS_PTAG_SHUTDOWN_CORRUPT | XFS_PTAG_LOGRES);
#endif
}
return ret;
}
#endif /* CONFIG_PROC_FS */
static ctl_table xfs_table[] = {
{
.procname = "irix_sgid_inherit",
.data = &xfs_params.sgid_inherit.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.sgid_inherit.min,
.extra2 = &xfs_params.sgid_inherit.max
},
{
.procname = "irix_symlink_mode",
.data = &xfs_params.symlink_mode.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.symlink_mode.min,
.extra2 = &xfs_params.symlink_mode.max
},
{
.procname = "panic_mask",
.data = &xfs_params.panic_mask.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = xfs_panic_mask_proc_handler,
.extra1 = &xfs_params.panic_mask.min,
.extra2 = &xfs_params.panic_mask.max
},
{
.procname = "error_level",
.data = &xfs_params.error_level.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.error_level.min,
.extra2 = &xfs_params.error_level.max
},
{
.procname = "xfssyncd_centisecs",
.data = &xfs_params.syncd_timer.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.syncd_timer.min,
.extra2 = &xfs_params.syncd_timer.max
},
{
.procname = "inherit_sync",
.data = &xfs_params.inherit_sync.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.inherit_sync.min,
.extra2 = &xfs_params.inherit_sync.max
},
{
.procname = "inherit_nodump",
.data = &xfs_params.inherit_nodump.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.inherit_nodump.min,
.extra2 = &xfs_params.inherit_nodump.max
},
{
.procname = "inherit_noatime",
.data = &xfs_params.inherit_noatim.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.inherit_noatim.min,
.extra2 = &xfs_params.inherit_noatim.max
},
{
.procname = "xfsbufd_centisecs",
.data = &xfs_params.xfs_buf_timer.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.xfs_buf_timer.min,
.extra2 = &xfs_params.xfs_buf_timer.max
},
{
.procname = "age_buffer_centisecs",
.data = &xfs_params.xfs_buf_age.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.xfs_buf_age.min,
.extra2 = &xfs_params.xfs_buf_age.max
},
{
.procname = "inherit_nosymlinks",
.data = &xfs_params.inherit_nosym.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.inherit_nosym.min,
.extra2 = &xfs_params.inherit_nosym.max
},
{
.procname = "rotorstep",
.data = &xfs_params.rotorstep.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.rotorstep.min,
.extra2 = &xfs_params.rotorstep.max
},
{
.procname = "inherit_nodefrag",
.data = &xfs_params.inherit_nodfrg.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.inherit_nodfrg.min,
.extra2 = &xfs_params.inherit_nodfrg.max
},
{
.procname = "filestream_centisecs",
.data = &xfs_params.fstrm_timer.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xfs_params.fstrm_timer.min,
.extra2 = &xfs_params.fstrm_timer.max,
},
/* please keep this the last entry */
#ifdef CONFIG_PROC_FS
{
.procname = "stats_clear",
.data = &xfs_params.stats_clear.val,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = xfs_stats_clear_proc_handler,
.extra1 = &xfs_params.stats_clear.min,
.extra2 = &xfs_params.stats_clear.max
},
#endif /* CONFIG_PROC_FS */
{}
};
static ctl_table xfs_dir_table[] = {
{
.procname = "xfs",
.mode = 0555,
.child = xfs_table
},
{}
};
static ctl_table xfs_root_table[] = {
{
.procname = "fs",
.mode = 0555,
.child = xfs_dir_table
},
{}
};
int
xfs_sysctl_register(void)
{
xfs_table_header = register_sysctl_table(xfs_root_table);
if (!xfs_table_header)
return -ENOMEM;
return 0;
}
void
xfs_sysctl_unregister(void)
{
unregister_sysctl_table(xfs_table_header);
}
|
gpl-2.0
|
NeptunIDE/linux
|
drivers/staging/rt2860/common/mlme.c
|
509
|
258300
|
/*
*************************************************************************
* Ralink Tech Inc.
* 5F., No.36, Taiyuan St., Jhubei City,
* Hsinchu County 302,
* Taiwan, R.O.C.
*
* (c) Copyright 2002-2007, Ralink Technology, Inc.
*
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
*************************************************************************
Module Name:
mlme.c
Abstract:
Revision History:
Who When What
-------- ---------- ----------------------------------------------
John Chang 2004-08-25 Modify from RT2500 code base
John Chang 2004-09-06 modified for RT2600
*/
#include "../rt_config.h"
#include <stdarg.h>
UCHAR CISCO_OUI[] = {0x00, 0x40, 0x96};
UCHAR WPA_OUI[] = {0x00, 0x50, 0xf2, 0x01};
UCHAR RSN_OUI[] = {0x00, 0x0f, 0xac};
UCHAR WME_INFO_ELEM[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01};
UCHAR WME_PARM_ELEM[] = {0x00, 0x50, 0xf2, 0x02, 0x01, 0x01};
UCHAR Ccx2QosInfo[] = {0x00, 0x40, 0x96, 0x04};
UCHAR RALINK_OUI[] = {0x00, 0x0c, 0x43};
UCHAR BROADCOM_OUI[] = {0x00, 0x90, 0x4c};
UCHAR WPS_OUI[] = {0x00, 0x50, 0xf2, 0x04};
UCHAR PRE_N_HT_OUI[] = {0x00, 0x90, 0x4c};
UCHAR RateSwitchTable[] = {
// Item No. Mode Curr-MCS TrainUp TrainDown // Mode- Bit0: STBC, Bit1: Short GI, Bit4,5: Mode(0:CCK, 1:OFDM, 2:HT Mix, 3:HT GF)
0x11, 0x00, 0, 0, 0, // Initial used item after association
0x00, 0x00, 0, 40, 101,
0x01, 0x00, 1, 40, 50,
0x02, 0x00, 2, 35, 45,
0x03, 0x00, 3, 20, 45,
0x04, 0x21, 0, 30, 50,
0x05, 0x21, 1, 20, 50,
0x06, 0x21, 2, 20, 50,
0x07, 0x21, 3, 15, 50,
0x08, 0x21, 4, 15, 30,
0x09, 0x21, 5, 10, 25,
0x0a, 0x21, 6, 8, 25,
0x0b, 0x21, 7, 8, 25,
0x0c, 0x20, 12, 15, 30,
0x0d, 0x20, 13, 8, 20,
0x0e, 0x20, 14, 8, 20,
0x0f, 0x20, 15, 8, 25,
0x10, 0x22, 15, 8, 25,
0x11, 0x00, 0, 0, 0,
0x12, 0x00, 0, 0, 0,
0x13, 0x00, 0, 0, 0,
0x14, 0x00, 0, 0, 0,
0x15, 0x00, 0, 0, 0,
0x16, 0x00, 0, 0, 0,
0x17, 0x00, 0, 0, 0,
0x18, 0x00, 0, 0, 0,
0x19, 0x00, 0, 0, 0,
0x1a, 0x00, 0, 0, 0,
0x1b, 0x00, 0, 0, 0,
0x1c, 0x00, 0, 0, 0,
0x1d, 0x00, 0, 0, 0,
0x1e, 0x00, 0, 0, 0,
0x1f, 0x00, 0, 0, 0,
};
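// Editorial sketch (not in the original Ralink source, compiled out):
// each rate switch table above is a flat byte array of 5-byte rows.
// Row 0 is a header: byte 0 holds the number of valid data rows and
// byte 1 the row used immediately after association.  Every following
// row is { item no., mode bits, current MCS, TrainUp, TrainDown }.
// The type and accessor below are made up for illustration only.
#if 0
typedef struct {
	UCHAR Item;		// row index
	UCHAR Mode;		// Bit0: STBC, Bit1: Short GI, Bit4,5: PHY mode
	UCHAR CurrMCS;		// MCS used while sitting on this row
	UCHAR TrainUp;		// quality threshold for stepping up
	UCHAR TrainDown;	// quality threshold for stepping down
} RATE_SWITCH_ENTRY;

static RATE_SWITCH_ENTRY *RateSwitchRow(UCHAR *pTable, UCHAR Row)
{
	// pTable[0] = number of data rows, pTable[1] = initial row;
	// data rows start right after the 5-byte header row
	return (RATE_SWITCH_ENTRY *)(pTable + (Row + 1) * 5);
}
#endif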
UCHAR RateSwitchTable11B[] = {
// Item No. Mode Curr-MCS TrainUp TrainDown // Mode- Bit0: STBC, Bit1: Short GI, Bit4,5: Mode(0:CCK, 1:OFDM, 2:HT Mix, 3:HT GF)
0x04, 0x03, 0, 0, 0, // Initial used item after association
0x00, 0x00, 0, 40, 101,
0x01, 0x00, 1, 40, 50,
0x02, 0x00, 2, 35, 45,
0x03, 0x00, 3, 20, 45,
};
UCHAR RateSwitchTable11BG[] = {
// Item No. Mode Curr-MCS TrainUp TrainDown // Mode- Bit0: STBC, Bit1: Short GI, Bit4,5: Mode(0:CCK, 1:OFDM, 2:HT Mix, 3:HT GF)
0x0a, 0x00, 0, 0, 0, // Initial used item after association
0x00, 0x00, 0, 40, 101,
0x01, 0x00, 1, 40, 50,
0x02, 0x00, 2, 35, 45,
0x03, 0x00, 3, 20, 45,
0x04, 0x10, 2, 20, 35,
0x05, 0x10, 3, 16, 35,
0x06, 0x10, 4, 10, 25,
0x07, 0x10, 5, 16, 25,
0x08, 0x10, 6, 10, 25,
0x09, 0x10, 7, 10, 13,
};
UCHAR RateSwitchTable11G[] = {
// Item No. Mode Curr-MCS TrainUp TrainDown // Mode- Bit0: STBC, Bit1: Short GI, Bit4,5: Mode(0:CCK, 1:OFDM, 2:HT Mix, 3:HT GF)
0x08, 0x00, 0, 0, 0, // Initial used item after association
0x00, 0x10, 0, 20, 101,
0x01, 0x10, 1, 20, 35,
0x02, 0x10, 2, 20, 35,
0x03, 0x10, 3, 16, 35,
0x04, 0x10, 4, 10, 25,
0x05, 0x10, 5, 16, 25,
0x06, 0x10, 6, 10, 25,
0x07, 0x10, 7, 10, 13,
};
UCHAR RateSwitchTable11N1S[] = {
// Item No. Mode Curr-MCS TrainUp TrainDown // Mode- Bit0: STBC, Bit1: Short GI, Bit4,5: Mode(0:CCK, 1:OFDM, 2:HT Mix, 3:HT GF)
0x09, 0x00, 0, 0, 0, // Initial used item after association
0x00, 0x21, 0, 30, 101,
0x01, 0x21, 1, 20, 50,
0x02, 0x21, 2, 20, 50,
0x03, 0x21, 3, 15, 50,
0x04, 0x21, 4, 15, 30,
0x05, 0x21, 5, 10, 25,
0x06, 0x21, 6, 8, 14,
0x07, 0x21, 7, 8, 14,
0x08, 0x23, 7, 8, 14,
};
UCHAR RateSwitchTable11N2S[] = {
// Item No. Mode Curr-MCS TrainUp TrainDown // Mode- Bit0: STBC, Bit1: Short GI, Bit4,5: Mode(0:CCK, 1:OFDM, 2:HT Mix, 3:HT GF)
0x0a, 0x00, 0, 0, 0, // Initial used item after association
0x00, 0x21, 0, 30, 101,
0x01, 0x21, 1, 20, 50,
0x02, 0x21, 2, 20, 50,
0x03, 0x21, 3, 15, 50,
0x04, 0x21, 4, 15, 30,
0x05, 0x20, 12, 15, 30,
0x06, 0x20, 13, 8, 20,
0x07, 0x20, 14, 8, 20,
0x08, 0x20, 15, 8, 25,
0x09, 0x22, 15, 8, 25,
};
UCHAR RateSwitchTable11N3S[] = {
// Item No. Mode Curr-MCS TrainUp TrainDown // Mode- Bit0: STBC, Bit1: Short GI, Bit4,5: Mode(0:CCK, 1:OFDM, 2:HT Mix, 3:HT GF)
0x0a, 0x00, 0, 0, 0, // Initial used item after association
0x00, 0x21, 0, 30, 101,
0x01, 0x21, 1, 20, 50,
0x02, 0x21, 2, 20, 50,
0x03, 0x21, 3, 15, 50,
0x04, 0x21, 4, 15, 30,
0x05, 0x20, 12, 15, 30,
0x06, 0x20, 13, 8, 20,
0x07, 0x20, 14, 8, 20,
0x08, 0x20, 15, 8, 25,
0x09, 0x22, 15, 8, 25,
};
UCHAR RateSwitchTable11N2SForABand[] = {
// Item No. Mode Curr-MCS TrainUp TrainDown // Mode- Bit0: STBC, Bit1: Short GI, Bit4,5: Mode(0:CCK, 1:OFDM, 2:HT Mix, 3:HT GF)
0x0b, 0x09, 0, 0, 0, // Initial used item after association
0x00, 0x21, 0, 30, 101,
0x01, 0x21, 1, 20, 50,
0x02, 0x21, 2, 20, 50,
0x03, 0x21, 3, 15, 50,
0x04, 0x21, 4, 15, 30,
0x05, 0x21, 5, 15, 30,
0x06, 0x20, 12, 15, 30,
0x07, 0x20, 13, 8, 20,
0x08, 0x20, 14, 8, 20,
0x09, 0x20, 15, 8, 25,
0x0a, 0x22, 15, 8, 25,
};
UCHAR RateSwitchTable11N3SForABand[] = { // 3*3
// Item No. Mode Curr-MCS TrainUp TrainDown // Mode- Bit0: STBC, Bit1: Short GI, Bit4,5: Mode(0:CCK, 1:OFDM, 2:HT Mix, 3:HT GF)
0x0b, 0x09, 0, 0, 0, // Initial used item after association
0x00, 0x21, 0, 30, 101,
0x01, 0x21, 1, 20, 50,
0x02, 0x21, 2, 20, 50,
0x03, 0x21, 3, 15, 50,
0x04, 0x21, 4, 15, 30,
0x05, 0x21, 5, 15, 30,
0x06, 0x20, 12, 15, 30,
0x07, 0x20, 13, 8, 20,
0x08, 0x20, 14, 8, 20,
0x09, 0x20, 15, 8, 25,
0x0a, 0x22, 15, 8, 25,
};
UCHAR RateSwitchTable11BGN1S[] = {
// Item No. Mode Curr-MCS TrainUp TrainDown // Mode- Bit0: STBC, Bit1: Short GI, Bit4,5: Mode(0:CCK, 1:OFDM, 2:HT Mix, 3:HT GF)
0x0d, 0x00, 0, 0, 0, // Initial used item after association
0x00, 0x00, 0, 40, 101,
0x01, 0x00, 1, 40, 50,
0x02, 0x00, 2, 35, 45,
0x03, 0x00, 3, 20, 45,
0x04, 0x21, 0, 30,101, //50
0x05, 0x21, 1, 20, 50,
0x06, 0x21, 2, 20, 50,
0x07, 0x21, 3, 15, 50,
0x08, 0x21, 4, 15, 30,
0x09, 0x21, 5, 10, 25,
0x0a, 0x21, 6, 8, 14,
0x0b, 0x21, 7, 8, 14,
0x0c, 0x23, 7, 8, 14,
};
UCHAR RateSwitchTable11BGN2S[] = {
// Item No. Mode Curr-MCS TrainUp TrainDown // Mode- Bit0: STBC, Bit1: Short GI, Bit4,5: Mode(0:CCK, 1:OFDM, 2:HT Mix, 3:HT GF)
0x0a, 0x00, 0, 0, 0, // Initial used item after association
0x00, 0x21, 0, 30,101, //50
0x01, 0x21, 1, 20, 50,
0x02, 0x21, 2, 20, 50,
0x03, 0x21, 3, 15, 50,
0x04, 0x21, 4, 15, 30,
0x05, 0x20, 12, 15, 30,
0x06, 0x20, 13, 8, 20,
0x07, 0x20, 14, 8, 20,
0x08, 0x20, 15, 8, 25,
0x09, 0x22, 15, 8, 25,
};
UCHAR RateSwitchTable11BGN3S[] = { // 3*3
// Item No. Mode Curr-MCS TrainUp TrainDown // Mode- Bit0: STBC, Bit1: Short GI, Bit4,5: Mode(0:CCK, 1:OFDM, 2:HT Mix, 3:HT GF)
0x0a, 0x00, 0, 0, 0, // Initial used item after association
0x00, 0x21, 0, 30,101, //50
0x01, 0x21, 1, 20, 50,
0x02, 0x21, 2, 20, 50,
0x03, 0x21, 3, 20, 50,
0x04, 0x21, 4, 15, 50,
0x05, 0x20, 20, 15, 30,
0x06, 0x20, 21, 8, 20,
0x07, 0x20, 22, 8, 20,
0x08, 0x20, 23, 8, 25,
0x09, 0x22, 23, 8, 25,
};
UCHAR RateSwitchTable11BGN2SForABand[] = {
// Item No. Mode Curr-MCS TrainUp TrainDown // Mode- Bit0: STBC, Bit1: Short GI, Bit4,5: Mode(0:CCK, 1:OFDM, 2:HT Mix, 3:HT GF)
0x0b, 0x09, 0, 0, 0, // Initial used item after association
0x00, 0x21, 0, 30,101, //50
0x01, 0x21, 1, 20, 50,
0x02, 0x21, 2, 20, 50,
0x03, 0x21, 3, 15, 50,
0x04, 0x21, 4, 15, 30,
0x05, 0x21, 5, 15, 30,
0x06, 0x20, 12, 15, 30,
0x07, 0x20, 13, 8, 20,
0x08, 0x20, 14, 8, 20,
0x09, 0x20, 15, 8, 25,
0x0a, 0x22, 15, 8, 25,
};
UCHAR RateSwitchTable11BGN3SForABand[] = { // 3*3
// Item No. Mode Curr-MCS TrainUp TrainDown // Mode- Bit0: STBC, Bit1: Short GI, Bit4,5: Mode(0:CCK, 1:OFDM, 2:HT Mix, 3:HT GF)
0x0c, 0x09, 0, 0, 0, // Initial used item after association
0x00, 0x21, 0, 30,101, //50
0x01, 0x21, 1, 20, 50,
0x02, 0x21, 2, 20, 50,
0x03, 0x21, 3, 15, 50,
0x04, 0x21, 4, 15, 30,
0x05, 0x21, 5, 15, 30,
0x06, 0x21, 12, 15, 30,
0x07, 0x20, 20, 15, 30,
0x08, 0x20, 21, 8, 20,
0x09, 0x20, 22, 8, 20,
0x0a, 0x20, 23, 8, 25,
0x0b, 0x22, 23, 8, 25,
};
PUCHAR ReasonString[] = {
/* 0 */ "Reserved",
/* 1 */ "Unspecified Reason",
/* 2 */ "Previous Auth no longer valid",
/* 3 */ "STA is leaving / has left",
/* 4 */ "DIS-ASSOC due to inactivity",
/* 5 */ "AP unable to hanle all associations",
/* 6 */ "class 2 error",
/* 7 */ "class 3 error",
/* 8 */ "STA is leaving / has left",
/* 9 */ "require auth before assoc/re-assoc",
/* 10 */ "Reserved",
/* 11 */ "Reserved",
/* 12 */ "Reserved",
/* 13 */ "invalid IE",
/* 14 */ "MIC error",
/* 15 */ "4-way handshake timeout",
/* 16 */ "2-way (group key) handshake timeout",
/* 17 */ "4-way handshake IE diff among AssosReq/Rsp/Beacon",
/* 18 */
};
extern UCHAR OfdmRateToRxwiMCS[];
// since RT61 has better RX sensitivity, we have to limit the TX ACK rate so it does not exceed our normal data TX rate.
// otherwise the WLAN peer may not be able to receive the ACK, and would then downgrade its data TX rate
ULONG BasicRateMask[12] = {0xfffff001 /* 1-Mbps */, 0xfffff003 /* 2 Mbps */, 0xfffff007 /* 5.5 */, 0xfffff00f /* 11 */,
0xfffff01f /* 6 */ , 0xfffff03f /* 9 */ , 0xfffff07f /* 12 */ , 0xfffff0ff /* 18 */,
0xfffff1ff /* 24 */ , 0xfffff3ff /* 36 */ , 0xfffff7ff /* 48 */ , 0xffffffff /* 54 */};
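// Editorial sketch (compiled out): ANDing the configured basic rate
// bitmap with BasicRateMask[tx rate index] clears every basic rate
// faster than the current data TX rate, so no ACK is ever requested at
// a rate the peer may not be able to receive.  Illustration only.
#if 0
static ULONG ClipBasicRates(ULONG BasicRateBitmap, int TxRateIndex)
{
	// e.g. TxRateIndex == 3 (11 Mbps) keeps only the 1/2/5.5/11 Mbps bits
	return BasicRateBitmap & BasicRateMask[TxRateIndex];
}
#endif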
UCHAR MULTICAST_ADDR[MAC_ADDR_LEN] = {0x1, 0x00, 0x00, 0x00, 0x00, 0x00};
UCHAR BROADCAST_ADDR[MAC_ADDR_LEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
UCHAR ZERO_MAC_ADDR[MAC_ADDR_LEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
// e.g. "RssiSafeLevelForTxRate[RATE_36]" means if the current RSSI is greater than
// this value, then it's guaranteed capable of operating in a 36 mbps TX rate in
// a clean environment.
// TxRate: 1 2 5.5 11 6 9 12 18 24 36 48 54 72 100
CHAR RssiSafeLevelForTxRate[] ={ -92, -91, -90, -87, -88, -86, -85, -83, -81, -78, -72, -71, -40, -40 };
UCHAR RateIdToMbps[] = { 1, 2, 5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 72, 100};
USHORT RateIdTo500Kbps[] = { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108, 144, 200};
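// Editorial sketch (compiled out): the two tables above pair up by
// index; RssiSafeLevelForTxRate[i] is the weakest RSSI at which
// RateIdToMbps[i] is still considered safe in a clean environment.
// A simple consumer might scan from the fastest rate down.  The
// helper below is made up for illustration.
#if 0
static UCHAR HighestSafeRateIdx(CHAR Rssi)
{
	int i;

	for (i = 13; i >= 0; i--)
		if (Rssi > RssiSafeLevelForTxRate[i])
			return (UCHAR)i;	// index into RateIdToMbps[]
	return 0;				// fall back to 1 Mbps
}
#endif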
UCHAR SsidIe = IE_SSID;
UCHAR SupRateIe = IE_SUPP_RATES;
UCHAR ExtRateIe = IE_EXT_SUPP_RATES;
UCHAR HtCapIe = IE_HT_CAP;
UCHAR AddHtInfoIe = IE_ADD_HT;
UCHAR NewExtChanIe = IE_SECONDARY_CH_OFFSET;
UCHAR ErpIe = IE_ERP;
UCHAR DsIe = IE_DS_PARM;
UCHAR TimIe = IE_TIM;
UCHAR WpaIe = IE_WPA;
UCHAR Wpa2Ie = IE_WPA2;
UCHAR IbssIe = IE_IBSS_PARM;
UCHAR Ccx2Ie = IE_CCX_V2;
extern UCHAR WPA_OUI[];
UCHAR SES_OUI[] = {0x00, 0x90, 0x4c};
UCHAR ZeroSsid[32] = {0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00};
// Reset the RFIC setting to new series
RTMP_RF_REGS RF2850RegTable[] = {
// ch R1 R2 R3(TX0~4=0) R4
{1, 0x98402ecc, 0x984c0786, 0x9816b455, 0x9800510b},
{2, 0x98402ecc, 0x984c0786, 0x98168a55, 0x9800519f},
{3, 0x98402ecc, 0x984c078a, 0x98168a55, 0x9800518b},
{4, 0x98402ecc, 0x984c078a, 0x98168a55, 0x9800519f},
{5, 0x98402ecc, 0x984c078e, 0x98168a55, 0x9800518b},
{6, 0x98402ecc, 0x984c078e, 0x98168a55, 0x9800519f},
{7, 0x98402ecc, 0x984c0792, 0x98168a55, 0x9800518b},
{8, 0x98402ecc, 0x984c0792, 0x98168a55, 0x9800519f},
{9, 0x98402ecc, 0x984c0796, 0x98168a55, 0x9800518b},
{10, 0x98402ecc, 0x984c0796, 0x98168a55, 0x9800519f},
{11, 0x98402ecc, 0x984c079a, 0x98168a55, 0x9800518b},
{12, 0x98402ecc, 0x984c079a, 0x98168a55, 0x9800519f},
{13, 0x98402ecc, 0x984c079e, 0x98168a55, 0x9800518b},
{14, 0x98402ecc, 0x984c07a2, 0x98168a55, 0x98005193},
// 802.11 UNI / HyperLan 2
{36, 0x98402ecc, 0x984c099a, 0x98158a55, 0x980ed1a3},
{38, 0x98402ecc, 0x984c099e, 0x98158a55, 0x980ed193},
{40, 0x98402ec8, 0x984c0682, 0x98158a55, 0x980ed183},
{44, 0x98402ec8, 0x984c0682, 0x98158a55, 0x980ed1a3},
{46, 0x98402ec8, 0x984c0686, 0x98158a55, 0x980ed18b},
{48, 0x98402ec8, 0x984c0686, 0x98158a55, 0x980ed19b},
{52, 0x98402ec8, 0x984c068a, 0x98158a55, 0x980ed193},
{54, 0x98402ec8, 0x984c068a, 0x98158a55, 0x980ed1a3},
{56, 0x98402ec8, 0x984c068e, 0x98158a55, 0x980ed18b},
{60, 0x98402ec8, 0x984c0692, 0x98158a55, 0x980ed183},
{62, 0x98402ec8, 0x984c0692, 0x98158a55, 0x980ed193},
{64, 0x98402ec8, 0x984c0692, 0x98158a55, 0x980ed1a3}, // Plugfest#4, Day4, change RFR3 left4th 9->5.
// 802.11 HyperLan 2
{100, 0x98402ec8, 0x984c06b2, 0x98178a55, 0x980ed783},
// 2008.04.30 modified
// The system team has an action item (AN) to improve the EVM value
// for channels 102 to 108 for the RT2850/RT2750 dual band solution.
{102, 0x98402ec8, 0x985c06b2, 0x98578a55, 0x980ed793},
{104, 0x98402ec8, 0x985c06b2, 0x98578a55, 0x980ed1a3},
{108, 0x98402ecc, 0x985c0a32, 0x98578a55, 0x980ed193},
{110, 0x98402ecc, 0x984c0a36, 0x98178a55, 0x980ed183},
{112, 0x98402ecc, 0x984c0a36, 0x98178a55, 0x980ed19b},
{116, 0x98402ecc, 0x984c0a3a, 0x98178a55, 0x980ed1a3},
{118, 0x98402ecc, 0x984c0a3e, 0x98178a55, 0x980ed193},
{120, 0x98402ec4, 0x984c0382, 0x98178a55, 0x980ed183},
{124, 0x98402ec4, 0x984c0382, 0x98178a55, 0x980ed193},
{126, 0x98402ec4, 0x984c0382, 0x98178a55, 0x980ed15b}, // 0x980ed1bb->0x980ed15b required by Rory 20070927
{128, 0x98402ec4, 0x984c0382, 0x98178a55, 0x980ed1a3},
{132, 0x98402ec4, 0x984c0386, 0x98178a55, 0x980ed18b},
{134, 0x98402ec4, 0x984c0386, 0x98178a55, 0x980ed193},
{136, 0x98402ec4, 0x984c0386, 0x98178a55, 0x980ed19b},
{140, 0x98402ec4, 0x984c038a, 0x98178a55, 0x980ed183},
// 802.11 UNII
{149, 0x98402ec4, 0x984c038a, 0x98178a55, 0x980ed1a7},
{151, 0x98402ec4, 0x984c038e, 0x98178a55, 0x980ed187},
{153, 0x98402ec4, 0x984c038e, 0x98178a55, 0x980ed18f},
{157, 0x98402ec4, 0x984c038e, 0x98178a55, 0x980ed19f},
{159, 0x98402ec4, 0x984c038e, 0x98178a55, 0x980ed1a7},
{161, 0x98402ec4, 0x984c0392, 0x98178a55, 0x980ed187},
{165, 0x98402ec4, 0x984c0392, 0x98178a55, 0x980ed197},
// Japan
{184, 0x95002ccc, 0x9500491e, 0x9509be55, 0x950c0a0b},
{188, 0x95002ccc, 0x95004922, 0x9509be55, 0x950c0a13},
{192, 0x95002ccc, 0x95004926, 0x9509be55, 0x950c0a1b},
{196, 0x95002ccc, 0x9500492a, 0x9509be55, 0x950c0a23},
{208, 0x95002ccc, 0x9500493a, 0x9509be55, 0x950c0a13},
{212, 0x95002ccc, 0x9500493e, 0x9509be55, 0x950c0a1b},
{216, 0x95002ccc, 0x95004982, 0x9509be55, 0x950c0a23},
// still lacking MMAC (Japan) channels 34, 38, 42, 46
};
UCHAR NUM_OF_2850_CHNL = (sizeof(RF2850RegTable) / sizeof(RTMP_RF_REGS));
FREQUENCY_ITEM FreqItems3020[] =
{
/**************************************************/
// ISM : 2.4 to 2.483 GHz //
/**************************************************/
// 11g
/**************************************************/
//-CH---N-------R---K-----------
{1, 241, 2, 2},
{2, 241, 2, 7},
{3, 242, 2, 2},
{4, 242, 2, 7},
{5, 243, 2, 2},
{6, 243, 2, 7},
{7, 244, 2, 2},
{8, 244, 2, 7},
{9, 245, 2, 2},
{10, 245, 2, 7},
{11, 246, 2, 2},
{12, 246, 2, 7},
{13, 247, 2, 2},
{14, 248, 2, 4},
};
UCHAR NUM_OF_3020_CHNL=(sizeof(FreqItems3020) / sizeof(FREQUENCY_ITEM));
/*
==========================================================================
Description:
initialize the MLME task and its data structure (queue, spinlock,
timer, state machines).
IRQL = PASSIVE_LEVEL
Return:
always return NDIS_STATUS_SUCCESS
==========================================================================
*/
NDIS_STATUS MlmeInit(
IN PRTMP_ADAPTER pAd)
{
NDIS_STATUS Status = NDIS_STATUS_SUCCESS;
DBGPRINT(RT_DEBUG_TRACE, ("--> MLME Initialize\n"));
do
{
Status = MlmeQueueInit(&pAd->Mlme.Queue);
if(Status != NDIS_STATUS_SUCCESS)
break;
pAd->Mlme.bRunning = FALSE;
NdisAllocateSpinLock(&pAd->Mlme.TaskLock);
{
BssTableInit(&pAd->ScanTab);
// init STA state machines
AssocStateMachineInit(pAd, &pAd->Mlme.AssocMachine, pAd->Mlme.AssocFunc);
AuthStateMachineInit(pAd, &pAd->Mlme.AuthMachine, pAd->Mlme.AuthFunc);
AuthRspStateMachineInit(pAd, &pAd->Mlme.AuthRspMachine, pAd->Mlme.AuthRspFunc);
SyncStateMachineInit(pAd, &pAd->Mlme.SyncMachine, pAd->Mlme.SyncFunc);
WpaPskStateMachineInit(pAd, &pAd->Mlme.WpaPskMachine, pAd->Mlme.WpaPskFunc);
AironetStateMachineInit(pAd, &pAd->Mlme.AironetMachine, pAd->Mlme.AironetFunc);
// Since we are using switch/case to implement it, the init is different from the above
// state machine init
MlmeCntlInit(pAd, &pAd->Mlme.CntlMachine, NULL);
}
ActionStateMachineInit(pAd, &pAd->Mlme.ActMachine, pAd->Mlme.ActFunc);
// Init mlme periodic timer
RTMPInitTimer(pAd, &pAd->Mlme.PeriodicTimer, GET_TIMER_FUNCTION(MlmePeriodicExec), pAd, TRUE);
// Set mlme periodic timer
RTMPSetTimer(&pAd->Mlme.PeriodicTimer, MLME_TASK_EXEC_INTV);
// software-based RX Antenna diversity
RTMPInitTimer(pAd, &pAd->Mlme.RxAntEvalTimer, GET_TIMER_FUNCTION(AsicRxAntEvalTimeout), pAd, FALSE);
#ifdef RT2860
{
if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_ADVANCE_POWER_SAVE_PCIE_DEVICE))
{
// only PCIe cards need these two timers
RTMPInitTimer(pAd, &pAd->Mlme.PsPollTimer, GET_TIMER_FUNCTION(PsPollWakeExec), pAd, FALSE);
RTMPInitTimer(pAd, &pAd->Mlme.RadioOnOffTimer, GET_TIMER_FUNCTION(RadioOnExec), pAd, FALSE);
}
}
#endif
} while (FALSE);
DBGPRINT(RT_DEBUG_TRACE, ("<-- MLME Initialize\n"));
return Status;
}
/*
==========================================================================
Description:
main loop of the MLME
Pre:
Mlme has to be initialized, and there is something inside the queue
Note:
This function is invoked from MPSetInformation and MPReceive;
This task guarantees that only one MlmeHandler will run.
IRQL = DISPATCH_LEVEL
==========================================================================
*/
VOID MlmeHandler(
IN PRTMP_ADAPTER pAd)
{
MLME_QUEUE_ELEM *Elem = NULL;
// Only accept MLME frames from the peer side; no other (control/data) frame should
// get into this state machine
NdisAcquireSpinLock(&pAd->Mlme.TaskLock);
if(pAd->Mlme.bRunning)
{
NdisReleaseSpinLock(&pAd->Mlme.TaskLock);
return;
}
else
{
pAd->Mlme.bRunning = TRUE;
}
NdisReleaseSpinLock(&pAd->Mlme.TaskLock);
while (!MlmeQueueEmpty(&pAd->Mlme.Queue))
{
if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_MLME_RESET_IN_PROGRESS) ||
RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_HALT_IN_PROGRESS) ||
RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_NIC_NOT_EXIST))
{
DBGPRINT(RT_DEBUG_TRACE, ("Device Halted or Removed or MlmeRest, exit MlmeHandler! (queue num = %ld)\n", pAd->Mlme.Queue.Num));
break;
}
//From message type, determine which state machine I should drive
if (MlmeDequeue(&pAd->Mlme.Queue, &Elem))
{
#ifdef RT2870
if (Elem->MsgType == MT2_RESET_CONF)
{
DBGPRINT_RAW(RT_DEBUG_TRACE, ("!!! reset MLME state machine !!!\n"));
MlmeRestartStateMachine(pAd);
Elem->Occupied = FALSE;
Elem->MsgLen = 0;
continue;
}
#endif // RT2870 //
// if dequeue success
switch (Elem->Machine)
{
// STA state machines
case ASSOC_STATE_MACHINE:
StateMachinePerformAction(pAd, &pAd->Mlme.AssocMachine, Elem);
break;
case AUTH_STATE_MACHINE:
StateMachinePerformAction(pAd, &pAd->Mlme.AuthMachine, Elem);
break;
case AUTH_RSP_STATE_MACHINE:
StateMachinePerformAction(pAd, &pAd->Mlme.AuthRspMachine, Elem);
break;
case SYNC_STATE_MACHINE:
StateMachinePerformAction(pAd, &pAd->Mlme.SyncMachine, Elem);
break;
case MLME_CNTL_STATE_MACHINE:
MlmeCntlMachinePerformAction(pAd, &pAd->Mlme.CntlMachine, Elem);
break;
case WPA_PSK_STATE_MACHINE:
StateMachinePerformAction(pAd, &pAd->Mlme.WpaPskMachine, Elem);
break;
case AIRONET_STATE_MACHINE:
StateMachinePerformAction(pAd, &pAd->Mlme.AironetMachine, Elem);
break;
case ACTION_STATE_MACHINE:
StateMachinePerformAction(pAd, &pAd->Mlme.ActMachine, Elem);
break;
default:
DBGPRINT(RT_DEBUG_TRACE, ("ERROR: Illegal machine %ld in MlmeHandler()\n", Elem->Machine));
break;
} // end of switch
// free MLME element
Elem->Occupied = FALSE;
Elem->MsgLen = 0;
}
else {
DBGPRINT_ERR(("MlmeHandler: MlmeQueue empty\n"));
}
}
NdisAcquireSpinLock(&pAd->Mlme.TaskLock);
pAd->Mlme.bRunning = FALSE;
NdisReleaseSpinLock(&pAd->Mlme.TaskLock);
}
/*
==========================================================================
Description:
Destructor of MLME (Destroy queue, state machine, spin lock and timer)
Parameters:
Adapter - NIC Adapter pointer
Post:
The MLME task will no longer work properly
IRQL = PASSIVE_LEVEL
==========================================================================
*/
VOID MlmeHalt(
IN PRTMP_ADAPTER pAd)
{
BOOLEAN Cancelled;
#ifdef RT3070
UINT32 TxPinCfg = 0x00050F0F;
#endif // RT3070 //
DBGPRINT(RT_DEBUG_TRACE, ("==> MlmeHalt\n"));
if (!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_NIC_NOT_EXIST))
{
// disable BEACON generation and other BEACON related hardware timers
AsicDisableSync(pAd);
}
{
// Cancel pending timers
RTMPCancelTimer(&pAd->MlmeAux.AssocTimer, &Cancelled);
RTMPCancelTimer(&pAd->MlmeAux.ReassocTimer, &Cancelled);
RTMPCancelTimer(&pAd->MlmeAux.DisassocTimer, &Cancelled);
RTMPCancelTimer(&pAd->MlmeAux.AuthTimer, &Cancelled);
RTMPCancelTimer(&pAd->MlmeAux.BeaconTimer, &Cancelled);
RTMPCancelTimer(&pAd->MlmeAux.ScanTimer, &Cancelled);
#ifdef RT2860
if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_ADVANCE_POWER_SAVE_PCIE_DEVICE))
{
RTMPCancelTimer(&pAd->Mlme.PsPollTimer, &Cancelled);
RTMPCancelTimer(&pAd->Mlme.RadioOnOffTimer, &Cancelled);
}
#endif
}
RTMPCancelTimer(&pAd->Mlme.PeriodicTimer, &Cancelled);
RTMPCancelTimer(&pAd->Mlme.RxAntEvalTimer, &Cancelled);
if (!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_NIC_NOT_EXIST))
{
// Set LED
RTMPSetLED(pAd, LED_HALT);
RTMPSetSignalLED(pAd, -100); // Force the signal strength LED off; the firmware does not do it.
#ifdef RT2870
{
LED_CFG_STRUC LedCfg;
RTMP_IO_READ32(pAd, LED_CFG, &LedCfg.word);
LedCfg.field.LedPolar = 0;
LedCfg.field.RLedMode = 0;
LedCfg.field.GLedMode = 0;
LedCfg.field.YLedMode = 0;
RTMP_IO_WRITE32(pAd, LED_CFG, LedCfg.word);
}
#endif // RT2870 //
#ifdef RT3070
//
// Turn off LNA_PE
//
if (IS_RT3070(pAd) || IS_RT3071(pAd))
{
TxPinCfg &= 0xFFFFF0F0;
RTUSBWriteMACRegister(pAd, TX_PIN_CFG, TxPinCfg);
}
#endif // RT3070 //
}
RTMPusecDelay(5000); // 5 msec to guarantee the Ant Diversity timer is canceled
MlmeQueueDestroy(&pAd->Mlme.Queue);
NdisFreeSpinLock(&pAd->Mlme.TaskLock);
DBGPRINT(RT_DEBUG_TRACE, ("<== MlmeHalt\n"));
}
VOID MlmeResetRalinkCounters(
IN PRTMP_ADAPTER pAd)
{
pAd->RalinkCounters.LastOneSecRxOkDataCnt = pAd->RalinkCounters.OneSecRxOkDataCnt;
// clear all OneSecxxx counters.
pAd->RalinkCounters.OneSecBeaconSentCnt = 0;
pAd->RalinkCounters.OneSecFalseCCACnt = 0;
pAd->RalinkCounters.OneSecRxFcsErrCnt = 0;
pAd->RalinkCounters.OneSecRxOkCnt = 0;
pAd->RalinkCounters.OneSecTxFailCount = 0;
pAd->RalinkCounters.OneSecTxNoRetryOkCount = 0;
pAd->RalinkCounters.OneSecTxRetryOkCount = 0;
pAd->RalinkCounters.OneSecRxOkDataCnt = 0;
// TODO: for debug only. to be removed
pAd->RalinkCounters.OneSecOsTxCount[QID_AC_BE] = 0;
pAd->RalinkCounters.OneSecOsTxCount[QID_AC_BK] = 0;
pAd->RalinkCounters.OneSecOsTxCount[QID_AC_VI] = 0;
pAd->RalinkCounters.OneSecOsTxCount[QID_AC_VO] = 0;
pAd->RalinkCounters.OneSecDmaDoneCount[QID_AC_BE] = 0;
pAd->RalinkCounters.OneSecDmaDoneCount[QID_AC_BK] = 0;
pAd->RalinkCounters.OneSecDmaDoneCount[QID_AC_VI] = 0;
pAd->RalinkCounters.OneSecDmaDoneCount[QID_AC_VO] = 0;
pAd->RalinkCounters.OneSecTxDoneCount = 0;
pAd->RalinkCounters.OneSecRxCount = 0;
pAd->RalinkCounters.OneSecTxAggregationCount = 0;
pAd->RalinkCounters.OneSecRxAggregationCount = 0;
return;
}
unsigned long rx_AMSDU;
unsigned long rx_Total;
/*
==========================================================================
Description:
This routine is executed periodically to -
1. Decide if it's the right time to turn on the PwrMgmt bit of all
outgoing frames
2. Calculate ChannelQuality based on statistics of the last
period, so that the TX rate won't toggle very frequently between a
successful TX and a failed TX.
3. If the calculated ChannelQuality indicates the current connection is not
healthy, then a ROAMing attempt is tried here.
IRQL = DISPATCH_LEVEL
==========================================================================
*/
#define ADHOC_BEACON_LOST_TIME (8*OS_HZ) // 8 sec
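// Timing note (a reading of the code, assuming the usual defines of this
// driver): OS_HZ is the OS tick rate (jiffies per second on Linux), so
// 8*OS_HZ expresses 8 seconds in the same units as Now32. MlmePeriodicExec()
// itself is assumed to fire every 100 ms (MLME_TASK_EXEC_INTERVAL), so the
// "% 5" blocks below run every 500 ms and the "% MLME_TASK_EXEC_MULTIPLE"
// blocks run once per second.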
VOID MlmePeriodicExec(
IN PVOID SystemSpecific1,
IN PVOID FunctionContext,
IN PVOID SystemSpecific2,
IN PVOID SystemSpecific3)
{
ULONG TxTotalCnt;
PRTMP_ADAPTER pAd = (RTMP_ADAPTER *)FunctionContext;
#ifdef RT2860
//Baron 2008/07/10
//printk("Baron_Test:\t%s", RTMPGetRalinkEncryModeStr(pAd->StaCfg.WepStatus));
//If the STA security setting is OPEN or WEP, pAd->StaCfg.WpaSupplicantUP = 0.
//If the STA security setting is WPAPSK or WPA2PSK, pAd->StaCfg.WpaSupplicantUP = 1.
if(pAd->StaCfg.WepStatus<2)
{
pAd->StaCfg.WpaSupplicantUP = 0;
}
else
{
pAd->StaCfg.WpaSupplicantUP = 1;
}
{
// If hardware-controlled radio is enabled, we have to check GPIO pin2 every 2 seconds.
// Moved here because the code below returns early when the radio is off.
if ((pAd->Mlme.PeriodicRound % (MLME_TASK_EXEC_MULTIPLE * 2) == 0) &&
(pAd->StaCfg.bHardwareRadio == TRUE) &&
(RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_START_UP)) && // test (not set) the START_UP flag
(!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_NIC_NOT_EXIST)) &&
(!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_HALT_IN_PROGRESS)))
{
UINT32 data = 0;
// Read GPIO pin2 as Hardware controlled radio state
RTMP_IO_FORCE_READ32(pAd, GPIO_CTRL_CFG, &data);
if (data & 0x04)
{
pAd->StaCfg.bHwRadio = TRUE;
}
else
{
pAd->StaCfg.bHwRadio = FALSE;
}
if (pAd->StaCfg.bRadio != (pAd->StaCfg.bHwRadio && pAd->StaCfg.bSwRadio))
{
pAd->StaCfg.bRadio = (pAd->StaCfg.bHwRadio && pAd->StaCfg.bSwRadio);
if (pAd->StaCfg.bRadio == TRUE)
{
MlmeRadioOn(pAd);
// Update extra information
pAd->ExtraInfo = EXTRA_INFO_CLEAR;
}
else
{
MlmeRadioOff(pAd);
// Update extra information
pAd->ExtraInfo = HW_RADIO_OFF;
}
}
}
}
#endif /* RT2860 */
// Do nothing if the driver is entering the halt state.
// This can happen when the timer has already fired before MlmeHalt() cancels it.
if ((RTMP_TEST_FLAG(pAd, (fRTMP_ADAPTER_HALT_IN_PROGRESS |
fRTMP_ADAPTER_RADIO_OFF |
fRTMP_ADAPTER_RADIO_MEASUREMENT |
fRTMP_ADAPTER_RESET_IN_PROGRESS))))
return;
#ifdef RT2860
{
if ((pAd->RalinkCounters.LastReceivedByteCount == pAd->RalinkCounters.ReceivedByteCount) && (pAd->StaCfg.bRadio == TRUE))
{
// If ReceiveByteCount doesn't change, increase SameRxByteCount by 1.
pAd->SameRxByteCount++;
}
else
pAd->SameRxByteCount = 0;
// If it still doesn't work after the BBP reset, reset the PBF and MAC as well.
if (pAd->SameRxByteCount == 702)
{
pAd->SameRxByteCount = 0;
AsicResetPBF(pAd);
AsicResetMAC(pAd);
}
// If SameRxByteCount keeps happening for 2 seconds in infra mode, or for 60 seconds in idle mode.
if (((INFRA_ON(pAd)) && (pAd->SameRxByteCount > 20)) || ((IDLE_ON(pAd)) && (pAd->SameRxByteCount > 600)))
{
if ((pAd->StaCfg.bRadio == TRUE) && (pAd->SameRxByteCount < 700))
{
DBGPRINT(RT_DEBUG_TRACE, ("---> SameRxByteCount = %lu !!!!!!!!!!!!!!! \n", pAd->SameRxByteCount));
pAd->SameRxByteCount = 700;
AsicResetBBP(pAd);
}
}
// Update lastReceiveByteCount.
pAd->RalinkCounters.LastReceivedByteCount = pAd->RalinkCounters.ReceivedByteCount;
if ((pAd->CheckDmaBusyCount > 3) && (IDLE_ON(pAd)))
{
pAd->CheckDmaBusyCount = 0;
AsicResetFromDMABusy(pAd);
}
}
#endif /* RT2860 */
RT28XX_MLME_PRE_SANITY_CHECK(pAd);
{
// Do nothing if monitor mode is on
if (MONITOR_ON(pAd))
return;
if (pAd->Mlme.PeriodicRound & 0x1)
{
// This is the fix for the WiFi 11n extension-channel-overlapping test case (for 2860D)
if (((pAd->MACVersion & 0xffff) == 0x0101) &&
(STA_TGN_WIFI_ON(pAd)) &&
(pAd->CommonCfg.IOTestParm.bToggle == FALSE))
{
RTMP_IO_WRITE32(pAd, TXOP_CTRL_CFG, 0x24Bf);
pAd->CommonCfg.IOTestParm.bToggle = TRUE;
}
else if ((STA_TGN_WIFI_ON(pAd)) &&
((pAd->MACVersion & 0xffff) == 0x0101))
{
RTMP_IO_WRITE32(pAd, TXOP_CTRL_CFG, 0x243f);
pAd->CommonCfg.IOTestParm.bToggle = FALSE;
}
}
}
pAd->bUpdateBcnCntDone = FALSE;
// RECBATimerTimeout(SystemSpecific1,FunctionContext,SystemSpecific2,SystemSpecific3);
pAd->Mlme.PeriodicRound ++;
#ifdef RT3070
// Executed every 100 ms: update the TX FIFO counters used for TX rate updates.
NICUpdateFifoStaCounters(pAd);
#endif // RT3070 //
// execute every 500ms
if ((pAd->Mlme.PeriodicRound % 5 == 0) && RTMPAutoRateSwitchCheck(pAd)/*(OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_TX_RATE_SWITCH_ENABLED))*/)
{
// perform dynamic tx rate switching based on past TX history
{
if ((OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_MEDIA_STATE_CONNECTED)
)
&& (!OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_DOZE)))
MlmeDynamicTxRateSwitching(pAd);
}
}
// Normal 1 second Mlme PeriodicExec.
if (pAd->Mlme.PeriodicRound %MLME_TASK_EXEC_MULTIPLE == 0)
{
pAd->Mlme.OneSecPeriodicRound ++;
if (rx_Total)
{
// reset counters
rx_AMSDU = 0;
rx_Total = 0;
}
// Media status changed, report to NDIS
if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_MEDIA_STATE_CHANGE))
{
RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_MEDIA_STATE_CHANGE);
if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_MEDIA_STATE_CONNECTED))
{
pAd->IndicateMediaState = NdisMediaStateConnected;
RTMP_IndicateMediaState(pAd);
}
else
{
pAd->IndicateMediaState = NdisMediaStateDisconnected;
RTMP_IndicateMediaState(pAd);
}
}
NdisGetSystemUpTime(&pAd->Mlme.Now32);
// add the most up-to-date h/w raw counters into software variables, so that
// the dynamic tuning mechanisms below are based on the most up-to-date information
NICUpdateRawCounters(pAd);
#ifdef RT2870
RT2870_WatchDog(pAd);
#endif // RT2870 //
// Statistics are needed after the counters are read, so this is placed after NICUpdateRawCounters()
ORIBATimerTimeout(pAd);
// The antenna-evaluation period is chosen according to traffic load
if (pAd->Mlme.bEnableAutoAntennaCheck)
{
TxTotalCnt = pAd->RalinkCounters.OneSecTxNoRetryOkCount +
pAd->RalinkCounters.OneSecTxRetryOkCount +
pAd->RalinkCounters.OneSecTxFailCount;
// dynamically adjust the antenna evaluation period according to traffic
if (TxTotalCnt > 50)
{
if (pAd->Mlme.OneSecPeriodicRound % 10 == 0)
{
AsicEvaluateRxAnt(pAd);
}
}
else
{
if (pAd->Mlme.OneSecPeriodicRound % 3 == 0)
{
AsicEvaluateRxAnt(pAd);
}
}
}
STAMlmePeriodicExec(pAd);
MlmeResetRalinkCounters(pAd);
{
#ifdef RT2860
if (!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_NIC_NOT_EXIST) && (pAd->bPCIclkOff == FALSE))
#endif
{
// When Adhoc beaconing is enabled together with RTS/CTS, there is a chance that the hardware MAC FSM runs into a deadlock
// and sends CTS-to-self over and over.
// Software patch solution:
// 1. Poll debug state register 0x10F4 once per second.
// 2. If in 0x10F4 ((bit29==1) && (bit7==1)) OR ((bit29==1) && (bit5==1)), the deadlock has occurred.
// 3. If the deadlock occurred, reset the MAC/BBP by setting 0x1004 to 0x0001 for a while, then setting it back to 0x000C.
UINT32 MacReg = 0;
RTMP_IO_READ32(pAd, 0x10F4, &MacReg);
if (((MacReg & 0x20000000) && (MacReg & 0x80)) || ((MacReg & 0x20000000) && (MacReg & 0x20)))
{
RTMP_IO_WRITE32(pAd, MAC_SYS_CTRL, 0x1);
RTMPusecDelay(1);
RTMP_IO_WRITE32(pAd, MAC_SYS_CTRL, 0xC);
DBGPRINT(RT_DEBUG_WARN,("Warning, MAC specific condition occurs \n"));
}
}
}
RT28XX_MLME_HANDLER(pAd);
}
pAd->bUpdateBcnCntDone = FALSE;
}
VOID STAMlmePeriodicExec(
PRTMP_ADAPTER pAd)
{
#ifdef RT2860
ULONG TxTotalCnt;
#endif
#ifdef RT2870
ULONG TxTotalCnt;
int i;
#endif
if (pAd->StaCfg.WpaSupplicantUP == WPA_SUPPLICANT_DISABLE)
{
// WPA MIC error should block association attempt for 60 seconds
if (pAd->StaCfg.bBlockAssoc && (pAd->StaCfg.LastMicErrorTime + (60 * OS_HZ) < pAd->Mlme.Now32))
pAd->StaCfg.bBlockAssoc = FALSE;
}
#ifdef RT2860
//Baron 2008/07/10
//printk("Baron_Test:\t%s", RTMPGetRalinkEncryModeStr(pAd->StaCfg.WepStatus));
//If the STA security setting is OPEN or WEP, pAd->StaCfg.WpaSupplicantUP = 0.
//If the STA security setting is WPAPSK or WPA2PSK, pAd->StaCfg.WpaSupplicantUP = 1.
if(pAd->StaCfg.WepStatus<2)
{
pAd->StaCfg.WpaSupplicantUP = 0;
}
else
{
pAd->StaCfg.WpaSupplicantUP = 1;
}
#endif
if ((pAd->PreMediaState != pAd->IndicateMediaState) && (pAd->CommonCfg.bWirelessEvent))
{
if (pAd->IndicateMediaState == NdisMediaStateConnected)
{
RTMPSendWirelessEvent(pAd, IW_STA_LINKUP_EVENT_FLAG, pAd->MacTab.Content[BSSID_WCID].Addr, BSS0, 0);
}
pAd->PreMediaState = pAd->IndicateMediaState;
}
#ifdef RT2860
if ((pAd->OpMode == OPMODE_STA) && (IDLE_ON(pAd)) &&
(OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_ADVANCE_POWER_SAVE_PCIE_DEVICE)) &&
(pAd->Mlme.SyncMachine.CurrState == SYNC_IDLE) &&
(pAd->Mlme.CntlMachine.CurrState == CNTL_IDLE) &&
(RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_START_UP)) && // test (not set) the START_UP flag
(!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_IDLE_RADIO_OFF)))
{
RT28xxPciAsicRadioOff(pAd, GUI_IDLE_POWER_SAVE, 0);
}
#endif
AsicStaBbpTuning(pAd);
TxTotalCnt = pAd->RalinkCounters.OneSecTxNoRetryOkCount +
pAd->RalinkCounters.OneSecTxRetryOkCount +
pAd->RalinkCounters.OneSecTxFailCount;
if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_MEDIA_STATE_CONNECTED))
{
// update channel quality for Roaming and UI LinkQuality display
MlmeCalculateChannelQuality(pAd, pAd->Mlme.Now32);
}
// must be AFTER MlmeDynamicTxRateSwitching() because it needs to know whether
// the radio is currently in a noisy environment
if (!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_BSS_SCAN_IN_PROGRESS))
AsicAdjustTxPower(pAd);
if (INFRA_ON(pAd))
{
// Is PSM bit consistent with user power management policy?
// This is the only place that will set PSM bit ON.
if (!OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_DOZE))
MlmeCheckPsmChange(pAd, pAd->Mlme.Now32);
pAd->RalinkCounters.LastOneSecTotalTxCount = TxTotalCnt;
if ((pAd->StaCfg.LastBeaconRxTime + 1*OS_HZ < pAd->Mlme.Now32) &&
(!RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_BSS_SCAN_IN_PROGRESS)) &&
((TxTotalCnt + pAd->RalinkCounters.OneSecRxOkCnt < 600)))
{
RTMPSetAGCInitValue(pAd, BW_20);
DBGPRINT(RT_DEBUG_TRACE, ("MMCHK - No BEACON. restore R66 to the low bound(%d) \n", (0x2E + GET_LNA_GAIN(pAd))));
}
{
if (pAd->CommonCfg.bAPSDCapable && pAd->CommonCfg.APEdcaParm.bAPSDCapable)
{
// When APSD is enabled, the period becomes 20 sec
if ((pAd->Mlme.OneSecPeriodicRound % 20) == 8)
RTMPSendNullFrame(pAd, pAd->CommonCfg.TxRate, TRUE);
}
else
{
// Send out a NULL frame every 10 sec to inform the AP that this STA is still alive (avoid being aged out)
if ((pAd->Mlme.OneSecPeriodicRound % 10) == 8)
{
if (pAd->CommonCfg.bWmmCapable)
RTMPSendNullFrame(pAd, pAd->CommonCfg.TxRate, TRUE);
else
RTMPSendNullFrame(pAd, pAd->CommonCfg.TxRate, FALSE);
}
}
}
if (CQI_IS_DEAD(pAd->Mlme.ChannelQuality))
{
DBGPRINT(RT_DEBUG_TRACE, ("MMCHK - No BEACON. Dead CQI. Auto Recovery attempt #%ld\n", pAd->RalinkCounters.BadCQIAutoRecoveryCount));
pAd->StaCfg.CCXAdjacentAPReportFlag = TRUE;
pAd->StaCfg.CCXAdjacentAPLinkDownTime = pAd->StaCfg.LastBeaconRxTime;
// Lost AP, send disconnect & link down event
LinkDown(pAd, FALSE);
{
union iwreq_data wrqu;
memset(wrqu.ap_addr.sa_data, 0, MAC_ADDR_LEN);
wireless_send_event(pAd->net_dev, SIOCGIWAP, &wrqu, NULL);
}
MlmeAutoReconnectLastSSID(pAd);
}
else if (CQI_IS_BAD(pAd->Mlme.ChannelQuality))
{
pAd->RalinkCounters.BadCQIAutoRecoveryCount ++;
DBGPRINT(RT_DEBUG_TRACE, ("MMCHK - Bad CQI. Auto Recovery attempt #%ld\n", pAd->RalinkCounters.BadCQIAutoRecoveryCount));
MlmeAutoReconnectLastSSID(pAd);
}
// Add auto seamless roaming
if (pAd->StaCfg.bFastRoaming)
{
SHORT dBmToRoam = (SHORT)pAd->StaCfg.dBmToRoam;
DBGPRINT(RT_DEBUG_TRACE, ("Rssi=%d, dBmToRoam=%d\n", RTMPMaxRssi(pAd, pAd->StaCfg.RssiSample.LastRssi0, pAd->StaCfg.RssiSample.LastRssi1, pAd->StaCfg.RssiSample.LastRssi2), (CHAR)dBmToRoam));
if (RTMPMaxRssi(pAd, pAd->StaCfg.RssiSample.LastRssi0, pAd->StaCfg.RssiSample.LastRssi1, pAd->StaCfg.RssiSample.LastRssi2) <= (CHAR)dBmToRoam)
{
MlmeCheckForFastRoaming(pAd, pAd->Mlme.Now32);
}
}
}
else if (ADHOC_ON(pAd))
{
#ifdef RT2860
// 2003-04-17 john. This is a patch: the driver forces a BEACON out if the ASIC failed
// the "TX BEACON competition" for the entire past second,
// so that even when the ASIC's BEACON-generation engine has been blocked
// by a peer's BEACON due to a slower system clock, this STA can still send out
// a minimum number of BEACONs to tell the peer it is alive.
// The drawback is that these BEACONs won't be well aligned at the TBTT boundary.
// EnqueueBeaconFrame(pAd); // software send BEACON
// If all 11b peers have left this BSS for more than 5 seconds, update the TX rates
// and restore the outgoing BEACON to support B/G-mixed mode
if ((pAd->CommonCfg.Channel <= 14) &&
(pAd->CommonCfg.MaxTxRate <= RATE_11) &&
(pAd->CommonCfg.MaxDesiredRate > RATE_11) &&
((pAd->StaCfg.Last11bBeaconRxTime + 5*OS_HZ) < pAd->Mlme.Now32))
{
DBGPRINT(RT_DEBUG_TRACE, ("MMCHK - last 11B peer left, update Tx rates\n"));
NdisMoveMemory(pAd->StaActive.SupRate, pAd->CommonCfg.SupRate, MAX_LEN_OF_SUPPORTED_RATES);
pAd->StaActive.SupRateLen = pAd->CommonCfg.SupRateLen;
MlmeUpdateTxRates(pAd, FALSE, 0);
MakeIbssBeacon(pAd); // re-build BEACON frame
AsicEnableIbssSync(pAd); // copy to on-chip memory
pAd->StaCfg.AdhocBOnlyJoined = FALSE;
}
if (pAd->CommonCfg.PhyMode >= PHY_11ABGN_MIXED)
{
if ((pAd->StaCfg.AdhocBGJoined) &&
((pAd->StaCfg.Last11gBeaconRxTime + 5 * OS_HZ) < pAd->Mlme.Now32))
{
DBGPRINT(RT_DEBUG_TRACE, ("MMCHK - last 11G peer left\n"));
pAd->StaCfg.AdhocBGJoined = FALSE;
}
if ((pAd->StaCfg.Adhoc20NJoined) &&
((pAd->StaCfg.Last20NBeaconRxTime + 5 * OS_HZ) < pAd->Mlme.Now32))
{
DBGPRINT(RT_DEBUG_TRACE, ("MMCHK - last 20MHz N peer left\n"));
pAd->StaCfg.Adhoc20NJoined = FALSE;
}
}
#endif /* RT2860 */
//radar detect
if ((pAd->CommonCfg.Channel > 14)
&& (pAd->CommonCfg.bIEEE80211H == 1)
&& RadarChannelCheck(pAd, pAd->CommonCfg.Channel))
{
RadarDetectPeriodic(pAd);
}
// If all peers leave and this STA becomes the last one in this IBSS, change MediaState
// to DISCONNECTED, but keep holding this IBSS (i.e. keep sending BEACONs) so that other
// STAs can join later.
if ((pAd->StaCfg.LastBeaconRxTime + ADHOC_BEACON_LOST_TIME < pAd->Mlme.Now32) &&
OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_MEDIA_STATE_CONNECTED))
{
MLME_START_REQ_STRUCT StartReq;
DBGPRINT(RT_DEBUG_TRACE, ("MMCHK - excessive BEACON lost, last STA in this IBSS, MediaState=Disconnected\n"));
LinkDown(pAd, FALSE);
StartParmFill(pAd, &StartReq, pAd->MlmeAux.Ssid, pAd->MlmeAux.SsidLen);
MlmeEnqueue(pAd, SYNC_STATE_MACHINE, MT2_MLME_START_REQ, sizeof(MLME_START_REQ_STRUCT), &StartReq);
pAd->Mlme.CntlMachine.CurrState = CNTL_WAIT_START;
}
#ifdef RT2870
for (i = 1; i < MAX_LEN_OF_MAC_TABLE; i++)
{
MAC_TABLE_ENTRY *pEntry = &pAd->MacTab.Content[i];
if (pEntry->ValidAsCLI == FALSE)
continue;
if (pEntry->LastBeaconRxTime + ADHOC_BEACON_LOST_TIME < pAd->Mlme.Now32)
MacTableDeleteEntry(pAd, pEntry->Aid, pEntry->Addr);
}
#endif
}
else // neither INFRA nor ADHOC connection
{
if (pAd->StaCfg.bScanReqIsFromWebUI &&
((pAd->StaCfg.LastScanTime + 30 * OS_HZ) > pAd->Mlme.Now32))
goto SKIP_AUTO_SCAN_CONN;
else
pAd->StaCfg.bScanReqIsFromWebUI = FALSE;
if ((pAd->StaCfg.bAutoReconnect == TRUE)
&& RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_START_UP)
&& (MlmeValidateSSID(pAd->MlmeAux.AutoReconnectSsid, pAd->MlmeAux.AutoReconnectSsidLen) == TRUE))
{
if ((pAd->ScanTab.BssNr==0) && (pAd->Mlme.CntlMachine.CurrState == CNTL_IDLE))
{
MLME_SCAN_REQ_STRUCT ScanReq;
if ((pAd->StaCfg.LastScanTime + 10 * OS_HZ) < pAd->Mlme.Now32)
{
DBGPRINT(RT_DEBUG_TRACE, ("STAMlmePeriodicExec():CNTL - ScanTab.BssNr==0, start a new ACTIVE scan SSID[%s]\n", pAd->MlmeAux.AutoReconnectSsid));
ScanParmFill(pAd, &ScanReq, pAd->MlmeAux.AutoReconnectSsid, pAd->MlmeAux.AutoReconnectSsidLen, BSS_ANY, SCAN_ACTIVE);
MlmeEnqueue(pAd, SYNC_STATE_MACHINE, MT2_MLME_SCAN_REQ, sizeof(MLME_SCAN_REQ_STRUCT), &ScanReq);
pAd->Mlme.CntlMachine.CurrState = CNTL_WAIT_OID_LIST_SCAN;
// Reset Missed scan number
pAd->StaCfg.LastScanTime = pAd->Mlme.Now32;
}
else if (pAd->StaCfg.BssType == BSS_ADHOC) // Quit the endless scan when in a very clean (RF-quiet) environment
MlmeAutoReconnectLastSSID(pAd);
}
else if (pAd->Mlme.CntlMachine.CurrState == CNTL_IDLE)
{
if ((pAd->Mlme.OneSecPeriodicRound % 7) == 0)
{
MlmeAutoScan(pAd);
pAd->StaCfg.LastScanTime = pAd->Mlme.Now32;
}
else
{
MlmeAutoReconnectLastSSID(pAd);
}
}
}
}
SKIP_AUTO_SCAN_CONN:
if ((pAd->MacTab.Content[BSSID_WCID].TXBAbitmap !=0) && (pAd->MacTab.fAnyBASession == FALSE))
{
pAd->MacTab.fAnyBASession = TRUE;
AsicUpdateProtect(pAd, HT_FORCERTSCTS, ALLN_SETPROTECT, FALSE, FALSE);
}
else if ((pAd->MacTab.Content[BSSID_WCID].TXBAbitmap ==0) && (pAd->MacTab.fAnyBASession == TRUE))
{
pAd->MacTab.fAnyBASession = FALSE;
AsicUpdateProtect(pAd, pAd->MlmeAux.AddHtInfo.AddHtInfo2.OperaionMode, ALLN_SETPROTECT, FALSE, FALSE);
}
return;
}
// Link down report
VOID LinkDownExec(
IN PVOID SystemSpecific1,
IN PVOID FunctionContext,
IN PVOID SystemSpecific2,
IN PVOID SystemSpecific3)
{
RTMP_ADAPTER *pAd = (RTMP_ADAPTER *)FunctionContext;
pAd->IndicateMediaState = NdisMediaStateDisconnected;
RTMP_IndicateMediaState(pAd);
pAd->ExtraInfo = GENERAL_LINK_DOWN;
}
// IRQL = DISPATCH_LEVEL
VOID MlmeAutoScan(
IN PRTMP_ADAPTER pAd)
{
// check CntlMachine.CurrState to avoid collision with NDIS SetOID request
if (pAd->Mlme.CntlMachine.CurrState == CNTL_IDLE)
{
DBGPRINT(RT_DEBUG_TRACE, ("MMCHK - Driver auto scan\n"));
MlmeEnqueue(pAd,
MLME_CNTL_STATE_MACHINE,
OID_802_11_BSSID_LIST_SCAN,
0,
NULL);
RT28XX_MLME_HANDLER(pAd);
}
}
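// The pattern above (check CNTL_IDLE, MlmeEnqueue(), then RT28XX_MLME_HANDLER())
// recurs throughout this file: requests are queued and the MLME dispatcher is
// kicked so that the state machines service them serially in MLME context,
// which is what makes the CNTL_IDLE test a sufficient guard against racing an
// NDIS SetOID request.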
// IRQL = DISPATCH_LEVEL
VOID MlmeAutoReconnectLastSSID(
IN PRTMP_ADAPTER pAd)
{
// check CntlMachine.CurrState to avoid collision with NDIS SetOID request
if ((pAd->Mlme.CntlMachine.CurrState == CNTL_IDLE) &&
(MlmeValidateSSID(pAd->MlmeAux.AutoReconnectSsid, pAd->MlmeAux.AutoReconnectSsidLen) == TRUE))
{
NDIS_802_11_SSID OidSsid;
OidSsid.SsidLength = pAd->MlmeAux.AutoReconnectSsidLen;
NdisMoveMemory(OidSsid.Ssid, pAd->MlmeAux.AutoReconnectSsid, pAd->MlmeAux.AutoReconnectSsidLen);
DBGPRINT(RT_DEBUG_TRACE, ("Driver auto reconnect to last OID_802_11_SSID setting - %s, len - %d\n", pAd->MlmeAux.AutoReconnectSsid, pAd->MlmeAux.AutoReconnectSsidLen));
MlmeEnqueue(pAd,
MLME_CNTL_STATE_MACHINE,
OID_802_11_SSID,
sizeof(NDIS_802_11_SSID),
&OidSsid);
RT28XX_MLME_HANDLER(pAd);
}
}
/*
==========================================================================
Validate the SSID for connection attempts and rescans.
A valid SSID contains visible characters only.
The valid length is 0 to 32.
IRQL = DISPATCH_LEVEL
==========================================================================
*/
BOOLEAN MlmeValidateSSID(
IN PUCHAR pSsid,
IN UCHAR SsidLen)
{
int index;
if (SsidLen > MAX_LEN_OF_SSID)
return (FALSE);
// Check each character value
for (index = 0; index < SsidLen; index++)
{
if (pSsid[index] < 0x20)
return (FALSE);
}
// All checked
return (TRUE);
}
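// Example: "MyAP" (printable bytes) and a zero-length SSID both pass; an
// SSID longer than 32 bytes or one containing a control byte (< 0x20) is rejected.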
VOID MlmeSelectTxRateTable(
IN PRTMP_ADAPTER pAd,
IN PMAC_TABLE_ENTRY pEntry,
IN PUCHAR *ppTable,
IN PUCHAR pTableSize,
IN PUCHAR pInitTxRateIdx)
{
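// Rate-table layout (a sketch, assuming the usual RTMP_TX_RATE_SWITCH packing
// in this driver): pTable[0] holds the number of entries and pTable[1] the
// initial rate index; the entries themselves are 5-byte RTMP_TX_RATE_SWITCH
// records starting at offset 5, which is why the rate-switching code indexes
// them as &pTable[(idx + 1) * 5].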
do
{
// decide the rate table for tuning
if (pAd->CommonCfg.TxRateTableSize > 0)
{
*ppTable = RateSwitchTable;
*pTableSize = RateSwitchTable[0];
*pInitTxRateIdx = RateSwitchTable[1];
break;
}
if ((pAd->OpMode == OPMODE_STA) && ADHOC_ON(pAd))
{
if ((pAd->CommonCfg.PhyMode >= PHY_11ABGN_MIXED) &&
#ifdef RT2860
!pAd->StaCfg.AdhocBOnlyJoined &&
!pAd->StaCfg.AdhocBGJoined &&
(pAd->StaActive.SupportedPhyInfo.MCSSet[0] == 0xff) &&
((pAd->StaActive.SupportedPhyInfo.MCSSet[1] == 0x00) || (pAd->Antenna.field.TxPath == 1)))
#endif
#ifdef RT2870
(pEntry->HTCapability.MCSSet[0] == 0xff) &&
((pEntry->HTCapability.MCSSet[1] == 0x00) || (pAd->Antenna.field.TxPath == 1)))
#endif
{// 11N 1S Adhoc
*ppTable = RateSwitchTable11N1S;
*pTableSize = RateSwitchTable11N1S[0];
*pInitTxRateIdx = RateSwitchTable11N1S[1];
}
else if ((pAd->CommonCfg.PhyMode >= PHY_11ABGN_MIXED) &&
#ifdef RT2860
!pAd->StaCfg.AdhocBOnlyJoined &&
!pAd->StaCfg.AdhocBGJoined &&
(pAd->StaActive.SupportedPhyInfo.MCSSet[0] == 0xff) &&
(pAd->StaActive.SupportedPhyInfo.MCSSet[1] == 0xff) &&
#endif
#ifdef RT2870
(pEntry->HTCapability.MCSSet[0] == 0xff) &&
(pEntry->HTCapability.MCSSet[1] == 0xff) &&
#endif
(pAd->Antenna.field.TxPath == 2))
{// 11N 2S Adhoc
if (pAd->LatchRfRegs.Channel <= 14)
{
*ppTable = RateSwitchTable11N2S;
*pTableSize = RateSwitchTable11N2S[0];
*pInitTxRateIdx = RateSwitchTable11N2S[1];
}
else
{
*ppTable = RateSwitchTable11N2SForABand;
*pTableSize = RateSwitchTable11N2SForABand[0];
*pInitTxRateIdx = RateSwitchTable11N2SForABand[1];
}
}
else
#ifdef RT2860
if (pAd->CommonCfg.PhyMode == PHY_11B)
{
*ppTable = RateSwitchTable11B;
*pTableSize = RateSwitchTable11B[0];
*pInitTxRateIdx = RateSwitchTable11B[1];
}
else if((pAd->LatchRfRegs.Channel <= 14) && (pAd->StaCfg.AdhocBOnlyJoined == TRUE))
#endif
#ifdef RT2870
if ((pEntry->RateLen == 4)
&& (pEntry->HTCapability.MCSSet[0] == 0) && (pEntry->HTCapability.MCSSet[1] == 0)
)
#endif
{
// Use the B table when only B-only stations are in my IBSS.
*ppTable = RateSwitchTable11B;
*pTableSize = RateSwitchTable11B[0];
*pInitTxRateIdx = RateSwitchTable11B[1];
}
else if (pAd->LatchRfRegs.Channel <= 14)
{
*ppTable = RateSwitchTable11BG;
*pTableSize = RateSwitchTable11BG[0];
*pInitTxRateIdx = RateSwitchTable11BG[1];
}
else
{
*ppTable = RateSwitchTable11G;
*pTableSize = RateSwitchTable11G[0];
*pInitTxRateIdx = RateSwitchTable11G[1];
}
break;
}
if ((pEntry->RateLen == 12) && (pEntry->HTCapability.MCSSet[0] == 0xff) &&
((pEntry->HTCapability.MCSSet[1] == 0x00) || (pAd->CommonCfg.TxStream == 1)))
{// 11BGN 1S AP
*ppTable = RateSwitchTable11BGN1S;
*pTableSize = RateSwitchTable11BGN1S[0];
*pInitTxRateIdx = RateSwitchTable11BGN1S[1];
break;
}
if ((pEntry->RateLen == 12) && (pEntry->HTCapability.MCSSet[0] == 0xff) &&
(pEntry->HTCapability.MCSSet[1] == 0xff) && (pAd->CommonCfg.TxStream == 2))
{// 11BGN 2S AP
if (pAd->LatchRfRegs.Channel <= 14)
{
*ppTable = RateSwitchTable11BGN2S;
*pTableSize = RateSwitchTable11BGN2S[0];
*pInitTxRateIdx = RateSwitchTable11BGN2S[1];
}
else
{
*ppTable = RateSwitchTable11BGN2SForABand;
*pTableSize = RateSwitchTable11BGN2SForABand[0];
*pInitTxRateIdx = RateSwitchTable11BGN2SForABand[1];
}
break;
}
if ((pEntry->HTCapability.MCSSet[0] == 0xff) && ((pEntry->HTCapability.MCSSet[1] == 0x00) || (pAd->CommonCfg.TxStream == 1)))
{// 11N 1S AP
*ppTable = RateSwitchTable11N1S;
*pTableSize = RateSwitchTable11N1S[0];
*pInitTxRateIdx = RateSwitchTable11N1S[1];
break;
}
if ((pEntry->HTCapability.MCSSet[0] == 0xff) && (pEntry->HTCapability.MCSSet[1] == 0xff) && (pAd->CommonCfg.TxStream == 2))
{// 11N 2S AP
if (pAd->LatchRfRegs.Channel <= 14)
{
*ppTable = RateSwitchTable11N2S;
*pTableSize = RateSwitchTable11N2S[0];
*pInitTxRateIdx = RateSwitchTable11N2S[1];
}
else
{
*ppTable = RateSwitchTable11N2SForABand;
*pTableSize = RateSwitchTable11N2SForABand[0];
*pInitTxRateIdx = RateSwitchTable11N2SForABand[1];
}
break;
}
//else if ((pAd->StaActive.SupRateLen == 4) && (pAd->StaActive.ExtRateLen == 0) && (pAd->StaActive.SupportedPhyInfo.MCSSet[0] == 0) && (pAd->StaActive.SupportedPhyInfo.MCSSet[1] == 0))
if (pEntry->RateLen == 4)
{// B only AP
*ppTable = RateSwitchTable11B;
*pTableSize = RateSwitchTable11B[0];
*pInitTxRateIdx = RateSwitchTable11B[1];
break;
}
//else if ((pAd->StaActive.SupRateLen + pAd->StaActive.ExtRateLen > 8) && (pAd->StaActive.SupportedPhyInfo.MCSSet[0] == 0) && (pAd->StaActive.SupportedPhyInfo.MCSSet[1] == 0))
if ((pEntry->RateLen > 8)
&& (pEntry->HTCapability.MCSSet[0] == 0) && (pEntry->HTCapability.MCSSet[1] == 0)
)
{// B/G mixed AP
*ppTable = RateSwitchTable11BG;
*pTableSize = RateSwitchTable11BG[0];
*pInitTxRateIdx = RateSwitchTable11BG[1];
break;
}
//else if ((pAd->StaActive.SupRateLen + pAd->StaActive.ExtRateLen == 8) && (pAd->StaActive.SupportedPhyInfo.MCSSet[0] == 0) && (pAd->StaActive.SupportedPhyInfo.MCSSet[1] == 0))
if ((pEntry->RateLen == 8)
&& (pEntry->HTCapability.MCSSet[0] == 0) && (pEntry->HTCapability.MCSSet[1] == 0)
)
{// G only AP
*ppTable = RateSwitchTable11G;
*pTableSize = RateSwitchTable11G[0];
*pInitTxRateIdx = RateSwitchTable11G[1];
break;
}
{
//else if ((pAd->StaActive.SupportedPhyInfo.MCSSet[0] == 0) && (pAd->StaActive.SupportedPhyInfo.MCSSet[1] == 0))
if ((pEntry->HTCapability.MCSSet[0] == 0) && (pEntry->HTCapability.MCSSet[1] == 0))
{ // Legacy mode
if (pAd->CommonCfg.MaxTxRate <= RATE_11)
{
*ppTable = RateSwitchTable11B;
*pTableSize = RateSwitchTable11B[0];
*pInitTxRateIdx = RateSwitchTable11B[1];
}
else if ((pAd->CommonCfg.MaxTxRate > RATE_11) && (pAd->CommonCfg.MinTxRate > RATE_11))
{
*ppTable = RateSwitchTable11G;
*pTableSize = RateSwitchTable11G[0];
*pInitTxRateIdx = RateSwitchTable11G[1];
}
else
{
*ppTable = RateSwitchTable11BG;
*pTableSize = RateSwitchTable11BG[0];
*pInitTxRateIdx = RateSwitchTable11BG[1];
}
break;
}
if (pAd->LatchRfRegs.Channel <= 14)
{
if (pAd->CommonCfg.TxStream == 1)
{
*ppTable = RateSwitchTable11N1S;
*pTableSize = RateSwitchTable11N1S[0];
*pInitTxRateIdx = RateSwitchTable11N1S[1];
DBGPRINT_RAW(RT_DEBUG_ERROR,("DRS: unkown mode,default use 11N 1S AP \n"));
}
else
{
*ppTable = RateSwitchTable11N2S;
*pTableSize = RateSwitchTable11N2S[0];
*pInitTxRateIdx = RateSwitchTable11N2S[1];
DBGPRINT_RAW(RT_DEBUG_ERROR,("DRS: unkown mode,default use 11N 2S AP \n"));
}
}
else
{
if (pAd->CommonCfg.TxStream == 1)
{
*ppTable = RateSwitchTable11N1S;
*pTableSize = RateSwitchTable11N1S[0];
*pInitTxRateIdx = RateSwitchTable11N1S[1];
DBGPRINT_RAW(RT_DEBUG_ERROR,("DRS: unkown mode,default use 11N 1S AP \n"));
}
else
{
*ppTable = RateSwitchTable11N2SForABand;
*pTableSize = RateSwitchTable11N2SForABand[0];
*pInitTxRateIdx = RateSwitchTable11N2SForABand[1];
DBGPRINT_RAW(RT_DEBUG_ERROR,("DRS: unkown mode,default use 11N 2S AP \n"));
}
}
DBGPRINT_RAW(RT_DEBUG_ERROR,("DRS: unkown mode (SupRateLen=%d, ExtRateLen=%d, MCSSet[0]=0x%x, MCSSet[1]=0x%x)\n",
pAd->StaActive.SupRateLen, pAd->StaActive.ExtRateLen, pAd->StaActive.SupportedPhyInfo.MCSSet[0], pAd->StaActive.SupportedPhyInfo.MCSSet[1]));
}
} while(FALSE);
}
/*
==========================================================================
Description:
This routine checks whether there are other APs out there suitable for
roaming to. The caller should call this routine only when the link is up in INFRA mode
and channel quality is below CQI_GOOD_THRESHOLD.
IRQL = DISPATCH_LEVEL
Output:
==========================================================================
*/
VOID MlmeCheckForRoaming(
IN PRTMP_ADAPTER pAd,
IN ULONG Now32)
{
USHORT i;
BSS_TABLE *pRoamTab = &pAd->MlmeAux.RoamTab;
BSS_ENTRY *pBss;
DBGPRINT(RT_DEBUG_TRACE, ("==> MlmeCheckForRoaming\n"));
// put all roaming candidates into RoamTab, and sort in RSSI order
BssTableInit(pRoamTab);
for (i = 0; i < pAd->ScanTab.BssNr; i++)
{
pBss = &pAd->ScanTab.BssEntry[i];
if ((pBss->LastBeaconRxTime + BEACON_LOST_TIME) < Now32)
continue; // AP disappeared
if (pBss->Rssi <= RSSI_THRESHOLD_FOR_ROAMING)
continue; // RSSI too weak. forget it.
if (MAC_ADDR_EQUAL(pBss->Bssid, pAd->CommonCfg.Bssid))
continue; // skip current AP
if (pBss->Rssi < (pAd->StaCfg.RssiSample.LastRssi0 + RSSI_DELTA))
continue; // only AP with stronger RSSI is eligible for roaming
// AP passing all above rules is put into roaming candidate table
NdisMoveMemory(&pRoamTab->BssEntry[pRoamTab->BssNr], pBss, sizeof(BSS_ENTRY));
pRoamTab->BssNr += 1;
}
if (pRoamTab->BssNr > 0)
{
// check CntlMachine.CurrState to avoid collision with NDIS SetOID request
if (pAd->Mlme.CntlMachine.CurrState == CNTL_IDLE)
{
pAd->RalinkCounters.PoorCQIRoamingCount ++;
DBGPRINT(RT_DEBUG_TRACE, ("MMCHK - Roaming attempt #%ld\n", pAd->RalinkCounters.PoorCQIRoamingCount));
MlmeEnqueue(pAd, MLME_CNTL_STATE_MACHINE, MT2_MLME_ROAMING_REQ, 0, NULL);
RT28XX_MLME_HANDLER(pAd);
}
}
DBGPRINT(RT_DEBUG_TRACE, ("<== MlmeCheckForRoaming(# of candidate= %d)\n",pRoamTab->BssNr));
}
/*
==========================================================================
Description:
This routine checks whether there are other APs out there suitable for
roaming to. The caller should call this routine only when the link is up in INFRA mode
and channel quality is below CQI_GOOD_THRESHOLD.
IRQL = DISPATCH_LEVEL
Output:
==========================================================================
*/
VOID MlmeCheckForFastRoaming(
IN PRTMP_ADAPTER pAd,
IN ULONG Now)
{
USHORT i;
BSS_TABLE *pRoamTab = &pAd->MlmeAux.RoamTab;
BSS_ENTRY *pBss;
DBGPRINT(RT_DEBUG_TRACE, ("==> MlmeCheckForFastRoaming\n"));
// put all roaming candidates into RoamTab, and sort in RSSI order
BssTableInit(pRoamTab);
for (i = 0; i < pAd->ScanTab.BssNr; i++)
{
pBss = &pAd->ScanTab.BssEntry[i];
if ((pBss->Rssi <= -50) && (pBss->Channel == pAd->CommonCfg.Channel))
continue; // RSSI too weak. forget it.
if (MAC_ADDR_EQUAL(pBss->Bssid, pAd->CommonCfg.Bssid))
continue; // skip current AP
if (!SSID_EQUAL(pBss->Ssid, pBss->SsidLen, pAd->CommonCfg.Ssid, pAd->CommonCfg.SsidLen))
continue; // skip different SSID
if (pBss->Rssi < (RTMPMaxRssi(pAd, pAd->StaCfg.RssiSample.LastRssi0, pAd->StaCfg.RssiSample.LastRssi1, pAd->StaCfg.RssiSample.LastRssi2) + RSSI_DELTA))
continue; // skip AP without better RSSI
DBGPRINT(RT_DEBUG_TRACE, ("LastRssi0 = %d, pBss->Rssi = %d\n", RTMPMaxRssi(pAd, pAd->StaCfg.RssiSample.LastRssi0, pAd->StaCfg.RssiSample.LastRssi1, pAd->StaCfg.RssiSample.LastRssi2), pBss->Rssi));
// AP passing all above rules is put into roaming candidate table
NdisMoveMemory(&pRoamTab->BssEntry[pRoamTab->BssNr], pBss, sizeof(BSS_ENTRY));
pRoamTab->BssNr += 1;
}
if (pRoamTab->BssNr > 0)
{
// check CntlMachine.CurrState to avoid collision with NDIS SetOID request
if (pAd->Mlme.CntlMachine.CurrState == CNTL_IDLE)
{
pAd->RalinkCounters.PoorCQIRoamingCount ++;
DBGPRINT(RT_DEBUG_TRACE, ("MMCHK - Roaming attempt #%ld\n", pAd->RalinkCounters.PoorCQIRoamingCount));
MlmeEnqueue(pAd, MLME_CNTL_STATE_MACHINE, MT2_MLME_ROAMING_REQ, 0, NULL);
RT28XX_MLME_HANDLER(pAd);
}
}
// Maybe site survey required
else
{
if ((pAd->StaCfg.LastScanTime + 10 * 1000) < Now)
{
// check CntlMachine.CurrState to avoid collision with NDIS SetOID request
DBGPRINT(RT_DEBUG_TRACE, ("MMCHK - Roaming, No eligable entry, try new scan!\n"));
pAd->StaCfg.ScanCnt = 2;
pAd->StaCfg.LastScanTime = Now;
MlmeAutoScan(pAd);
}
}
DBGPRINT(RT_DEBUG_TRACE, ("<== MlmeCheckForFastRoaming (BssNr=%d)\n", pRoamTab->BssNr));
}
/*
==========================================================================
Description:
This routine calculates the TxPER and RxPER of the past N-sec period.
According to the calculation result, ChannelQuality is calculated here
to decide whether the current AP is still doing its job.
If ChannelQuality is not good, a ROAMing attempt may be tried later.
Output:
StaCfg.ChannelQuality - 0..100
IRQL = DISPATCH_LEVEL
NOTE: This routine decides channel quality based on the RX CRC error ratio.
Caller should make sure a function call to NICUpdateRawCounters(pAd)
is performed right before this routine, so that this routine can decide
channel quality based on the most up-to-date information
==========================================================================
*/
VOID MlmeCalculateChannelQuality(
IN PRTMP_ADAPTER pAd,
IN ULONG Now32)
{
ULONG TxOkCnt, TxCnt, TxPER, TxPRR;
ULONG RxCnt, RxPER;
UCHAR NorRssi;
CHAR MaxRssi;
ULONG BeaconLostTime = BEACON_LOST_TIME;
MaxRssi = RTMPMaxRssi(pAd, pAd->StaCfg.RssiSample.LastRssi0, pAd->StaCfg.RssiSample.LastRssi1, pAd->StaCfg.RssiSample.LastRssi2);
//
// calculate TX packet error ratio and TX retry ratio - if too few TX samples, skip TX related statistics
//
TxOkCnt = pAd->RalinkCounters.OneSecTxNoRetryOkCount + pAd->RalinkCounters.OneSecTxRetryOkCount;
TxCnt = TxOkCnt + pAd->RalinkCounters.OneSecTxFailCount;
if (TxCnt < 5)
{
TxPER = 0;
TxPRR = 0;
}
else
{
TxPER = (pAd->RalinkCounters.OneSecTxFailCount * 100) / TxCnt;
TxPRR = ((TxCnt - pAd->RalinkCounters.OneSecTxNoRetryOkCount) * 100) / TxCnt;
}
//
// calculate RX PER - don't take RxPER into consideration if there are too few samples
//
RxCnt = pAd->RalinkCounters.OneSecRxOkCnt + pAd->RalinkCounters.OneSecRxFcsErrCnt;
if (RxCnt < 5)
RxPER = 0;
else
RxPER = (pAd->RalinkCounters.OneSecRxFcsErrCnt * 100) / RxCnt;
//
// decide ChannelQuality based on: 1)last BEACON received time, 2)last RSSI, 3)TxPER, and 4)RxPER
//
if (INFRA_ON(pAd) &&
(pAd->RalinkCounters.OneSecTxNoRetryOkCount < 2) && // no heavy traffic
(pAd->StaCfg.LastBeaconRxTime + BeaconLostTime < Now32))
{
DBGPRINT(RT_DEBUG_TRACE, ("BEACON lost > %ld msec with TxOkCnt=%ld -> CQI=0\n", BeaconLostTime, TxOkCnt));
pAd->Mlme.ChannelQuality = 0;
}
else
{
// Normalize Rssi
if (MaxRssi > -40)
NorRssi = 100;
else if (MaxRssi < -90)
NorRssi = 0;
else
NorRssi = (MaxRssi + 90) * 2;
// ChannelQuality = W1*RSSI + W2*TxPRR + W3*RxPER (RSSI 0..100), (TxPER 100..0), (RxPER 100..0)
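// Worked example (assuming the usual weights RSSI_WEIGHTING=50,
// TX_WEIGHTING=30, RX_WEIGHTING=20): MaxRssi = -65 dBm gives
// NorRssi = (-65 + 90) * 2 = 50; with TxPRR = 20% and RxPER = 10%,
// ChannelQuality = (50*50 + 30*80 + 20*90) / 100 = 67.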
pAd->Mlme.ChannelQuality = (RSSI_WEIGHTING * NorRssi +
TX_WEIGHTING * (100 - TxPRR) +
RX_WEIGHTING* (100 - RxPER)) / 100;
if (pAd->Mlme.ChannelQuality >= 100)
pAd->Mlme.ChannelQuality = 100;
}
}
VOID MlmeSetTxRate(
IN PRTMP_ADAPTER pAd,
IN PMAC_TABLE_ENTRY pEntry,
IN PRTMP_TX_RATE_SWITCH pTxRate)
{
UCHAR MaxMode = MODE_OFDM;
MaxMode = MODE_HTGREENFIELD;
if (pTxRate->STBC && (pAd->StaCfg.MaxHTPhyMode.field.STBC) && (pAd->Antenna.field.TxPath == 2))
pAd->StaCfg.HTPhyMode.field.STBC = STBC_USE;
else
pAd->StaCfg.HTPhyMode.field.STBC = STBC_NONE;
if (pTxRate->CurrMCS < MCS_AUTO)
pAd->StaCfg.HTPhyMode.field.MCS = pTxRate->CurrMCS;
if (pAd->StaCfg.HTPhyMode.field.MCS > 7)
pAd->StaCfg.HTPhyMode.field.STBC = STBC_NONE;
if (ADHOC_ON(pAd))
{
// If the adhoc peer is in B-only mode, we can't send at 11g rates.
pAd->StaCfg.HTPhyMode.field.ShortGI = GI_800;
pEntry->HTPhyMode.field.STBC = STBC_NONE;
//
// For Adhoc MODE_CCK, the driver will use the AdhocBOnlyJoined flag to roll back to B-only if necessary
//
pEntry->HTPhyMode.field.MODE = pTxRate->Mode;
pEntry->HTPhyMode.field.ShortGI = pAd->StaCfg.HTPhyMode.field.ShortGI;
pEntry->HTPhyMode.field.MCS = pAd->StaCfg.HTPhyMode.field.MCS;
// Patch speed error in status page
pAd->StaCfg.HTPhyMode.field.MODE = pEntry->HTPhyMode.field.MODE;
}
else
{
if (pTxRate->Mode <= MaxMode)
pAd->StaCfg.HTPhyMode.field.MODE = pTxRate->Mode;
if (pTxRate->ShortGI && (pAd->StaCfg.MaxHTPhyMode.field.ShortGI))
pAd->StaCfg.HTPhyMode.field.ShortGI = GI_400;
else
pAd->StaCfg.HTPhyMode.field.ShortGI = GI_800;
// Re-examine each bandwidth's SGI support.
if (pAd->StaCfg.HTPhyMode.field.ShortGI == GI_400)
{
if ((pEntry->HTPhyMode.field.BW == BW_20) && (!CLIENT_STATUS_TEST_FLAG(pEntry, fCLIENT_STATUS_SGI20_CAPABLE)))
pAd->StaCfg.HTPhyMode.field.ShortGI = GI_800;
if ((pEntry->HTPhyMode.field.BW == BW_40) && (!CLIENT_STATUS_TEST_FLAG(pEntry, fCLIENT_STATUS_SGI40_CAPABLE)))
pAd->StaCfg.HTPhyMode.field.ShortGI = GI_800;
}
// Turn RTS/CTS rate to 6Mbps.
if ((pEntry->HTPhyMode.field.MCS == 0) && (pAd->StaCfg.HTPhyMode.field.MCS != 0))
{
pEntry->HTPhyMode.field.MCS = pAd->StaCfg.HTPhyMode.field.MCS;
if (pAd->MacTab.fAnyBASession)
{
AsicUpdateProtect(pAd, HT_FORCERTSCTS, ALLN_SETPROTECT, TRUE, (BOOLEAN)pAd->MlmeAux.AddHtInfo.AddHtInfo2.NonGfPresent);
}
else
{
AsicUpdateProtect(pAd, pAd->MlmeAux.AddHtInfo.AddHtInfo2.OperaionMode, ALLN_SETPROTECT, TRUE, (BOOLEAN)pAd->MlmeAux.AddHtInfo.AddHtInfo2.NonGfPresent);
}
}
else if ((pEntry->HTPhyMode.field.MCS == 8) && (pAd->StaCfg.HTPhyMode.field.MCS != 8))
{
pEntry->HTPhyMode.field.MCS = pAd->StaCfg.HTPhyMode.field.MCS;
if (pAd->MacTab.fAnyBASession)
{
AsicUpdateProtect(pAd, HT_FORCERTSCTS, ALLN_SETPROTECT, TRUE, (BOOLEAN)pAd->MlmeAux.AddHtInfo.AddHtInfo2.NonGfPresent);
}
else
{
AsicUpdateProtect(pAd, pAd->MlmeAux.AddHtInfo.AddHtInfo2.OperaionMode, ALLN_SETPROTECT, TRUE, (BOOLEAN)pAd->MlmeAux.AddHtInfo.AddHtInfo2.NonGfPresent);
}
}
else if ((pEntry->HTPhyMode.field.MCS != 0) && (pAd->StaCfg.HTPhyMode.field.MCS == 0))
{
AsicUpdateProtect(pAd, HT_RTSCTS_6M, ALLN_SETPROTECT, TRUE, (BOOLEAN)pAd->MlmeAux.AddHtInfo.AddHtInfo2.NonGfPresent);
}
else if ((pEntry->HTPhyMode.field.MCS != 8) && (pAd->StaCfg.HTPhyMode.field.MCS == 8))
{
AsicUpdateProtect(pAd, HT_RTSCTS_6M, ALLN_SETPROTECT, TRUE, (BOOLEAN)pAd->MlmeAux.AddHtInfo.AddHtInfo2.NonGfPresent);
}
pEntry->HTPhyMode.field.STBC = pAd->StaCfg.HTPhyMode.field.STBC;
pEntry->HTPhyMode.field.ShortGI = pAd->StaCfg.HTPhyMode.field.ShortGI;
pEntry->HTPhyMode.field.MCS = pAd->StaCfg.HTPhyMode.field.MCS;
pEntry->HTPhyMode.field.MODE = pAd->StaCfg.HTPhyMode.field.MODE;
if ((pAd->StaCfg.MaxHTPhyMode.field.MODE == MODE_HTGREENFIELD) &&
pAd->WIFItestbed.bGreenField)
pEntry->HTPhyMode.field.MODE = MODE_HTGREENFIELD;
}
pAd->LastTxRate = (USHORT)(pEntry->HTPhyMode.word);
}
/*
==========================================================================
Description:
This routine calculates the accumulated TxPER of each TxRate. According
to the calculation result, it changes CommonCfg.TxRate, which
is the stable TX rate we expect the current radio situation can sustain.
CommonCfg.TxRate will change dynamically within {RATE_1/RATE_6, MaxTxRate}
Output:
CommonCfg.TxRate -
IRQL = DISPATCH_LEVEL
NOTE:
call this routine every second
==========================================================================
*/
VOID MlmeDynamicTxRateSwitching(
IN PRTMP_ADAPTER pAd)
{
UCHAR UpRateIdx = 0, DownRateIdx = 0, CurrRateIdx;
ULONG i, AccuTxTotalCnt = 0, TxTotalCnt;
ULONG TxErrorRatio = 0;
BOOLEAN bTxRateChanged, bUpgradeQuality = FALSE;
PRTMP_TX_RATE_SWITCH pCurrTxRate, pNextTxRate = NULL;
PUCHAR pTable;
UCHAR TableSize = 0;
UCHAR InitTxRateIdx = 0, TrainUp, TrainDown;
CHAR Rssi, RssiOffset = 0;
TX_STA_CNT1_STRUC StaTx1;
TX_STA_CNT0_STRUC TxStaCnt0;
ULONG TxRetransmit = 0, TxSuccess = 0, TxFailCount = 0;
MAC_TABLE_ENTRY *pEntry;
//
// walk through the MAC table and see if the TX rate toward each entry needs to change
//
for (i = 1; i < MAX_LEN_OF_MAC_TABLE; i++)
{
pEntry = &pAd->MacTab.Content[i];
// check whether this entry needs to switch rates automatically
if (RTMPCheckEntryEnableAutoRateSwitch(pAd, pEntry) == FALSE)
continue;
if ((pAd->MacTab.Size == 1) || (pEntry->ValidAsDls))
{
#ifdef RT2860
Rssi = RTMPMaxRssi(pAd, (CHAR)pAd->StaCfg.RssiSample.AvgRssi0, (CHAR)pAd->StaCfg.RssiSample.AvgRssi1, (CHAR)pAd->StaCfg.RssiSample.AvgRssi2);
#endif
#ifdef RT2870
Rssi = RTMPMaxRssi(pAd,
pAd->StaCfg.RssiSample.AvgRssi0,
pAd->StaCfg.RssiSample.AvgRssi1,
pAd->StaCfg.RssiSample.AvgRssi2);
#endif
// Update statistic counter
RTMP_IO_READ32(pAd, TX_STA_CNT0, &TxStaCnt0.word);
RTMP_IO_READ32(pAd, TX_STA_CNT1, &StaTx1.word);
pAd->bUpdateBcnCntDone = TRUE;
TxRetransmit = StaTx1.field.TxRetransmit;
TxSuccess = StaTx1.field.TxSuccess;
TxFailCount = TxStaCnt0.field.TxFailCount;
TxTotalCnt = TxRetransmit + TxSuccess + TxFailCount;
pAd->RalinkCounters.OneSecTxRetryOkCount += StaTx1.field.TxRetransmit;
pAd->RalinkCounters.OneSecTxNoRetryOkCount += StaTx1.field.TxSuccess;
pAd->RalinkCounters.OneSecTxFailCount += TxStaCnt0.field.TxFailCount;
pAd->WlanCounters.TransmittedFragmentCount.u.LowPart += StaTx1.field.TxSuccess;
pAd->WlanCounters.RetryCount.u.LowPart += StaTx1.field.TxRetransmit;
pAd->WlanCounters.FailedCount.u.LowPart += TxStaCnt0.field.TxFailCount;
// if there was no traffic in the past 1-sec period, don't change the TX rate,
// but clear all bad history, because the bad history may affect the next
// Chariot throughput test
AccuTxTotalCnt = pAd->RalinkCounters.OneSecTxNoRetryOkCount +
pAd->RalinkCounters.OneSecTxRetryOkCount +
pAd->RalinkCounters.OneSecTxFailCount;
if (TxTotalCnt)
TxErrorRatio = ((TxRetransmit + TxFailCount) * 100) / TxTotalCnt;
}
else
{
#ifdef RT2860
Rssi = RTMPMaxRssi(pAd, (CHAR)pEntry->RssiSample.AvgRssi0, (CHAR)pEntry->RssiSample.AvgRssi1, (CHAR)pEntry->RssiSample.AvgRssi2);
#endif
#ifdef RT2870
if (INFRA_ON(pAd) && (i == 1))
Rssi = RTMPMaxRssi(pAd,
pAd->StaCfg.RssiSample.AvgRssi0,
pAd->StaCfg.RssiSample.AvgRssi1,
pAd->StaCfg.RssiSample.AvgRssi2);
else
Rssi = RTMPMaxRssi(pAd,
pEntry->RssiSample.AvgRssi0,
pEntry->RssiSample.AvgRssi1,
pEntry->RssiSample.AvgRssi2);
#endif
TxTotalCnt = pEntry->OneSecTxNoRetryOkCount +
pEntry->OneSecTxRetryOkCount +
pEntry->OneSecTxFailCount;
if (TxTotalCnt)
TxErrorRatio = ((pEntry->OneSecTxRetryOkCount + pEntry->OneSecTxFailCount) * 100) / TxTotalCnt;
}
CurrRateIdx = pEntry->CurrTxRateIndex;
MlmeSelectTxRateTable(pAd, pEntry, &pTable, &TableSize, &InitTxRateIdx);
if (CurrRateIdx >= TableSize)
{
CurrRateIdx = TableSize - 1;
}
// When switching from fixed rate to auto rate, the REAL TX rate might differ from pAd->CommonCfg.TxRateIndex,
// so it needs to be synced here.
pCurrTxRate = (PRTMP_TX_RATE_SWITCH) &pTable[(CurrRateIdx+1)*5];
if ((pEntry->HTPhyMode.field.MCS != pCurrTxRate->CurrMCS)
//&& (pAd->StaCfg.bAutoTxRateSwitch == TRUE)
)
{
// Need to sync Real Tx rate and our record.
// Then return for next DRS.
pCurrTxRate = (PRTMP_TX_RATE_SWITCH) &pTable[(InitTxRateIdx+1)*5];
pEntry->CurrTxRateIndex = InitTxRateIdx;
MlmeSetTxRate(pAd, pEntry, pCurrTxRate);
// reset all OneSecTx counters
RESET_ONE_SEC_TX_CNT(pEntry);
continue;
}
// decide the next upgrade rate and downgrade rate, if any
if ((CurrRateIdx > 0) && (CurrRateIdx < (TableSize - 1)))
{
UpRateIdx = CurrRateIdx + 1;
DownRateIdx = CurrRateIdx -1;
}
else if (CurrRateIdx == 0)
{
UpRateIdx = CurrRateIdx + 1;
DownRateIdx = CurrRateIdx;
}
else if (CurrRateIdx == (TableSize - 1))
{
UpRateIdx = CurrRateIdx;
DownRateIdx = CurrRateIdx - 1;
}
pCurrTxRate = (PRTMP_TX_RATE_SWITCH) &pTable[(CurrRateIdx+1)*5];
if ((Rssi > -65) && (pCurrTxRate->Mode >= MODE_HTMIX))
{
TrainUp = (pCurrTxRate->TrainUp + (pCurrTxRate->TrainUp >> 1));
TrainDown = (pCurrTxRate->TrainDown + (pCurrTxRate->TrainDown >> 1));
}
else
{
TrainUp = pCurrTxRate->TrainUp;
TrainDown = pCurrTxRate->TrainDown;
}
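// With strong signal (RSSI > -65 dBm) in an HT mode, both thresholds are
// scaled to 1.5x (x + (x >> 1)): a higher TrainDown tolerates more PER
// before downgrading and a higher TrainUp makes upgrading easier, so the
// algorithm is biased toward the higher MCS rates when the link is good.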
//pAd->DrsCounters.LastTimeTxRateChangeAction = pAd->DrsCounters.LastSecTxRateChangeAction;
//
// Keep the last time TxRateChangeAction status.
//
pEntry->LastTimeTxRateChangeAction = pEntry->LastSecTxRateChangeAction;
//
// CASE 1. when there are fewer than 15 TX samples, decide the TX rate solely on RSSI
//         (criteria copied from RT2500 for the Netopia case)
//
if (TxTotalCnt <= 15)
{
CHAR idx = 0;
UCHAR TxRateIdx;
//UCHAR MCS0 = 0, MCS1 = 0, MCS2 = 0, MCS3 = 0, MCS4 = 0, MCS7 = 0, MCS12 = 0, MCS13 = 0, MCS14 = 0, MCS15 = 0;
UCHAR MCS0 = 0, MCS1 = 0, MCS2 = 0, MCS3 = 0, MCS4 = 0, MCS5 =0, MCS6 = 0, MCS7 = 0;
UCHAR MCS12 = 0, MCS13 = 0, MCS14 = 0, MCS15 = 0;
UCHAR MCS20 = 0, MCS21 = 0, MCS22 = 0, MCS23 = 0; // 3*3
// check the existence and index of each needed MCS
while (idx < pTable[0])
{
pCurrTxRate = (PRTMP_TX_RATE_SWITCH) &pTable[(idx+1)*5];
if (pCurrTxRate->CurrMCS == MCS_0)
{
MCS0 = idx;
}
else if (pCurrTxRate->CurrMCS == MCS_1)
{
MCS1 = idx;
}
else if (pCurrTxRate->CurrMCS == MCS_2)
{
MCS2 = idx;
}
else if (pCurrTxRate->CurrMCS == MCS_3)
{
MCS3 = idx;
}
else if (pCurrTxRate->CurrMCS == MCS_4)
{
MCS4 = idx;
}
else if (pCurrTxRate->CurrMCS == MCS_5)
{
MCS5 = idx;
}
else if (pCurrTxRate->CurrMCS == MCS_6)
{
MCS6 = idx;
}
//else if (pCurrTxRate->CurrMCS == MCS_7)
else if ((pCurrTxRate->CurrMCS == MCS_7) && (pCurrTxRate->ShortGI == GI_800)) // prevent the highest MCS from using short GI with one TX stream at low throughput
{
MCS7 = idx;
}
else if (pCurrTxRate->CurrMCS == MCS_12)
{
MCS12 = idx;
}
else if (pCurrTxRate->CurrMCS == MCS_13)
{
MCS13 = idx;
}
else if (pCurrTxRate->CurrMCS == MCS_14)
{
MCS14 = idx;
}
else if ((pCurrTxRate->CurrMCS == MCS_15) && (pCurrTxRate->ShortGI == GI_800)) // we would like to use ShortGI as the initial rate; however, Atheros chips have bugs with short GI
{
MCS15 = idx;
}
else if (pCurrTxRate->CurrMCS == MCS_20) // 3*3
{
MCS20 = idx;
}
else if (pCurrTxRate->CurrMCS == MCS_21)
{
MCS21 = idx;
}
else if (pCurrTxRate->CurrMCS == MCS_22)
{
MCS22 = idx;
}
else if (pCurrTxRate->CurrMCS == MCS_23)
{
MCS23 = idx;
}
idx ++;
}
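// RssiOffset shifts the RSSI thresholds used below to account for front-end
// differences: boards with an external LNA use the smaller offset (2 on
// 2.4 GHz, 5 on 5 GHz) and boards without one use the larger (5 and 8).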
if (pAd->LatchRfRegs.Channel <= 14)
{
if (pAd->NicConfig2.field.ExternalLNAForG)
{
RssiOffset = 2;
}
else
{
RssiOffset = 5;
}
}
else
{
if (pAd->NicConfig2.field.ExternalLNAForA)
{
RssiOffset = 5;
}
else
{
RssiOffset = 8;
}
}
/*if (MCS15)*/
if ((pTable == RateSwitchTable11BGN3S) ||
(pTable == RateSwitchTable11N3S) ||
(pTable == RateSwitchTable))
{// N mode with 3 stream // 3*3
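// Note: this branch tests the 3-stream MCS20-23 indices but assigns the
// MCS12-15 table positions located above; whether that is intentional is
// not documented in the source.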
if (MCS23 && (Rssi >= -70))
TxRateIdx = MCS15;
else if (MCS22 && (Rssi >= -72))
TxRateIdx = MCS14;
else if (MCS21 && (Rssi >= -76))
TxRateIdx = MCS13;
else if (MCS20 && (Rssi >= -78))
TxRateIdx = MCS12;
else if (MCS4 && (Rssi >= -82))
TxRateIdx = MCS4;
else if (MCS3 && (Rssi >= -84))
TxRateIdx = MCS3;
else if (MCS2 && (Rssi >= -86))
TxRateIdx = MCS2;
else if (MCS1 && (Rssi >= -88))
TxRateIdx = MCS1;
else
TxRateIdx = MCS0;
}
else if ((pTable == RateSwitchTable11BGN2S) || (pTable == RateSwitchTable11BGN2SForABand) ||(pTable == RateSwitchTable11N2S) ||(pTable == RateSwitchTable11N2SForABand)) // 3*3
{// N mode with 2 stream
if (MCS15 && (Rssi >= (-70+RssiOffset)))
TxRateIdx = MCS15;
else if (MCS14 && (Rssi >= (-72+RssiOffset)))
TxRateIdx = MCS14;
else if (MCS13 && (Rssi >= (-76+RssiOffset)))
TxRateIdx = MCS13;
else if (MCS12 && (Rssi >= (-78+RssiOffset)))
TxRateIdx = MCS12;
else if (MCS4 && (Rssi >= (-82+RssiOffset)))
TxRateIdx = MCS4;
else if (MCS3 && (Rssi >= (-84+RssiOffset)))
TxRateIdx = MCS3;
else if (MCS2 && (Rssi >= (-86+RssiOffset)))
TxRateIdx = MCS2;
else if (MCS1 && (Rssi >= (-88+RssiOffset)))
TxRateIdx = MCS1;
else
TxRateIdx = MCS0;
}
else if ((pTable == RateSwitchTable11BGN1S) || (pTable == RateSwitchTable11N1S))
{// N mode with 1 stream
if (MCS7 && (Rssi > (-72+RssiOffset)))
TxRateIdx = MCS7;
else if (MCS6 && (Rssi > (-74+RssiOffset)))
TxRateIdx = MCS6;
else if (MCS5 && (Rssi > (-77+RssiOffset)))
TxRateIdx = MCS5;
else if (MCS4 && (Rssi > (-79+RssiOffset)))
TxRateIdx = MCS4;
else if (MCS3 && (Rssi > (-81+RssiOffset)))
TxRateIdx = MCS3;
else if (MCS2 && (Rssi > (-83+RssiOffset)))
TxRateIdx = MCS2;
else if (MCS1 && (Rssi > (-86+RssiOffset)))
TxRateIdx = MCS1;
else
TxRateIdx = MCS0;
}
else
{// Legacy mode
if (MCS7 && (Rssi > -70))
TxRateIdx = MCS7;
else if (MCS6 && (Rssi > -74))
TxRateIdx = MCS6;
else if (MCS5 && (Rssi > -78))
TxRateIdx = MCS5;
else if (MCS4 && (Rssi > -82))
TxRateIdx = MCS4;
else if (MCS4 == 0) // for B-only mode
TxRateIdx = MCS3;
else if (MCS3 && (Rssi > -85))
TxRateIdx = MCS3;
else if (MCS2 && (Rssi > -87))
TxRateIdx = MCS2;
else if (MCS1 && (Rssi > -90))
TxRateIdx = MCS1;
else
TxRateIdx = MCS0;
}
{
pEntry->CurrTxRateIndex = TxRateIdx;
pNextTxRate = (PRTMP_TX_RATE_SWITCH) &pTable[(pEntry->CurrTxRateIndex+1)*5];
MlmeSetTxRate(pAd, pEntry, pNextTxRate);
}
NdisZeroMemory(pEntry->TxQuality, sizeof(USHORT) * MAX_STEP_OF_TX_RATE_SWITCH);
NdisZeroMemory(pEntry->PER, sizeof(UCHAR) * MAX_STEP_OF_TX_RATE_SWITCH);
pEntry->fLastSecAccordingRSSI = TRUE;
// reset all OneSecTx counters
RESET_ONE_SEC_TX_CNT(pEntry);
continue;
}
if (pEntry->fLastSecAccordingRSSI == TRUE)
{
pEntry->fLastSecAccordingRSSI = FALSE;
pEntry->LastSecTxRateChangeAction = 0;
// reset all OneSecTx counters
RESET_ONE_SEC_TX_CNT(pEntry);
continue;
}
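// Bookkeeping scheme used below (a reading of the code): TxQuality[] is a
// per-rate badness score from 0 (good) up to DRS_TX_QUALITY_WORST_BOUND;
// training up to UpRateIdx is only allowed once TxQuality[UpRateIdx] has
// decayed to 0, and a pending TxRateUpPenalty is consumed before that decay
// can proceed.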
do
{
BOOLEAN bTrainUpDown = FALSE;
pEntry->CurrTxRateStableTime ++;
// downgrade TX quality if PER >= Rate-Down threshold
if (TxErrorRatio >= TrainDown)
{
bTrainUpDown = TRUE;
pEntry->TxQuality[CurrRateIdx] = DRS_TX_QUALITY_WORST_BOUND;
}
// upgrade TX quality if PER <= Rate-Up threshold
else if (TxErrorRatio <= TrainUp)
{
bTrainUpDown = TRUE;
bUpgradeQuality = TRUE;
if (pEntry->TxQuality[CurrRateIdx])
pEntry->TxQuality[CurrRateIdx] --; // quality very good in CurrRate
if (pEntry->TxRateUpPenalty)
pEntry->TxRateUpPenalty --;
else if (pEntry->TxQuality[UpRateIdx])
pEntry->TxQuality[UpRateIdx] --; // may improve next UP rate's quality
}
pEntry->PER[CurrRateIdx] = (UCHAR)TxErrorRatio;
if (bTrainUpDown)
{
// perform DRS - consider TxRate Down first, then rate up.
if ((CurrRateIdx != DownRateIdx) && (pEntry->TxQuality[CurrRateIdx] >= DRS_TX_QUALITY_WORST_BOUND))
{
pEntry->CurrTxRateIndex = DownRateIdx;
}
else if ((CurrRateIdx != UpRateIdx) && (pEntry->TxQuality[UpRateIdx] <= 0))
{
pEntry->CurrTxRateIndex = UpRateIdx;
}
}
} while (FALSE);
// if a rate-up happened, clear the bad history of all TX rates
if (pEntry->CurrTxRateIndex > CurrRateIdx)
{
pEntry->CurrTxRateStableTime = 0;
pEntry->TxRateUpPenalty = 0;
pEntry->LastSecTxRateChangeAction = 1; // rate UP
NdisZeroMemory(pEntry->TxQuality, sizeof(USHORT) * MAX_STEP_OF_TX_RATE_SWITCH);
NdisZeroMemory(pEntry->PER, sizeof(UCHAR) * MAX_STEP_OF_TX_RATE_SWITCH);
//
// For TxRate fast train up
//
if (!pAd->StaCfg.StaQuickResponeForRateUpTimerRunning)
{
RTMPSetTimer(&pAd->StaCfg.StaQuickResponeForRateUpTimer, 100);
pAd->StaCfg.StaQuickResponeForRateUpTimerRunning = TRUE;
}
bTxRateChanged = TRUE;
}
// if a rate-down happened, only clear the down-rate's bad history
else if (pEntry->CurrTxRateIndex < CurrRateIdx)
{
pEntry->CurrTxRateStableTime = 0;
pEntry->TxRateUpPenalty = 0; // no penalty
pEntry->LastSecTxRateChangeAction = 2; // rate DOWN
pEntry->TxQuality[pEntry->CurrTxRateIndex] = 0;
pEntry->PER[pEntry->CurrTxRateIndex] = 0;
//
// For TxRate fast train down
//
if (!pAd->StaCfg.StaQuickResponeForRateUpTimerRunning)
{
RTMPSetTimer(&pAd->StaCfg.StaQuickResponeForRateUpTimer, 100);
pAd->StaCfg.StaQuickResponeForRateUpTimerRunning = TRUE;
}
bTxRateChanged = TRUE;
}
else
{
pEntry->LastSecTxRateChangeAction = 0; // rate no change
bTxRateChanged = FALSE;
}
pEntry->LastTxOkCount = TxSuccess;
// reset all OneSecTx counters
RESET_ONE_SEC_TX_CNT(pEntry);
pNextTxRate = (PRTMP_TX_RATE_SWITCH) &pTable[(pEntry->CurrTxRateIndex+1)*5];
if (bTxRateChanged && pNextTxRate)
{
MlmeSetTxRate(pAd, pEntry, pNextTxRate);
}
}
}
/*
========================================================================
Routine Description:
Station side: auto TX rate fast-train-up timer callback function.
Arguments:
SystemSpecific1 - Not used.
FunctionContext - Pointer to our Adapter context.
SystemSpecific2 - Not used.
SystemSpecific3 - Not used.
Return Value:
None
========================================================================
*/
VOID StaQuickResponeForRateUpExec(
IN PVOID SystemSpecific1,
IN PVOID FunctionContext,
IN PVOID SystemSpecific2,
IN PVOID SystemSpecific3)
{
PRTMP_ADAPTER pAd = (PRTMP_ADAPTER)FunctionContext;
UCHAR UpRateIdx = 0, DownRateIdx = 0, CurrRateIdx = 0;
ULONG TxTotalCnt;
ULONG TxErrorRatio = 0;
#ifdef RT2860
BOOLEAN bTxRateChanged = TRUE; //, bUpgradeQuality = FALSE;
#endif
#ifdef RT2870
BOOLEAN bTxRateChanged; //, bUpgradeQuality = FALSE;
#endif
PRTMP_TX_RATE_SWITCH pCurrTxRate, pNextTxRate = NULL;
PUCHAR pTable;
UCHAR TableSize = 0;
UCHAR InitTxRateIdx = 0, TrainUp, TrainDown;
TX_STA_CNT1_STRUC StaTx1;
TX_STA_CNT0_STRUC TxStaCnt0;
CHAR Rssi, ratio;
ULONG TxRetransmit = 0, TxSuccess = 0, TxFailCount = 0;
MAC_TABLE_ENTRY *pEntry;
ULONG i;
pAd->StaCfg.StaQuickResponeForRateUpTimerRunning = FALSE;
//
// walk through the MAC table and see if the TX rate toward each entry needs to change
//
for (i = 1; i < MAX_LEN_OF_MAC_TABLE; i++)
{
pEntry = &pAd->MacTab.Content[i];
// check whether this entry needs to switch rates automatically
if (RTMPCheckEntryEnableAutoRateSwitch(pAd, pEntry) == FALSE)
continue;
#ifdef RT2860
//Rssi = RTMPMaxRssi(pAd, (CHAR)pAd->StaCfg.AvgRssi0, (CHAR)pAd->StaCfg.AvgRssi1, (CHAR)pAd->StaCfg.AvgRssi2);
if (pAd->Antenna.field.TxPath > 1)
Rssi = (pAd->StaCfg.RssiSample.AvgRssi0 + pAd->StaCfg.RssiSample.AvgRssi1) >> 1;
else
Rssi = pAd->StaCfg.RssiSample.AvgRssi0;
#endif
#ifdef RT2870
if (INFRA_ON(pAd) && (i == 1))
Rssi = RTMPMaxRssi(pAd,
pAd->StaCfg.RssiSample.AvgRssi0,
pAd->StaCfg.RssiSample.AvgRssi1,
pAd->StaCfg.RssiSample.AvgRssi2);
else
Rssi = RTMPMaxRssi(pAd,
pEntry->RssiSample.AvgRssi0,
pEntry->RssiSample.AvgRssi1,
pEntry->RssiSample.AvgRssi2);
#endif
CurrRateIdx = pAd->CommonCfg.TxRateIndex;
MlmeSelectTxRateTable(pAd, pEntry, &pTable, &TableSize, &InitTxRateIdx);
// decide the next upgrade rate and downgrade rate, if any
if ((CurrRateIdx > 0) && (CurrRateIdx < (TableSize - 1)))
{
UpRateIdx = CurrRateIdx + 1;
DownRateIdx = CurrRateIdx -1;
}
else if (CurrRateIdx == 0)
{
UpRateIdx = CurrRateIdx + 1;
DownRateIdx = CurrRateIdx;
}
else if (CurrRateIdx == (TableSize - 1))
{
UpRateIdx = CurrRateIdx;
DownRateIdx = CurrRateIdx - 1;
}
pCurrTxRate = (PRTMP_TX_RATE_SWITCH) &pTable[(CurrRateIdx+1)*5];
if ((Rssi > -65) && (pCurrTxRate->Mode >= MODE_HTMIX))
{
TrainUp = (pCurrTxRate->TrainUp + (pCurrTxRate->TrainUp >> 1));
TrainDown = (pCurrTxRate->TrainDown + (pCurrTxRate->TrainDown >> 1));
}
else
{
TrainUp = pCurrTxRate->TrainUp;
TrainDown = pCurrTxRate->TrainDown;
}
if (pAd->MacTab.Size == 1)
{
// Update statistic counter
RTMP_IO_READ32(pAd, TX_STA_CNT0, &TxStaCnt0.word);
RTMP_IO_READ32(pAd, TX_STA_CNT1, &StaTx1.word);
TxRetransmit = StaTx1.field.TxRetransmit;
TxSuccess = StaTx1.field.TxSuccess;
TxFailCount = TxStaCnt0.field.TxFailCount;
TxTotalCnt = TxRetransmit + TxSuccess + TxFailCount;
pAd->RalinkCounters.OneSecTxRetryOkCount += StaTx1.field.TxRetransmit;
pAd->RalinkCounters.OneSecTxNoRetryOkCount += StaTx1.field.TxSuccess;
pAd->RalinkCounters.OneSecTxFailCount += TxStaCnt0.field.TxFailCount;
pAd->WlanCounters.TransmittedFragmentCount.u.LowPart += StaTx1.field.TxSuccess;
pAd->WlanCounters.RetryCount.u.LowPart += StaTx1.field.TxRetransmit;
pAd->WlanCounters.FailedCount.u.LowPart += TxStaCnt0.field.TxFailCount;
if (TxTotalCnt)
TxErrorRatio = ((TxRetransmit + TxFailCount) * 100) / TxTotalCnt;
}
else
{
TxTotalCnt = pEntry->OneSecTxNoRetryOkCount +
pEntry->OneSecTxRetryOkCount +
pEntry->OneSecTxFailCount;
if (TxTotalCnt)
TxErrorRatio = ((pEntry->OneSecTxRetryOkCount + pEntry->OneSecTxFailCount) * 100) / TxTotalCnt;
}
//
// CASE 1. when there are too few TX samples (<= 12 here), decide the TX rate solely on RSSI
//         (criteria copied from RT2500 for the Netopia case)
//
if (TxTotalCnt <= 12)
{
NdisZeroMemory(pAd->DrsCounters.TxQuality, sizeof(USHORT) * MAX_STEP_OF_TX_RATE_SWITCH);
NdisZeroMemory(pAd->DrsCounters.PER, sizeof(UCHAR) * MAX_STEP_OF_TX_RATE_SWITCH);
if ((pAd->DrsCounters.LastSecTxRateChangeAction == 1) && (CurrRateIdx != DownRateIdx))
{
pAd->CommonCfg.TxRateIndex = DownRateIdx;
pAd->DrsCounters.TxQuality[CurrRateIdx] = DRS_TX_QUALITY_WORST_BOUND;
}
else if ((pAd->DrsCounters.LastSecTxRateChangeAction == 2) && (CurrRateIdx != UpRateIdx))
{
pAd->CommonCfg.TxRateIndex = UpRateIdx;
}
DBGPRINT_RAW(RT_DEBUG_TRACE,("QuickDRS: TxTotalCnt <= 15, train back to original rate \n"));
return;
}
do
{
ULONG OneSecTxNoRetryOKRationCount;
if (pAd->DrsCounters.LastTimeTxRateChangeAction == 0)
ratio = 5;
else
ratio = 4;
// downgrade TX quality if PER >= Rate-Down threshold
if (TxErrorRatio >= TrainDown)
{
pAd->DrsCounters.TxQuality[CurrRateIdx] = DRS_TX_QUALITY_WORST_BOUND;
}
pAd->DrsCounters.PER[CurrRateIdx] = (UCHAR)TxErrorRatio;
OneSecTxNoRetryOKRationCount = (TxSuccess * ratio);
// perform DRS - consider TxRate Down first, then rate up.
if ((pAd->DrsCounters.LastSecTxRateChangeAction == 1) && (CurrRateIdx != DownRateIdx))
{
if ((pAd->DrsCounters.LastTxOkCount + 2) >= OneSecTxNoRetryOKRationCount)
{
pAd->CommonCfg.TxRateIndex = DownRateIdx;
pAd->DrsCounters.TxQuality[CurrRateIdx] = DRS_TX_QUALITY_WORST_BOUND;
}
}
else if ((pAd->DrsCounters.LastSecTxRateChangeAction == 2) && (CurrRateIdx != UpRateIdx))
{
if ((TxErrorRatio >= 50) || (TxErrorRatio >= TrainDown))
{
// error ratio is still too high: do not train back up
}
else if ((pAd->DrsCounters.LastTxOkCount + 2) >= OneSecTxNoRetryOKRationCount)
{
pAd->CommonCfg.TxRateIndex = UpRateIdx;
}
}
}while (FALSE);
// if a rate-up happened, clear the bad history of all TX rates
if (pAd->CommonCfg.TxRateIndex > CurrRateIdx)
{
pAd->DrsCounters.TxRateUpPenalty = 0;
NdisZeroMemory(pAd->DrsCounters.TxQuality, sizeof(USHORT) * MAX_STEP_OF_TX_RATE_SWITCH);
NdisZeroMemory(pAd->DrsCounters.PER, sizeof(UCHAR) * MAX_STEP_OF_TX_RATE_SWITCH);
#ifdef RT2870
bTxRateChanged = TRUE;
#endif
}
// if a rate-down happened, only clear the down-rate's bad history
else if (pAd->CommonCfg.TxRateIndex < CurrRateIdx)
{
DBGPRINT_RAW(RT_DEBUG_TRACE,("QuickDRS: --TX rate from %d to %d \n", CurrRateIdx, pAd->CommonCfg.TxRateIndex));
pAd->DrsCounters.TxRateUpPenalty = 0; // no penalty
pAd->DrsCounters.TxQuality[pAd->CommonCfg.TxRateIndex] = 0;
pAd->DrsCounters.PER[pAd->CommonCfg.TxRateIndex] = 0;
#ifdef RT2870
bTxRateChanged = TRUE;
#endif
}
else
{
bTxRateChanged = FALSE;
}
pNextTxRate = (PRTMP_TX_RATE_SWITCH) &pTable[(pAd->CommonCfg.TxRateIndex+1)*5];
if (bTxRateChanged && pNextTxRate)
{
MlmeSetTxRate(pAd, pEntry, pNextTxRate);
}
}
}
/*
==========================================================================
Description:
This routine is executed periodically inside MlmePeriodicExec() after
association with an AP.
It checks whether StaCfg.Psm is consistent with the user policy (recorded in
StaCfg.WindowsPowerMode). If not, it enforces the user policy. However,
there are some conditions to consider:
1. we don't support power-saving in ADHOC mode, so Psm=PWR_ACTIVE all
the time when Mibss==TRUE
2. when the link is up in INFRA mode, Psm should not be switched to PWR_SAVE
if outgoing traffic is available in the TxRing or MgmtRing.
Output:
1. change pAd->StaCfg.Psm to PWR_SAVE or leave it untouched
IRQL = DISPATCH_LEVEL
==========================================================================
*/
VOID MlmeCheckPsmChange(
IN PRTMP_ADAPTER pAd,
IN ULONG Now32)
{
ULONG PowerMode;
// condition -
// 1. PSM may be turned ON only in INFRASTRUCTURE mode
// 2. user wants either MAX_PSP or FAST_PSP
// 3. but current psm is not in PWR_SAVE
// 4. CNTL state machine is not doing SCANning
// 5. no TX SUCCESS event for the past 1-sec period
#ifdef NDIS51_MINIPORT
if (pAd->StaCfg.WindowsPowerProfile == NdisPowerProfileBattery)
PowerMode = pAd->StaCfg.WindowsBatteryPowerMode;
else
#endif
PowerMode = pAd->StaCfg.WindowsPowerMode;
if (INFRA_ON(pAd) &&
(PowerMode != Ndis802_11PowerModeCAM) &&
(pAd->StaCfg.Psm == PWR_ACTIVE) &&
#ifdef RT2860
RTMP_TEST_PSFLAG(pAd, fRTMP_PS_CAN_GO_SLEEP))
#else
(pAd->Mlme.CntlMachine.CurrState == CNTL_IDLE))
#endif
{
// added by johnli: use the Rx OK data count per second to estimate throughput.
// If traffic is too high (> 400 Rx per second), don't go to sleep mode. If the TX rate is low, use the lower threshold.
// Mode=CCK/MCS=3 => 11 Mbps, Mode=OFDM/MCS=3 => 18 Mbps
if (((pAd->StaCfg.HTPhyMode.field.MCS <= 3) &&
(pAd->RalinkCounters.OneSecRxOkDataCnt < (ULONG)100)) ||
((pAd->StaCfg.HTPhyMode.field.MCS > 3) &&
(pAd->RalinkCounters.OneSecRxOkDataCnt < (ULONG)400)))
{
// Get this time
NdisGetSystemUpTime(&pAd->Mlme.LastSendNULLpsmTime);
pAd->RalinkCounters.RxCountSinceLastNULL = 0;
MlmeSetPsmBit(pAd, PWR_SAVE);
if (!(pAd->CommonCfg.bAPSDCapable && pAd->CommonCfg.APEdcaParm.bAPSDCapable))
{
RTMPSendNullFrame(pAd, pAd->CommonCfg.TxRate, FALSE);
}
else
{
RTMPSendNullFrame(pAd, pAd->CommonCfg.TxRate, TRUE);
}
}
}
}
// IRQL = PASSIVE_LEVEL
// IRQL = DISPATCH_LEVEL
VOID MlmeSetPsmBit(
IN PRTMP_ADAPTER pAd,
IN USHORT psm)
{
AUTO_RSP_CFG_STRUC csr4;
pAd->StaCfg.Psm = psm;
RTMP_IO_READ32(pAd, AUTO_RSP_CFG, &csr4.word);
csr4.field.AckCtsPsmBit = (psm == PWR_SAVE)? 1:0;
RTMP_IO_WRITE32(pAd, AUTO_RSP_CFG, csr4.word);
DBGPRINT(RT_DEBUG_TRACE, ("MlmeSetPsmBit = %d\n", psm));
}
// IRQL = DISPATCH_LEVEL
VOID MlmeSetTxPreamble(
IN PRTMP_ADAPTER pAd,
IN USHORT TxPreamble)
{
AUTO_RSP_CFG_STRUC csr4;
//
// Always use long preamble until the short-preamble functionality is verified to work well.
// Todo: remove the following line once short preamble functionality works
//
//TxPreamble = Rt802_11PreambleLong;
RTMP_IO_READ32(pAd, AUTO_RSP_CFG, &csr4.word);
if (TxPreamble == Rt802_11PreambleLong)
{
DBGPRINT(RT_DEBUG_TRACE, ("MlmeSetTxPreamble (= LONG PREAMBLE)\n"));
OPSTATUS_CLEAR_FLAG(pAd, fOP_STATUS_SHORT_PREAMBLE_INUSED);
csr4.field.AutoResponderPreamble = 0;
}
else
{
// NOTE: 1Mbps should always use long preamble
DBGPRINT(RT_DEBUG_TRACE, ("MlmeSetTxPreamble (= SHORT PREAMBLE)\n"));
OPSTATUS_SET_FLAG(pAd, fOP_STATUS_SHORT_PREAMBLE_INUSED);
csr4.field.AutoResponderPreamble = 1;
}
RTMP_IO_WRITE32(pAd, AUTO_RSP_CFG, csr4.word);
}
/*
==========================================================================
Description:
Update basic rate bitmap
==========================================================================
*/
VOID UpdateBasicRateBitmap(
IN PRTMP_ADAPTER pAdapter)
{
INT i, j;
/* 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54 Mbps (IEEE rate codes, in units of 0.5 Mbps) */
UCHAR rate[] = { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 };
UCHAR *sup_p = pAdapter->CommonCfg.SupRate;
UCHAR *ext_p = pAdapter->CommonCfg.ExtRate;
ULONG bitmap = pAdapter->CommonCfg.BasicRateBitmap;
/* if A mode, always use fix BasicRateBitMap */
//if (pAdapter->CommonCfg.Channel == PHY_11A)
if (pAdapter->CommonCfg.Channel > 14)
pAdapter->CommonCfg.BasicRateBitmap = 0x150; /* 6, 12, 24M */
/* End of if */
if (pAdapter->CommonCfg.BasicRateBitmap > 4095)
{
/* (2 ^ MAX_LEN_OF_SUPPORTED_RATES) -1 */
return;
} /* End of if */
for(i=0; i<MAX_LEN_OF_SUPPORTED_RATES; i++)
{
sup_p[i] &= 0x7f;
ext_p[i] &= 0x7f;
} /* End of for */
for(i=0; i<MAX_LEN_OF_SUPPORTED_RATES; i++)
{
if (bitmap & (1 << i))
{
for(j=0; j<MAX_LEN_OF_SUPPORTED_RATES; j++)
{
if (sup_p[j] == rate[i])
sup_p[j] |= 0x80;
/* End of if */
} /* End of for */
for(j=0; j<MAX_LEN_OF_SUPPORTED_RATES; j++)
{
if (ext_p[j] == rate[i])
ext_p[j] |= 0x80;
/* End of if */
} /* End of for */
} /* End of if */
} /* End of for */
} /* End of UpdateBasicRateBitmap */
// IRQL = PASSIVE_LEVEL
// IRQL = DISPATCH_LEVEL
// bLinkUp identifies the initial link speed.
// TRUE indicates the rate update happens at link-up, when we should not try to set the rate at 54Mbps.
VOID MlmeUpdateTxRates(
IN PRTMP_ADAPTER pAd,
IN BOOLEAN bLinkUp,
IN UCHAR apidx)
{
int i, num;
UCHAR Rate = RATE_6, MaxDesire = RATE_1, MaxSupport = RATE_1;
UCHAR MinSupport = RATE_54;
ULONG BasicRateBitmap = 0;
UCHAR CurrBasicRate = RATE_1;
UCHAR *pSupRate, SupRateLen, *pExtRate, ExtRateLen;
PHTTRANSMIT_SETTING pHtPhy = NULL;
PHTTRANSMIT_SETTING pMaxHtPhy = NULL;
PHTTRANSMIT_SETTING pMinHtPhy = NULL;
BOOLEAN *auto_rate_cur_p;
UCHAR HtMcs = MCS_AUTO;
// find max desired rate
UpdateBasicRateBitmap(pAd);
num = 0;
auto_rate_cur_p = NULL;
for (i=0; i<MAX_LEN_OF_SUPPORTED_RATES; i++)
{
switch (pAd->CommonCfg.DesireRate[i] & 0x7f)
{
case 2: Rate = RATE_1; num++; break;
case 4: Rate = RATE_2; num++; break;
case 11: Rate = RATE_5_5; num++; break;
case 22: Rate = RATE_11; num++; break;
case 12: Rate = RATE_6; num++; break;
case 18: Rate = RATE_9; num++; break;
case 24: Rate = RATE_12; num++; break;
case 36: Rate = RATE_18; num++; break;
case 48: Rate = RATE_24; num++; break;
case 72: Rate = RATE_36; num++; break;
case 96: Rate = RATE_48; num++; break;
case 108: Rate = RATE_54; num++; break;
//default: Rate = RATE_1; break;
}
if (MaxDesire < Rate) MaxDesire = Rate;
}
//===========================================================================
//===========================================================================
{
pHtPhy = &pAd->StaCfg.HTPhyMode;
pMaxHtPhy = &pAd->StaCfg.MaxHTPhyMode;
pMinHtPhy = &pAd->StaCfg.MinHTPhyMode;
auto_rate_cur_p = &pAd->StaCfg.bAutoTxRateSwitch;
HtMcs = pAd->StaCfg.DesiredTransmitSetting.field.MCS;
if ((pAd->StaCfg.BssType == BSS_ADHOC) &&
(pAd->CommonCfg.PhyMode == PHY_11B) &&
(MaxDesire > RATE_11))
{
MaxDesire = RATE_11;
}
}
pAd->CommonCfg.MaxDesiredRate = MaxDesire;
pMinHtPhy->word = 0;
pMaxHtPhy->word = 0;
pHtPhy->word = 0;
// Auto rate switching is enabled only if more than one DESIRED RATES are
// specified; otherwise disabled
if (num <= 1)
{
*auto_rate_cur_p = FALSE;
}
else
{
*auto_rate_cur_p = TRUE;
}
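// NOTE: the #if 1 block below unconditionally overrides the num-based
// decision above, so auto rate switching is in practice governed solely by
// whether HtMcs == MCS_AUTO.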
#if 1
if (HtMcs != MCS_AUTO)
{
*auto_rate_cur_p = FALSE;
}
else
{
*auto_rate_cur_p = TRUE;
}
#endif
if ((ADHOC_ON(pAd) || INFRA_ON(pAd)) && (pAd->OpMode == OPMODE_STA))
{
pSupRate = &pAd->StaActive.SupRate[0];
pExtRate = &pAd->StaActive.ExtRate[0];
SupRateLen = pAd->StaActive.SupRateLen;
ExtRateLen = pAd->StaActive.ExtRateLen;
}
else
{
pSupRate = &pAd->CommonCfg.SupRate[0];
pExtRate = &pAd->CommonCfg.ExtRate[0];
SupRateLen = pAd->CommonCfg.SupRateLen;
ExtRateLen = pAd->CommonCfg.ExtRateLen;
}
// find max supported rate
for (i=0; i<SupRateLen; i++)
{
switch (pSupRate[i] & 0x7f)
{
case 2: Rate = RATE_1; if (pSupRate[i] & 0x80) BasicRateBitmap |= 0x0001; break;
case 4: Rate = RATE_2; if (pSupRate[i] & 0x80) BasicRateBitmap |= 0x0002; break;
case 11: Rate = RATE_5_5; if (pSupRate[i] & 0x80) BasicRateBitmap |= 0x0004; break;
case 22: Rate = RATE_11; if (pSupRate[i] & 0x80) BasicRateBitmap |= 0x0008; break;
case 12: Rate = RATE_6; /*if (pSupRate[i] & 0x80)*/ BasicRateBitmap |= 0x0010; break;
case 18: Rate = RATE_9; if (pSupRate[i] & 0x80) BasicRateBitmap |= 0x0020; break;
case 24: Rate = RATE_12; /*if (pSupRate[i] & 0x80)*/ BasicRateBitmap |= 0x0040; break;
case 36: Rate = RATE_18; if (pSupRate[i] & 0x80) BasicRateBitmap |= 0x0080; break;
case 48: Rate = RATE_24; /*if (pSupRate[i] & 0x80)*/ BasicRateBitmap |= 0x0100; break;
case 72: Rate = RATE_36; if (pSupRate[i] & 0x80) BasicRateBitmap |= 0x0200; break;
case 96: Rate = RATE_48; if (pSupRate[i] & 0x80) BasicRateBitmap |= 0x0400; break;
case 108: Rate = RATE_54; if (pSupRate[i] & 0x80) BasicRateBitmap |= 0x0800; break;
default: Rate = RATE_1; break;
}
if (MaxSupport < Rate) MaxSupport = Rate;
if (MinSupport > Rate) MinSupport = Rate;
}
for (i=0; i<ExtRateLen; i++)
{
switch (pExtRate[i] & 0x7f)
{
case 2: Rate = RATE_1; if (pExtRate[i] & 0x80) BasicRateBitmap |= 0x0001; break;
case 4: Rate = RATE_2; if (pExtRate[i] & 0x80) BasicRateBitmap |= 0x0002; break;
case 11: Rate = RATE_5_5; if (pExtRate[i] & 0x80) BasicRateBitmap |= 0x0004; break;
case 22: Rate = RATE_11; if (pExtRate[i] & 0x80) BasicRateBitmap |= 0x0008; break;
case 12: Rate = RATE_6; /*if (pExtRate[i] & 0x80)*/ BasicRateBitmap |= 0x0010; break;
case 18: Rate = RATE_9; if (pExtRate[i] & 0x80) BasicRateBitmap |= 0x0020; break;
case 24: Rate = RATE_12; /*if (pExtRate[i] & 0x80)*/ BasicRateBitmap |= 0x0040; break;
case 36: Rate = RATE_18; if (pExtRate[i] & 0x80) BasicRateBitmap |= 0x0080; break;
case 48: Rate = RATE_24; /*if (pExtRate[i] & 0x80)*/ BasicRateBitmap |= 0x0100; break;
case 72: Rate = RATE_36; if (pExtRate[i] & 0x80) BasicRateBitmap |= 0x0200; break;
case 96: Rate = RATE_48; if (pExtRate[i] & 0x80) BasicRateBitmap |= 0x0400; break;
case 108: Rate = RATE_54; if (pExtRate[i] & 0x80) BasicRateBitmap |= 0x0800; break;
default: Rate = RATE_1; break;
}
if (MaxSupport < Rate) MaxSupport = Rate;
if (MinSupport > Rate) MinSupport = Rate;
}
RTMP_IO_WRITE32(pAd, LEGACY_BASIC_RATE, BasicRateBitmap);
// calculate the expected ACK rate for each TX rate. This info is used to
// calculate the DURATION field of outgoing unicast DATA/MGMT frames
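// Walk the rates from lowest to highest, carrying the highest basic rate
// seen so far: ExpectedACKRate[i] ends up as the highest basic rate that
// does not exceed rate i.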
for (i=0; i<MAX_LEN_OF_SUPPORTED_RATES; i++)
{
if (BasicRateBitmap & (0x01 << i))
CurrBasicRate = (UCHAR)i;
pAd->CommonCfg.ExpectedACKRate[i] = CurrBasicRate;
}
DBGPRINT(RT_DEBUG_TRACE,("MlmeUpdateTxRates[MaxSupport = %d] = MaxDesire %d Mbps\n", RateIdToMbps[MaxSupport], RateIdToMbps[MaxDesire]));
// max tx rate = min {max desire rate, max supported rate}
if (MaxSupport < MaxDesire)
pAd->CommonCfg.MaxTxRate = MaxSupport;
else
pAd->CommonCfg.MaxTxRate = MaxDesire;
pAd->CommonCfg.MinTxRate = MinSupport;
if (*auto_rate_cur_p)
{
short dbm = 0;
dbm = pAd->StaCfg.RssiSample.AvgRssi0 - pAd->BbpRssiToDbmDelta;
if (bLinkUp == TRUE)
pAd->CommonCfg.TxRate = RATE_24;
else
pAd->CommonCfg.TxRate = pAd->CommonCfg.MaxTxRate;
if (dbm < -75)
pAd->CommonCfg.TxRate = RATE_11;
else if (dbm < -70)
pAd->CommonCfg.TxRate = RATE_24;
// should never exceed MaxTxRate (consider 11B-only mode)
if (pAd->CommonCfg.TxRate > pAd->CommonCfg.MaxTxRate)
pAd->CommonCfg.TxRate = pAd->CommonCfg.MaxTxRate;
pAd->CommonCfg.TxRateIndex = 0;
}
else
{
pAd->CommonCfg.TxRate = pAd->CommonCfg.MaxTxRate;
pHtPhy->field.MCS = (pAd->CommonCfg.MaxTxRate > 3) ? (pAd->CommonCfg.MaxTxRate - 4) : pAd->CommonCfg.MaxTxRate;
pHtPhy->field.MODE = (pAd->CommonCfg.MaxTxRate > 3) ? MODE_OFDM : MODE_CCK;
pAd->MacTab.Content[BSSID_WCID].HTPhyMode.field.STBC = pHtPhy->field.STBC;
pAd->MacTab.Content[BSSID_WCID].HTPhyMode.field.ShortGI = pHtPhy->field.ShortGI;
pAd->MacTab.Content[BSSID_WCID].HTPhyMode.field.MCS = pHtPhy->field.MCS;
pAd->MacTab.Content[BSSID_WCID].HTPhyMode.field.MODE = pHtPhy->field.MODE;
}
if (pAd->CommonCfg.TxRate <= RATE_11)
{
pMaxHtPhy->field.MODE = MODE_CCK;
pMaxHtPhy->field.MCS = pAd->CommonCfg.TxRate;
pMinHtPhy->field.MCS = pAd->CommonCfg.MinTxRate;
}
else
{
pMaxHtPhy->field.MODE = MODE_OFDM;
pMaxHtPhy->field.MCS = OfdmRateToRxwiMCS[pAd->CommonCfg.TxRate];
if (pAd->CommonCfg.MinTxRate >= RATE_6 && (pAd->CommonCfg.MinTxRate <= RATE_54))
{pMinHtPhy->field.MCS = OfdmRateToRxwiMCS[pAd->CommonCfg.MinTxRate];}
else
{pMinHtPhy->field.MCS = pAd->CommonCfg.MinTxRate;}
}
pHtPhy->word = (pMaxHtPhy->word);
if (bLinkUp && (pAd->OpMode == OPMODE_STA))
{
pAd->MacTab.Content[BSSID_WCID].HTPhyMode.word = pHtPhy->word;
pAd->MacTab.Content[BSSID_WCID].MaxHTPhyMode.word = pMaxHtPhy->word;
pAd->MacTab.Content[BSSID_WCID].MinHTPhyMode.word = pMinHtPhy->word;
}
else
{
switch (pAd->CommonCfg.PhyMode)
{
case PHY_11BG_MIXED:
case PHY_11B:
case PHY_11BGN_MIXED:
pAd->CommonCfg.MlmeRate = RATE_1;
pAd->CommonCfg.MlmeTransmit.field.MODE = MODE_CCK;
pAd->CommonCfg.MlmeTransmit.field.MCS = RATE_1;
pAd->CommonCfg.RtsRate = RATE_11;
break;
case PHY_11G:
case PHY_11A:
case PHY_11AGN_MIXED:
case PHY_11GN_MIXED:
case PHY_11N_2_4G:
case PHY_11AN_MIXED:
case PHY_11N_5G:
pAd->CommonCfg.MlmeRate = RATE_6;
pAd->CommonCfg.RtsRate = RATE_6;
pAd->CommonCfg.MlmeTransmit.field.MODE = MODE_OFDM;
pAd->CommonCfg.MlmeTransmit.field.MCS = OfdmRateToRxwiMCS[pAd->CommonCfg.MlmeRate];
break;
case PHY_11ABG_MIXED:
case PHY_11ABGN_MIXED:
if (pAd->CommonCfg.Channel <= 14)
{
pAd->CommonCfg.MlmeRate = RATE_1;
pAd->CommonCfg.RtsRate = RATE_1;
pAd->CommonCfg.MlmeTransmit.field.MODE = MODE_CCK;
pAd->CommonCfg.MlmeTransmit.field.MCS = RATE_1;
}
else
{
pAd->CommonCfg.MlmeRate = RATE_6;
pAd->CommonCfg.RtsRate = RATE_6;
pAd->CommonCfg.MlmeTransmit.field.MODE = MODE_OFDM;
pAd->CommonCfg.MlmeTransmit.field.MCS = OfdmRateToRxwiMCS[pAd->CommonCfg.MlmeRate];
}
break;
default: // error
pAd->CommonCfg.MlmeRate = RATE_6;
pAd->CommonCfg.MlmeTransmit.field.MODE = MODE_OFDM;
pAd->CommonCfg.MlmeTransmit.field.MCS = OfdmRateToRxwiMCS[pAd->CommonCfg.MlmeRate];
pAd->CommonCfg.RtsRate = RATE_1;
break;
}
//
// Keep Basic Mlme Rate.
//
pAd->MacTab.Content[MCAST_WCID].HTPhyMode.word = pAd->CommonCfg.MlmeTransmit.word;
if (pAd->CommonCfg.MlmeTransmit.field.MODE == MODE_OFDM)
pAd->MacTab.Content[MCAST_WCID].HTPhyMode.field.MCS = OfdmRateToRxwiMCS[RATE_24];
else
pAd->MacTab.Content[MCAST_WCID].HTPhyMode.field.MCS = RATE_1;
pAd->CommonCfg.BasicMlmeRate = pAd->CommonCfg.MlmeRate;
}
DBGPRINT(RT_DEBUG_TRACE, (" MlmeUpdateTxRates (MaxDesire=%d, MaxSupport=%d, MaxTxRate=%d, MinRate=%d, Rate Switching =%d)\n",
RateIdToMbps[MaxDesire], RateIdToMbps[MaxSupport], RateIdToMbps[pAd->CommonCfg.MaxTxRate], RateIdToMbps[pAd->CommonCfg.MinTxRate],
/*OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_TX_RATE_SWITCH_ENABLED)*/*auto_rate_cur_p));
DBGPRINT(RT_DEBUG_TRACE, (" MlmeUpdateTxRates (TxRate=%d, RtsRate=%d, BasicRateBitmap=0x%04lx)\n",
RateIdToMbps[pAd->CommonCfg.TxRate], RateIdToMbps[pAd->CommonCfg.RtsRate], BasicRateBitmap));
DBGPRINT(RT_DEBUG_TRACE, ("MlmeUpdateTxRates (MlmeTransmit=0x%x, MinHTPhyMode=%x, MaxHTPhyMode=0x%x, HTPhyMode=0x%x)\n",
pAd->CommonCfg.MlmeTransmit.word, pAd->MacTab.Content[BSSID_WCID].MinHTPhyMode.word ,pAd->MacTab.Content[BSSID_WCID].MaxHTPhyMode.word ,pAd->MacTab.Content[BSSID_WCID].HTPhyMode.word ));
}
/*
==========================================================================
Description:
This function update HT Rate setting.
Input Wcid value is valid for 2 case :
1. it's used for Station in infra mode that copy AP rate to Mactable.
2. OR Station in adhoc mode to copy peer's HT rate to Mactable.
IRQL = DISPATCH_LEVEL
==========================================================================
*/
VOID MlmeUpdateHtTxRates(
IN PRTMP_ADAPTER pAd,
IN UCHAR apidx)
{
UCHAR StbcMcs; //j, StbcMcs, bitmask;
CHAR i; // 3*3
RT_HT_CAPABILITY *pRtHtCap = NULL;
RT_HT_PHY_INFO *pActiveHtPhy = NULL;
ULONG BasicMCS;
UCHAR j, bitmask;
PRT_HT_PHY_INFO pDesireHtPhy = NULL;
PHTTRANSMIT_SETTING pHtPhy = NULL;
PHTTRANSMIT_SETTING pMaxHtPhy = NULL;
PHTTRANSMIT_SETTING pMinHtPhy = NULL;
BOOLEAN *auto_rate_cur_p;
DBGPRINT(RT_DEBUG_TRACE,("MlmeUpdateHtTxRates===> \n"));
auto_rate_cur_p = NULL;
{
pDesireHtPhy = &pAd->StaCfg.DesiredHtPhyInfo;
pActiveHtPhy = &pAd->StaCfg.DesiredHtPhyInfo;
pHtPhy = &pAd->StaCfg.HTPhyMode;
pMaxHtPhy = &pAd->StaCfg.MaxHTPhyMode;
pMinHtPhy = &pAd->StaCfg.MinHTPhyMode;
auto_rate_cur_p = &pAd->StaCfg.bAutoTxRateSwitch;
}
if ((ADHOC_ON(pAd) || INFRA_ON(pAd)) && (pAd->OpMode == OPMODE_STA))
{
if (pAd->StaActive.SupportedPhyInfo.bHtEnable == FALSE)
return;
pRtHtCap = &pAd->StaActive.SupportedHtPhy;
pActiveHtPhy = &pAd->StaActive.SupportedPhyInfo;
StbcMcs = (UCHAR)pAd->MlmeAux.AddHtInfo.AddHtInfo3.StbcMcs;
BasicMCS =pAd->MlmeAux.AddHtInfo.MCSSet[0]+(pAd->MlmeAux.AddHtInfo.MCSSet[1]<<8)+(StbcMcs<<16);
if ((pAd->CommonCfg.DesiredHtPhy.TxSTBC) && (pRtHtCap->RxSTBC) && (pAd->Antenna.field.TxPath == 2))
pMaxHtPhy->field.STBC = STBC_USE;
else
pMaxHtPhy->field.STBC = STBC_NONE;
}
else
{
if (pDesireHtPhy->bHtEnable == FALSE)
return;
pRtHtCap = &pAd->CommonCfg.DesiredHtPhy;
StbcMcs = (UCHAR)pAd->CommonCfg.AddHTInfo.AddHtInfo3.StbcMcs;
BasicMCS = pAd->CommonCfg.AddHTInfo.MCSSet[0]+(pAd->CommonCfg.AddHTInfo.MCSSet[1]<<8)+(StbcMcs<<16);
if ((pAd->CommonCfg.DesiredHtPhy.TxSTBC) && (pRtHtCap->RxSTBC) && (pAd->Antenna.field.TxPath == 2))
pMaxHtPhy->field.STBC = STBC_USE;
else
pMaxHtPhy->field.STBC = STBC_NONE;
}
// Decide MAX ht rate.
if ((pRtHtCap->GF) && (pAd->CommonCfg.DesiredHtPhy.GF))
pMaxHtPhy->field.MODE = MODE_HTGREENFIELD;
else
pMaxHtPhy->field.MODE = MODE_HTMIX;
if ((pAd->CommonCfg.DesiredHtPhy.ChannelWidth) && (pRtHtCap->ChannelWidth))
pMaxHtPhy->field.BW = BW_40;
else
pMaxHtPhy->field.BW = BW_20;
if (pMaxHtPhy->field.BW == BW_20)
pMaxHtPhy->field.ShortGI = (pAd->CommonCfg.DesiredHtPhy.ShortGIfor20 & pRtHtCap->ShortGIfor20);
else
pMaxHtPhy->field.ShortGI = (pAd->CommonCfg.DesiredHtPhy.ShortGIfor40 & pRtHtCap->ShortGIfor40);
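// Scan MCS 23..0 (3x3 streams => up to MCS 23) and take the highest MCS
// present in both the peer's active MCS set and our desired MCS set.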
for (i=23; i>=0; i--) // 3*3
{
j = i/8;
bitmask = (1<<(i-(j*8)));
if ((pActiveHtPhy->MCSSet[j] & bitmask) && (pDesireHtPhy->MCSSet[j] & bitmask))
{
pMaxHtPhy->field.MCS = i;
break;
}
if (i==0)
break;
}
// Copy MIN ht rate. rt2860???
pMinHtPhy->field.BW = BW_20;
pMinHtPhy->field.MCS = 0;
pMinHtPhy->field.STBC = 0;
pMinHtPhy->field.ShortGI = 0;
// If the STA assigns a fixed rate, update to the fixed rate here.
if ( (pAd->OpMode == OPMODE_STA) && (pDesireHtPhy->MCSSet[0] != 0xff))
{
if (pDesireHtPhy->MCSSet[4] != 0)
{
pMaxHtPhy->field.MCS = 32;
pMinHtPhy->field.MCS = 32;
DBGPRINT(RT_DEBUG_TRACE,("MlmeUpdateHtTxRates<=== Use Fixed MCS = %d\n",pMinHtPhy->field.MCS));
}
for (i=23; (CHAR)i >= 0; i--) // 3*3
{
j = i/8;
bitmask = (1<<(i-(j*8)));
if ( (pDesireHtPhy->MCSSet[j] & bitmask) && (pActiveHtPhy->MCSSet[j] & bitmask))
{
pMaxHtPhy->field.MCS = i;
pMinHtPhy->field.MCS = i;
break;
}
if (i==0)
break;
}
}
// Decide ht rate
pHtPhy->field.STBC = pMaxHtPhy->field.STBC;
pHtPhy->field.BW = pMaxHtPhy->field.BW;
pHtPhy->field.MODE = pMaxHtPhy->field.MODE;
pHtPhy->field.MCS = pMaxHtPhy->field.MCS;
pHtPhy->field.ShortGI = pMaxHtPhy->field.ShortGI;
// use default now. rt2860
if (pDesireHtPhy->MCSSet[0] != 0xff)
*auto_rate_cur_p = FALSE;
else
*auto_rate_cur_p = TRUE;
DBGPRINT(RT_DEBUG_TRACE, (" MlmeUpdateHtTxRates<---.AMsduSize = %d \n", pAd->CommonCfg.DesiredHtPhy.AmsduSize ));
DBGPRINT(RT_DEBUG_TRACE,("TX: MCS[0] = %x (choose %d), BW = %d, ShortGI = %d, MODE = %d, \n", pActiveHtPhy->MCSSet[0],pHtPhy->field.MCS,
pHtPhy->field.BW, pHtPhy->field.ShortGI, pHtPhy->field.MODE));
DBGPRINT(RT_DEBUG_TRACE,("MlmeUpdateHtTxRates<=== \n"));
}
// IRQL = DISPATCH_LEVEL
VOID MlmeRadioOff(
IN PRTMP_ADAPTER pAd)
{
RT28XX_MLME_RADIO_OFF(pAd);
}
// IRQL = DISPATCH_LEVEL
VOID MlmeRadioOn(
IN PRTMP_ADAPTER pAd)
{
RT28XX_MLME_RADIO_ON(pAd);
}
// ===========================================================================================
// bss_table.c
// ===========================================================================================
/*! \brief initialize BSS table
* \param p_tab pointer to the table
* \return none
* \pre
* \post
IRQL = PASSIVE_LEVEL
IRQL = DISPATCH_LEVEL
*/
VOID BssTableInit(
IN BSS_TABLE *Tab)
{
int i;
Tab->BssNr = 0;
Tab->BssOverlapNr = 0;
for (i = 0; i < MAX_LEN_OF_BSS_TABLE; i++)
{
NdisZeroMemory(&Tab->BssEntry[i], sizeof(BSS_ENTRY));
Tab->BssEntry[i].Rssi = -127; // initialize the RSSI to the minimum value
}
}
VOID BATableInit(
IN PRTMP_ADAPTER pAd,
IN BA_TABLE *Tab)
{
int i;
Tab->numAsOriginator = 0;
Tab->numAsRecipient = 0;
NdisAllocateSpinLock(&pAd->BATabLock);
for (i = 0; i < MAX_LEN_OF_BA_REC_TABLE; i++)
{
Tab->BARecEntry[i].REC_BA_Status = Recipient_NONE;
NdisAllocateSpinLock(&(Tab->BARecEntry[i].RxReRingLock));
}
for (i = 0; i < MAX_LEN_OF_BA_ORI_TABLE; i++)
{
Tab->BAOriEntry[i].ORI_BA_Status = Originator_NONE;
}
}
/*! \brief search the BSS table by SSID
* \param p_tab pointer to the bss table
* \param ssid SSID string
* \return index of the table, BSS_NOT_FOUND if not in the table
* \pre
* \post
* \note search by sequential search
IRQL = DISPATCH_LEVEL
*/
ULONG BssTableSearch(
IN BSS_TABLE *Tab,
IN PUCHAR pBssid,
IN UCHAR Channel)
{
UCHAR i;
for (i = 0; i < Tab->BssNr; i++)
{
//
// Some APs that support A/B/G mode may use the same BSSID on 11A and 11B/G.
// We have to distinguish this case.
//
if ((((Tab->BssEntry[i].Channel <= 14) && (Channel <= 14)) ||
((Tab->BssEntry[i].Channel > 14) && (Channel > 14))) &&
MAC_ADDR_EQUAL(Tab->BssEntry[i].Bssid, pBssid))
{
return i;
}
}
return (ULONG)BSS_NOT_FOUND;
}
ULONG BssSsidTableSearch(
IN BSS_TABLE *Tab,
IN PUCHAR pBssid,
IN PUCHAR pSsid,
IN UCHAR SsidLen,
IN UCHAR Channel)
{
UCHAR i;
for (i = 0; i < Tab->BssNr; i++)
{
//
// Some APs that support A/B/G mode may use the same BSSID on 11A and 11B/G.
// We have to distinguish this case.
//
if ((((Tab->BssEntry[i].Channel <= 14) && (Channel <= 14)) ||
((Tab->BssEntry[i].Channel > 14) && (Channel > 14))) &&
MAC_ADDR_EQUAL(Tab->BssEntry[i].Bssid, pBssid) &&
SSID_EQUAL(pSsid, SsidLen, Tab->BssEntry[i].Ssid, Tab->BssEntry[i].SsidLen))
{
return i;
}
}
return (ULONG)BSS_NOT_FOUND;
}
ULONG BssTableSearchWithSSID(
IN BSS_TABLE *Tab,
IN PUCHAR Bssid,
IN PUCHAR pSsid,
IN UCHAR SsidLen,
IN UCHAR Channel)
{
UCHAR i;
for (i = 0; i < Tab->BssNr; i++)
{
if ((((Tab->BssEntry[i].Channel <= 14) && (Channel <= 14)) ||
((Tab->BssEntry[i].Channel > 14) && (Channel > 14))) &&
MAC_ADDR_EQUAL(&(Tab->BssEntry[i].Bssid), Bssid) &&
(SSID_EQUAL(pSsid, SsidLen, Tab->BssEntry[i].Ssid, Tab->BssEntry[i].SsidLen) ||
(NdisEqualMemory(pSsid, ZeroSsid, SsidLen)) ||
(NdisEqualMemory(Tab->BssEntry[i].Ssid, ZeroSsid, Tab->BssEntry[i].SsidLen))))
{
return i;
}
}
return (ULONG)BSS_NOT_FOUND;
}
// IRQL = DISPATCH_LEVEL
VOID BssTableDeleteEntry(
IN OUT BSS_TABLE *Tab,
IN PUCHAR pBssid,
IN UCHAR Channel)
{
UCHAR i, j;
for (i = 0; i < Tab->BssNr; i++)
{
if ((Tab->BssEntry[i].Channel == Channel) &&
(MAC_ADDR_EQUAL(Tab->BssEntry[i].Bssid, pBssid)))
{
for (j = i; j < Tab->BssNr - 1; j++)
{
NdisMoveMemory(&(Tab->BssEntry[j]), &(Tab->BssEntry[j + 1]), sizeof(BSS_ENTRY));
}
NdisZeroMemory(&(Tab->BssEntry[Tab->BssNr - 1]), sizeof(BSS_ENTRY));
Tab->BssNr -= 1;
return;
}
}
}
/*
========================================================================
Routine Description:
Delete the Originator Entry in the BA table, or decrease numAsOriginator by 1 if needed.
Arguments:
// IRQL = DISPATCH_LEVEL
========================================================================
*/
VOID BATableDeleteORIEntry(
IN OUT PRTMP_ADAPTER pAd,
IN BA_ORI_ENTRY *pBAORIEntry)
{
if (pBAORIEntry->ORI_BA_Status != Originator_NONE)
{
NdisAcquireSpinLock(&pAd->BATabLock);
if (pBAORIEntry->ORI_BA_Status == Originator_Done)
{
pAd->BATable.numAsOriginator -= 1;
DBGPRINT(RT_DEBUG_TRACE, ("BATableDeleteORIEntry numAsOriginator= %ld\n", pAd->BATable.numAsRecipient));
// Erase Bitmap flag.
}
pAd->MacTab.Content[pBAORIEntry->Wcid].TXBAbitmap &= (~(1<<(pBAORIEntry->TID) )); // If STA mode, erase flag here
pAd->MacTab.Content[pBAORIEntry->Wcid].BAOriWcidArray[pBAORIEntry->TID] = 0; // If STA mode, erase flag here
pBAORIEntry->ORI_BA_Status = Originator_NONE;
pBAORIEntry->Token = 1;
// Not clear Sequence here.
NdisReleaseSpinLock(&pAd->BATabLock);
}
}
/*! \brief
* \param
* \return
* \pre
* \post
IRQL = DISPATCH_LEVEL
*/
VOID BssEntrySet(
IN PRTMP_ADAPTER pAd,
OUT BSS_ENTRY *pBss,
IN PUCHAR pBssid,
IN CHAR Ssid[],
IN UCHAR SsidLen,
IN UCHAR BssType,
IN USHORT BeaconPeriod,
IN PCF_PARM pCfParm,
IN USHORT AtimWin,
IN USHORT CapabilityInfo,
IN UCHAR SupRate[],
IN UCHAR SupRateLen,
IN UCHAR ExtRate[],
IN UCHAR ExtRateLen,
IN HT_CAPABILITY_IE *pHtCapability,
IN ADD_HT_INFO_IE *pAddHtInfo, // AP might use this additional ht info IE
IN UCHAR HtCapabilityLen,
IN UCHAR AddHtInfoLen,
IN UCHAR NewExtChanOffset,
IN UCHAR Channel,
IN CHAR Rssi,
IN LARGE_INTEGER TimeStamp,
IN UCHAR CkipFlag,
IN PEDCA_PARM pEdcaParm,
IN PQOS_CAPABILITY_PARM pQosCapability,
IN PQBSS_LOAD_PARM pQbssLoad,
IN USHORT LengthVIE,
IN PNDIS_802_11_VARIABLE_IEs pVIE)
{
COPY_MAC_ADDR(pBss->Bssid, pBssid);
// Default Hidden SSID to TRUE; it will be set to FALSE after copying the SSID
pBss->Hidden = 1;
if (SsidLen > 0)
{
// A hidden-SSID AP might send a beacon with SSID length equal to 0,
// or send a beacon/probe response whose SSID length matches the real SSID
// length but whose SSID is all zeros, such as "00-00-00-00" with length 4.
// We have to prevent this case from overwriting a correct table entry.
if (NdisEqualMemory(Ssid, ZeroSsid, SsidLen) == 0)
{
NdisZeroMemory(pBss->Ssid, MAX_LEN_OF_SSID);
NdisMoveMemory(pBss->Ssid, Ssid, SsidLen);
pBss->SsidLen = SsidLen;
pBss->Hidden = 0;
}
}
else
pBss->SsidLen = 0;
pBss->BssType = BssType;
pBss->BeaconPeriod = BeaconPeriod;
if (BssType == BSS_INFRA)
{
if (pCfParm->bValid)
{
pBss->CfpCount = pCfParm->CfpCount;
pBss->CfpPeriod = pCfParm->CfpPeriod;
pBss->CfpMaxDuration = pCfParm->CfpMaxDuration;
pBss->CfpDurRemaining = pCfParm->CfpDurRemaining;
}
}
else
{
pBss->AtimWin = AtimWin;
}
pBss->CapabilityInfo = CapabilityInfo;
// The privacy bit indicates security is ON; it might be WEP, TKIP or AES.
// Combined with AuthMode, they decide the connection method.
pBss->Privacy = CAP_IS_PRIVACY_ON(pBss->CapabilityInfo);
ASSERT(SupRateLen <= MAX_LEN_OF_SUPPORTED_RATES);
if (SupRateLen <= MAX_LEN_OF_SUPPORTED_RATES)
NdisMoveMemory(pBss->SupRate, SupRate, SupRateLen);
else
NdisMoveMemory(pBss->SupRate, SupRate, MAX_LEN_OF_SUPPORTED_RATES);
pBss->SupRateLen = SupRateLen;
ASSERT(ExtRateLen <= MAX_LEN_OF_SUPPORTED_RATES);
if (ExtRateLen <= MAX_LEN_OF_SUPPORTED_RATES)
NdisMoveMemory(pBss->ExtRate, ExtRate, ExtRateLen);
else
NdisMoveMemory(pBss->ExtRate, ExtRate, MAX_LEN_OF_SUPPORTED_RATES);
NdisMoveMemory(&pBss->HtCapability, pHtCapability, HtCapabilityLen);
NdisMoveMemory(&pBss->AddHtInfo, pAddHtInfo, AddHtInfoLen);
pBss->NewExtChanOffset = NewExtChanOffset;
pBss->ExtRateLen = ExtRateLen;
pBss->Channel = Channel;
pBss->CentralChannel = Channel;
pBss->Rssi = Rssi;
// Update CkipFlag; if it does not exist, the value is 0x0
pBss->CkipFlag = CkipFlag;
// New for microsoft Fixed IEs
NdisMoveMemory(pBss->FixIEs.Timestamp, &TimeStamp, 8);
pBss->FixIEs.BeaconInterval = BeaconPeriod;
pBss->FixIEs.Capabilities = CapabilityInfo;
// New for microsoft Variable IEs
if (LengthVIE != 0)
{
pBss->VarIELen = LengthVIE;
NdisMoveMemory(pBss->VarIEs, pVIE, pBss->VarIELen);
}
else
{
pBss->VarIELen = 0;
}
pBss->AddHtInfoLen = 0;
pBss->HtCapabilityLen = 0;
if (HtCapabilityLen> 0)
{
pBss->HtCapabilityLen = HtCapabilityLen;
NdisMoveMemory(&pBss->HtCapability, pHtCapability, HtCapabilityLen);
if (AddHtInfoLen > 0)
{
pBss->AddHtInfoLen = AddHtInfoLen;
NdisMoveMemory(&pBss->AddHtInfo, pAddHtInfo, AddHtInfoLen);
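// For a 40MHz BSS the central channel sits 2 channel numbers (10 MHz)
// away from the control channel, below or above depending on the
// extension channel offset.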
if ((pAddHtInfo->ControlChan > 2) && (pAddHtInfo->AddHtInfo.ExtChanOffset == EXTCHA_BELOW) && (pHtCapability->HtCapInfo.ChannelWidth == BW_40))
{
pBss->CentralChannel = pAddHtInfo->ControlChan - 2;
}
else if ((pAddHtInfo->AddHtInfo.ExtChanOffset == EXTCHA_ABOVE) && (pHtCapability->HtCapInfo.ChannelWidth == BW_40))
{
pBss->CentralChannel = pAddHtInfo->ControlChan + 2;
}
}
}
BssCipherParse(pBss);
// new for QOS
if (pEdcaParm)
NdisMoveMemory(&pBss->EdcaParm, pEdcaParm, sizeof(EDCA_PARM));
else
pBss->EdcaParm.bValid = FALSE;
if (pQosCapability)
NdisMoveMemory(&pBss->QosCapability, pQosCapability, sizeof(QOS_CAPABILITY_PARM));
else
pBss->QosCapability.bValid = FALSE;
if (pQbssLoad)
NdisMoveMemory(&pBss->QbssLoad, pQbssLoad, sizeof(QBSS_LOAD_PARM));
else
pBss->QbssLoad.bValid = FALSE;
{
PEID_STRUCT pEid;
USHORT Length = 0;
NdisZeroMemory(&pBss->WpaIE.IE[0], MAX_CUSTOM_LEN);
NdisZeroMemory(&pBss->RsnIE.IE[0], MAX_CUSTOM_LEN);
pEid = (PEID_STRUCT) pVIE;
while ((Length + 2 + (USHORT)pEid->Len) <= LengthVIE)
{
switch(pEid->Eid)
{
case IE_WPA:
if (NdisEqualMemory(pEid->Octet, WPA_OUI, 4))
{
if ((pEid->Len + 2) > MAX_CUSTOM_LEN)
{
pBss->WpaIE.IELen = 0;
break;
}
pBss->WpaIE.IELen = pEid->Len + 2;
NdisMoveMemory(pBss->WpaIE.IE, pEid, pBss->WpaIE.IELen);
}
break;
case IE_RSN:
if (NdisEqualMemory(pEid->Octet + 2, RSN_OUI, 3))
{
if ((pEid->Len + 2) > MAX_CUSTOM_LEN)
{
pBss->RsnIE.IELen = 0;
break;
}
pBss->RsnIE.IELen = pEid->Len + 2;
NdisMoveMemory(pBss->RsnIE.IE, pEid, pBss->RsnIE.IELen);
}
break;
}
Length = Length + 2 + (USHORT)pEid->Len; // Eid[1] + Len[1]+ content[Len]
pEid = (PEID_STRUCT)((UCHAR*)pEid + 2 + pEid->Len);
}
}
}
/*!
* \brief insert an entry into the bss table
* \param p_tab The BSS table
* \param Bssid BSSID
* \param ssid SSID
* \param ssid_len Length of SSID
* \param bss_type
* \param beacon_period
* \param timestamp
* \param p_cf
* \param atim_win
* \param cap
* \param rates
* \param rates_len
* \param channel_idx
* \return none
* \pre
* \post
* \note If SSID is identical, the old entry will be replaced by the new one
IRQL = DISPATCH_LEVEL
*/
ULONG BssTableSetEntry(
IN PRTMP_ADAPTER pAd,
OUT BSS_TABLE *Tab,
IN PUCHAR pBssid,
IN CHAR Ssid[],
IN UCHAR SsidLen,
IN UCHAR BssType,
IN USHORT BeaconPeriod,
IN CF_PARM *CfParm,
IN USHORT AtimWin,
IN USHORT CapabilityInfo,
IN UCHAR SupRate[],
IN UCHAR SupRateLen,
IN UCHAR ExtRate[],
IN UCHAR ExtRateLen,
IN HT_CAPABILITY_IE *pHtCapability,
IN ADD_HT_INFO_IE *pAddHtInfo, // AP might use this additional ht info IE
IN UCHAR HtCapabilityLen,
IN UCHAR AddHtInfoLen,
IN UCHAR NewExtChanOffset,
IN UCHAR ChannelNo,
IN CHAR Rssi,
IN LARGE_INTEGER TimeStamp,
IN UCHAR CkipFlag,
IN PEDCA_PARM pEdcaParm,
IN PQOS_CAPABILITY_PARM pQosCapability,
IN PQBSS_LOAD_PARM pQbssLoad,
IN USHORT LengthVIE,
IN PNDIS_802_11_VARIABLE_IEs pVIE)
{
ULONG Idx;
Idx = BssTableSearchWithSSID(Tab, pBssid, Ssid, SsidLen, ChannelNo);
if (Idx == BSS_NOT_FOUND)
{
if (Tab->BssNr >= MAX_LEN_OF_BSS_TABLE)
{
//
// This may happen when the BSS table is full and the desired AP could not
// be added into it. In that case, if we find the desired AP, overwrite an
// existing BSS table entry.
//
if(!OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_MEDIA_STATE_CONNECTED))
{
if (MAC_ADDR_EQUAL(pAd->MlmeAux.Bssid, pBssid) ||
SSID_EQUAL(pAd->MlmeAux.Ssid, pAd->MlmeAux.SsidLen, Ssid, SsidLen))
{
Idx = Tab->BssOverlapNr;
BssEntrySet(pAd, &Tab->BssEntry[Idx], pBssid, Ssid, SsidLen, BssType, BeaconPeriod, CfParm, AtimWin,
CapabilityInfo, SupRate, SupRateLen, ExtRate, ExtRateLen,pHtCapability, pAddHtInfo,HtCapabilityLen, AddHtInfoLen,
NewExtChanOffset, ChannelNo, Rssi, TimeStamp, CkipFlag, pEdcaParm, pQosCapability, pQbssLoad, LengthVIE, pVIE);
Tab->BssOverlapNr = (Tab->BssOverlapNr + 1) % MAX_LEN_OF_BSS_TABLE;
}
return Idx;
}
else
{
return BSS_NOT_FOUND;
}
}
Idx = Tab->BssNr;
BssEntrySet(pAd, &Tab->BssEntry[Idx], pBssid, Ssid, SsidLen, BssType, BeaconPeriod, CfParm, AtimWin,
CapabilityInfo, SupRate, SupRateLen, ExtRate, ExtRateLen,pHtCapability, pAddHtInfo,HtCapabilityLen, AddHtInfoLen,
NewExtChanOffset, ChannelNo, Rssi, TimeStamp, CkipFlag, pEdcaParm, pQosCapability, pQbssLoad, LengthVIE, pVIE);
Tab->BssNr++;
}
else
{
/* avoid a hidden SSID from a beacon overwriting the correct SSID from a probe response */
if ((SSID_EQUAL(Ssid, SsidLen, Tab->BssEntry[Idx].Ssid, Tab->BssEntry[Idx].SsidLen)) ||
(NdisEqualMemory(Tab->BssEntry[Idx].Ssid, ZeroSsid, Tab->BssEntry[Idx].SsidLen)))
{
BssEntrySet(pAd, &Tab->BssEntry[Idx], pBssid, Ssid, SsidLen, BssType, BeaconPeriod,CfParm, AtimWin,
CapabilityInfo, SupRate, SupRateLen, ExtRate, ExtRateLen,pHtCapability, pAddHtInfo,HtCapabilityLen, AddHtInfoLen,
NewExtChanOffset, ChannelNo, Rssi, TimeStamp, CkipFlag, pEdcaParm, pQosCapability, pQbssLoad, LengthVIE, pVIE);
}
}
return Idx;
}
// IRQL = DISPATCH_LEVEL
VOID BssTableSsidSort(
IN PRTMP_ADAPTER pAd,
OUT BSS_TABLE *OutTab,
IN CHAR Ssid[],
IN UCHAR SsidLen)
{
INT i;
BssTableInit(OutTab);
for (i = 0; i < pAd->ScanTab.BssNr; i++)
{
BSS_ENTRY *pInBss = &pAd->ScanTab.BssEntry[i];
BOOLEAN bIsHiddenApIncluded = FALSE;
if (((pAd->CommonCfg.bIEEE80211H == 1) &&
(pAd->MlmeAux.Channel > 14) &&
RadarChannelCheck(pAd, pInBss->Channel))
)
{
if (pInBss->Hidden)
bIsHiddenApIncluded = TRUE;
}
if ((pInBss->BssType == pAd->StaCfg.BssType) &&
(SSID_EQUAL(Ssid, SsidLen, pInBss->Ssid, pInBss->SsidLen) || bIsHiddenApIncluded))
{
BSS_ENTRY *pOutBss = &OutTab->BssEntry[OutTab->BssNr];
// 2.4G/5G N only mode
if ((pInBss->HtCapabilityLen == 0) &&
((pAd->CommonCfg.PhyMode == PHY_11N_2_4G) || (pAd->CommonCfg.PhyMode == PHY_11N_5G)))
{
DBGPRINT(RT_DEBUG_TRACE,("STA is in N-only Mode, this AP don't have Ht capability in Beacon.\n"));
continue;
}
// New for WPA2
// Check the Authmode first
if (pAd->StaCfg.AuthMode >= Ndis802_11AuthModeWPA)
{
// Check AuthMode and AuthModeAux for a match, in case the AP supports dual mode
if ((pAd->StaCfg.AuthMode != pInBss->AuthMode) && (pAd->StaCfg.AuthMode != pInBss->AuthModeAux))
// None matched
continue;
// Check the cipher suite; the AP must have a more secure cipher than the station setting
if ((pAd->StaCfg.AuthMode == Ndis802_11AuthModeWPA) || (pAd->StaCfg.AuthMode == Ndis802_11AuthModeWPAPSK))
{
// If it's not mixed mode, we should only let BSS pass with the same encryption
if (pInBss->WPA.bMixMode == FALSE)
if (pAd->StaCfg.WepStatus != pInBss->WPA.GroupCipher)
continue;
// check group cipher
if (pInBss->WPA.GroupCipher != Ndis802_11GroupWEP40Enabled &&
pInBss->WPA.GroupCipher != Ndis802_11GroupWEP104Enabled &&
pAd->StaCfg.WepStatus < pInBss->WPA.GroupCipher)
continue;
// check pairwise cipher; skip if none matched
// If the profile is set to AES, let it pass without question.
// If the profile is set to TKIP, we must find a match.
if ((pAd->StaCfg.WepStatus == Ndis802_11Encryption2Enabled) &&
(pAd->StaCfg.WepStatus != pInBss->WPA.PairCipher) &&
(pAd->StaCfg.WepStatus != pInBss->WPA.PairCipherAux))
continue;
}
else if ((pAd->StaCfg.AuthMode == Ndis802_11AuthModeWPA2) || (pAd->StaCfg.AuthMode == Ndis802_11AuthModeWPA2PSK))
{
// If it's not mixed mode, we should only let BSS pass with the same encryption
if (pInBss->WPA2.bMixMode == FALSE)
if (pAd->StaCfg.WepStatus != pInBss->WPA2.GroupCipher)
continue;
// check group cipher
if (pInBss->WPA2.GroupCipher != Ndis802_11GroupWEP40Enabled &&
pInBss->WPA2.GroupCipher != Ndis802_11GroupWEP104Enabled &&
pAd->StaCfg.WepStatus < pInBss->WPA2.GroupCipher)
continue;
// check pairwise cipher; skip if none matched
// If the profile is set to AES, let it pass without question.
// If the profile is set to TKIP, we must find a match.
if ((pAd->StaCfg.WepStatus == Ndis802_11Encryption2Enabled) &&
(pAd->StaCfg.WepStatus != pInBss->WPA2.PairCipher) &&
(pAd->StaCfg.WepStatus != pInBss->WPA2.PairCipherAux))
continue;
}
}
// Bss Type matched, SSID matched.
// We will check wepstatus for qualification Bss
else if (pAd->StaCfg.WepStatus != pInBss->WepStatus)
{
DBGPRINT(RT_DEBUG_TRACE,("StaCfg.WepStatus=%d, while pInBss->WepStatus=%d\n", pAd->StaCfg.WepStatus, pInBss->WepStatus));
//
// For the SESv2 case, we will not qualify WepStatus.
//
if (!pInBss->bSES)
continue;
}
// Since the AP is using a hidden SSID and we are trying to connect to ANY,
// the attempt will definitely fail, so skip it.
// CCX also requires that we not even try to connect to it.
if (SsidLen == 0)
continue;
// If both the station and the AP use 40MHz, we still need to check whether the 40MHz band is legal in our country region.
// If this 40MHz wide band is not allowed in our country list, fall back to 20MHz bandwidth instead.
if ((pInBss->CentralChannel != pInBss->Channel) &&
(pAd->CommonCfg.RegTransmitSetting.field.BW == BW_40))
{
if (RTMPCheckChannel(pAd, pInBss->CentralChannel, pInBss->Channel) == FALSE)
{
pAd->CommonCfg.RegTransmitSetting.field.BW = BW_20;
SetCommonHT(pAd);
pAd->CommonCfg.RegTransmitSetting.field.BW = BW_40;
}
else
{
if (pAd->CommonCfg.DesiredHtPhy.ChannelWidth == BAND_WIDTH_20)
{
SetCommonHT(pAd);
}
}
}
// copy matching BSS from InTab to OutTab
NdisMoveMemory(pOutBss, pInBss, sizeof(BSS_ENTRY));
OutTab->BssNr++;
}
else if ((pInBss->BssType == pAd->StaCfg.BssType) && (SsidLen == 0))
{
BSS_ENTRY *pOutBss = &OutTab->BssEntry[OutTab->BssNr];
// 2.4G/5G N only mode
if ((pInBss->HtCapabilityLen == 0) &&
((pAd->CommonCfg.PhyMode == PHY_11N_2_4G) || (pAd->CommonCfg.PhyMode == PHY_11N_5G)))
{
DBGPRINT(RT_DEBUG_TRACE,("STA is in N-only Mode, this AP don't have Ht capability in Beacon.\n"));
continue;
}
// New for WPA2
// Check the Authmode first
if (pAd->StaCfg.AuthMode >= Ndis802_11AuthModeWPA)
{
// Check AuthMode and AuthModeAux for a match, in case the AP supports dual mode
if ((pAd->StaCfg.AuthMode != pInBss->AuthMode) && (pAd->StaCfg.AuthMode != pInBss->AuthModeAux))
// None matched
continue;
// Check the cipher suite; the AP must have a more secure cipher than the station setting
if ((pAd->StaCfg.AuthMode == Ndis802_11AuthModeWPA) || (pAd->StaCfg.AuthMode == Ndis802_11AuthModeWPAPSK))
{
// If it's not mixed mode, we should only let BSS pass with the same encryption
if (pInBss->WPA.bMixMode == FALSE)
if (pAd->StaCfg.WepStatus != pInBss->WPA.GroupCipher)
continue;
// check group cipher
if (pAd->StaCfg.WepStatus < pInBss->WPA.GroupCipher)
continue;
// check pairwise cipher; skip if none matched
// If the profile is set to AES, let it pass without question.
// If the profile is set to TKIP, we must find a match.
if ((pAd->StaCfg.WepStatus == Ndis802_11Encryption2Enabled) &&
(pAd->StaCfg.WepStatus != pInBss->WPA.PairCipher) &&
(pAd->StaCfg.WepStatus != pInBss->WPA.PairCipherAux))
continue;
}
else if ((pAd->StaCfg.AuthMode == Ndis802_11AuthModeWPA2) || (pAd->StaCfg.AuthMode == Ndis802_11AuthModeWPA2PSK))
{
// If it's not mixed mode, we should only let BSS pass with the same encryption
if (pInBss->WPA2.bMixMode == FALSE)
if (pAd->StaCfg.WepStatus != pInBss->WPA2.GroupCipher)
continue;
// check group cipher
if (pAd->StaCfg.WepStatus < pInBss->WPA2.GroupCipher)
continue;
// check pairwise cipher; skip if none matched
// If the profile is set to AES, let it pass without question.
// If the profile is set to TKIP, we must find a match.
if ((pAd->StaCfg.WepStatus == Ndis802_11Encryption2Enabled) &&
(pAd->StaCfg.WepStatus != pInBss->WPA2.PairCipher) &&
(pAd->StaCfg.WepStatus != pInBss->WPA2.PairCipherAux))
continue;
}
}
// Bss Type matched, SSID matched.
// We will check wepstatus for qualification Bss
else if (pAd->StaCfg.WepStatus != pInBss->WepStatus)
continue;
// If both the station and the AP use 40MHz, we still need to check whether the 40MHz band is legal in our country region.
// If this 40MHz wide band is not allowed in our country list, fall back to 20MHz bandwidth instead.
if ((pInBss->CentralChannel != pInBss->Channel) &&
(pAd->CommonCfg.RegTransmitSetting.field.BW == BW_40))
{
if (RTMPCheckChannel(pAd, pInBss->CentralChannel, pInBss->Channel) == FALSE)
{
pAd->CommonCfg.RegTransmitSetting.field.BW = BW_20;
SetCommonHT(pAd);
pAd->CommonCfg.RegTransmitSetting.field.BW = BW_40;
}
}
// copy matching BSS from InTab to OutTab
NdisMoveMemory(pOutBss, pInBss, sizeof(BSS_ENTRY));
OutTab->BssNr++;
}
if (OutTab->BssNr >= MAX_LEN_OF_BSS_TABLE)
break;
}
BssTableSortByRssi(OutTab);
}
// IRQL = DISPATCH_LEVEL
VOID BssTableSortByRssi(
IN OUT BSS_TABLE *OutTab)
{
INT i, j;
BSS_ENTRY TmpBss;
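// simple in-place exchange sort, ordering entries by descending RSSI
// (strongest signal first)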
for (i = 0; i < OutTab->BssNr - 1; i++)
{
for (j = i+1; j < OutTab->BssNr; j++)
{
if (OutTab->BssEntry[j].Rssi > OutTab->BssEntry[i].Rssi)
{
NdisMoveMemory(&TmpBss, &OutTab->BssEntry[j], sizeof(BSS_ENTRY));
NdisMoveMemory(&OutTab->BssEntry[j], &OutTab->BssEntry[i], sizeof(BSS_ENTRY));
NdisMoveMemory(&OutTab->BssEntry[i], &TmpBss, sizeof(BSS_ENTRY));
}
}
}
}
VOID BssCipherParse(
IN OUT PBSS_ENTRY pBss)
{
PEID_STRUCT pEid;
PUCHAR pTmp;
PRSN_IE_HEADER_STRUCT pRsnHeader;
PCIPHER_SUITE_STRUCT pCipher;
PAKM_SUITE_STRUCT pAKM;
USHORT Count;
INT Length;
NDIS_802_11_ENCRYPTION_STATUS TmpCipher;
//
// WepStatus will be reset later, if AP announce TKIP or AES on the beacon frame.
//
if (pBss->Privacy)
{
pBss->WepStatus = Ndis802_11WEPEnabled;
}
else
{
pBss->WepStatus = Ndis802_11WEPDisabled;
}
// Set default to disable & open authentication before parsing variable IE
pBss->AuthMode = Ndis802_11AuthModeOpen;
pBss->AuthModeAux = Ndis802_11AuthModeOpen;
// Init WPA setting
pBss->WPA.PairCipher = Ndis802_11WEPDisabled;
pBss->WPA.PairCipherAux = Ndis802_11WEPDisabled;
pBss->WPA.GroupCipher = Ndis802_11WEPDisabled;
pBss->WPA.RsnCapability = 0;
pBss->WPA.bMixMode = FALSE;
// Init WPA2 setting
pBss->WPA2.PairCipher = Ndis802_11WEPDisabled;
pBss->WPA2.PairCipherAux = Ndis802_11WEPDisabled;
pBss->WPA2.GroupCipher = Ndis802_11WEPDisabled;
pBss->WPA2.RsnCapability = 0;
pBss->WPA2.bMixMode = FALSE;
Length = (INT) pBss->VarIELen;
while (Length > 0)
{
// Parse the cipher suites based on WPA1 & WPA2; they must be parsed differently
pTmp = ((PUCHAR) pBss->VarIEs) + pBss->VarIELen - Length;
pEid = (PEID_STRUCT) pTmp;
switch (pEid->Eid)
{
case IE_WPA:
//Parse Cisco IE_WPA (LEAP, CCKM, etc.)
if ( NdisEqualMemory((pTmp+8), CISCO_OUI, 3))
{
pTmp += 11;
switch (*pTmp)
{
case 1:
case 5: // Although WEP is not allowed in WPA related auth mode, we parse it anyway
pBss->WepStatus = Ndis802_11Encryption1Enabled;
pBss->WPA.PairCipher = Ndis802_11Encryption1Enabled;
pBss->WPA.GroupCipher = Ndis802_11Encryption1Enabled;
break;
case 2:
pBss->WepStatus = Ndis802_11Encryption2Enabled;
pBss->WPA.PairCipher = Ndis802_11Encryption1Enabled;
pBss->WPA.GroupCipher = Ndis802_11Encryption1Enabled;
break;
case 4:
pBss->WepStatus = Ndis802_11Encryption3Enabled;
pBss->WPA.PairCipher = Ndis802_11Encryption1Enabled;
pBss->WPA.GroupCipher = Ndis802_11Encryption1Enabled;
break;
default:
break;
}
// if Cisco IE_WPA, break
break;
}
else if (NdisEqualMemory(pEid->Octet, SES_OUI, 3) && (pEid->Len == 7))
{
pBss->bSES = TRUE;
break;
}
else if (NdisEqualMemory(pEid->Octet, WPA_OUI, 4) != 1)
{
// if unsupported vendor specific IE
break;
}
// Skip OUI, version, and multicast suite.
// This part should be improved in the future, when APs support multiple cipher suites.
// For now it's OK, since almost all APs support a fixed cipher suite.
// pTmp = (PUCHAR) pEid->Octet;
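// 11 bytes = Eid(1) + Len(1) + WPA OUI/type(4) + version(2) + group-suite
// OUI(3); pTmp then points at the group cipher suite type byte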
pTmp += 11;
// Cipher Suite Selectors from Spec P802.11i/D3.2 P26.
// Value Meaning
// 0 None
// 1 WEP-40
// 2 Tkip
// 3 WRAP
// 4 AES
// 5 WEP-104
// Parse group cipher
switch (*pTmp)
{
case 1:
pBss->WPA.GroupCipher = Ndis802_11GroupWEP40Enabled;
break;
case 5:
pBss->WPA.GroupCipher = Ndis802_11GroupWEP104Enabled;
break;
case 2:
pBss->WPA.GroupCipher = Ndis802_11Encryption2Enabled;
break;
case 4:
pBss->WPA.GroupCipher = Ndis802_11Encryption3Enabled;
break;
default:
break;
}
// skip the group cipher suite type byte just parsed
pTmp += 1;
// read the number of unicast cipher suites
//Count = *(PUSHORT) pTmp;
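// assemble the little-endian 16-bit count byte-wise; the PUSHORT cast
// commented out above is neither alignment- nor endian-safe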
Count = (pTmp[1]<<8) + pTmp[0];
pTmp += sizeof(USHORT);
// Parsing all unicast cipher suite
while (Count > 0)
{
// Skip OUI
pTmp += 3;
TmpCipher = Ndis802_11WEPDisabled;
switch (*pTmp)
{
case 1:
case 5: // Although WEP is not allowed in WPA related auth mode, we parse it anyway
TmpCipher = Ndis802_11Encryption1Enabled;
break;
case 2:
TmpCipher = Ndis802_11Encryption2Enabled;
break;
case 4:
TmpCipher = Ndis802_11Encryption3Enabled;
break;
default:
break;
}
if (TmpCipher > pBss->WPA.PairCipher)
{
// Move the lower cipher suite to PairCipherAux
pBss->WPA.PairCipherAux = pBss->WPA.PairCipher;
pBss->WPA.PairCipher = TmpCipher;
}
else
{
pBss->WPA.PairCipherAux = TmpCipher;
}
pTmp++;
Count--;
}
// 4. get AKM suite counts
//Count = *(PUSHORT) pTmp;
Count = (pTmp[1]<<8) + pTmp[0];
pTmp += sizeof(USHORT);
pTmp += 3;
switch (*pTmp)
{
case 1:
// Set AP support WPA mode
if (pBss->AuthMode == Ndis802_11AuthModeOpen)
pBss->AuthMode = Ndis802_11AuthModeWPA;
else
pBss->AuthModeAux = Ndis802_11AuthModeWPA;
break;
case 2:
// Set AP support WPA mode
if (pBss->AuthMode == Ndis802_11AuthModeOpen)
pBss->AuthMode = Ndis802_11AuthModeWPAPSK;
else
pBss->AuthModeAux = Ndis802_11AuthModeWPAPSK;
break;
default:
break;
}
pTmp += 1;
// Fixed for WPA-None
if (pBss->BssType == BSS_ADHOC)
{
pBss->AuthMode = Ndis802_11AuthModeWPANone;
pBss->AuthModeAux = Ndis802_11AuthModeWPANone;
pBss->WepStatus = pBss->WPA.GroupCipher;
if (pBss->WPA.PairCipherAux == Ndis802_11WEPDisabled)
pBss->WPA.PairCipherAux = pBss->WPA.GroupCipher;
}
else
pBss->WepStatus = pBss->WPA.PairCipher;
// Check the Pair & Group, if different, turn on mixed mode flag
if (pBss->WPA.GroupCipher != pBss->WPA.PairCipher)
pBss->WPA.bMixMode = TRUE;
break;
case IE_RSN:
pRsnHeader = (PRSN_IE_HEADER_STRUCT) pTmp;
// 0. Version must be 1
if (le2cpu16(pRsnHeader->Version) != 1)
break;
pTmp += sizeof(RSN_IE_HEADER_STRUCT);
// 1. Check group cipher
pCipher = (PCIPHER_SUITE_STRUCT) pTmp;
if (!RTMPEqualMemory(pTmp, RSN_OUI, 3))
break;
// Parse group cipher
switch (pCipher->Type)
{
case 1:
pBss->WPA2.GroupCipher = Ndis802_11GroupWEP40Enabled;
break;
case 5:
pBss->WPA2.GroupCipher = Ndis802_11GroupWEP104Enabled;
break;
case 2:
pBss->WPA2.GroupCipher = Ndis802_11Encryption2Enabled;
break;
case 4:
pBss->WPA2.GroupCipher = Ndis802_11Encryption3Enabled;
break;
default:
break;
}
// set to correct offset for next parsing
pTmp += sizeof(CIPHER_SUITE_STRUCT);
// 2. Get pairwise cipher counts
//Count = *(PUSHORT) pTmp;
Count = (pTmp[1]<<8) + pTmp[0];
pTmp += sizeof(USHORT);
// 3. Get pairwise cipher
// Parsing all unicast cipher suite
while (Count > 0)
{
// Skip OUI
pCipher = (PCIPHER_SUITE_STRUCT) pTmp;
TmpCipher = Ndis802_11WEPDisabled;
switch (pCipher->Type)
{
case 1:
case 5: // Although WEP is not allowed in WPA related auth mode, we parse it anyway
TmpCipher = Ndis802_11Encryption1Enabled;
break;
case 2:
TmpCipher = Ndis802_11Encryption2Enabled;
break;
case 4:
TmpCipher = Ndis802_11Encryption3Enabled;
break;
default:
break;
}
if (TmpCipher > pBss->WPA2.PairCipher)
{
// Move the lower cipher suite to PairCipherAux
pBss->WPA2.PairCipherAux = pBss->WPA2.PairCipher;
pBss->WPA2.PairCipher = TmpCipher;
}
else
{
pBss->WPA2.PairCipherAux = TmpCipher;
}
pTmp += sizeof(CIPHER_SUITE_STRUCT);
Count--;
}
// 4. get AKM suite counts
//Count = *(PUSHORT) pTmp;
Count = (pTmp[1]<<8) + pTmp[0];
pTmp += sizeof(USHORT);
// 5. Get AKM ciphers
pAKM = (PAKM_SUITE_STRUCT) pTmp;
if (!RTMPEqualMemory(pTmp, RSN_OUI, 3))
break;
switch (pAKM->Type)
{
case 1:
// Set AP support WPA mode
if (pBss->AuthMode == Ndis802_11AuthModeOpen)
pBss->AuthMode = Ndis802_11AuthModeWPA2;
else
pBss->AuthModeAux = Ndis802_11AuthModeWPA2;
break;
case 2:
// Set AP support WPA mode
if (pBss->AuthMode == Ndis802_11AuthModeOpen)
pBss->AuthMode = Ndis802_11AuthModeWPA2PSK;
else
pBss->AuthModeAux = Ndis802_11AuthModeWPA2PSK;
break;
default:
break;
}
pTmp += (Count * sizeof(AKM_SUITE_STRUCT));
// Fixed for WPA-None
if (pBss->BssType == BSS_ADHOC)
{
pBss->AuthMode = Ndis802_11AuthModeWPANone;
pBss->AuthModeAux = Ndis802_11AuthModeWPANone;
pBss->WPA.PairCipherAux = pBss->WPA2.PairCipherAux;
pBss->WPA.GroupCipher = pBss->WPA2.GroupCipher;
pBss->WepStatus = pBss->WPA.GroupCipher;
if (pBss->WPA.PairCipherAux == Ndis802_11WEPDisabled)
pBss->WPA.PairCipherAux = pBss->WPA.GroupCipher;
}
pBss->WepStatus = pBss->WPA2.PairCipher;
// 6. Get RSN capability
//pBss->WPA2.RsnCapability = *(PUSHORT) pTmp;
pBss->WPA2.RsnCapability = (pTmp[1]<<8) + pTmp[0];
pTmp += sizeof(USHORT);
// Check the Pair & Group, if different, turn on mixed mode flag
if (pBss->WPA2.GroupCipher != pBss->WPA2.PairCipher)
pBss->WPA2.bMixMode = TRUE;
break;
default:
break;
}
Length -= (pEid->Len + 2);
}
}
// ===========================================================================================
// mac_table.c
// ===========================================================================================
/*! \brief generates a random mac address value for IBSS BSSID
* \param Addr the bssid location
* \return none
* \pre
* \post
*/
VOID MacAddrRandomBssid(
IN PRTMP_ADAPTER pAd,
OUT PUCHAR pAddr)
{
INT i;
for (i = 0; i < MAC_ADDR_LEN; i++)
{
pAddr[i] = RandomByte(pAd);
}
pAddr[0] = (pAddr[0] & 0xfe) | 0x02; // clear the I/G (multicast) bit, set the U/L (locally administered) bit
}
/*! \brief init the management mac frame header
* \param p_hdr mac header
* \param subtype subtype of the frame
* \param p_ds destination address, don't care if it is a broadcast address
* \return none
* \pre the station has the following information in the pAd->StaCfg
* - bssid
* - station address
* \post
* \note this function initializes the following field
IRQL = PASSIVE_LEVEL
IRQL = DISPATCH_LEVEL
*/
VOID MgtMacHeaderInit(
IN PRTMP_ADAPTER pAd,
IN OUT PHEADER_802_11 pHdr80211,
IN UCHAR SubType,
IN UCHAR ToDs,
IN PUCHAR pDA,
IN PUCHAR pBssid)
{
NdisZeroMemory(pHdr80211, sizeof(HEADER_802_11));
pHdr80211->FC.Type = BTYPE_MGMT;
pHdr80211->FC.SubType = SubType;
pHdr80211->FC.ToDs = ToDs;
COPY_MAC_ADDR(pHdr80211->Addr1, pDA);
COPY_MAC_ADDR(pHdr80211->Addr2, pAd->CurrentAddress);
COPY_MAC_ADDR(pHdr80211->Addr3, pBssid);
}
// ===========================================================================================
// mem_mgmt.c
// ===========================================================================================
/*!***************************************************************************
* This routine builds an outgoing frame and fills all information specified
* in the argument list into the frame body. The actual frame size is the
* sum of all argument sizes.
* input params:
* Buffer - pointer to a pre-allocated memory segment
* args - a list of <int arg_size, arg> pairs.
* NOTE NOTE NOTE!!!! the last argument must be END_OF_ARGS, otherwise this
* function will FAIL!!!
* return:
* Size of the buffer
* usage:
* MakeOutgoingFrame(Buffer, output_length, 2, &fc, 2, &dur, 6, p_addr1, 6,p_addr2, END_OF_ARGS);
IRQL = PASSIVE_LEVEL
IRQL = DISPATCH_LEVEL
****************************************************************************/
ULONG MakeOutgoingFrame(
OUT CHAR *Buffer,
OUT ULONG *FrameLen, ...)
{
CHAR *p;
int leng;
ULONG TotLeng;
va_list Args;
// accumulate the total length while copying each <size, pointer> pair
TotLeng = 0;
va_start(Args, FrameLen);
do
{
leng = va_arg(Args, int);
if (leng == END_OF_ARGS)
{
break;
}
p = va_arg(Args, PVOID);
NdisMoveMemory(&Buffer[TotLeng], p, leng);
TotLeng = TotLeng + leng;
} while(TRUE);
va_end(Args); /* clean up */
*FrameLen = TotLeng;
return TotLeng;
}
// ===========================================================================================
// mlme_queue.c
// ===========================================================================================
/*! \brief Initialize The MLME Queue, used by MLME Functions
* \param *Queue The MLME Queue
* \return Always Return NDIS_STATE_SUCCESS in this implementation
* \pre
* \post
* \note Because this is done only once (at the init stage), no locking is needed
IRQL = PASSIVE_LEVEL
*/
NDIS_STATUS MlmeQueueInit(
IN MLME_QUEUE *Queue)
{
INT i;
NdisAllocateSpinLock(&Queue->Lock);
Queue->Num = 0;
Queue->Head = 0;
Queue->Tail = 0;
for (i = 0; i < MAX_LEN_OF_MLME_QUEUE; i++)
{
Queue->Entry[i].Occupied = FALSE;
Queue->Entry[i].MsgLen = 0;
NdisZeroMemory(Queue->Entry[i].Msg, MGMT_DMA_BUFFER_SIZE);
}
return NDIS_STATUS_SUCCESS;
}
/*! \brief Enqueue a message for other threads, if they want to send messages to MLME thread
* \param *Queue The MLME Queue
* \param Machine The State Machine Id
* \param MsgType The Message Type
* \param MsgLen The Message length
* \param *Msg The message pointer
* \return TRUE if enqueue is successful, FALSE if the queue is full
* \pre
* \post
* \note The message has to be initialized
IRQL = PASSIVE_LEVEL
IRQL = DISPATCH_LEVEL
*/
BOOLEAN MlmeEnqueue(
IN PRTMP_ADAPTER pAd,
IN ULONG Machine,
IN ULONG MsgType,
IN ULONG MsgLen,
IN VOID *Msg)
{
INT Tail;
MLME_QUEUE *Queue = (MLME_QUEUE *)&pAd->Mlme.Queue;
// Do nothing if the driver is entering the halt state.
// This can happen when a timer has already fired before being cancelled by MlmeHalt.
if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_HALT_IN_PROGRESS | fRTMP_ADAPTER_NIC_NOT_EXIST))
return FALSE;
// First check the size, it MUST not exceed the mlme queue size
if (MsgLen > MGMT_DMA_BUFFER_SIZE)
{
DBGPRINT_ERR(("MlmeEnqueue: msg too large, size = %ld \n", MsgLen));
return FALSE;
}
if (MlmeQueueFull(Queue))
{
return FALSE;
}
NdisAcquireSpinLock(&(Queue->Lock));
Tail = Queue->Tail;
Queue->Tail++;
Queue->Num++;
if (Queue->Tail == MAX_LEN_OF_MLME_QUEUE)
{
Queue->Tail = 0;
}
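// internally generated message (no received frame), so tag it with
// RESERVED_WCID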
Queue->Entry[Tail].Wcid = RESERVED_WCID;
Queue->Entry[Tail].Occupied = TRUE;
Queue->Entry[Tail].Machine = Machine;
Queue->Entry[Tail].MsgType = MsgType;
Queue->Entry[Tail].MsgLen = MsgLen;
if (Msg != NULL)
{
NdisMoveMemory(Queue->Entry[Tail].Msg, Msg, MsgLen);
}
NdisReleaseSpinLock(&(Queue->Lock));
return TRUE;
}
/*! \brief This function is used when Recv gets a MLME message
* \param *Queue The MLME Queue
* \param TimeStampHigh The upper 32 bit of timestamp
* \param TimeStampLow The lower 32 bit of timestamp
* \param Rssi The receiving RSSI strength
* \param MsgLen The length of the message
* \param *Msg The message pointer
* \return TRUE if everything ok, FALSE otherwise (like Queue Full)
* \pre
* \post
IRQL = DISPATCH_LEVEL
*/
BOOLEAN MlmeEnqueueForRecv(
IN PRTMP_ADAPTER pAd,
IN ULONG Wcid,
IN ULONG TimeStampHigh,
IN ULONG TimeStampLow,
IN UCHAR Rssi0,
IN UCHAR Rssi1,
IN UCHAR Rssi2,
IN ULONG MsgLen,
IN VOID *Msg,
IN UCHAR Signal)
{
INT Tail, Machine;
PFRAME_802_11 pFrame = (PFRAME_802_11)Msg;
INT MsgType;
MLME_QUEUE *Queue = (MLME_QUEUE *)&pAd->Mlme.Queue;
// Do nothing if the driver is entering the halt state.
// This can happen when a timer has already fired before being cancelled by MlmeHalt.
if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_HALT_IN_PROGRESS | fRTMP_ADAPTER_NIC_NOT_EXIST))
{
DBGPRINT_ERR(("MlmeEnqueueForRecv: fRTMP_ADAPTER_HALT_IN_PROGRESS\n"));
return FALSE;
}
// First check the size, it MUST not exceed the mlme queue size
if (MsgLen > MGMT_DMA_BUFFER_SIZE)
{
DBGPRINT_ERR(("MlmeEnqueueForRecv: frame too large, size = %ld \n", MsgLen));
return FALSE;
}
if (MlmeQueueFull(Queue))
{
return FALSE;
}
{
if (!MsgTypeSubst(pAd, pFrame, &Machine, &MsgType))
{
DBGPRINT_ERR(("MlmeEnqueueForRecv: un-recongnized mgmt->subtype=%d\n",pFrame->Hdr.FC.SubType));
return FALSE;
}
}
// OK, we have all the information; time to put things into the queue
NdisAcquireSpinLock(&(Queue->Lock));
Tail = Queue->Tail;
Queue->Tail++;
Queue->Num++;
if (Queue->Tail == MAX_LEN_OF_MLME_QUEUE)
{
Queue->Tail = 0;
}
Queue->Entry[Tail].Occupied = TRUE;
Queue->Entry[Tail].Machine = Machine;
Queue->Entry[Tail].MsgType = MsgType;
Queue->Entry[Tail].MsgLen = MsgLen;
Queue->Entry[Tail].TimeStamp.u.LowPart = TimeStampLow;
Queue->Entry[Tail].TimeStamp.u.HighPart = TimeStampHigh;
Queue->Entry[Tail].Rssi0 = Rssi0;
Queue->Entry[Tail].Rssi1 = Rssi1;
Queue->Entry[Tail].Rssi2 = Rssi2;
Queue->Entry[Tail].Signal = Signal;
Queue->Entry[Tail].Wcid = (UCHAR)Wcid;
Queue->Entry[Tail].Channel = pAd->LatchRfRegs.Channel;
if (Msg != NULL)
{
NdisMoveMemory(Queue->Entry[Tail].Msg, Msg, MsgLen);
}
NdisReleaseSpinLock(&(Queue->Lock));
RT28XX_MLME_HANDLER(pAd);
return TRUE;
}
/*! \brief Dequeue a message from the MLME Queue
* \param *Queue The MLME Queue
* \param *Elem The message dequeued from MLME Queue
* \return TRUE if the Elem contains something, FALSE otherwise
* \pre
* \post
IRQL = DISPATCH_LEVEL
*/
BOOLEAN MlmeDequeue(
IN MLME_QUEUE *Queue,
OUT MLME_QUEUE_ELEM **Elem)
{
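// NOTE: no underflow check here -- callers are expected to verify that the
// queue is non-empty (e.g. via MlmeQueueEmpty) before dequeuing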
NdisAcquireSpinLock(&(Queue->Lock));
*Elem = &(Queue->Entry[Queue->Head]);
Queue->Num--;
Queue->Head++;
if (Queue->Head == MAX_LEN_OF_MLME_QUEUE)
{
Queue->Head = 0;
}
NdisReleaseSpinLock(&(Queue->Lock));
return TRUE;
}
// IRQL = DISPATCH_LEVEL
VOID MlmeRestartStateMachine(
IN PRTMP_ADAPTER pAd)
{
#ifdef RT2860
MLME_QUEUE_ELEM *Elem = NULL;
#endif
BOOLEAN Cancelled;
DBGPRINT(RT_DEBUG_TRACE, ("MlmeRestartStateMachine \n"));
#ifdef RT2860
NdisAcquireSpinLock(&pAd->Mlme.TaskLock);
if(pAd->Mlme.bRunning)
{
NdisReleaseSpinLock(&pAd->Mlme.TaskLock);
return;
}
else
{
pAd->Mlme.bRunning = TRUE;
}
NdisReleaseSpinLock(&pAd->Mlme.TaskLock);
// Remove all Mlme queues elements
while (!MlmeQueueEmpty(&pAd->Mlme.Queue))
{
//From message type, determine which state machine I should drive
if (MlmeDequeue(&pAd->Mlme.Queue, &Elem))
{
// free MLME element
Elem->Occupied = FALSE;
Elem->MsgLen = 0;
}
else {
DBGPRINT_ERR(("MlmeRestartStateMachine: MlmeQueue empty\n"));
}
}
#endif /* RT2860 */
{
// Cancel all timer events
// Be careful to cancel new added timer
RTMPCancelTimer(&pAd->MlmeAux.AssocTimer, &Cancelled);
RTMPCancelTimer(&pAd->MlmeAux.ReassocTimer, &Cancelled);
RTMPCancelTimer(&pAd->MlmeAux.DisassocTimer, &Cancelled);
RTMPCancelTimer(&pAd->MlmeAux.AuthTimer, &Cancelled);
RTMPCancelTimer(&pAd->MlmeAux.BeaconTimer, &Cancelled);
RTMPCancelTimer(&pAd->MlmeAux.ScanTimer, &Cancelled);
}
// Change back to original channel in case of doing scan
AsicSwitchChannel(pAd, pAd->CommonCfg.Channel, FALSE);
AsicLockChannel(pAd, pAd->CommonCfg.Channel);
// Resume MSDU transmission, which was turned off during scan
RTMPResumeMsduTransmission(pAd);
{
// Set all state machines back IDLE
pAd->Mlme.CntlMachine.CurrState = CNTL_IDLE;
pAd->Mlme.AssocMachine.CurrState = ASSOC_IDLE;
pAd->Mlme.AuthMachine.CurrState = AUTH_REQ_IDLE;
pAd->Mlme.AuthRspMachine.CurrState = AUTH_RSP_IDLE;
pAd->Mlme.SyncMachine.CurrState = SYNC_IDLE;
pAd->Mlme.ActMachine.CurrState = ACT_IDLE;
}
#ifdef RT2860
// Remove running state
NdisAcquireSpinLock(&pAd->Mlme.TaskLock);
pAd->Mlme.bRunning = FALSE;
NdisReleaseSpinLock(&pAd->Mlme.TaskLock);
#endif
}
/*! \brief test if the MLME Queue is empty
* \param *Queue The MLME Queue
* \return TRUE if the Queue is empty, FALSE otherwise
* \pre
* \post
IRQL = DISPATCH_LEVEL
*/
BOOLEAN MlmeQueueEmpty(
IN MLME_QUEUE *Queue)
{
BOOLEAN Ans;
NdisAcquireSpinLock(&(Queue->Lock));
Ans = (Queue->Num == 0);
NdisReleaseSpinLock(&(Queue->Lock));
return Ans;
}
/*! \brief test if the MLME Queue is full
* \param *Queue The MLME Queue
* \return TRUE if the Queue is full, FALSE otherwise
* \pre
* \post
IRQL = PASSIVE_LEVEL
IRQL = DISPATCH_LEVEL
*/
BOOLEAN MlmeQueueFull(
IN MLME_QUEUE *Queue)
{
BOOLEAN Ans;
NdisAcquireSpinLock(&(Queue->Lock));
Ans = (Queue->Num == MAX_LEN_OF_MLME_QUEUE || Queue->Entry[Queue->Tail].Occupied);
NdisReleaseSpinLock(&(Queue->Lock));
return Ans;
}
/*! \brief The destructor of MLME Queue
* \param
* \return
* \pre
* \post
* \note Clear Mlme Queue, Set Queue->Num to Zero.
IRQL = PASSIVE_LEVEL
*/
VOID MlmeQueueDestroy(
IN MLME_QUEUE *pQueue)
{
NdisAcquireSpinLock(&(pQueue->Lock));
pQueue->Num = 0;
pQueue->Head = 0;
pQueue->Tail = 0;
NdisReleaseSpinLock(&(pQueue->Lock));
NdisFreeSpinLock(&(pQueue->Lock));
}
/*! \brief Determine the state machine and message type for a frame received from outside
* \param pFrame The frame received
* \param *Machine The state machine
* \param *MsgType the message type for the state machine
* \return TRUE if the substitution is successful, FALSE otherwise
* \pre
* \post
IRQL = DISPATCH_LEVEL
*/
BOOLEAN MsgTypeSubst(
IN PRTMP_ADAPTER pAd,
IN PFRAME_802_11 pFrame,
OUT INT *Machine,
OUT INT *MsgType)
{
USHORT Seq;
UCHAR EAPType;
PUCHAR pData;
// Pointer to the start of the frame body (for data frames this is the LLC/SNAP header)
pData = (PUCHAR) pFrame + LENGTH_802_11;
// The only data frames passed to this function are EAPOL frames
if (pFrame->Hdr.FC.Type == BTYPE_DATA)
{
if (NdisEqualMemory(SNAP_AIRONET, pData, LENGTH_802_1_H))
{
// Cisco Aironet SNAP header
*Machine = AIRONET_STATE_MACHINE;
*MsgType = MT2_AIRONET_MSG;
return (TRUE);
}
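// Otherwise treat the frame as EAPOL (the only other data type that
// reaches this function, per the note above): hand it to the WPA-PSK
// state machine, reading the EAPOL packet type byte that follows the
// LLC/SNAP header.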
{
*Machine = WPA_PSK_STATE_MACHINE;
EAPType = *((UCHAR*)pFrame + LENGTH_802_11 + LENGTH_802_1_H + 1);
return(WpaMsgTypeSubst(EAPType, MsgType));
}
}
switch (pFrame->Hdr.FC.SubType)
{
case SUBTYPE_ASSOC_REQ:
*Machine = ASSOC_STATE_MACHINE;
*MsgType = MT2_PEER_ASSOC_REQ;
break;
case SUBTYPE_ASSOC_RSP:
*Machine = ASSOC_STATE_MACHINE;
*MsgType = MT2_PEER_ASSOC_RSP;
break;
case SUBTYPE_REASSOC_REQ:
*Machine = ASSOC_STATE_MACHINE;
*MsgType = MT2_PEER_REASSOC_REQ;
break;
case SUBTYPE_REASSOC_RSP:
*Machine = ASSOC_STATE_MACHINE;
*MsgType = MT2_PEER_REASSOC_RSP;
break;
case SUBTYPE_PROBE_REQ:
*Machine = SYNC_STATE_MACHINE;
*MsgType = MT2_PEER_PROBE_REQ;
break;
case SUBTYPE_PROBE_RSP:
*Machine = SYNC_STATE_MACHINE;
*MsgType = MT2_PEER_PROBE_RSP;
break;
case SUBTYPE_BEACON:
*Machine = SYNC_STATE_MACHINE;
*MsgType = MT2_PEER_BEACON;
break;
case SUBTYPE_ATIM:
*Machine = SYNC_STATE_MACHINE;
*MsgType = MT2_PEER_ATIM;
break;
case SUBTYPE_DISASSOC:
*Machine = ASSOC_STATE_MACHINE;
*MsgType = MT2_PEER_DISASSOC_REQ;
break;
case SUBTYPE_AUTH:
// get the sequence number from the payload: the 24-byte MAC header and the 2-byte algorithm field precede it
NdisMoveMemory(&Seq, &pFrame->Octet[2], sizeof(USHORT));
if (Seq == 1 || Seq == 3)
{
*Machine = AUTH_RSP_STATE_MACHINE;
*MsgType = MT2_PEER_AUTH_ODD;
}
else if (Seq == 2 || Seq == 4)
{
*Machine = AUTH_STATE_MACHINE;
*MsgType = MT2_PEER_AUTH_EVEN;
}
else
{
return FALSE;
}
break;
case SUBTYPE_DEAUTH:
*Machine = AUTH_RSP_STATE_MACHINE;
*MsgType = MT2_PEER_DEAUTH;
break;
case SUBTYPE_ACTION:
*Machine = ACTION_STATE_MACHINE;
// Some STAs return the category byte with the MSB set to 1 if they receive a category they do not support
if ((pFrame->Octet[0]&0x7F) > MAX_PEER_CATE_MSG)
{
*MsgType = MT2_ACT_INVALID;
}
else
{
*MsgType = (pFrame->Octet[0]&0x7F);
}
break;
default:
return FALSE;
}
return TRUE;
}
// ===========================================================================================
// state_machine.c
// ===========================================================================================
/*! \brief Initialize the state machine.
* \param *S pointer to the state machine
* \param Trans State machine transition function
* \param StNr number of states
* \param MsgNr number of messages
* \param DefFunc default function, when there is invalid state/message combination
* \param InitState initial state of the state machine
* \param Base StateMachine base, internal use only
* \pre p_sm should be a legal pointer
* \post
IRQL = PASSIVE_LEVEL
*/
VOID StateMachineInit(
IN STATE_MACHINE *S,
IN STATE_MACHINE_FUNC Trans[],
IN ULONG StNr,
IN ULONG MsgNr,
IN STATE_MACHINE_FUNC DefFunc,
IN ULONG InitState,
IN ULONG Base)
{
ULONG i, j;
// set number of states and messages
S->NrState = StNr;
S->NrMsg = MsgNr;
S->Base = Base;
S->TransFunc = Trans;
// init all state transition to default function
for (i = 0; i < StNr; i++)
{
for (j = 0; j < MsgNr; j++)
{
S->TransFunc[i * MsgNr + j] = DefFunc;
}
}
// set the starting state
S->CurrState = InitState;
}
/*! \brief This function fills in the function pointer into the cell in the state machine
* \param *S pointer to the state machine
* \param St state
* \param Msg incoming message
* \param f the function to be executed when (state, message) combination occurs at the state machine
* \pre *S should be a legal pointer to the state machine, st, msg, should be all within the range, Base should be set in the initial state
* \post
IRQL = PASSIVE_LEVEL
*/
VOID StateMachineSetAction(
IN STATE_MACHINE *S,
IN ULONG St,
IN ULONG Msg,
IN STATE_MACHINE_FUNC Func)
{
ULONG MsgIdx;
MsgIdx = Msg - S->Base;
if (St < S->NrState && MsgIdx < S->NrMsg)
{
// boundary checking before setting the action
S->TransFunc[St * S->NrMsg + MsgIdx] = Func;
}
}
/*! \brief This function does the state transition
* \param *Adapter the NIC adapter pointer
* \param *S the state machine
* \param *Elem the message to be executed
* \return None
IRQL = DISPATCH_LEVEL
*/
VOID StateMachinePerformAction(
IN PRTMP_ADAPTER pAd,
IN STATE_MACHINE *S,
IN MLME_QUEUE_ELEM *Elem)
{
(*(S->TransFunc[S->CurrState * S->NrMsg + Elem->MsgType - S->Base]))(pAd, Elem);
}
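/*
Illustrative sketch (kept out of the build with #if 0): how MLME init
code is expected to wire a state machine together using the three
helpers above. MAX_AUTH_STATE, MAX_AUTH_MSG, AUTH_MACHINE_BASE and
PeerAuthHandler are illustrative placeholders here, not definitions
taken from this file.
*/
#if 0
{
STATE_MACHINE_FUNC Trans[MAX_AUTH_STATE * MAX_AUTH_MSG]; /* hypothetical bounds */
/* every (state, message) cell starts out pointing at Drop() */
StateMachineInit(&pAd->Mlme.AuthMachine, Trans, MAX_AUTH_STATE, MAX_AUTH_MSG,
(STATE_MACHINE_FUNC)Drop, AUTH_REQ_IDLE, AUTH_MACHINE_BASE);
/* install a handler for one (state, message) combination */
StateMachineSetAction(&pAd->Mlme.AuthMachine, AUTH_REQ_IDLE,
MT2_PEER_AUTH_EVEN, (STATE_MACHINE_FUNC)PeerAuthHandler);
/* the MLME task later dispatches a dequeued element through the table */
StateMachinePerformAction(pAd, &pAd->Mlme.AuthMachine, Elem);
}
#endif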
/*
==========================================================================
Description:
The drop function: when the state machine executes this, the message is
simply ignored. This function does nothing; the message is freed in
StateMachinePerformAction()
==========================================================================
*/
VOID Drop(
IN PRTMP_ADAPTER pAd,
IN MLME_QUEUE_ELEM *Elem)
{
}
// ===========================================================================================
// lfsr.c
// ===========================================================================================
/*
==========================================================================
Description:
IRQL = PASSIVE_LEVEL
==========================================================================
*/
VOID LfsrInit(
IN PRTMP_ADAPTER pAd,
IN ULONG Seed)
{
if (Seed == 0)
pAd->Mlme.ShiftReg = 1;
else
pAd->Mlme.ShiftReg = Seed;
}
/*
==========================================================================
Description:
==========================================================================
*/
UCHAR RandomByte(
IN PRTMP_ADAPTER pAd)
{
ULONG i;
UCHAR R, Result;
R = 0;
if (pAd->Mlme.ShiftReg == 0)
NdisGetSystemUpTime((ULONG *)&pAd->Mlme.ShiftReg);
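// Clock the 32-bit LFSR eight times: when the LSB is 1, XOR in the
// feedback polynomial (LFSR_MASK), shift right and re-insert a 1 at the
// top; each iteration contributes one pseudo-random output bit to R.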
for (i = 0; i < 8; i++)
{
if (pAd->Mlme.ShiftReg & 0x00000001)
{
pAd->Mlme.ShiftReg = ((pAd->Mlme.ShiftReg ^ LFSR_MASK) >> 1) | 0x80000000;
Result = 1;
}
else
{
pAd->Mlme.ShiftReg = pAd->Mlme.ShiftReg >> 1;
Result = 0;
}
R = (R << 1) | Result;
}
return R;
}
VOID AsicUpdateAutoFallBackTable(
IN PRTMP_ADAPTER pAd,
IN PUCHAR pRateTable)
{
UCHAR i;
HT_FBK_CFG0_STRUC HtCfg0;
HT_FBK_CFG1_STRUC HtCfg1;
LG_FBK_CFG0_STRUC LgCfg0;
LG_FBK_CFG1_STRUC LgCfg1;
PRTMP_TX_RATE_SWITCH pCurrTxRate, pNextTxRate;
// set to initial value
HtCfg0.word = 0x65432100;
HtCfg1.word = 0xedcba988;
LgCfg0.word = 0xedcba988;
LgCfg1.word = 0x00002100;
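// pRateTable layout: the first RTMP_TX_RATE_SWITCH-sized slot carries the
// item count in its first byte; real entries start at index 1. The cast
// binds before the pointer arithmetic, so "+1+i" advances in units of
// sizeof(RTMP_TX_RATE_SWITCH).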
pNextTxRate = (PRTMP_TX_RATE_SWITCH)pRateTable+1;
for (i = 1; i < *((PUCHAR) pRateTable); i++)
{
pCurrTxRate = (PRTMP_TX_RATE_SWITCH)pRateTable+1+i;
switch (pCurrTxRate->Mode)
{
case 0: //CCK
break;
case 1: //OFDM
{
switch(pCurrTxRate->CurrMCS)
{
case 0:
LgCfg0.field.OFDMMCS0FBK = (pNextTxRate->Mode == MODE_OFDM) ? (pNextTxRate->CurrMCS+8): pNextTxRate->CurrMCS;
break;
case 1:
LgCfg0.field.OFDMMCS1FBK = (pNextTxRate->Mode == MODE_OFDM) ? (pNextTxRate->CurrMCS+8): pNextTxRate->CurrMCS;
break;
case 2:
LgCfg0.field.OFDMMCS2FBK = (pNextTxRate->Mode == MODE_OFDM) ? (pNextTxRate->CurrMCS+8): pNextTxRate->CurrMCS;
break;
case 3:
LgCfg0.field.OFDMMCS3FBK = (pNextTxRate->Mode == MODE_OFDM) ? (pNextTxRate->CurrMCS+8): pNextTxRate->CurrMCS;
break;
case 4:
LgCfg0.field.OFDMMCS4FBK = (pNextTxRate->Mode == MODE_OFDM) ? (pNextTxRate->CurrMCS+8): pNextTxRate->CurrMCS;
break;
case 5:
LgCfg0.field.OFDMMCS5FBK = (pNextTxRate->Mode == MODE_OFDM) ? (pNextTxRate->CurrMCS+8): pNextTxRate->CurrMCS;
break;
case 6:
LgCfg0.field.OFDMMCS6FBK = (pNextTxRate->Mode == MODE_OFDM) ? (pNextTxRate->CurrMCS+8): pNextTxRate->CurrMCS;
break;
case 7:
LgCfg0.field.OFDMMCS7FBK = (pNextTxRate->Mode == MODE_OFDM) ? (pNextTxRate->CurrMCS+8): pNextTxRate->CurrMCS;
break;
}
}
break;
case 2: //HT-MIX
case 3: //HT-GF
{
if ((pNextTxRate->Mode >= MODE_HTMIX) && (pCurrTxRate->CurrMCS != pNextTxRate->CurrMCS))
{
switch(pCurrTxRate->CurrMCS)
{
case 0:
HtCfg0.field.HTMCS0FBK = pNextTxRate->CurrMCS;
break;
case 1:
HtCfg0.field.HTMCS1FBK = pNextTxRate->CurrMCS;
break;
case 2:
HtCfg0.field.HTMCS2FBK = pNextTxRate->CurrMCS;
break;
case 3:
HtCfg0.field.HTMCS3FBK = pNextTxRate->CurrMCS;
break;
case 4:
HtCfg0.field.HTMCS4FBK = pNextTxRate->CurrMCS;
break;
case 5:
HtCfg0.field.HTMCS5FBK = pNextTxRate->CurrMCS;
break;
case 6:
HtCfg0.field.HTMCS6FBK = pNextTxRate->CurrMCS;
break;
case 7:
HtCfg0.field.HTMCS7FBK = pNextTxRate->CurrMCS;
break;
case 8:
HtCfg1.field.HTMCS8FBK = pNextTxRate->CurrMCS;
break;
case 9:
HtCfg1.field.HTMCS9FBK = pNextTxRate->CurrMCS;
break;
case 10:
HtCfg1.field.HTMCS10FBK = pNextTxRate->CurrMCS;
break;
case 11:
HtCfg1.field.HTMCS11FBK = pNextTxRate->CurrMCS;
break;
case 12:
HtCfg1.field.HTMCS12FBK = pNextTxRate->CurrMCS;
break;
case 13:
HtCfg1.field.HTMCS13FBK = pNextTxRate->CurrMCS;
break;
case 14:
HtCfg1.field.HTMCS14FBK = pNextTxRate->CurrMCS;
break;
case 15:
HtCfg1.field.HTMCS15FBK = pNextTxRate->CurrMCS;
break;
default:
DBGPRINT(RT_DEBUG_ERROR, ("AsicUpdateAutoFallBackTable: not support CurrMCS=%d\n", pCurrTxRate->CurrMCS));
}
}
}
break;
}
pNextTxRate = pCurrTxRate;
}
RTMP_IO_WRITE32(pAd, HT_FBK_CFG0, HtCfg0.word);
RTMP_IO_WRITE32(pAd, HT_FBK_CFG1, HtCfg1.word);
RTMP_IO_WRITE32(pAd, LG_FBK_CFG0, LgCfg0.word);
RTMP_IO_WRITE32(pAd, LG_FBK_CFG1, LgCfg1.word);
}
/*
========================================================================
Routine Description:
Set MAC register values according to the operation mode.
OperationMode and bNonGFExist apply to MM and GF protection.
If the MM or GF mask is not set, the corresponding arguments have no effect.
Operation mode meaning:
= 0   : Pure HT, no protection.
= 0x01: there may be non-HT devices in both the control and extension channels; protection is optional in the BSS.
= 0x10: no transmission in 40 MHz is protected.
= 0x11: transmission in both 40 MHz and 20 MHz shall be protected.
If bNonGFExist, we should choose not to use GF, but still set the ASIC
registers correctly.
========================================================================
*/
VOID AsicUpdateProtect(
IN PRTMP_ADAPTER pAd,
IN USHORT OperationMode,
IN UCHAR SetMask,
IN BOOLEAN bDisableBGProtect,
IN BOOLEAN bNonGFExist)
{
PROT_CFG_STRUC ProtCfg, ProtCfg4;
UINT32 Protect[6];
USHORT offset;
UCHAR i;
UINT32 MacReg = 0;
if (!(pAd->CommonCfg.bHTProtect) && (OperationMode != 8))
{
return;
}
if (pAd->BATable.numAsOriginator)
{
//
// enable the RTS/CTS to avoid channel collision
//
SetMask = ALLN_SETPROTECT;
OperationMode = 8;
}
// Config ASIC RTS threshold register
RTMP_IO_READ32(pAd, TX_RTS_CFG, &MacReg);
MacReg &= 0xFF0000FF;
// If the user wants to disable the RTS threshold and enable A-MSDU/Ralink aggregation, set the RTS threshold to 4096
if ((
(pAd->CommonCfg.BACapability.field.AmsduEnable) ||
(pAd->CommonCfg.bAggregationCapable == TRUE))
&& pAd->CommonCfg.RtsThreshold == MAX_RTS_THRESHOLD)
{
MacReg |= (0x1000 << 8);
}
else
{
MacReg |= (pAd->CommonCfg.RtsThreshold << 8);
}
RTMP_IO_WRITE32(pAd, TX_RTS_CFG, MacReg);
// Initial common protection settings
RTMPZeroMemory(Protect, sizeof(Protect));
ProtCfg4.word = 0;
ProtCfg.word = 0;
ProtCfg.field.TxopAllowGF40 = 1;
ProtCfg.field.TxopAllowGF20 = 1;
ProtCfg.field.TxopAllowMM40 = 1;
ProtCfg.field.TxopAllowMM20 = 1;
ProtCfg.field.TxopAllowOfdm = 1;
ProtCfg.field.TxopAllowCck = 1;
ProtCfg.field.RTSThEn = 1;
ProtCfg.field.ProtectNav = ASIC_SHORTNAV;
// update PHY mode and rate
if (pAd->CommonCfg.Channel > 14)
ProtCfg.field.ProtectRate = 0x4000;
ProtCfg.field.ProtectRate |= pAd->CommonCfg.RtsRate;
// Handle legacy(B/G) protection
if (bDisableBGProtect)
{
//ProtCfg.field.ProtectRate = pAd->CommonCfg.RtsRate;
ProtCfg.field.ProtectCtrl = 0;
Protect[0] = ProtCfg.word;
Protect[1] = ProtCfg.word;
}
else
{
//ProtCfg.field.ProtectRate = pAd->CommonCfg.RtsRate;
ProtCfg.field.ProtectCtrl = 0; // CCK do not need to be protected
Protect[0] = ProtCfg.word;
ProtCfg.field.ProtectCtrl = ASIC_CTS; // OFDM needs using CCK to protect
Protect[1] = ProtCfg.word;
}
// Decide HT frame protection.
if ((SetMask & ALLN_SETPROTECT) != 0)
{
switch(OperationMode)
{
case 0x0:
// NO PROTECTION
// 1. all STAs in the BSS are 20/40 MHz HT
// 2. in a 20/40 MHz BSS
// 3. all STAs are 20 MHz in a 20 MHz BSS
// Pure HT, no protection.
// MM20_PROT_CFG
// Reserved (31:27)
// PROT_TXOP(25:20) -- 010111
// PROT_NAV(19:18) -- 01 (Short NAV protection)
// PROT_CTRL(17:16) -- 00 (None)
// PROT_RATE(15:0) -- 0x4004 (OFDM 24M)
Protect[2] = 0x01744004;
// MM40_PROT_CFG
// Reserved (31:27)
// PROT_TXOP(25:20) -- 111111
// PROT_NAV(19:18) -- 01 (Short NAV protection)
// PROT_CTRL(17:16) -- 00 (None)
// PROT_RATE(15:0) -- 0x4084 (duplicate OFDM 24M)
Protect[3] = 0x03f44084;
// CF20_PROT_CFG
// Reserved (31:27)
// PROT_TXOP(25:20) -- 010111
// PROT_NAV(19:18) -- 01 (Short NAV protection)
// PROT_CTRL(17:16) -- 00 (None)
// PROT_RATE(15:0) -- 0x4004 (OFDM 24M)
Protect[4] = 0x01744004;
// CF40_PROT_CFG
// Reserved (31:27)
// PROT_TXOP(25:20) -- 111111
// PROT_NAV(19:18) -- 01 (Short NAV protection)
// PROT_CTRL(17:16) -- 00 (None)
// PROT_RATE(15:0) -- 0x4084 (duplicate OFDM 24M)
Protect[5] = 0x03f44084;
if (bNonGFExist)
{
// PROT_NAV(19:18) -- 01 (Short NAV protection)
// PROT_CTRL(17:16) -- 01 (RTS/CTS)
Protect[4] = 0x01754004;
Protect[5] = 0x03f54084;
}
pAd->CommonCfg.IOTestParm.bRTSLongProtOn = FALSE;
break;
case 1:
// This is "HT non-member protection mode."
// If there may be non-HT STAs in my BSS
ProtCfg.word = 0x01744004; // PROT_CTRL(17:16) : 0 (None)
ProtCfg4.word = 0x03f44084; // duplicate legacy 24M, BW set to 1.
if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_BG_PROTECTION_INUSED))
{
ProtCfg.word = 0x01740003; // ERP Use_Protection bit is set; use the protection rate from Clause 18.
ProtCfg4.word = 0x03f40003; // Don't duplicate RTS/CTS in CCK mode. 0x03f40083;
}
//Assign Protection method for 20&40 MHz packets
ProtCfg.field.ProtectCtrl = ASIC_RTS;
ProtCfg.field.ProtectNav = ASIC_SHORTNAV;
ProtCfg4.field.ProtectCtrl = ASIC_RTS;
ProtCfg4.field.ProtectNav = ASIC_SHORTNAV;
Protect[2] = ProtCfg.word;
Protect[3] = ProtCfg4.word;
Protect[4] = ProtCfg.word;
Protect[5] = ProtCfg4.word;
pAd->CommonCfg.IOTestParm.bRTSLongProtOn = TRUE;
break;
case 2:
// If only HT STAs are in the BSS and at least one is 20 MHz, only protect 40 MHz packets
ProtCfg.word = 0x01744004; // PROT_CTRL(17:16) : 0 (None)
ProtCfg4.word = 0x03f44084; // duplicate legacy 24M, BW set to 1.
//Assign Protection method for 40MHz packets
ProtCfg4.field.ProtectCtrl = ASIC_RTS;
ProtCfg4.field.ProtectNav = ASIC_SHORTNAV;
Protect[2] = ProtCfg.word;
Protect[3] = ProtCfg4.word;
if (bNonGFExist)
{
ProtCfg.field.ProtectCtrl = ASIC_RTS;
ProtCfg.field.ProtectNav = ASIC_SHORTNAV;
}
Protect[4] = ProtCfg.word;
Protect[5] = ProtCfg4.word;
pAd->CommonCfg.IOTestParm.bRTSLongProtOn = FALSE;
break;
case 3:
// HT mixed mode. PROTECT ALL!
// Assign Rate
ProtCfg.word = 0x01744004; //duplicate legacy 24M, BW set to 1.
ProtCfg4.word = 0x03f44084;
// both 20 MHz and 40 MHz are protected. Whether RTS or CTS-to-self is used depends on the B/G protection setting below.
if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_BG_PROTECTION_INUSED))
{
ProtCfg.word = 0x01740003; // ERP Use_Protection bit is set; use the protection rate from Clause 18.
ProtCfg4.word = 0x03f40003; // Don't duplicate RTS/CTS in CCK mode. 0x03f40083
}
//Assign Protection method for 20&40 MHz packets
ProtCfg.field.ProtectCtrl = ASIC_RTS;
ProtCfg.field.ProtectNav = ASIC_SHORTNAV;
ProtCfg4.field.ProtectCtrl = ASIC_RTS;
ProtCfg4.field.ProtectNav = ASIC_SHORTNAV;
Protect[2] = ProtCfg.word;
Protect[3] = ProtCfg4.word;
Protect[4] = ProtCfg.word;
Protect[5] = ProtCfg4.word;
pAd->CommonCfg.IOTestParm.bRTSLongProtOn = TRUE;
break;
case 8:
// Special mode to work around interoperability problems with certain Atheros 11n chips.
Protect[2] = 0x01754004;
Protect[3] = 0x03f54084;
Protect[4] = 0x01754004;
Protect[5] = 0x03f54084;
pAd->CommonCfg.IOTestParm.bRTSLongProtOn = TRUE;
break;
}
}
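// Write back the six protection registers; they sit at consecutive
// 32-bit offsets starting from CCK_PROT_CFG (CCK, OFDM, MM20, MM40,
// GF20, GF40), matching the Protect[0..5] ordering built above.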
offset = CCK_PROT_CFG;
for (i = 0;i < 6;i++)
{
if ((SetMask & (1<< i)))
{
RTMP_IO_WRITE32(pAd, offset + i*4, Protect[i]);
}
}
}
#ifdef RT2870
/*
==========================================================================
Description:
Load RF normal operation-mode setup
==========================================================================
*/
VOID RT30xxLoadRFNormalModeSetup(
IN PRTMP_ADAPTER pAd)
{
UCHAR RFValue;
// RX0_PD & TX0_PD, RF R1 register Bit 2 & Bit 3 to 0 and RF_BLOCK_en,RX1_PD & TX1_PD, Bit0, Bit 4 & Bit5 to 1
RT30xxReadRFRegister(pAd, RF_R01, &RFValue);
RFValue = (RFValue & (~0x0C)) | 0x31;
RT30xxWriteRFRegister(pAd, RF_R01, RFValue);
// TX_LO2_en, RF R15 register Bit 3 to 0
RT30xxReadRFRegister(pAd, RF_R15, &RFValue);
RFValue &= (~0x08);
RT30xxWriteRFRegister(pAd, RF_R15, RFValue);
// TX_LO1_en, RF R17 register Bit 3 to 0
RT30xxReadRFRegister(pAd, RF_R17, &RFValue);
RFValue &= (~0x08);
// to fix rx long range issue
if (((pAd->MACVersion & 0xffff) >= 0x0211) && (pAd->NicConfig2.field.ExternalLNAForG == 0))
{
RFValue |= 0x20;
}
RT30xxWriteRFRegister(pAd, RF_R17, RFValue);
// RX_LO1_en, RF R20 register Bit 3 to 0
RT30xxReadRFRegister(pAd, RF_R20, &RFValue);
RFValue &= (~0x08);
RT30xxWriteRFRegister(pAd, RF_R20, RFValue);
// RX_LO2_en, RF R21 register Bit 3 to 0
RT30xxReadRFRegister(pAd, RF_R21, &RFValue);
RFValue &= (~0x08);
RT30xxWriteRFRegister(pAd, RF_R21, RFValue);
// LDORF_VC, RF R27 register Bit 2 to 0
RT30xxReadRFRegister(pAd, RF_R27, &RFValue);
if ((pAd->MACVersion & 0xffff) < 0x0211)
RFValue = (RFValue & (~0x77)) | 0x3;
else
RFValue = (RFValue & (~0x77));
RT30xxWriteRFRegister(pAd, RF_R27, RFValue);
/* end johnli */
}
/*
==========================================================================
Description:
Load RF sleep-mode setup
==========================================================================
*/
VOID RT30xxLoadRFSleepModeSetup(
IN PRTMP_ADAPTER pAd)
{
UCHAR RFValue;
UINT32 MACValue;
// RF_BLOCK_en. RF R1 register Bit 0 to 0
RT30xxReadRFRegister(pAd, RF_R01, &RFValue);
RFValue &= (~0x01);
RT30xxWriteRFRegister(pAd, RF_R01, RFValue);
// VCO_IC, RF R7 register Bit 4 & Bit 5 to 0
RT30xxReadRFRegister(pAd, RF_R07, &RFValue);
RFValue &= (~0x30);
RT30xxWriteRFRegister(pAd, RF_R07, RFValue);
// Idoh, RF R9 register Bit 1, Bit 2 & Bit 3 to 0
RT30xxReadRFRegister(pAd, RF_R09, &RFValue);
RFValue &= (~0x0E);
RT30xxWriteRFRegister(pAd, RF_R09, RFValue);
// RX_CTB_en, RF R21 register Bit 7 to 0
RT30xxReadRFRegister(pAd, RF_R21, &RFValue);
RFValue &= (~0x80);
RT30xxWriteRFRegister(pAd, RF_R21, RFValue);
// LDORF_VC, RF R27 register Bit 0, Bit 1 & Bit 2 to 1
RT30xxReadRFRegister(pAd, RF_R27, &RFValue);
RFValue |= 0x77;
RT30xxWriteRFRegister(pAd, RF_R27, RFValue);
RTMP_IO_READ32(pAd, LDO_CFG0, &MACValue);
MACValue |= 0x1D000000;
RTMP_IO_WRITE32(pAd, LDO_CFG0, MACValue);
}
/*
==========================================================================
Description:
Reverse RF sleep-mode setup
==========================================================================
*/
VOID RT30xxReverseRFSleepModeSetup(
IN PRTMP_ADAPTER pAd)
{
UCHAR RFValue;
UINT32 MACValue;
// RF_BLOCK_en, RF R1 register Bit 0 to 1
RT30xxReadRFRegister(pAd, RF_R01, &RFValue);
RFValue |= 0x01;
RT30xxWriteRFRegister(pAd, RF_R01, RFValue);
// VCO_IC, RF R7 register Bit 4 & Bit 5 to 1
RT30xxReadRFRegister(pAd, RF_R07, &RFValue);
RFValue |= 0x30;
RT30xxWriteRFRegister(pAd, RF_R07, RFValue);
// Idoh, RF R9 register Bit 1, Bit 2 & Bit 3 to 1
RT30xxReadRFRegister(pAd, RF_R09, &RFValue);
RFValue |= 0x0E;
RT30xxWriteRFRegister(pAd, RF_R09, RFValue);
// RX_CTB_en, RF R21 register Bit 7 to 1
RT30xxReadRFRegister(pAd, RF_R21, &RFValue);
RFValue |= 0x80;
RT30xxWriteRFRegister(pAd, RF_R21, RFValue);
// LDORF_VC, RF R27 register Bit 2 to 0
RT30xxReadRFRegister(pAd, RF_R27, &RFValue);
if ((pAd->MACVersion & 0xffff) < 0x0211)
RFValue = (RFValue & (~0x77)) | 0x3;
else
RFValue = (RFValue & (~0x77));
RT30xxWriteRFRegister(pAd, RF_R27, RFValue);
// RT3071 version E has fixed this issue
if ((pAd->NicConfig2.field.DACTestBit == 1) && ((pAd->MACVersion & 0xffff) < 0x0211))
{
// patch tx EVM issue temporarily
RTMP_IO_READ32(pAd, LDO_CFG0, &MACValue);
MACValue = ((MACValue & 0xE0FFFFFF) | 0x0D000000);
RTMP_IO_WRITE32(pAd, LDO_CFG0, MACValue);
}
else
{
RTMP_IO_READ32(pAd, LDO_CFG0, &MACValue);
MACValue = ((MACValue & 0xE0FFFFFF) | 0x01000000);
RTMP_IO_WRITE32(pAd, LDO_CFG0, MACValue);
}
}
#endif
/*
==========================================================================
Description:
IRQL = PASSIVE_LEVEL
IRQL = DISPATCH_LEVEL
==========================================================================
*/
VOID AsicSwitchChannel(
IN PRTMP_ADAPTER pAd,
IN UCHAR Channel,
IN BOOLEAN bScan)
{
ULONG R2 = 0, R3 = DEFAULT_RF_TX_POWER, R4 = 0;
CHAR TxPwer = 0, TxPwer2 = DEFAULT_RF_TX_POWER; //Bbp94 = BBPR94_DEFAULT, TxPwer2 = DEFAULT_RF_TX_POWER;
UCHAR index;
UINT32 Value = 0; //BbpReg, Value;
RTMP_RF_REGS *RFRegTable;
// Search Tx power value
// We can't use ChannelList to search for the channel, since some central channels' TX power
// is not listed in ChannelList, so use the TxPower array instead.
//
for (index = 0; index < MAX_NUM_OF_CHANNELS; index++)
{
if (Channel == pAd->TxPower[index].Channel)
{
TxPwer = pAd->TxPower[index].Power;
TxPwer2 = pAd->TxPower[index].Power2;
break;
}
}
if (index == MAX_NUM_OF_CHANNELS)
DBGPRINT(RT_DEBUG_ERROR, ("AsicSwitchChannel: Can't find the Channel#%d \n", Channel));
#ifdef RT2870
// The RF programming sequence is different between 3xxx and 2xxx
if ((IS_RT3070(pAd) || IS_RT3090(pAd)) && (
(pAd->RfIcType == RFIC_3022) || (pAd->RfIcType == RFIC_3021) ||
(pAd->RfIcType == RFIC_3020) || (pAd->RfIcType == RFIC_2020)))
{
/* modify by WY for Read RF Reg. error */
UCHAR RFValue;
for (index = 0; index < NUM_OF_3020_CHNL; index++)
{
if (Channel == FreqItems3020[index].Channel)
{
// Programming channel parameters
RT30xxWriteRFRegister(pAd, RF_R02, FreqItems3020[index].N);
RT30xxWriteRFRegister(pAd, RF_R03, FreqItems3020[index].K);
RT30xxReadRFRegister(pAd, RF_R06, &RFValue);
RFValue = (RFValue & 0xFC) | FreqItems3020[index].R;
RT30xxWriteRFRegister(pAd, RF_R06, RFValue);
// Set Tx0 Power
RT30xxReadRFRegister(pAd, RF_R12, &RFValue);
RFValue = (RFValue & 0xE0) | TxPwer;
RT30xxWriteRFRegister(pAd, RF_R12, RFValue);
// Set Tx1 Power
RT30xxReadRFRegister(pAd, RF_R13, &RFValue);
RFValue = (RFValue & 0xE0) | TxPwer2;
RT30xxWriteRFRegister(pAd, RF_R13, RFValue);
// Tx/Rx Stream setting
RT30xxReadRFRegister(pAd, RF_R01, &RFValue);
//if (IS_RT3090(pAd))
// RFValue |= 0x01; // Enable RF block.
RFValue &= 0x03; //clear bit[7~2]
if (pAd->Antenna.field.TxPath == 1)
RFValue |= 0xA0;
else if (pAd->Antenna.field.TxPath == 2)
RFValue |= 0x80;
if (pAd->Antenna.field.RxPath == 1)
RFValue |= 0x50;
else if (pAd->Antenna.field.RxPath == 2)
RFValue |= 0x40;
RT30xxWriteRFRegister(pAd, RF_R01, RFValue);
// Set RF offset
RT30xxReadRFRegister(pAd, RF_R23, &RFValue);
RFValue = (RFValue & 0x80) | pAd->RfFreqOffset;
RT30xxWriteRFRegister(pAd, RF_R23, RFValue);
// Set BW
if (!bScan && (pAd->CommonCfg.BBPCurrentBW == BW_40))
{
RFValue = pAd->Mlme.CaliBW40RfR24;
//DISABLE_11N_CHECK(pAd);
}
else
{
RFValue = pAd->Mlme.CaliBW20RfR24;
}
RT30xxWriteRFRegister(pAd, RF_R24, RFValue);
RT30xxWriteRFRegister(pAd, RF_R31, RFValue);
// Enable RF tuning
RT30xxReadRFRegister(pAd, RF_R07, &RFValue);
RFValue = RFValue | 0x1;
RT30xxWriteRFRegister(pAd, RF_R07, RFValue);
// latch channel for future usage.
pAd->LatchRfRegs.Channel = Channel;
DBGPRINT(RT_DEBUG_TRACE, ("SwitchChannel#%d(RF=%d, Pwr0=%d, Pwr1=%d, %dT), N=0x%02X, K=0x%02X, R=0x%02X\n",
Channel,
pAd->RfIcType,
TxPwer,
TxPwer2,
pAd->Antenna.field.TxPath,
FreqItems3020[index].N,
FreqItems3020[index].K,
FreqItems3020[index].R));
break;
}
}
DBGPRINT(RT_DEBUG_TRACE, ("SwitchChannel#%d(RF=%d, Pwr0=%d, Pwr1=%d, %dT), N=0x%02X, K=0x%02X, R=0x%02X\n",
Channel,
pAd->RfIcType,
TxPwer,
TxPwer2,
pAd->Antenna.field.TxPath,
FreqItems3020[index].N,
FreqItems3020[index].K,
FreqItems3020[index].R));
}
else
#endif // RT2870 //
{
RFRegTable = RF2850RegTable;
switch (pAd->RfIcType)
{
case RFIC_2820:
case RFIC_2850:
case RFIC_2720:
case RFIC_2750:
for (index = 0; index < NUM_OF_2850_CHNL; index++)
{
if (Channel == RFRegTable[index].Channel)
{
R2 = RFRegTable[index].R2;
if (pAd->Antenna.field.TxPath == 1)
{
R2 |= 0x4000; // If TXpath is 1, bit 14 = 1;
}
if (pAd->Antenna.field.RxPath == 2)
{
R2 |= 0x40;	// write 1 to turn off an RX path.
}
else if (pAd->Antenna.field.RxPath == 1)
{
R2 |= 0x20040;	// write 1 to turn off the RX paths
}
if (Channel > 14)
{
// initialize R3, R4
R3 = (RFRegTable[index].R3 & 0xffffc1ff);
R4 = (RFRegTable[index].R4 & (~0x001f87c0)) | (pAd->RfFreqOffset << 15);
// 5G band power range: 0xF9~0x0F; TX0 Reg3 bit9 / TX1 Reg4 bit6 = "0" means the TX power is reduced by 7 dB
// R3
if ((TxPwer >= -7) && (TxPwer < 0))
{
TxPwer = (7+TxPwer);
TxPwer = (TxPwer > 0xF) ? (0xF) : (TxPwer);
R3 |= (TxPwer << 10);
DBGPRINT(RT_DEBUG_ERROR, ("AsicSwitchChannel: TxPwer=%d \n", TxPwer));
}
else
{
TxPwer = (TxPwer > 0xF) ? (0xF) : (TxPwer);
R3 |= (TxPwer << 10) | (1 << 9);
}
// R4
if ((TxPwer2 >= -7) && (TxPwer2 < 0))
{
TxPwer2 = (7+TxPwer2);
TxPwer2 = (TxPwer2 > 0xF) ? (0xF) : (TxPwer2);
R4 |= (TxPwer2 << 7);
DBGPRINT(RT_DEBUG_ERROR, ("AsicSwitchChannel: TxPwer2=%d \n", TxPwer2));
}
else
{
TxPwer2 = (TxPwer2 > 0xF) ? (0xF) : (TxPwer2);
R4 |= (TxPwer2 << 7) | (1 << 6);
}
}
else
{
R3 = (RFRegTable[index].R3 & 0xffffc1ff) | (TxPwer << 9); // set TX power0
R4 = (RFRegTable[index].R4 & (~0x001f87c0)) | (pAd->RfFreqOffset << 15) | (TxPwer2 <<6);// Set freq Offset & TxPwr1
}
// Based on BBP current mode before changing RF channel.
if (!bScan && (pAd->CommonCfg.BBPCurrentBW == BW_40))
{
R4 |=0x200000;
}
// Update variables
pAd->LatchRfRegs.Channel = Channel;
pAd->LatchRfRegs.R1 = RFRegTable[index].R1;
pAd->LatchRfRegs.R2 = R2;
pAd->LatchRfRegs.R3 = R3;
pAd->LatchRfRegs.R4 = R4;
// Set RF value 1's set R3[bit2] = [0]
RTMP_RF_IO_WRITE32(pAd, pAd->LatchRfRegs.R1);
RTMP_RF_IO_WRITE32(pAd, pAd->LatchRfRegs.R2);
RTMP_RF_IO_WRITE32(pAd, (pAd->LatchRfRegs.R3 & (~0x04)));
RTMP_RF_IO_WRITE32(pAd, pAd->LatchRfRegs.R4);
RTMPusecDelay(200);
// Set RF value 2's set R3[bit2] = [1]
RTMP_RF_IO_WRITE32(pAd, pAd->LatchRfRegs.R1);
RTMP_RF_IO_WRITE32(pAd, pAd->LatchRfRegs.R2);
RTMP_RF_IO_WRITE32(pAd, (pAd->LatchRfRegs.R3 | 0x04));
RTMP_RF_IO_WRITE32(pAd, pAd->LatchRfRegs.R4);
RTMPusecDelay(200);
// Set RF value 3's set R3[bit2] = [0]
RTMP_RF_IO_WRITE32(pAd, pAd->LatchRfRegs.R1);
RTMP_RF_IO_WRITE32(pAd, pAd->LatchRfRegs.R2);
RTMP_RF_IO_WRITE32(pAd, (pAd->LatchRfRegs.R3 & (~0x04)));
RTMP_RF_IO_WRITE32(pAd, pAd->LatchRfRegs.R4);
break;
}
}
break;
default:
break;
}
}
// Change BBP settings during the switch from a->g, g->a
if (Channel <= 14)
{
ULONG TxPinCfg = 0x00050F0A;//Gary 2007/08/09 0x050A0A
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R62, (0x37 - GET_LNA_GAIN(pAd)));
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R63, (0x37 - GET_LNA_GAIN(pAd)));
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R64, (0x37 - GET_LNA_GAIN(pAd)));
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R86, 0);//(0x44 - GET_LNA_GAIN(pAd))); // According to Rory's suggestion to solve the middle-range issue.
//RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R82, 0x62);
// Rx High power VGA offset for LNA select
if (pAd->NicConfig2.field.ExternalLNAForG)
{
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R82, 0x62);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R75, 0x46);
}
else
{
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R82, 0x84);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R75, 0x50);
}
// 5G band selection PIN; bit1 and bit2 are complementary
RTMP_IO_READ32(pAd, TX_BAND_CFG, &Value);
Value &= (~0x6);
Value |= (0x04);
RTMP_IO_WRITE32(pAd, TX_BAND_CFG, Value);
// Turn off unused PA or LNA when only 1T or 1R
if (pAd->Antenna.field.TxPath == 1)
{
TxPinCfg &= 0xFFFFFFF3;
}
if (pAd->Antenna.field.RxPath == 1)
{
TxPinCfg &= 0xFFFFF3FF;
}
RTMP_IO_WRITE32(pAd, TX_PIN_CFG, TxPinCfg);
}
else
{
ULONG TxPinCfg = 0x00050F05;//Gary 2007/8/9 0x050505
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R62, (0x37 - GET_LNA_GAIN(pAd)));
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R63, (0x37 - GET_LNA_GAIN(pAd)));
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R64, (0x37 - GET_LNA_GAIN(pAd)));
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R86, 0);//(0x44 - GET_LNA_GAIN(pAd))); // According to Rory's suggestion to solve the middle-range issue.
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R82, 0xF2);
// Rx High power VGA offset for LNA select
if (pAd->NicConfig2.field.ExternalLNAForA)
{
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R75, 0x46);
}
else
{
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R75, 0x50);
}
// 5G band selection PIN; bit1 and bit2 are complementary
RTMP_IO_READ32(pAd, TX_BAND_CFG, &Value);
Value &= (~0x6);
Value |= (0x02);
RTMP_IO_WRITE32(pAd, TX_BAND_CFG, Value);
// Turn off unused PA or LNA when only 1T or 1R
if (pAd->Antenna.field.TxPath == 1)
{
TxPinCfg &= 0xFFFFFFF3;
}
if (pAd->Antenna.field.RxPath == 1)
{
TxPinCfg &= 0xFFFFF3FF;
}
RTMP_IO_WRITE32(pAd, TX_PIN_CFG, TxPinCfg);
}
// R66 should be set according to the channel; use the 20 MHz value when scanning
//RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R66, (0x2E + GET_LNA_GAIN(pAd)));
if (bScan)
RTMPSetAGCInitValue(pAd, BW_20);
else
RTMPSetAGCInitValue(pAd, pAd->CommonCfg.BBPCurrentBW);
//
// On 11a, we should delay and wait for the RF/BBP to become stable;
// the appropriate time is 1000 microseconds.
// 2005/06/05 - On 11g we also need this delay, otherwise it's difficult to pass WHQL.
//
RTMPusecDelay(1000);
DBGPRINT(RT_DEBUG_TRACE, ("SwitchChannel#%d(RF=%d, Pwr0=%lu, Pwr1=%lu, %dT) to , R1=0x%08lx, R2=0x%08lx, R3=0x%08lx, R4=0x%08lx\n",
Channel,
pAd->RfIcType,
(R3 & 0x00003e00) >> 9,
(R4 & 0x000007c0) >> 6,
pAd->Antenna.field.TxPath,
pAd->LatchRfRegs.R1,
pAd->LatchRfRegs.R2,
pAd->LatchRfRegs.R3,
pAd->LatchRfRegs.R4));
}
/*
==========================================================================
Description:
This function is required for 2421 only, and should not be used during
a site survey. It is only required after the NIC has decided to stay on
a channel for a longer period.
When this function is called, it is always after AsicSwitchChannel().
IRQL = PASSIVE_LEVEL
IRQL = DISPATCH_LEVEL
==========================================================================
*/
VOID AsicLockChannel(
IN PRTMP_ADAPTER pAd,
IN UCHAR Channel)
{
}
VOID AsicRfTuningExec(
IN PVOID SystemSpecific1,
IN PVOID FunctionContext,
IN PVOID SystemSpecific2,
IN PVOID SystemSpecific3)
{
}
/*
==========================================================================
Description:
Gives the CCK TX rate 2 dB more TX power.
This routine works only when the link is up in INFRASTRUCTURE mode.
Calculate the desired TX power in RF R3.Tx0~5, considering:
0. whether the current radio environment is noisy (pAd->DrsCounters.fNoisyEnvironment)
1. TxPowerPercentage
2. auto calibration based on TSSI feedback
3. an extra 2 dB for CCK
4. -10 dB at very short distance (AvgRSSI >= -40dB) to the AP
NOTE: Since this routine requires the value of (pAd->DrsCounters.fNoisyEnvironment),
it should be called AFTER MlmeDynamicTxRateSwitching()
==========================================================================
*/
VOID AsicAdjustTxPower(
IN PRTMP_ADAPTER pAd)
{
INT i, j;
CHAR DeltaPwr = 0;
BOOLEAN bAutoTxAgc = FALSE;
UCHAR TssiRef, *pTssiMinusBoundary, *pTssiPlusBoundary, TxAgcStep;
UCHAR BbpR1 = 0, BbpR49 = 0, idx;
PCHAR pTxAgcCompensate;
ULONG TxPwr[5];
CHAR Value;
#ifdef RT2860
if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_DOZE)
|| (pAd->bPCIclkOff == TRUE)
|| RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_IDLE_RADIO_OFF)
|| RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_BSS_SCAN_IN_PROGRESS))
return;
#endif
if (pAd->CommonCfg.BBPCurrentBW == BW_40)
{
if (pAd->CommonCfg.CentralChannel > 14)
{
TxPwr[0] = pAd->Tx40MPwrCfgABand[0];
TxPwr[1] = pAd->Tx40MPwrCfgABand[1];
TxPwr[2] = pAd->Tx40MPwrCfgABand[2];
TxPwr[3] = pAd->Tx40MPwrCfgABand[3];
TxPwr[4] = pAd->Tx40MPwrCfgABand[4];
}
else
{
TxPwr[0] = pAd->Tx40MPwrCfgGBand[0];
TxPwr[1] = pAd->Tx40MPwrCfgGBand[1];
TxPwr[2] = pAd->Tx40MPwrCfgGBand[2];
TxPwr[3] = pAd->Tx40MPwrCfgGBand[3];
TxPwr[4] = pAd->Tx40MPwrCfgGBand[4];
}
}
else
{
if (pAd->CommonCfg.Channel > 14)
{
TxPwr[0] = pAd->Tx20MPwrCfgABand[0];
TxPwr[1] = pAd->Tx20MPwrCfgABand[1];
TxPwr[2] = pAd->Tx20MPwrCfgABand[2];
TxPwr[3] = pAd->Tx20MPwrCfgABand[3];
TxPwr[4] = pAd->Tx20MPwrCfgABand[4];
}
else
{
TxPwr[0] = pAd->Tx20MPwrCfgGBand[0];
TxPwr[1] = pAd->Tx20MPwrCfgGBand[1];
TxPwr[2] = pAd->Tx20MPwrCfgGBand[2];
TxPwr[3] = pAd->Tx20MPwrCfgGBand[3];
TxPwr[4] = pAd->Tx20MPwrCfgGBand[4];
}
}
// TX power compensation for temperature variation, based on TSSI. Try every 4 seconds
if (pAd->Mlme.OneSecPeriodicRound % 4 == 0)
{
if (pAd->CommonCfg.Channel <= 14)
{
/* bg channel */
bAutoTxAgc = pAd->bAutoTxAgcG;
TssiRef = pAd->TssiRefG;
pTssiMinusBoundary = &pAd->TssiMinusBoundaryG[0];
pTssiPlusBoundary = &pAd->TssiPlusBoundaryG[0];
TxAgcStep = pAd->TxAgcStepG;
pTxAgcCompensate = &pAd->TxAgcCompensateG;
}
else
{
/* a channel */
bAutoTxAgc = pAd->bAutoTxAgcA;
TssiRef = pAd->TssiRefA;
pTssiMinusBoundary = &pAd->TssiMinusBoundaryA[0];
pTssiPlusBoundary = &pAd->TssiPlusBoundaryA[0];
TxAgcStep = pAd->TxAgcStepA;
pTxAgcCompensate = &pAd->TxAgcCompensateA;
}
if (bAutoTxAgc)
{
/* BbpR1 is unsigned char */
RTMP_BBP_IO_READ8_BY_REG_ID(pAd, BBP_R49, &BbpR49);
/* (p) TssiPlusBoundaryG[0] = 0 = (m) TssiMinusBoundaryG[0] */
/* compensate: +4 +3 +2 +1 0 -1 -2 -3 -4 * steps */
/* step value is defined in pAd->TxAgcStepG for tx power value */
/* [4]+1+[4] p4 p3 p2 p1 o1 m1 m2 m3 m4 */
/* ex: 0x00 0x15 0x25 0x45 0x88 0xA0 0xB5 0xD0 0xF0
above value are examined in mass factory production */
/* [4] [3] [2] [1] [0] [1] [2] [3] [4] */
/* plus (+) is 0x00 ~ 0x45, minus (-) is 0xa0 ~ 0xf0 */
/* if value is between p1 ~ o1 or o1 ~ m1, no need to adjust tx power */
/* if value is 0xa5, tx power will be -= TxAgcStep*(2-1) */
if (BbpR49 > pTssiMinusBoundary[1])
{
// Reading is larger than the reference value;
// check how much we need to decrease the Tx power
for (idx = 1; idx < 5; idx++)
{
if (BbpR49 <= pTssiMinusBoundary[idx]) // Found the range
break;
}
// The compensation is (idx - 1) steps down; idx == 1 means there is nothing to compensate
*pTxAgcCompensate = -(TxAgcStep * (idx-1));
DeltaPwr += (*pTxAgcCompensate);
DBGPRINT(RT_DEBUG_TRACE, ("-- Tx Power, BBP R1=%x, TssiRef=%x, TxAgcStep=%x, step = -%d\n",
BbpR49, TssiRef, TxAgcStep, idx-1));
}
else if (BbpR49 < pTssiPlusBoundary[1])
{
// Reading is smaller than the reference value;
// check how much we need to increase the Tx power
for (idx = 1; idx < 5; idx++)
{
if (BbpR49 >= pTssiPlusBoundary[idx]) // Found the range
break;
}
// The compensation is (idx - 1) steps up; idx == 1 means there is nothing to compensate
*pTxAgcCompensate = TxAgcStep * (idx-1);
DeltaPwr += (*pTxAgcCompensate);
DBGPRINT(RT_DEBUG_TRACE, ("++ Tx Power, BBP R1=%x, TssiRef=%x, TxAgcStep=%x, step = +%d\n",
BbpR49, TssiRef, TxAgcStep, idx-1));
}
else
{
*pTxAgcCompensate = 0;
DBGPRINT(RT_DEBUG_TRACE, (" Tx Power, BBP R49=%x, TssiRef=%x, TxAgcStep=%x, step = +%d\n",
BbpR49, TssiRef, TxAgcStep, 0));
}
}
}
else
{
if (pAd->CommonCfg.Channel <= 14)
{
bAutoTxAgc = pAd->bAutoTxAgcG;
pTxAgcCompensate = &pAd->TxAgcCompensateG;
}
else
{
bAutoTxAgc = pAd->bAutoTxAgcA;
pTxAgcCompensate = &pAd->TxAgcCompensateA;
}
if (bAutoTxAgc)
DeltaPwr += (*pTxAgcCompensate);
}
RTMP_BBP_IO_READ8_BY_REG_ID(pAd, BBP_R1, &BbpR1);
BbpR1 &= 0xFC;
/* calculate delta power based on the percentage specified from UI */
// E2PROM setting is calibrated for maximum TX power (i.e. 100%)
// We lower TX power here according to the percentage specified from UI
if (pAd->CommonCfg.TxPowerPercentage == 0xffffffff) // AUTO TX POWER control
;
else if (pAd->CommonCfg.TxPowerPercentage > 90) // 91 ~ 100% & AUTO, treat as 100% in terms of mW
;
else if (pAd->CommonCfg.TxPowerPercentage > 60) // 61 ~ 90%, treat as 75% in terms of mW // DeltaPwr -= 1;
{
DeltaPwr -= 1;
}
else if (pAd->CommonCfg.TxPowerPercentage > 30) // 31 ~ 60%, treat as 50% in terms of mW // DeltaPwr -= 3;
{
DeltaPwr -= 3;
}
else if (pAd->CommonCfg.TxPowerPercentage > 15) // 16 ~ 30%, treat as 25% in terms of mW // DeltaPwr -= 6;
{
BbpR1 |= 0x01;
}
else if (pAd->CommonCfg.TxPowerPercentage > 9) // 10 ~ 15%, treat as 12.5% in terms of mW // DeltaPwr -= 9;
{
BbpR1 |= 0x01;
DeltaPwr -= 3;
}
else // 0 ~ 9 %, treat as MIN(~3%) in terms of mW // DeltaPwr -= 12;
{
BbpR1 |= 0x02;
}
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R1, BbpR1);
/* reset different new tx power for different TX rate */
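/* Each TX_PWR_CFG word packs eight 4-bit power values (one per rate pair).
   Worked example: with TxPwr[i] = 0x43214321 and DeltaPwr = -2, every
   nibble is lowered by 2 and clamped to the 0..0xF range, giving 0x21002100. */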
for(i=0; i<5; i++)
{
if (TxPwr[i] != 0xffffffff)
{
for (j=0; j<8; j++)
{
Value = (CHAR)((TxPwr[i] >> j*4) & 0x0F); /* 0 ~ 15 */
if ((Value + DeltaPwr) < 0)
{
Value = 0; /* min */
}
else if ((Value + DeltaPwr) > 0xF)
{
Value = 0xF; /* max */
}
else
{
Value += DeltaPwr; /* temperature compensation */
}
/* fill new value to CSR offset */
TxPwr[i] = (TxPwr[i] & ~(0x0000000F << j*4)) | (Value << j*4);
}
/* write tx power value to CSR */
/* TX_PWR_CFG_0 (8 tx rate) for TX power for OFDM 12M/18M
TX power for OFDM 6M/9M
TX power for CCK5.5M/11M
TX power for CCK1M/2M */
/* TX_PWR_CFG_1 ~ TX_PWR_CFG_4 */
RTMP_IO_WRITE32(pAd, TX_PWR_CFG_0 + i*4, TxPwr[i]);
}
}
}
/*
==========================================================================
Description:
Put the PHY to sleep here, and set the next wakeup timer. The PHY does not
wake up automatically. Instead, the MCU issues a TwakeUpInterrupt to the
host after the wakeup timer times out. The driver has to issue a separate
command to wake the PHY up.
IRQL = DISPATCH_LEVEL
==========================================================================
*/
VOID AsicSleepThenAutoWakeup(
IN PRTMP_ADAPTER pAd,
IN USHORT TbttNumToNextWakeUp)
{
RT28XX_STA_SLEEP_THEN_AUTO_WAKEUP(pAd, TbttNumToNextWakeUp);
}
/*
==========================================================================
Description:
AsicForceWakeup() is used whenever a manual wakeup is required.
AsicForceSleep() should only be used when not in an INFRA BSS; when
in an INFRA BSS, we should use AsicSleepThenAutoWakeup() instead.
==========================================================================
*/
VOID AsicForceSleep(
IN PRTMP_ADAPTER pAd)
{
}
/*
==========================================================================
Description:
AsicForceWakeup() is used whenever the Twakeup timer (set via AsicSleepThenAutoWakeup)
has expired.
IRQL = PASSIVE_LEVEL
IRQL = DISPATCH_LEVEL
==========================================================================
*/
VOID AsicForceWakeup(
IN PRTMP_ADAPTER pAd,
#ifdef RT2860
IN UCHAR Level)
#endif
#ifdef RT2870
IN BOOLEAN bFromTx)
#endif
{
DBGPRINT(RT_DEBUG_TRACE, ("--> AsicForceWakeup \n"));
#ifdef RT2860
RT28XX_STA_FORCE_WAKEUP(pAd, Level);
#endif
#ifdef RT2870
RT28XX_STA_FORCE_WAKEUP(pAd, bFromTx);
#endif
}
/*
==========================================================================
Description:
Set My BSSID
IRQL = DISPATCH_LEVEL
==========================================================================
*/
VOID AsicSetBssid(
IN PRTMP_ADAPTER pAd,
IN PUCHAR pBssid)
{
ULONG Addr4;
DBGPRINT(RT_DEBUG_TRACE, ("==============> AsicSetBssid %x:%x:%x:%x:%x:%x\n",
pBssid[0],pBssid[1],pBssid[2],pBssid[3], pBssid[4],pBssid[5]));
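// The BSSID bytes are packed little-endian into two registers, e.g.
// 00:11:22:33:44:55 -> MAC_BSSID_DW0 = 0x33221100, MAC_BSSID_DW1 = 0x00005544.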
Addr4 = (ULONG)(pBssid[0]) |
(ULONG)(pBssid[1] << 8) |
(ULONG)(pBssid[2] << 16) |
(ULONG)(pBssid[3] << 24);
RTMP_IO_WRITE32(pAd, MAC_BSSID_DW0, Addr4);
Addr4 = 0;
// always one BSSID in STA mode
Addr4 = (ULONG)(pBssid[4]) | (ULONG)(pBssid[5] << 8);
RTMP_IO_WRITE32(pAd, MAC_BSSID_DW1, Addr4);
}
VOID AsicSetMcastWC(
IN PRTMP_ADAPTER pAd)
{
MAC_TABLE_ENTRY *pEntry = &pAd->MacTab.Content[MCAST_WCID];
USHORT offset;
pEntry->Sst = SST_ASSOC;
pEntry->Aid = MCAST_WCID; // Softap supports 1 BSSID and use WCID=0 as multicast Wcid index
pEntry->PsMode = PWR_ACTIVE;
pEntry->CurrTxRate = pAd->CommonCfg.MlmeRate;
offset = MAC_WCID_BASE + BSS0Mcast_WCID * HW_WCID_ENTRY_SIZE;
}
/*
==========================================================================
Description:
IRQL = DISPATCH_LEVEL
==========================================================================
*/
VOID AsicDelWcidTab(
IN PRTMP_ADAPTER pAd,
IN UCHAR Wcid)
{
ULONG Addr0 = 0x0, Addr1 = 0x0;
ULONG offset;
DBGPRINT(RT_DEBUG_TRACE, ("AsicDelWcidTab==>Wcid = 0x%x\n",Wcid));
offset = MAC_WCID_BASE + Wcid * HW_WCID_ENTRY_SIZE;
RTMP_IO_WRITE32(pAd, offset, Addr0);
offset += 4;
RTMP_IO_WRITE32(pAd, offset, Addr1);
}
/*
==========================================================================
Description:
IRQL = DISPATCH_LEVEL
==========================================================================
*/
VOID AsicEnableRDG(
IN PRTMP_ADAPTER pAd)
{
TX_LINK_CFG_STRUC TxLinkCfg;
UINT32 Data = 0;
RTMP_IO_READ32(pAd, TX_LINK_CFG, &TxLinkCfg.word);
TxLinkCfg.field.TxRDGEn = 1;
RTMP_IO_WRITE32(pAd, TX_LINK_CFG, TxLinkCfg.word);
RTMP_IO_READ32(pAd, EDCA_AC0_CFG, &Data);
Data &= 0xFFFFFF00;
Data |= 0x80;
RTMP_IO_WRITE32(pAd, EDCA_AC0_CFG, Data);
//OPSTATUS_CLEAR_FLAG(pAd, fOP_STATUS_AGGREGATION_INUSED);
}
/*
==========================================================================
Description:
IRQL = DISPATCH_LEVEL
==========================================================================
*/
VOID AsicDisableRDG(
IN PRTMP_ADAPTER pAd)
{
TX_LINK_CFG_STRUC TxLinkCfg;
UINT32 Data = 0;
RTMP_IO_READ32(pAd, TX_LINK_CFG, &TxLinkCfg.word);
TxLinkCfg.field.TxRDGEn = 0;
RTMP_IO_WRITE32(pAd, TX_LINK_CFG, TxLinkCfg.word);
RTMP_IO_READ32(pAd, EDCA_AC0_CFG, &Data);
Data &= 0xFFFFFF00;
if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_DYNAMIC_BE_TXOP_ACTIVE)
&& (pAd->MacTab.fAnyStationMIMOPSDynamic == FALSE)
)
{
// For CWC test, change txop from 0x30 to 0x20 in TxBurst mode
if (pAd->CommonCfg.bEnableTxBurst)
Data |= 0x20;
}
RTMP_IO_WRITE32(pAd, EDCA_AC0_CFG, Data);
}
/*
==========================================================================
Description:
IRQL = PASSIVE_LEVEL
IRQL = DISPATCH_LEVEL
==========================================================================
*/
VOID AsicDisableSync(
IN PRTMP_ADAPTER pAd)
{
BCN_TIME_CFG_STRUC csr;
DBGPRINT(RT_DEBUG_TRACE, ("--->Disable TSF synchronization\n"));
// 2003-12-20: disabling TSF and TBTT while the NIC is in power-saving has
// the side effect that the NIC never wakes up, because the TSF stops
// and no more TBTT interrupts arrive
pAd->TbttTickCount = 0;
RTMP_IO_READ32(pAd, BCN_TIME_CFG, &csr.word);
csr.field.bBeaconGen = 0;
csr.field.bTBTTEnable = 0;
csr.field.TsfSyncMode = 0;
csr.field.bTsfTicking = 0;
RTMP_IO_WRITE32(pAd, BCN_TIME_CFG, csr.word);
}
/*
==========================================================================
Description:
IRQL = DISPATCH_LEVEL
==========================================================================
*/
VOID AsicEnableBssSync(
IN PRTMP_ADAPTER pAd)
{
BCN_TIME_CFG_STRUC csr;
DBGPRINT(RT_DEBUG_TRACE, ("--->AsicEnableBssSync(INFRA mode)\n"));
RTMP_IO_READ32(pAd, BCN_TIME_CFG, &csr.word);
{
csr.field.BeaconInterval = pAd->CommonCfg.BeaconPeriod << 4; // ASIC register in units of 1/16 TU
csr.field.bTsfTicking = 1;
csr.field.TsfSyncMode = 1; // sync TSF in INFRASTRUCTURE mode
csr.field.bBeaconGen = 0; // do NOT generate BEACON
csr.field.bTBTTEnable = 1;
}
RTMP_IO_WRITE32(pAd, BCN_TIME_CFG, csr.word);
}
/*
==========================================================================
Description:
Note:
The BEACON frame in shared memory must be built before this routine
is called; otherwise a garbage frame may be transmitted every
beacon period.
IRQL = DISPATCH_LEVEL
==========================================================================
*/
VOID AsicEnableIbssSync(
IN PRTMP_ADAPTER pAd)
{
BCN_TIME_CFG_STRUC csr9;
PUCHAR ptr;
UINT i;
DBGPRINT(RT_DEBUG_TRACE, ("--->AsicEnableIbssSync(ADHOC mode. MPDUtotalByteCount = %d)\n", pAd->BeaconTxWI.MPDUtotalByteCount));
RTMP_IO_READ32(pAd, BCN_TIME_CFG, &csr9.word);
csr9.field.bBeaconGen = 0;
csr9.field.bTBTTEnable = 0;
csr9.field.bTsfTicking = 0;
RTMP_IO_WRITE32(pAd, BCN_TIME_CFG, csr9.word);
#ifdef RT2860
// move BEACON TXD and frame content to on-chip memory
ptr = (PUCHAR)&pAd->BeaconTxWI;
for (i=0; i<TXWI_SIZE; i+=4) // 16-byte TXWI field
{
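// assemble a little-endian 32-bit word from the next 4 TXWI bytes
// before writing it to the on-chip beacon buffer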
UINT32 longptr = *ptr + (*(ptr+1)<<8) + (*(ptr+2)<<16) + (*(ptr+3)<<24);
RTMP_IO_WRITE32(pAd, HW_BEACON_BASE0 + i, longptr);
ptr += 4;
}
// start right after the 16-byte TXWI field
ptr = pAd->BeaconBuf;
for (i=0; i< pAd->BeaconTxWI.MPDUtotalByteCount; i+=4)
{
UINT32 longptr = *ptr + (*(ptr+1)<<8) + (*(ptr+2)<<16) + (*(ptr+3)<<24);
RTMP_IO_WRITE32(pAd, HW_BEACON_BASE0 + TXWI_SIZE + i, longptr);
ptr +=4;
}
#endif
#ifdef RT2870
// move BEACON TXD and frame content to on-chip memory
ptr = (PUCHAR)&pAd->BeaconTxWI;
for (i=0; i<TXWI_SIZE; i+=2) // 16-byte TXWI field
{
RTUSBMultiWrite(pAd, HW_BEACON_BASE0 + i, ptr, 2);
ptr += 2;
}
// start right after the 16-byte TXWI field
ptr = pAd->BeaconBuf;
for (i=0; i< pAd->BeaconTxWI.MPDUtotalByteCount; i+=2)
{
RTUSBMultiWrite(pAd, HW_BEACON_BASE0 + TXWI_SIZE + i, ptr, 2);
ptr +=2;
}
#endif // RT2870 //
// start sending BEACON
csr9.field.BeaconInterval = pAd->CommonCfg.BeaconPeriod << 4; // ASIC register in units of 1/16 TU
csr9.field.bTsfTicking = 1;
csr9.field.TsfSyncMode = 2; // sync TSF in IBSS mode
csr9.field.bTBTTEnable = 1;
csr9.field.bBeaconGen = 1;
RTMP_IO_WRITE32(pAd, BCN_TIME_CFG, csr9.word);
}
/*
==========================================================================
Description:
IRQL = PASSIVE_LEVEL
IRQL = DISPATCH_LEVEL
==========================================================================
*/
VOID AsicSetEdcaParm(
IN PRTMP_ADAPTER pAd,
IN PEDCA_PARM pEdcaParm)
{
EDCA_AC_CFG_STRUC Ac0Cfg, Ac1Cfg, Ac2Cfg, Ac3Cfg;
AC_TXOP_CSR0_STRUC csr0;
AC_TXOP_CSR1_STRUC csr1;
AIFSN_CSR_STRUC AifsnCsr;
CWMIN_CSR_STRUC CwminCsr;
CWMAX_CSR_STRUC CwmaxCsr;
int i;
Ac0Cfg.word = 0;
Ac1Cfg.word = 0;
Ac2Cfg.word = 0;
Ac3Cfg.word = 0;
if ((pEdcaParm == NULL) || (pEdcaParm->bValid == FALSE))
{
DBGPRINT(RT_DEBUG_TRACE,("AsicSetEdcaParm\n"));
OPSTATUS_CLEAR_FLAG(pAd, fOP_STATUS_WMM_INUSED);
for (i=0; i<MAX_LEN_OF_MAC_TABLE; i++)
{
if (pAd->MacTab.Content[i].ValidAsCLI || pAd->MacTab.Content[i].ValidAsApCli)
CLIENT_STATUS_CLEAR_FLAG(&pAd->MacTab.Content[i], fCLIENT_STATUS_WMM_CAPABLE);
}
//========================================================
// MAC registers keep a copy.
//========================================================
if( pAd->CommonCfg.bEnableTxBurst )
{
// For CWC test, change txop from 0x30 to 0x20 in TxBurst mode
Ac0Cfg.field.AcTxop = 0x20; // Suggested by John for TxBurst in HT mode
}
else
Ac0Cfg.field.AcTxop = 0; // QID_AC_BE
Ac0Cfg.field.Cwmin = CW_MIN_IN_BITS;
Ac0Cfg.field.Cwmax = CW_MAX_IN_BITS;
Ac0Cfg.field.Aifsn = 2;
RTMP_IO_WRITE32(pAd, EDCA_AC0_CFG, Ac0Cfg.word);
Ac1Cfg.field.AcTxop = 0; // QID_AC_BK
Ac1Cfg.field.Cwmin = CW_MIN_IN_BITS;
Ac1Cfg.field.Cwmax = CW_MAX_IN_BITS;
Ac1Cfg.field.Aifsn = 2;
RTMP_IO_WRITE32(pAd, EDCA_AC1_CFG, Ac1Cfg.word);
if (pAd->CommonCfg.PhyMode == PHY_11B)
{
Ac2Cfg.field.AcTxop = 192; // AC_VI: 192*32us ~= 6ms
Ac3Cfg.field.AcTxop = 96; // AC_VO: 96*32us ~= 3ms
}
else
{
Ac2Cfg.field.AcTxop = 96; // AC_VI: 96*32us ~= 3ms
Ac3Cfg.field.AcTxop = 48; // AC_VO: 48*32us ~= 1.5ms
}
Ac2Cfg.field.Cwmin = CW_MIN_IN_BITS;
Ac2Cfg.field.Cwmax = CW_MAX_IN_BITS;
Ac2Cfg.field.Aifsn = 2;
RTMP_IO_WRITE32(pAd, EDCA_AC2_CFG, Ac2Cfg.word);
Ac3Cfg.field.Cwmin = CW_MIN_IN_BITS;
Ac3Cfg.field.Cwmax = CW_MAX_IN_BITS;
Ac3Cfg.field.Aifsn = 2;
RTMP_IO_WRITE32(pAd, EDCA_AC3_CFG, Ac3Cfg.word);
//========================================================
// DMA Register has a copy too.
//========================================================
csr0.field.Ac0Txop = 0; // QID_AC_BE
csr0.field.Ac1Txop = 0; // QID_AC_BK
RTMP_IO_WRITE32(pAd, WMM_TXOP0_CFG, csr0.word);
if (pAd->CommonCfg.PhyMode == PHY_11B)
{
csr1.field.Ac2Txop = 192; // AC_VI: 192*32us ~= 6ms
csr1.field.Ac3Txop = 96; // AC_VO: 96*32us ~= 3ms
}
else
{
csr1.field.Ac2Txop = 96; // AC_VI: 96*32us ~= 3ms
csr1.field.Ac3Txop = 48; // AC_VO: 48*32us ~= 1.5ms
}
RTMP_IO_WRITE32(pAd, WMM_TXOP1_CFG, csr1.word);
CwminCsr.word = 0;
CwminCsr.field.Cwmin0 = CW_MIN_IN_BITS;
CwminCsr.field.Cwmin1 = CW_MIN_IN_BITS;
CwminCsr.field.Cwmin2 = CW_MIN_IN_BITS;
CwminCsr.field.Cwmin3 = CW_MIN_IN_BITS;
RTMP_IO_WRITE32(pAd, WMM_CWMIN_CFG, CwminCsr.word);
CwmaxCsr.word = 0;
CwmaxCsr.field.Cwmax0 = CW_MAX_IN_BITS;
CwmaxCsr.field.Cwmax1 = CW_MAX_IN_BITS;
CwmaxCsr.field.Cwmax2 = CW_MAX_IN_BITS;
CwmaxCsr.field.Cwmax3 = CW_MAX_IN_BITS;
RTMP_IO_WRITE32(pAd, WMM_CWMAX_CFG, CwmaxCsr.word);
RTMP_IO_WRITE32(pAd, WMM_AIFSN_CFG, 0x00002222);
NdisZeroMemory(&pAd->CommonCfg.APEdcaParm, sizeof(EDCA_PARM));
}
else
{
OPSTATUS_SET_FLAG(pAd, fOP_STATUS_WMM_INUSED);
//========================================================
// MAC Register has a copy.
//========================================================
//
// Modify Cwmin/Cwmax/Txop on queue[QID_AC_VI], recommended by Jerry 2005/07/27,
// to degrade our VIDEO queue's throughput for the WiFi WMM S3T07 issue.
//
//pEdcaParm->Txop[QID_AC_VI] = pEdcaParm->Txop[QID_AC_VI] * 7 / 10; // rt2860c need this
Ac0Cfg.field.AcTxop = pEdcaParm->Txop[QID_AC_BE];
Ac0Cfg.field.Cwmin= pEdcaParm->Cwmin[QID_AC_BE];
Ac0Cfg.field.Cwmax = pEdcaParm->Cwmax[QID_AC_BE];
Ac0Cfg.field.Aifsn = pEdcaParm->Aifsn[QID_AC_BE]; //+1;
Ac1Cfg.field.AcTxop = pEdcaParm->Txop[QID_AC_BK];
Ac1Cfg.field.Cwmin = pEdcaParm->Cwmin[QID_AC_BK]; //+2;
Ac1Cfg.field.Cwmax = pEdcaParm->Cwmax[QID_AC_BK];
Ac1Cfg.field.Aifsn = pEdcaParm->Aifsn[QID_AC_BK]; //+1;
Ac2Cfg.field.AcTxop = (pEdcaParm->Txop[QID_AC_VI] * 6) / 10;
Ac2Cfg.field.Cwmin = pEdcaParm->Cwmin[QID_AC_VI];
Ac2Cfg.field.Cwmax = pEdcaParm->Cwmax[QID_AC_VI];
Ac2Cfg.field.Aifsn = pEdcaParm->Aifsn[QID_AC_VI];
{
// Tuning for Wi-Fi WMM S06
if (pAd->CommonCfg.bWiFiTest &&
pEdcaParm->Aifsn[QID_AC_VI] == 10)
Ac2Cfg.field.Aifsn -= 1;
// Tuning for TGn Wi-Fi 5.2.32
// STA TestBed changes in this item: Conexant legacy STA ==> Broadcom 11n STA
if (STA_TGN_WIFI_ON(pAd) &&
pEdcaParm->Aifsn[QID_AC_VI] == 10)
{
Ac0Cfg.field.Aifsn = 3;
Ac2Cfg.field.AcTxop = 5;
}
#ifdef RT2870
if (pAd->RfIcType == RFIC_3020 || pAd->RfIcType == RFIC_2020)
{
// Tuning for WiFi WMM S3-T07: connexant legacy sta ==> broadcom 11n sta.
Ac2Cfg.field.Aifsn = 5;
}
#endif
}
Ac3Cfg.field.AcTxop = pEdcaParm->Txop[QID_AC_VO];
Ac3Cfg.field.Cwmin = pEdcaParm->Cwmin[QID_AC_VO];
Ac3Cfg.field.Cwmax = pEdcaParm->Cwmax[QID_AC_VO];
Ac3Cfg.field.Aifsn = pEdcaParm->Aifsn[QID_AC_VO];
//#ifdef WIFI_TEST
if (pAd->CommonCfg.bWiFiTest)
{
if (Ac3Cfg.field.AcTxop == 102)
{
Ac0Cfg.field.AcTxop = pEdcaParm->Txop[QID_AC_BE] ? pEdcaParm->Txop[QID_AC_BE] : 10;
Ac0Cfg.field.Aifsn = pEdcaParm->Aifsn[QID_AC_BE]-1; /* AIFSN must be >= 1 */
Ac1Cfg.field.AcTxop = pEdcaParm->Txop[QID_AC_BK];
Ac1Cfg.field.Aifsn = pEdcaParm->Aifsn[QID_AC_BK];
Ac2Cfg.field.AcTxop = pEdcaParm->Txop[QID_AC_VI];
} /* End of if */
}
//#endif // WIFI_TEST //
RTMP_IO_WRITE32(pAd, EDCA_AC0_CFG, Ac0Cfg.word);
RTMP_IO_WRITE32(pAd, EDCA_AC1_CFG, Ac1Cfg.word);
RTMP_IO_WRITE32(pAd, EDCA_AC2_CFG, Ac2Cfg.word);
RTMP_IO_WRITE32(pAd, EDCA_AC3_CFG, Ac3Cfg.word);
//========================================================
// DMA Register has a copy too.
//========================================================
csr0.field.Ac0Txop = Ac0Cfg.field.AcTxop;
csr0.field.Ac1Txop = Ac1Cfg.field.AcTxop;
RTMP_IO_WRITE32(pAd, WMM_TXOP0_CFG, csr0.word);
csr1.field.Ac2Txop = Ac2Cfg.field.AcTxop;
csr1.field.Ac3Txop = Ac3Cfg.field.AcTxop;
RTMP_IO_WRITE32(pAd, WMM_TXOP1_CFG, csr1.word);
CwminCsr.word = 0;
CwminCsr.field.Cwmin0 = pEdcaParm->Cwmin[QID_AC_BE];
CwminCsr.field.Cwmin1 = pEdcaParm->Cwmin[QID_AC_BK];
CwminCsr.field.Cwmin2 = pEdcaParm->Cwmin[QID_AC_VI];
CwminCsr.field.Cwmin3 = pEdcaParm->Cwmin[QID_AC_VO] - 1; //for TGn wifi test
RTMP_IO_WRITE32(pAd, WMM_CWMIN_CFG, CwminCsr.word);
CwmaxCsr.word = 0;
CwmaxCsr.field.Cwmax0 = pEdcaParm->Cwmax[QID_AC_BE];
CwmaxCsr.field.Cwmax1 = pEdcaParm->Cwmax[QID_AC_BK];
CwmaxCsr.field.Cwmax2 = pEdcaParm->Cwmax[QID_AC_VI];
CwmaxCsr.field.Cwmax3 = pEdcaParm->Cwmax[QID_AC_VO];
RTMP_IO_WRITE32(pAd, WMM_CWMAX_CFG, CwmaxCsr.word);
AifsnCsr.word = 0;
AifsnCsr.field.Aifsn0 = Ac0Cfg.field.Aifsn; //pEdcaParm->Aifsn[QID_AC_BE];
AifsnCsr.field.Aifsn1 = Ac1Cfg.field.Aifsn; //pEdcaParm->Aifsn[QID_AC_BK];
AifsnCsr.field.Aifsn2 = Ac2Cfg.field.Aifsn; //pEdcaParm->Aifsn[QID_AC_VI];
{
// Tuning for Wi-Fi WMM S06
if (pAd->CommonCfg.bWiFiTest &&
pEdcaParm->Aifsn[QID_AC_VI] == 10)
AifsnCsr.field.Aifsn2 = Ac2Cfg.field.Aifsn - 4;
// Tuning for TGn Wi-Fi 5.2.32
// STA TestBed changes in this item: Conexant legacy STA ==> Broadcom 11n STA
if (STA_TGN_WIFI_ON(pAd) &&
pEdcaParm->Aifsn[QID_AC_VI] == 10)
{
AifsnCsr.field.Aifsn0 = 3;
AifsnCsr.field.Aifsn2 = 7;
}
#ifdef RT2870
if (INFRA_ON(pAd))
CLIENT_STATUS_SET_FLAG(&pAd->MacTab.Content[BSSID_WCID], fCLIENT_STATUS_WMM_CAPABLE);
#endif
}
AifsnCsr.field.Aifsn3 = Ac3Cfg.field.Aifsn - 1; //pEdcaParm->Aifsn[QID_AC_VO]; //for TGn wifi test
#ifdef RT2870
if (pAd->RfIcType == RFIC_3020 || pAd->RfIcType == RFIC_2020)
AifsnCsr.field.Aifsn2 = 0x2; //pEdcaParm->Aifsn[QID_AC_VI]; //for WiFi WMM S4-T04.
#endif
RTMP_IO_WRITE32(pAd, WMM_AIFSN_CFG, AifsnCsr.word);
NdisMoveMemory(&pAd->CommonCfg.APEdcaParm, pEdcaParm, sizeof(EDCA_PARM));
if (!ADHOC_ON(pAd))
{
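// Txop values are kept in 32 us units; "<<5" converts them to
// microseconds for the trace output below.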
DBGPRINT(RT_DEBUG_TRACE,("EDCA [#%d]: AIFSN CWmin CWmax TXOP(us) ACM\n", pEdcaParm->EdcaUpdateCount));
DBGPRINT(RT_DEBUG_TRACE,(" AC_BE %2d %2d %2d %4d %d\n",
pEdcaParm->Aifsn[0],
pEdcaParm->Cwmin[0],
pEdcaParm->Cwmax[0],
pEdcaParm->Txop[0]<<5,
pEdcaParm->bACM[0]));
DBGPRINT(RT_DEBUG_TRACE,(" AC_BK %2d %2d %2d %4d %d\n",
pEdcaParm->Aifsn[1],
pEdcaParm->Cwmin[1],
pEdcaParm->Cwmax[1],
pEdcaParm->Txop[1]<<5,
pEdcaParm->bACM[1]));
DBGPRINT(RT_DEBUG_TRACE,(" AC_VI %2d %2d %2d %4d %d\n",
pEdcaParm->Aifsn[2],
pEdcaParm->Cwmin[2],
pEdcaParm->Cwmax[2],
pEdcaParm->Txop[2]<<5,
pEdcaParm->bACM[2]));
DBGPRINT(RT_DEBUG_TRACE,(" AC_VO %2d %2d %2d %4d %d\n",
pEdcaParm->Aifsn[3],
pEdcaParm->Cwmin[3],
pEdcaParm->Cwmax[3],
pEdcaParm->Txop[3]<<5,
pEdcaParm->bACM[3]));
}
}
}
/*
==========================================================================
Description:
IRQL = PASSIVE_LEVEL
IRQL = DISPATCH_LEVEL
==========================================================================
*/
VOID AsicSetSlotTime(
IN PRTMP_ADAPTER pAd,
IN BOOLEAN bUseShortSlotTime)
{
ULONG SlotTime;
UINT32 RegValue = 0;
if (pAd->CommonCfg.Channel > 14)
bUseShortSlotTime = TRUE;
if (bUseShortSlotTime)
OPSTATUS_SET_FLAG(pAd, fOP_STATUS_SHORT_SLOT_INUSED);
else
OPSTATUS_CLEAR_FLAG(pAd, fOP_STATUS_SHORT_SLOT_INUSED);
SlotTime = (bUseShortSlotTime)? 9 : 20;
{
// force using short SLOT time for FAE to demo performance when TxBurst is ON
if (((pAd->StaActive.SupportedPhyInfo.bHtEnable == FALSE) && (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_WMM_INUSED)))
|| ((pAd->StaActive.SupportedPhyInfo.bHtEnable == TRUE) && (pAd->CommonCfg.BACapability.field.Policy == BA_NOTUSE))
)
{
// In this case we assume a Wi-Fi test is running
// and do not switch to the short slot time even when bEnableTxBurst is TRUE.
}
else if (pAd->CommonCfg.bEnableTxBurst)
SlotTime = 9;
}
//
// For some reason the short slot time is generally preferred, but ad hoc
// mode is forced back to the long (20 us) slot time below.
//
// ToDo: should consider 11b capability.
//
if (pAd->StaCfg.BssType == BSS_ADHOC)
SlotTime = 20;
RTMP_IO_READ32(pAd, BKOFF_SLOT_CFG, &RegValue);
RegValue = RegValue & 0xFFFFFF00;
RegValue |= SlotTime;
RTMP_IO_WRITE32(pAd, BKOFF_SLOT_CFG, RegValue);
}
/*
========================================================================
Description:
Add shared key information into the ASIC:
update the shared key, TxMic and RxMic in the ASIC shared-key table and
update its CipherAlg in the ASIC shared-key mode register.
Return:
========================================================================
*/
VOID AsicAddSharedKeyEntry(
IN PRTMP_ADAPTER pAd,
IN UCHAR BssIndex,
IN UCHAR KeyIdx,
IN UCHAR CipherAlg,
IN PUCHAR pKey,
IN PUCHAR pTxMic,
IN PUCHAR pRxMic)
{
ULONG offset; //, csr0;
SHAREDKEY_MODE_STRUC csr1;
#ifdef RT2860
INT i;
#endif
DBGPRINT(RT_DEBUG_TRACE, ("AsicAddSharedKeyEntry BssIndex=%d, KeyIdx=%d\n", BssIndex,KeyIdx));
//============================================================================================
DBGPRINT(RT_DEBUG_TRACE,("AsicAddSharedKeyEntry: %s key #%d\n", CipherName[CipherAlg], BssIndex*4 + KeyIdx));
DBGPRINT_RAW(RT_DEBUG_TRACE, (" Key = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
pKey[0],pKey[1],pKey[2],pKey[3],pKey[4],pKey[5],pKey[6],pKey[7],pKey[8],pKey[9],pKey[10],pKey[11],pKey[12],pKey[13],pKey[14],pKey[15]));
if (pRxMic)
{
DBGPRINT_RAW(RT_DEBUG_TRACE, (" Rx MIC Key = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
pRxMic[0],pRxMic[1],pRxMic[2],pRxMic[3],pRxMic[4],pRxMic[5],pRxMic[6],pRxMic[7]));
}
if (pTxMic)
{
DBGPRINT_RAW(RT_DEBUG_TRACE, (" Tx MIC Key = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
pTxMic[0],pTxMic[1],pTxMic[2],pTxMic[3],pTxMic[4],pTxMic[5],pTxMic[6],pTxMic[7]));
}
//============================================================================================
//
// fill key material - key + TX MIC + RX MIC
//
offset = SHARED_KEY_TABLE_BASE + (4*BssIndex + KeyIdx)*HW_KEY_ENTRY_SIZE;
#ifdef RT2860
for (i=0; i<MAX_LEN_OF_SHARE_KEY; i++)
{
RTMP_IO_WRITE8(pAd, offset + i, pKey[i]);
}
#endif
#ifdef RT2870
RTUSBMultiWrite(pAd, offset, pKey, MAX_LEN_OF_SHARE_KEY);
#endif
offset += MAX_LEN_OF_SHARE_KEY;
if (pTxMic)
{
#ifdef RT2860
for (i=0; i<8; i++)
{
RTMP_IO_WRITE8(pAd, offset + i, pTxMic[i]);
}
#endif
#ifdef RT2870
RTUSBMultiWrite(pAd, offset, pTxMic, 8);
#endif
}
offset += 8;
if (pRxMic)
{
#ifdef RT2860
for (i=0; i<8; i++)
{
RTMP_IO_WRITE8(pAd, offset + i, pRxMic[i]);
}
#endif
#ifdef RT2870
RTUSBMultiWrite(pAd, offset, pRxMic, 8);
#endif
}
//
	// Update the cipher algorithm. A WSTA always uses BSS0.
//
RTMP_IO_READ32(pAd, SHARED_KEY_MODE_BASE+4*(BssIndex/2), &csr1.word);
DBGPRINT(RT_DEBUG_TRACE,("Read: SHARED_KEY_MODE_BASE at this Bss[%d] KeyIdx[%d]= 0x%x \n", BssIndex,KeyIdx, csr1.word));
if ((BssIndex%2) == 0)
{
if (KeyIdx == 0)
csr1.field.Bss0Key0CipherAlg = CipherAlg;
else if (KeyIdx == 1)
csr1.field.Bss0Key1CipherAlg = CipherAlg;
else if (KeyIdx == 2)
csr1.field.Bss0Key2CipherAlg = CipherAlg;
else
csr1.field.Bss0Key3CipherAlg = CipherAlg;
}
else
{
if (KeyIdx == 0)
csr1.field.Bss1Key0CipherAlg = CipherAlg;
else if (KeyIdx == 1)
csr1.field.Bss1Key1CipherAlg = CipherAlg;
else if (KeyIdx == 2)
csr1.field.Bss1Key2CipherAlg = CipherAlg;
else
csr1.field.Bss1Key3CipherAlg = CipherAlg;
}
DBGPRINT(RT_DEBUG_TRACE,("Write: SHARED_KEY_MODE_BASE at this Bss[%d] = 0x%x \n", BssIndex, csr1.word));
RTMP_IO_WRITE32(pAd, SHARED_KEY_MODE_BASE+4*(BssIndex/2), csr1.word);
}
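/*
	Worked example (illustrative only; derived from the offset math above):
	for BssIndex = 1 and KeyIdx = 2 the entry number is 4*1 + 2 = 6, so the
	key material lands at SHARED_KEY_TABLE_BASE + 6*HW_KEY_ENTRY_SIZE,
	followed by the 8-byte TX MIC and then the 8-byte RX MIC. The cipher
	mode for that entry lives in the SHARED_KEY_MODE_BASE word at offset
	4*(BssIndex/2), which packs the key modes of two BSS indexes per word.
*/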
// IRQL = DISPATCH_LEVEL
VOID AsicRemoveSharedKeyEntry(
IN PRTMP_ADAPTER pAd,
IN UCHAR BssIndex,
IN UCHAR KeyIdx)
{
	SHAREDKEY_MODE_STRUC csr1;
	// Validate the indexes before touching the hardware table.
	ASSERT(BssIndex < 4);
	ASSERT(KeyIdx < 4);
	DBGPRINT(RT_DEBUG_TRACE,("AsicRemoveSharedKeyEntry: #%d \n", BssIndex*4 + KeyIdx));
	RTMP_IO_READ32(pAd, SHARED_KEY_MODE_BASE+4*(BssIndex/2), &csr1.word);
if ((BssIndex%2) == 0)
{
if (KeyIdx == 0)
csr1.field.Bss0Key0CipherAlg = 0;
else if (KeyIdx == 1)
csr1.field.Bss0Key1CipherAlg = 0;
else if (KeyIdx == 2)
csr1.field.Bss0Key2CipherAlg = 0;
else
csr1.field.Bss0Key3CipherAlg = 0;
}
else
{
if (KeyIdx == 0)
csr1.field.Bss1Key0CipherAlg = 0;
else if (KeyIdx == 1)
csr1.field.Bss1Key1CipherAlg = 0;
else if (KeyIdx == 2)
csr1.field.Bss1Key2CipherAlg = 0;
else
csr1.field.Bss1Key3CipherAlg = 0;
}
DBGPRINT(RT_DEBUG_TRACE,("Write: SHARED_KEY_MODE_BASE at this Bss[%d] = 0x%x \n", BssIndex, csr1.word));
RTMP_IO_WRITE32(pAd, SHARED_KEY_MODE_BASE+4*(BssIndex/2), csr1.word);
}
VOID AsicUpdateWCIDAttribute(
IN PRTMP_ADAPTER pAd,
IN USHORT WCID,
IN UCHAR BssIndex,
IN UCHAR CipherAlg,
IN BOOLEAN bUsePairewiseKeyTable)
{
ULONG WCIDAttri = 0, offset;
//
// Update WCID attribute.
	// Only the TX key may update the WCID attribute.
//
offset = MAC_WCID_ATTRIBUTE_BASE + (WCID * HW_WCID_ATTRI_SIZE);
WCIDAttri = (BssIndex << 4) | (CipherAlg << 1) | (bUsePairewiseKeyTable);
RTMP_IO_WRITE32(pAd, offset, WCIDAttri);
}
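/*
	Sketch of the attribute packing used above. Only the shift layout is
	taken from the code; the CIPHER_AES value is an assumption for the
	example: with BssIndex = 0, CipherAlg = CIPHER_AES (assumed 4) and a
	pairwise key, WCIDAttri = (0 << 4) | (4 << 1) | 1 = 0x09.
*/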
VOID AsicUpdateWCIDIVEIV(
IN PRTMP_ADAPTER pAd,
IN USHORT WCID,
IN ULONG uIV,
IN ULONG uEIV)
{
ULONG offset;
offset = MAC_IVEIV_TABLE_BASE + (WCID * HW_IVEIV_ENTRY_SIZE);
RTMP_IO_WRITE32(pAd, offset, uIV);
RTMP_IO_WRITE32(pAd, offset + 4, uEIV);
}
VOID AsicUpdateRxWCIDTable(
IN PRTMP_ADAPTER pAd,
IN USHORT WCID,
IN PUCHAR pAddr)
{
ULONG offset;
ULONG Addr;
offset = MAC_WCID_BASE + (WCID * HW_WCID_ENTRY_SIZE);
Addr = pAddr[0] + (pAddr[1] << 8) +(pAddr[2] << 16) +(pAddr[3] << 24);
RTMP_IO_WRITE32(pAd, offset, Addr);
Addr = pAddr[4] + (pAddr[5] << 8);
RTMP_IO_WRITE32(pAd, offset + 4, Addr);
}
/*
========================================================================
Routine Description:
Set Cipher Key, Cipher algorithm, IV/EIV to Asic
Arguments:
pAd Pointer to our adapter
WCID WCID Entry number.
		BssIndex				BSSID index; for a station, or when multiple BSSID
								is not supported, this value should be 0.
		KeyIdx					This KeyIdx will be set as the IV's KeyID if bTxKey
								is enabled.
		pCipherKey				Pointer to the Cipher Key.
		bUsePairewiseKeyTable	TRUE means the key is saved in the Pairwise key
								table, otherwise in the Shared key table.
		bTxKey					TRUE if this is the transmit key.
	Return Value:
		None
	Note:
		This routine writes all key-related state to the ASIC: the WCID
		attribute, the cipher key, the cipher algorithm and the IV/EIV.
		The IV/EIV is only updated when this CipherKey is the transmit key,
		because the ASIC selects the cipher key based on the IV's KeyID.
		If bTxKey is FALSE this is not the TX key, but it may still be an
		RX key.
		In AP mode bTxKey must always be TRUE.
========================================================================
*/
VOID AsicAddKeyEntry(
IN PRTMP_ADAPTER pAd,
IN USHORT WCID,
IN UCHAR BssIndex,
IN UCHAR KeyIdx,
IN PCIPHER_KEY pCipherKey,
IN BOOLEAN bUsePairewiseKeyTable,
IN BOOLEAN bTxKey)
{
ULONG offset;
UCHAR IV4 = 0;
PUCHAR pKey = pCipherKey->Key;
PUCHAR pTxMic = pCipherKey->TxMic;
PUCHAR pRxMic = pCipherKey->RxMic;
PUCHAR pTxtsc = pCipherKey->TxTsc;
UCHAR CipherAlg = pCipherKey->CipherAlg;
SHAREDKEY_MODE_STRUC csr1;
#ifdef RT2860
UCHAR i;
#endif
DBGPRINT(RT_DEBUG_TRACE, ("==> AsicAddKeyEntry\n"));
//
// 1.) decide key table offset
//
if (bUsePairewiseKeyTable)
offset = PAIRWISE_KEY_TABLE_BASE + (WCID * HW_KEY_ENTRY_SIZE);
else
offset = SHARED_KEY_TABLE_BASE + (4 * BssIndex + KeyIdx) * HW_KEY_ENTRY_SIZE;
//
// 2.) Set Key to Asic
//
#ifdef RT2860
for (i = 0; i < MAX_LEN_OF_PEER_KEY; i++)
{
RTMP_IO_WRITE8(pAd, offset + i, pKey[i]);
}
#endif
#ifdef RT2870
RTUSBMultiWrite(pAd, offset, pKey, MAX_LEN_OF_PEER_KEY);
#endif
offset += MAX_LEN_OF_PEER_KEY;
//
// 3.) Set MIC key if available
//
if (pTxMic)
{
#ifdef RT2860
for (i = 0; i < 8; i++)
{
RTMP_IO_WRITE8(pAd, offset + i, pTxMic[i]);
}
#endif
#ifdef RT2870
RTUSBMultiWrite(pAd, offset, pTxMic, 8);
#endif
}
offset += LEN_TKIP_TXMICK;
if (pRxMic)
{
#ifdef RT2860
for (i = 0; i < 8; i++)
{
RTMP_IO_WRITE8(pAd, offset + i, pRxMic[i]);
}
#endif
#ifdef RT2870
RTUSBMultiWrite(pAd, offset, pRxMic, 8);
#endif
}
//
	// 4.) Modify the IV/EIV if needed.
	// Setting the IV forces the ASIC to use this key ID.
//
if (bTxKey)
{
#ifdef RT2860
offset = MAC_IVEIV_TABLE_BASE + (WCID * HW_IVEIV_ENTRY_SIZE);
//
// Write IV
//
RTMP_IO_WRITE8(pAd, offset, pTxtsc[1]);
RTMP_IO_WRITE8(pAd, offset + 1, ((pTxtsc[1] | 0x20) & 0x7f));
RTMP_IO_WRITE8(pAd, offset + 2, pTxtsc[0]);
IV4 = (KeyIdx << 6);
if ((CipherAlg == CIPHER_TKIP) || (CipherAlg == CIPHER_TKIP_NO_MIC) ||(CipherAlg == CIPHER_AES))
			IV4 |= 0x20; // set the extended-IV bit to indicate EIV presence
RTMP_IO_WRITE8(pAd, offset + 3, IV4);
//
// Write EIV
//
offset += 4;
for (i = 0; i < 4; i++)
{
RTMP_IO_WRITE8(pAd, offset + i, pTxtsc[i + 2]);
}
#endif
#ifdef RT2870
UINT32 tmpVal;
//
// Write IV
//
IV4 = (KeyIdx << 6);
if ((CipherAlg == CIPHER_TKIP) || (CipherAlg == CIPHER_TKIP_NO_MIC) ||(CipherAlg == CIPHER_AES))
			IV4 |= 0x20; // set the extended-IV bit to indicate EIV presence
tmpVal = pTxtsc[1] + (((pTxtsc[1] | 0x20) & 0x7f) << 8) + (pTxtsc[0] << 16) + (IV4 << 24);
RTMP_IO_WRITE32(pAd, offset, tmpVal);
//
// Write EIV
//
offset += 4;
RTMP_IO_WRITE32(pAd, offset, *(PUINT32)&pCipherKey->TxTsc[2]);
#endif // RT2870 //
AsicUpdateWCIDAttribute(pAd, WCID, BssIndex, CipherAlg, bUsePairewiseKeyTable);
}
if (!bUsePairewiseKeyTable)
{
//
// Only update the shared key security mode
//
RTMP_IO_READ32(pAd, SHARED_KEY_MODE_BASE + 4 * (BssIndex / 2), &csr1.word);
if ((BssIndex % 2) == 0)
{
if (KeyIdx == 0)
csr1.field.Bss0Key0CipherAlg = CipherAlg;
else if (KeyIdx == 1)
csr1.field.Bss0Key1CipherAlg = CipherAlg;
else if (KeyIdx == 2)
csr1.field.Bss0Key2CipherAlg = CipherAlg;
else
csr1.field.Bss0Key3CipherAlg = CipherAlg;
}
else
{
if (KeyIdx == 0)
csr1.field.Bss1Key0CipherAlg = CipherAlg;
else if (KeyIdx == 1)
csr1.field.Bss1Key1CipherAlg = CipherAlg;
else if (KeyIdx == 2)
csr1.field.Bss1Key2CipherAlg = CipherAlg;
else
csr1.field.Bss1Key3CipherAlg = CipherAlg;
}
RTMP_IO_WRITE32(pAd, SHARED_KEY_MODE_BASE + 4 * (BssIndex / 2), csr1.word);
}
DBGPRINT(RT_DEBUG_TRACE, ("<== AsicAddKeyEntry\n"));
}
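/*
	Illustrative IV byte-3 construction, mirroring the bTxKey path above:
	for KeyIdx = 1 with a TKIP or AES cipher, IV4 = (1 << 6) | 0x20 = 0x60,
	i.e. the KeyID in bits 6..7 plus the extended-IV bit (0x20), so the ASIC
	picks this key from the IV's KeyID on transmit.
*/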
/*
========================================================================
Description:
Add Pair-wise key material into ASIC.
Update pairwise key, TxMic and RxMic to Asic Pair-wise key table
Return:
========================================================================
*/
VOID AsicAddPairwiseKeyEntry(
IN PRTMP_ADAPTER pAd,
IN PUCHAR pAddr,
IN UCHAR WCID,
IN CIPHER_KEY *pCipherKey)
{
INT i;
ULONG offset;
PUCHAR pKey = pCipherKey->Key;
PUCHAR pTxMic = pCipherKey->TxMic;
PUCHAR pRxMic = pCipherKey->RxMic;
#ifdef DBG
UCHAR CipherAlg = pCipherKey->CipherAlg;
#endif // DBG //
// EKEY
offset = PAIRWISE_KEY_TABLE_BASE + (WCID * HW_KEY_ENTRY_SIZE);
#ifdef RT2860
for (i=0; i<MAX_LEN_OF_PEER_KEY; i++)
{
RTMP_IO_WRITE8(pAd, offset + i, pKey[i]);
}
#endif
#ifdef RT2870
RTUSBMultiWrite(pAd, offset, &pCipherKey->Key[0], MAX_LEN_OF_PEER_KEY);
#endif // RT2870 //
for (i=0; i<MAX_LEN_OF_PEER_KEY; i+=4)
{
UINT32 Value;
RTMP_IO_READ32(pAd, offset + i, &Value);
}
offset += MAX_LEN_OF_PEER_KEY;
// MIC KEY
if (pTxMic)
{
#ifdef RT2860
for (i=0; i<8; i++)
{
RTMP_IO_WRITE8(pAd, offset+i, pTxMic[i]);
}
#endif
#ifdef RT2870
RTUSBMultiWrite(pAd, offset, &pCipherKey->TxMic[0], 8);
#endif // RT2870 //
}
offset += 8;
if (pRxMic)
{
#ifdef RT2860
for (i=0; i<8; i++)
{
RTMP_IO_WRITE8(pAd, offset+i, pRxMic[i]);
}
#endif
#ifdef RT2870
RTUSBMultiWrite(pAd, offset, &pCipherKey->RxMic[0], 8);
#endif // RT2870 //
}
DBGPRINT(RT_DEBUG_TRACE,("AsicAddPairwiseKeyEntry: WCID #%d Alg=%s\n",WCID, CipherName[CipherAlg]));
DBGPRINT(RT_DEBUG_TRACE,(" Key = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
pKey[0],pKey[1],pKey[2],pKey[3],pKey[4],pKey[5],pKey[6],pKey[7],pKey[8],pKey[9],pKey[10],pKey[11],pKey[12],pKey[13],pKey[14],pKey[15]));
if (pRxMic)
{
DBGPRINT(RT_DEBUG_TRACE, (" Rx MIC Key = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
pRxMic[0],pRxMic[1],pRxMic[2],pRxMic[3],pRxMic[4],pRxMic[5],pRxMic[6],pRxMic[7]));
}
if (pTxMic)
{
DBGPRINT(RT_DEBUG_TRACE, (" Tx MIC Key = %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
pTxMic[0],pTxMic[1],pTxMic[2],pTxMic[3],pTxMic[4],pTxMic[5],pTxMic[6],pTxMic[7]));
}
}
/*
========================================================================
Description:
Remove Pair-wise key material from ASIC.
Return:
========================================================================
*/
VOID AsicRemovePairwiseKeyEntry(
IN PRTMP_ADAPTER pAd,
IN UCHAR BssIdx,
IN UCHAR Wcid)
{
ULONG WCIDAttri;
USHORT offset;
// re-set the entry's WCID attribute as OPEN-NONE.
offset = MAC_WCID_ATTRIBUTE_BASE + (Wcid * HW_WCID_ATTRI_SIZE);
WCIDAttri = (BssIdx<<4) | PAIRWISEKEYTABLE;
RTMP_IO_WRITE32(pAd, offset, WCIDAttri);
}
BOOLEAN AsicSendCommandToMcu(
IN PRTMP_ADAPTER pAd,
IN UCHAR Command,
IN UCHAR Token,
IN UCHAR Arg0,
IN UCHAR Arg1)
{
HOST_CMD_CSR_STRUC H2MCmd;
H2M_MAILBOX_STRUC H2MMailbox;
ULONG i = 0;
do
{
RTMP_IO_READ32(pAd, H2M_MAILBOX_CSR, &H2MMailbox.word);
if (H2MMailbox.field.Owner == 0)
break;
RTMPusecDelay(2);
} while(i++ < 100);
if (i > 100)
{
{
#ifdef RT2860
UINT32 Data;
// Reset DMA
RTMP_IO_READ32(pAd, PBF_SYS_CTRL, &Data);
Data |= 0x2;
RTMP_IO_WRITE32(pAd, PBF_SYS_CTRL, Data);
			// After the DMA reset the DMA indexes become zero, so the driver must reset all ring indexes too.
			// Reset the DMA/CPU ring indexes.
RTMPRingCleanUp(pAd, QID_AC_BK);
RTMPRingCleanUp(pAd, QID_AC_BE);
RTMPRingCleanUp(pAd, QID_AC_VI);
RTMPRingCleanUp(pAd, QID_AC_VO);
RTMPRingCleanUp(pAd, QID_HCCA);
RTMPRingCleanUp(pAd, QID_MGMT);
RTMPRingCleanUp(pAd, QID_RX);
// Clear Reset
RTMP_IO_READ32(pAd, PBF_SYS_CTRL, &Data);
Data &= 0xfffffffd;
RTMP_IO_WRITE32(pAd, PBF_SYS_CTRL, Data);
#endif /* RT2860 */
		DBGPRINT_ERR(("H2M_MAILBOX still held by MCU, command failed\n"));
}
#ifdef RT2870
return FALSE;
#endif
}
H2MMailbox.field.Owner = 1; // pass ownership to MCU
H2MMailbox.field.CmdToken = Token;
H2MMailbox.field.HighByte = Arg1;
H2MMailbox.field.LowByte = Arg0;
RTMP_IO_WRITE32(pAd, H2M_MAILBOX_CSR, H2MMailbox.word);
H2MCmd.word = 0;
H2MCmd.field.HostCommand = Command;
RTMP_IO_WRITE32(pAd, HOST_CMD_CSR, H2MCmd.word);
return TRUE;
}
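/*
	The host-to-MCU handshake above is: poll H2M_MAILBOX_CSR until Owner == 0
	(up to ~100 iterations of 2us), write Token/Arg1/Arg0 with Owner = 1, then
	kick HOST_CMD_CSR with the command code. A hypothetical caller (the command
	and token values here are placeholders, not documented opcodes):

		if (!AsicSendCommandToMcu(pAd, 0x31, 0xff, 0x00, 0x02))
			DBGPRINT_ERR(("MCU command rejected, mailbox busy\n"));
*/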
#ifdef RT2860
BOOLEAN AsicCheckCommanOk(
IN PRTMP_ADAPTER pAd,
IN UCHAR Command)
{
UINT32 CmdStatus = 0, CID = 0, i;
UINT32 ThisCIDMask = 0;
i = 0;
do
{
RTMP_IO_READ32(pAd, H2M_MAILBOX_CID, &CID);
		// Find which CID slot holds the command; the slot is chosen by the firmware.
if ((CID & CID0MASK) == Command)
{
ThisCIDMask = CID0MASK;
break;
}
else if ((((CID & CID1MASK)>>8) & 0xff) == Command)
{
ThisCIDMask = CID1MASK;
break;
}
else if ((((CID & CID2MASK)>>16) & 0xff) == Command)
{
ThisCIDMask = CID2MASK;
break;
}
else if ((((CID & CID3MASK)>>24) & 0xff) == Command)
{
ThisCIDMask = CID3MASK;
break;
}
RTMPusecDelay(100);
i++;
}while (i < 200);
// Get CommandStatus Value
RTMP_IO_READ32(pAd, H2M_MAILBOX_STATUS, &CmdStatus);
	// The command's status sits at the same position as the command itself,
	// so AND with the command slot's bitmask to read the status.
if (i < 200)
{
		// If the status is 1, the command succeeded.
if (((CmdStatus & ThisCIDMask) == 0x1) || ((CmdStatus & ThisCIDMask) == 0x100)
|| ((CmdStatus & ThisCIDMask) == 0x10000) || ((CmdStatus & ThisCIDMask) == 0x1000000))
{
DBGPRINT(RT_DEBUG_TRACE, ("--> AsicCheckCommanOk CID = 0x%x, CmdStatus= 0x%x \n", CID, CmdStatus));
RTMP_IO_WRITE32(pAd, H2M_MAILBOX_STATUS, 0xffffffff);
RTMP_IO_WRITE32(pAd, H2M_MAILBOX_CID, 0xffffffff);
return TRUE;
}
DBGPRINT(RT_DEBUG_TRACE, ("--> AsicCheckCommanFail1 CID = 0x%x, CmdStatus= 0x%x \n", CID, CmdStatus));
}
else
{
DBGPRINT(RT_DEBUG_TRACE, ("--> AsicCheckCommanFail2 Timeout Command = %d, CmdStatus= 0x%x \n", Command, CmdStatus));
}
// Clear Command and Status.
RTMP_IO_WRITE32(pAd, H2M_MAILBOX_STATUS, 0xffffffff);
RTMP_IO_WRITE32(pAd, H2M_MAILBOX_CID, 0xffffffff);
return FALSE;
}
#endif /* RT2860 */
/*
========================================================================
Routine Description:
Verify the support rate for different PHY type
Arguments:
pAd Pointer to our adapter
Return Value:
None
IRQL = PASSIVE_LEVEL
========================================================================
*/
VOID RTMPCheckRates(
IN PRTMP_ADAPTER pAd,
IN OUT UCHAR SupRate[],
IN OUT UCHAR *SupRateLen)
{
UCHAR RateIdx, i, j;
UCHAR NewRate[12], NewRateLen;
NewRateLen = 0;
if (pAd->CommonCfg.PhyMode == PHY_11B)
RateIdx = 4;
else
RateIdx = 12;
	// Check the supported rates, excluding the basic-rate bit
for (i = 0; i < *SupRateLen; i++)
for (j = 0; j < RateIdx; j++)
if ((SupRate[i] & 0x7f) == RateIdTo500Kbps[j])
NewRate[NewRateLen++] = SupRate[i];
*SupRateLen = NewRateLen;
NdisMoveMemory(SupRate, NewRate, NewRateLen);
}
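/*
	Example: in PHY_11B mode only the first four entries of RateIdTo500Kbps
	(1, 2, 5.5 and 11 Mbps) survive the filter, so an AP-supplied rate set of
	{1, 2, 5.5, 11, 6, 9} is trimmed to {1, 2, 5.5, 11}; the basic-rate bit
	(0x80) of each surviving rate is preserved by the 0x7f mask comparison.
*/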
BOOLEAN RTMPCheckChannel(
IN PRTMP_ADAPTER pAd,
IN UCHAR CentralChannel,
IN UCHAR Channel)
{
UCHAR k;
UCHAR UpperChannel = 0, LowerChannel = 0;
UCHAR NoEffectChannelinList = 0;
	// Find the upper and lower channels according to the current 40MHz operation.
if (CentralChannel < Channel)
{
UpperChannel = Channel;
if (CentralChannel > 2)
LowerChannel = CentralChannel - 2;
else
return FALSE;
}
else if (CentralChannel > Channel)
{
UpperChannel = CentralChannel + 2;
LowerChannel = Channel;
}
for (k = 0;k < pAd->ChannelListNum;k++)
{
if (pAd->ChannelList[k].Channel == UpperChannel)
{
NoEffectChannelinList ++;
}
if (pAd->ChannelList[k].Channel == LowerChannel)
{
NoEffectChannelinList ++;
}
}
DBGPRINT(RT_DEBUG_TRACE,("Total Channel in Channel List = [%d]\n", NoEffectChannelinList));
if (NoEffectChannelinList == 2)
return TRUE;
else
return FALSE;
}
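/*
	Worked example: with Channel = 36 and CentralChannel = 38 (40MHz with the
	extension channel above), UpperChannel = 40 and LowerChannel = 36; the
	routine returns TRUE only when both 36 and 40 appear in pAd->ChannelList,
	i.e. the regulatory channel list permits the whole 40MHz pair.
*/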
/*
========================================================================
Routine Description:
Verify the support rate for HT phy type
Arguments:
pAd Pointer to our adapter
Return Value:
FALSE if pAd->CommonCfg.SupportedHtPhy doesn't accept the pHtCapability. (AP Mode)
IRQL = PASSIVE_LEVEL
========================================================================
*/
BOOLEAN RTMPCheckHt(
IN PRTMP_ADAPTER pAd,
IN UCHAR Wcid,
IN HT_CAPABILITY_IE *pHtCapability,
IN ADD_HT_INFO_IE *pAddHtInfo)
{
if (Wcid >= MAX_LEN_OF_MAC_TABLE)
return FALSE;
	// If A-MSDU is in use, set the flag.
if (pAd->CommonCfg.DesiredHtPhy.AmsduEnable)
CLIENT_STATUS_SET_FLAG(&pAd->MacTab.Content[Wcid], fCLIENT_STATUS_AMSDU_INUSED);
// Save Peer Capability
if (pHtCapability->HtCapInfo.ShortGIfor20)
CLIENT_STATUS_SET_FLAG(&pAd->MacTab.Content[Wcid], fCLIENT_STATUS_SGI20_CAPABLE);
if (pHtCapability->HtCapInfo.ShortGIfor40)
CLIENT_STATUS_SET_FLAG(&pAd->MacTab.Content[Wcid], fCLIENT_STATUS_SGI40_CAPABLE);
if (pHtCapability->HtCapInfo.TxSTBC)
CLIENT_STATUS_SET_FLAG(&pAd->MacTab.Content[Wcid], fCLIENT_STATUS_TxSTBC_CAPABLE);
if (pHtCapability->HtCapInfo.RxSTBC)
CLIENT_STATUS_SET_FLAG(&pAd->MacTab.Content[Wcid], fCLIENT_STATUS_RxSTBC_CAPABLE);
if (pAd->CommonCfg.bRdg && pHtCapability->ExtHtCapInfo.RDGSupport)
{
CLIENT_STATUS_SET_FLAG(&pAd->MacTab.Content[Wcid], fCLIENT_STATUS_RDG_CAPABLE);
}
if (Wcid < MAX_LEN_OF_MAC_TABLE)
{
pAd->MacTab.Content[Wcid].MpduDensity = pHtCapability->HtCapParm.MpduDensity;
}
// Will check ChannelWidth for MCSSet[4] below
pAd->MlmeAux.HtCapability.MCSSet[4] = 0x1;
switch (pAd->CommonCfg.RxStream)
{
case 1:
pAd->MlmeAux.HtCapability.MCSSet[0] = 0xff;
pAd->MlmeAux.HtCapability.MCSSet[1] = 0x00;
pAd->MlmeAux.HtCapability.MCSSet[2] = 0x00;
pAd->MlmeAux.HtCapability.MCSSet[3] = 0x00;
break;
case 2:
pAd->MlmeAux.HtCapability.MCSSet[0] = 0xff;
pAd->MlmeAux.HtCapability.MCSSet[1] = 0xff;
pAd->MlmeAux.HtCapability.MCSSet[2] = 0x00;
pAd->MlmeAux.HtCapability.MCSSet[3] = 0x00;
break;
case 3:
pAd->MlmeAux.HtCapability.MCSSet[0] = 0xff;
pAd->MlmeAux.HtCapability.MCSSet[1] = 0xff;
pAd->MlmeAux.HtCapability.MCSSet[2] = 0xff;
pAd->MlmeAux.HtCapability.MCSSet[3] = 0x00;
break;
}
pAd->MlmeAux.HtCapability.HtCapInfo.ChannelWidth = pAddHtInfo->AddHtInfo.RecomWidth & pAd->CommonCfg.DesiredHtPhy.ChannelWidth;
DBGPRINT(RT_DEBUG_TRACE, ("RTMPCheckHt:: HtCapInfo.ChannelWidth=%d, RecomWidth=%d, DesiredHtPhy.ChannelWidth=%d, BW40MAvailForA/G=%d/%d, PhyMode=%d \n",
pAd->MlmeAux.HtCapability.HtCapInfo.ChannelWidth, pAddHtInfo->AddHtInfo.RecomWidth, pAd->CommonCfg.DesiredHtPhy.ChannelWidth,
pAd->NicConfig2.field.BW40MAvailForA, pAd->NicConfig2.field.BW40MAvailForG, pAd->CommonCfg.PhyMode));
pAd->MlmeAux.HtCapability.HtCapInfo.GF = pHtCapability->HtCapInfo.GF &pAd->CommonCfg.DesiredHtPhy.GF;
// Send Assoc Req with my HT capability.
pAd->MlmeAux.HtCapability.HtCapInfo.AMsduSize = pAd->CommonCfg.DesiredHtPhy.AmsduSize;
pAd->MlmeAux.HtCapability.HtCapInfo.MimoPs = pAd->CommonCfg.DesiredHtPhy.MimoPs;
pAd->MlmeAux.HtCapability.HtCapInfo.ShortGIfor20 = (pAd->CommonCfg.DesiredHtPhy.ShortGIfor20) & (pHtCapability->HtCapInfo.ShortGIfor20);
pAd->MlmeAux.HtCapability.HtCapInfo.ShortGIfor40 = (pAd->CommonCfg.DesiredHtPhy.ShortGIfor40) & (pHtCapability->HtCapInfo.ShortGIfor40);
pAd->MlmeAux.HtCapability.HtCapInfo.TxSTBC = (pAd->CommonCfg.DesiredHtPhy.TxSTBC)&(pHtCapability->HtCapInfo.RxSTBC);
pAd->MlmeAux.HtCapability.HtCapInfo.RxSTBC = (pAd->CommonCfg.DesiredHtPhy.RxSTBC)&(pHtCapability->HtCapInfo.TxSTBC);
pAd->MlmeAux.HtCapability.HtCapParm.MaxRAmpduFactor = pAd->CommonCfg.DesiredHtPhy.MaxRAmpduFactor;
pAd->MlmeAux.HtCapability.HtCapParm.MpduDensity = pAd->CommonCfg.HtCapability.HtCapParm.MpduDensity;
pAd->MlmeAux.HtCapability.ExtHtCapInfo.PlusHTC = pHtCapability->ExtHtCapInfo.PlusHTC;
pAd->MacTab.Content[Wcid].HTCapability.ExtHtCapInfo.PlusHTC = pHtCapability->ExtHtCapInfo.PlusHTC;
if (pAd->CommonCfg.bRdg)
{
pAd->MlmeAux.HtCapability.ExtHtCapInfo.RDGSupport = pHtCapability->ExtHtCapInfo.RDGSupport;
pAd->MlmeAux.HtCapability.ExtHtCapInfo.PlusHTC = 1;
}
if (pAd->MlmeAux.HtCapability.HtCapInfo.ChannelWidth == BW_20)
pAd->MlmeAux.HtCapability.MCSSet[4] = 0x0; // BW20 can't transmit MCS32
COPY_AP_HTSETTINGS_FROM_BEACON(pAd, pHtCapability);
return TRUE;
}
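/*
	Example of the MCS set built above: with RxStream = 2 the local capability
	advertises MCS 0-15 (MCSSet[0] = MCSSet[1] = 0xff), and MCSSet[4] bit 0
	(MCS32, the 40MHz duplicate format) is kept only when the negotiated
	channel width ends up being 40MHz.
*/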
/*
========================================================================
Routine Description:
Verify the support rate for different PHY type
Arguments:
pAd Pointer to our adapter
Return Value:
None
IRQL = PASSIVE_LEVEL
========================================================================
*/
VOID RTMPUpdateMlmeRate(
IN PRTMP_ADAPTER pAd)
{
UCHAR MinimumRate;
UCHAR ProperMlmeRate; //= RATE_54;
UCHAR i, j, RateIdx = 12; //1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54
BOOLEAN bMatch = FALSE;
switch (pAd->CommonCfg.PhyMode)
{
case PHY_11B:
ProperMlmeRate = RATE_11;
MinimumRate = RATE_1;
break;
case PHY_11BG_MIXED:
case PHY_11ABGN_MIXED:
case PHY_11BGN_MIXED:
if ((pAd->MlmeAux.SupRateLen == 4) &&
(pAd->MlmeAux.ExtRateLen == 0))
// B only AP
ProperMlmeRate = RATE_11;
else
ProperMlmeRate = RATE_24;
if (pAd->MlmeAux.Channel <= 14)
MinimumRate = RATE_1;
else
MinimumRate = RATE_6;
break;
case PHY_11A:
		case PHY_11N_2_4G: // rt2860 needs to check the MLME rate for 802.11n
case PHY_11GN_MIXED:
case PHY_11AGN_MIXED:
case PHY_11AN_MIXED:
case PHY_11N_5G:
ProperMlmeRate = RATE_24;
MinimumRate = RATE_6;
break;
case PHY_11ABG_MIXED:
ProperMlmeRate = RATE_24;
if (pAd->MlmeAux.Channel <= 14)
MinimumRate = RATE_1;
else
MinimumRate = RATE_6;
break;
default: // error
ProperMlmeRate = RATE_1;
MinimumRate = RATE_1;
break;
}
for (i = 0; i < pAd->MlmeAux.SupRateLen; i++)
{
for (j = 0; j < RateIdx; j++)
{
if ((pAd->MlmeAux.SupRate[i] & 0x7f) == RateIdTo500Kbps[j])
{
if (j == ProperMlmeRate)
{
bMatch = TRUE;
break;
}
}
}
if (bMatch)
break;
}
if (bMatch == FALSE)
{
for (i = 0; i < pAd->MlmeAux.ExtRateLen; i++)
{
for (j = 0; j < RateIdx; j++)
{
if ((pAd->MlmeAux.ExtRate[i] & 0x7f) == RateIdTo500Kbps[j])
{
if (j == ProperMlmeRate)
{
bMatch = TRUE;
break;
}
}
}
if (bMatch)
break;
}
}
if (bMatch == FALSE)
{
ProperMlmeRate = MinimumRate;
}
pAd->CommonCfg.MlmeRate = MinimumRate;
pAd->CommonCfg.RtsRate = ProperMlmeRate;
if (pAd->CommonCfg.MlmeRate >= RATE_6)
{
pAd->CommonCfg.MlmeTransmit.field.MODE = MODE_OFDM;
pAd->CommonCfg.MlmeTransmit.field.MCS = OfdmRateToRxwiMCS[pAd->CommonCfg.MlmeRate];
pAd->MacTab.Content[BSS0Mcast_WCID].HTPhyMode.field.MODE = MODE_OFDM;
pAd->MacTab.Content[BSS0Mcast_WCID].HTPhyMode.field.MCS = OfdmRateToRxwiMCS[pAd->CommonCfg.MlmeRate];
}
else
{
pAd->CommonCfg.MlmeTransmit.field.MODE = MODE_CCK;
pAd->CommonCfg.MlmeTransmit.field.MCS = pAd->CommonCfg.MlmeRate;
pAd->MacTab.Content[BSS0Mcast_WCID].HTPhyMode.field.MODE = MODE_CCK;
pAd->MacTab.Content[BSS0Mcast_WCID].HTPhyMode.field.MCS = pAd->CommonCfg.MlmeRate;
}
DBGPRINT(RT_DEBUG_TRACE, ("RTMPUpdateMlmeRate ==> MlmeTransmit = 0x%x \n" , pAd->CommonCfg.MlmeTransmit.word));
}
CHAR RTMPMaxRssi(
IN PRTMP_ADAPTER pAd,
IN CHAR Rssi0,
IN CHAR Rssi1,
IN CHAR Rssi2)
{
CHAR larger = -127;
if ((pAd->Antenna.field.RxPath == 1) && (Rssi0 != 0))
{
larger = Rssi0;
}
if ((pAd->Antenna.field.RxPath >= 2) && (Rssi1 != 0))
{
larger = max(Rssi0, Rssi1);
}
if ((pAd->Antenna.field.RxPath == 3) && (Rssi2 != 0))
{
larger = max(larger, Rssi2);
}
if (larger == -127)
larger = 0;
return larger;
}
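/*
	Example: with RxPath = 2, Rssi0 = -60 and Rssi1 = -72 the routine returns
	max(-60, -72) = -60. A sample of 0 on a chain is treated as "no reading"
	by the != 0 checks; if no chain produced a reading the function returns 0.
*/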
#ifdef RT2870
// Antenna diversity uses GPIO3 and the EESK pin for control.
// Antenna switching and EEPROM access both use the EESK pin,
// therefore we must avoid driving EESK from both at the same time,
// and restore the antenna setting after any EEPROM access.
VOID AsicSetRxAnt(
IN PRTMP_ADAPTER pAd,
IN UCHAR Ant)
{
UINT32 Value;
UINT32 x;
if ((pAd->EepromAccess) ||
(RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_RESET_IN_PROGRESS)) ||
(RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_HALT_IN_PROGRESS)) ||
(RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_RADIO_OFF)) ||
(RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_NIC_NOT_EXIST)))
{
return;
}
	// antenna selection is done through firmware and the MAC register (GPIO3)
if (Ant == 0)
{
// Main antenna
RTMP_IO_READ32(pAd, E2PROM_CSR, &x);
x |= (EESK);
RTMP_IO_WRITE32(pAd, E2PROM_CSR, x);
RTMP_IO_READ32(pAd, GPIO_CTRL_CFG, &Value);
Value &= ~(0x0808);
RTMP_IO_WRITE32(pAd, GPIO_CTRL_CFG, Value);
DBGPRINT_RAW(RT_DEBUG_TRACE, ("AsicSetRxAnt, switch to main antenna\n"));
}
else
{
// Aux antenna
RTMP_IO_READ32(pAd, E2PROM_CSR, &x);
x &= ~(EESK);
RTMP_IO_WRITE32(pAd, E2PROM_CSR, x);
RTMP_IO_READ32(pAd, GPIO_CTRL_CFG, &Value);
Value &= ~(0x0808);
Value |= 0x08;
RTMP_IO_WRITE32(pAd, GPIO_CTRL_CFG, Value);
DBGPRINT_RAW(RT_DEBUG_TRACE, ("AsicSetRxAnt, switch to aux antenna\n"));
}
}
#endif
/*
========================================================================
Routine Description:
Periodic evaluate antenna link status
Arguments:
pAd - Adapter pointer
Return Value:
None
========================================================================
*/
VOID AsicEvaluateRxAnt(
IN PRTMP_ADAPTER pAd)
{
UCHAR BBPR3 = 0;
if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_RESET_IN_PROGRESS |
fRTMP_ADAPTER_HALT_IN_PROGRESS |
fRTMP_ADAPTER_RADIO_OFF |
fRTMP_ADAPTER_NIC_NOT_EXIST |
fRTMP_ADAPTER_BSS_SCAN_IN_PROGRESS)
|| OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_DOZE)
#ifdef RT2870
|| (pAd->EepromAccess)
#endif
)
return;
#ifdef RT30xx
	// Two antenna selection mechanisms exist: antenna diversity and failed-antenna removal.
	// With antenna diversity only one antenna can RX and TX at a time;
	// with failed-antenna removal both physical antennas can RX and TX.
if (pAd->NicConfig2.field.AntDiversity)
{
DBGPRINT(RT_DEBUG_TRACE,("AntDiv - before evaluate Pair1-Ant (%d,%d)\n",
pAd->RxAnt.Pair1PrimaryRxAnt, pAd->RxAnt.Pair1SecondaryRxAnt));
AsicSetRxAnt(pAd, pAd->RxAnt.Pair1SecondaryRxAnt);
		pAd->RxAnt.EvaluatePeriod = 1; // 1: evaluating the secondary antenna, 0: settled on Pair1PrimaryRxAnt
pAd->RxAnt.FirstPktArrivedWhenEvaluate = FALSE;
pAd->RxAnt.RcvPktNumWhenEvaluate = 0;
		// Arm a one-shot timer to end the evaluation;
		// the evaluation period is adjusted dynamically according to the traffic.
if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_MEDIA_STATE_CONNECTED))
RTMPSetTimer(&pAd->Mlme.RxAntEvalTimer, 100);
else
RTMPSetTimer(&pAd->Mlme.RxAntEvalTimer, 300);
}
else
#endif
{
if (pAd->StaCfg.Psm == PWR_SAVE)
return;
RTMP_BBP_IO_READ8_BY_REG_ID(pAd, BBP_R3, &BBPR3);
BBPR3 &= (~0x18);
if(pAd->Antenna.field.RxPath == 3)
{
BBPR3 |= (0x10);
}
else if(pAd->Antenna.field.RxPath == 2)
{
BBPR3 |= (0x8);
}
else if(pAd->Antenna.field.RxPath == 1)
{
BBPR3 |= (0x0);
}
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R3, BBPR3);
#ifdef RT2860
pAd->StaCfg.BBPR3 = BBPR3;
#endif
}
if (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_MEDIA_STATE_CONNECTED)
)
{
ULONG TxTotalCnt = pAd->RalinkCounters.OneSecTxNoRetryOkCount +
pAd->RalinkCounters.OneSecTxRetryOkCount +
pAd->RalinkCounters.OneSecTxFailCount;
		// dynamically adjust the antenna evaluation period according to the traffic
if (TxTotalCnt > 50)
{
RTMPSetTimer(&pAd->Mlme.RxAntEvalTimer, 20);
pAd->Mlme.bLowThroughput = FALSE;
}
else
{
RTMPSetTimer(&pAd->Mlme.RxAntEvalTimer, 300);
pAd->Mlme.bLowThroughput = TRUE;
}
}
}
/*
========================================================================
Routine Description:
After evaluation, check antenna link status
Arguments:
pAd - Adapter pointer
Return Value:
None
========================================================================
*/
VOID AsicRxAntEvalTimeout(
IN PVOID SystemSpecific1,
IN PVOID FunctionContext,
IN PVOID SystemSpecific2,
IN PVOID SystemSpecific3)
{
RTMP_ADAPTER *pAd = (RTMP_ADAPTER *)FunctionContext;
UCHAR BBPR3 = 0;
CHAR larger = -127, rssi0, rssi1, rssi2;
if (RTMP_TEST_FLAG(pAd, fRTMP_ADAPTER_RESET_IN_PROGRESS |
fRTMP_ADAPTER_HALT_IN_PROGRESS |
fRTMP_ADAPTER_RADIO_OFF |
fRTMP_ADAPTER_NIC_NOT_EXIST)
|| OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_DOZE)
#ifdef RT2870
|| (pAd->EepromAccess)
#endif
)
return;
{
#ifdef RT30xx
if (pAd->NicConfig2.field.AntDiversity)
{
if ((pAd->RxAnt.RcvPktNumWhenEvaluate != 0) && (pAd->RxAnt.Pair1AvgRssi[pAd->RxAnt.Pair1SecondaryRxAnt] >= pAd->RxAnt.Pair1AvgRssi[pAd->RxAnt.Pair1PrimaryRxAnt]))
{
UCHAR temp;
//
			// Select the primary RX antenna pair.
			// Swap roles: use Pair1SecondaryRxAnt as the primary antenna,
			// since its quality is better than Pair1PrimaryRxAnt's.
//
temp = pAd->RxAnt.Pair1PrimaryRxAnt;
pAd->RxAnt.Pair1PrimaryRxAnt = pAd->RxAnt.Pair1SecondaryRxAnt;
pAd->RxAnt.Pair1SecondaryRxAnt = temp;
pAd->RxAnt.Pair1LastAvgRssi = (pAd->RxAnt.Pair1AvgRssi[pAd->RxAnt.Pair1SecondaryRxAnt] >> 3);
pAd->RxAnt.EvaluateStableCnt = 0;
}
else
{
// if the evaluated antenna is not better than original, switch back to original antenna
AsicSetRxAnt(pAd, pAd->RxAnt.Pair1PrimaryRxAnt);
pAd->RxAnt.EvaluateStableCnt ++;
}
		pAd->RxAnt.EvaluatePeriod = 0; // 1: evaluating the secondary antenna, 0: settled on Pair1PrimaryRxAnt
DBGPRINT(RT_DEBUG_TRACE,("AsicRxAntEvalAction::After Eval(fix in #%d), <%d, %d>, RcvPktNumWhenEvaluate=%ld\n",
pAd->RxAnt.Pair1PrimaryRxAnt, (pAd->RxAnt.Pair1AvgRssi[0] >> 3), (pAd->RxAnt.Pair1AvgRssi[1] >> 3), pAd->RxAnt.RcvPktNumWhenEvaluate));
}
else
#endif
{
if (pAd->StaCfg.Psm == PWR_SAVE)
return;
		// When the traffic is low, use the most recent RSSI samples as the criterion; otherwise use the averaged RSSI.
if (pAd->Mlme.bLowThroughput == TRUE)
{
rssi0 = pAd->StaCfg.RssiSample.LastRssi0;
rssi1 = pAd->StaCfg.RssiSample.LastRssi1;
rssi2 = pAd->StaCfg.RssiSample.LastRssi2;
}
else
{
rssi0 = pAd->StaCfg.RssiSample.AvgRssi0;
rssi1 = pAd->StaCfg.RssiSample.AvgRssi1;
rssi2 = pAd->StaCfg.RssiSample.AvgRssi2;
}
if(pAd->Antenna.field.RxPath == 3)
{
larger = max(rssi0, rssi1);
if (larger > (rssi2 + 20))
pAd->Mlme.RealRxPath = 2;
else
pAd->Mlme.RealRxPath = 3;
}
else if(pAd->Antenna.field.RxPath == 2)
{
if (rssi0 > (rssi1 + 20))
pAd->Mlme.RealRxPath = 1;
else
pAd->Mlme.RealRxPath = 2;
}
RTMP_BBP_IO_READ8_BY_REG_ID(pAd, BBP_R3, &BBPR3);
BBPR3 &= (~0x18);
if(pAd->Mlme.RealRxPath == 3)
{
BBPR3 |= (0x10);
}
else if(pAd->Mlme.RealRxPath == 2)
{
BBPR3 |= (0x8);
}
else if(pAd->Mlme.RealRxPath == 1)
{
BBPR3 |= (0x0);
}
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R3, BBPR3);
#ifdef RT2860
pAd->StaCfg.BBPR3 = BBPR3;
#endif
}
}
}
VOID APSDPeriodicExec(
IN PVOID SystemSpecific1,
IN PVOID FunctionContext,
IN PVOID SystemSpecific2,
IN PVOID SystemSpecific3)
{
RTMP_ADAPTER *pAd = (RTMP_ADAPTER *)FunctionContext;
if (!OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_MEDIA_STATE_CONNECTED))
return;
pAd->CommonCfg.TriggerTimerCount++;
}
/*
========================================================================
Routine Description:
Set/reset MAC registers according to bPiggyBack parameter
Arguments:
pAd - Adapter pointer
bPiggyBack - Enable / Disable Piggy-Back
Return Value:
None
========================================================================
*/
VOID RTMPSetPiggyBack(
IN PRTMP_ADAPTER pAd,
IN BOOLEAN bPiggyBack)
{
TX_LINK_CFG_STRUC TxLinkCfg;
RTMP_IO_READ32(pAd, TX_LINK_CFG, &TxLinkCfg.word);
TxLinkCfg.field.TxCFAckEn = bPiggyBack;
RTMP_IO_WRITE32(pAd, TX_LINK_CFG, TxLinkCfg.word);
}
/*
========================================================================
Routine Description:
		check whether this entry should switch rates automatically
Arguments:
pAd
pEntry
Return Value:
		TRUE
		FALSE
========================================================================
*/
BOOLEAN RTMPCheckEntryEnableAutoRateSwitch(
IN PRTMP_ADAPTER pAd,
IN PMAC_TABLE_ENTRY pEntry)
{
BOOLEAN result = TRUE;
{
// only associated STA counts
if (pEntry && (pEntry->ValidAsCLI) && (pEntry->Sst == SST_ASSOC))
{
result = pAd->StaCfg.bAutoTxRateSwitch;
}
else
result = FALSE;
}
return result;
}
BOOLEAN RTMPAutoRateSwitchCheck(
IN PRTMP_ADAPTER pAd)
{
if (pAd->StaCfg.bAutoTxRateSwitch)
return TRUE;
return FALSE;
}
/*
========================================================================
Routine Description:
		check whether this entry needs a fixed legacy TX rate
Arguments:
pAd
pEntry
	Return Value:
		the fixed TX mode (e.g. FIXED_TXMODE_HT or FIXED_TXMODE_CCK)
========================================================================
*/
UCHAR RTMPStaFixedTxMode(
IN PRTMP_ADAPTER pAd,
IN PMAC_TABLE_ENTRY pEntry)
{
UCHAR tx_mode = FIXED_TXMODE_HT;
tx_mode = (UCHAR)pAd->StaCfg.DesiredTransmitSetting.field.FixedTxMode;
return tx_mode;
}
/*
========================================================================
Routine Description:
		Overwrite the HT TX mode with the fixed legacy TX mode, if specified.
Arguments:
pAd
pEntry
	Return Value:
		None
========================================================================
*/
VOID RTMPUpdateLegacyTxSetting(
UCHAR fixed_tx_mode,
PMAC_TABLE_ENTRY pEntry)
{
HTTRANSMIT_SETTING TransmitSetting;
if (fixed_tx_mode == FIXED_TXMODE_HT)
return;
TransmitSetting.word = 0;
TransmitSetting.field.MODE = pEntry->HTPhyMode.field.MODE;
TransmitSetting.field.MCS = pEntry->HTPhyMode.field.MCS;
if (fixed_tx_mode == FIXED_TXMODE_CCK)
{
TransmitSetting.field.MODE = MODE_CCK;
		// CCK mode allows MCS 0~3
if (TransmitSetting.field.MCS > MCS_3)
TransmitSetting.field.MCS = MCS_3;
}
else
{
TransmitSetting.field.MODE = MODE_OFDM;
		// OFDM mode allows MCS 0~7
if (TransmitSetting.field.MCS > MCS_7)
TransmitSetting.field.MCS = MCS_7;
}
if (pEntry->HTPhyMode.field.MODE >= TransmitSetting.field.MODE)
{
pEntry->HTPhyMode.word = TransmitSetting.word;
DBGPRINT(RT_DEBUG_TRACE, ("RTMPUpdateLegacyTxSetting : wcid-%d, MODE=%s, MCS=%d \n",
pEntry->Aid, GetPhyMode(pEntry->HTPhyMode.field.MODE), pEntry->HTPhyMode.field.MCS));
}
}
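/*
	Example: an entry fixed to FIXED_TXMODE_CCK whose current setting is an HT
	mode with MCS 7 is rewritten to MODE_CCK with the MCS clamped to MCS_3
	(11 Mbps), since CCK only defines MCS 0~3; a fixed OFDM mode clamps to
	MCS_7 (54 Mbps) instead.
*/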
/*
==========================================================================
Description:
		dynamically tune BBP R66 to find a balance between sensitivity and
		noise isolation
IRQL = DISPATCH_LEVEL
==========================================================================
*/
VOID AsicStaBbpTuning(
IN PRTMP_ADAPTER pAd)
{
UCHAR OrigR66Value = 0, R66;//, R66UpperBound = 0x30, R66LowerBound = 0x30;
CHAR Rssi;
	// 2860C does not support Fast CCA, therefore R66 cannot be tuned
if (pAd->MACVersion == 0x28600100)
return;
//
// work as a STA
//
if (pAd->Mlme.CntlMachine.CurrState != CNTL_IDLE) // no R66 tuning when SCANNING
return;
if ((pAd->OpMode == OPMODE_STA)
&& (OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_MEDIA_STATE_CONNECTED)
)
&& !(OPSTATUS_TEST_FLAG(pAd, fOP_STATUS_DOZE))
#ifdef RT2860
&& (pAd->bPCIclkOff == FALSE))
#endif
#ifdef RT2870
)
#endif
{
RTMP_BBP_IO_READ8_BY_REG_ID(pAd, BBP_R66, &OrigR66Value);
R66 = OrigR66Value;
if (pAd->Antenna.field.RxPath > 1)
Rssi = (pAd->StaCfg.RssiSample.AvgRssi0 + pAd->StaCfg.RssiSample.AvgRssi1) >> 1;
else
Rssi = pAd->StaCfg.RssiSample.AvgRssi0;
if (pAd->LatchRfRegs.Channel <= 14)
{ //BG band
#ifdef RT2870
		// RT3070 has no external LNA, so it needs different AGC gain control;
		// otherwise throughput suffers at low RSSI.
if (IS_RT30xx(pAd))
{
if (Rssi > RSSI_FOR_MID_LOW_SENSIBILITY)
{
R66 = 0x1C + 2*GET_LNA_GAIN(pAd) + 0x20;
if (OrigR66Value != R66)
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R66, R66);
}
else
{
R66 = 0x1C + 2*GET_LNA_GAIN(pAd);
if (OrigR66Value != R66)
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R66, R66);
}
}
else
#endif // RT2870 //
{
if (Rssi > RSSI_FOR_MID_LOW_SENSIBILITY)
{
R66 = (0x2E + GET_LNA_GAIN(pAd)) + 0x10;
if (OrigR66Value != R66)
{
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R66, R66);
}
}
else
{
R66 = 0x2E + GET_LNA_GAIN(pAd);
if (OrigR66Value != R66)
{
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R66, R66);
}
}
}
}
else
{ //A band
if (pAd->CommonCfg.BBPCurrentBW == BW_20)
{
if (Rssi > RSSI_FOR_MID_LOW_SENSIBILITY)
{
R66 = 0x32 + (GET_LNA_GAIN(pAd)*5)/3 + 0x10;
if (OrigR66Value != R66)
{
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R66, R66);
}
}
else
{
R66 = 0x32 + (GET_LNA_GAIN(pAd)*5)/3;
if (OrigR66Value != R66)
{
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R66, R66);
}
}
}
else
{
if (Rssi > RSSI_FOR_MID_LOW_SENSIBILITY)
{
R66 = 0x3A + (GET_LNA_GAIN(pAd)*5)/3 + 0x10;
if (OrigR66Value != R66)
{
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R66, R66);
}
}
else
{
R66 = 0x3A + (GET_LNA_GAIN(pAd)*5)/3;
if (OrigR66Value != R66)
{
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R66, R66);
}
}
}
}
}
}
#ifdef RT2860
VOID AsicResetFromDMABusy(
IN PRTMP_ADAPTER pAd)
{
UINT32 Data;
BOOLEAN bCtrl = FALSE;
DBGPRINT(RT_DEBUG_TRACE, ("---> AsicResetFromDMABusy !!!!!!!!!!!!!!!!!!!!!!! \n"));
	// Be sure to restore the link control value so we can write registers.
RTMP_CLEAR_PSFLAG(pAd, fRTMP_PS_CAN_GO_SLEEP);
if (RTMP_TEST_PSFLAG(pAd, fRTMP_PS_SET_PCI_CLK_OFF_COMMAND))
{
DBGPRINT(RT_DEBUG_TRACE,("AsicResetFromDMABusy==>\n"));
RTMPPCIeLinkCtrlValueRestore(pAd, RESTORE_HALT);
RTMPusecDelay(6000);
pAd->bPCIclkOff = FALSE;
bCtrl = TRUE;
}
// Reset DMA
RTMP_IO_READ32(pAd, PBF_SYS_CTRL, &Data);
Data |= 0x2;
RTMP_IO_WRITE32(pAd, PBF_SYS_CTRL, Data);
	// After the DMA reset the DMA indexes become zero, so the driver must reset all ring indexes too.
	// Reset the DMA/CPU ring indexes.
RTMPRingCleanUp(pAd, QID_AC_BK);
RTMPRingCleanUp(pAd, QID_AC_BE);
RTMPRingCleanUp(pAd, QID_AC_VI);
RTMPRingCleanUp(pAd, QID_AC_VO);
RTMPRingCleanUp(pAd, QID_HCCA);
RTMPRingCleanUp(pAd, QID_MGMT);
RTMPRingCleanUp(pAd, QID_RX);
// Clear Reset
RTMP_IO_READ32(pAd, PBF_SYS_CTRL, &Data);
Data &= 0xfffffffd;
RTMP_IO_WRITE32(pAd, PBF_SYS_CTRL, Data);
	// If the radio is off, call RTMPPCIeLinkCtrlSetting again.
if ((bCtrl == TRUE) && (pAd->StaCfg.bRadio == FALSE))
RTMPPCIeLinkCtrlSetting(pAd, 3);
RTMP_SET_PSFLAG(pAd, fRTMP_PS_CAN_GO_SLEEP);
RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_NIC_NOT_EXIST | fRTMP_ADAPTER_HALT_IN_PROGRESS);
DBGPRINT(RT_DEBUG_TRACE, ("<--- AsicResetFromDMABusy !!!!!!!!!!!!!!!!!!!!!!! \n"));
}
VOID AsicResetBBP(
IN PRTMP_ADAPTER pAd)
{
DBGPRINT(RT_DEBUG_TRACE, ("---> Asic HardReset BBP !!!!!!!!!!!!!!!!!!!!!!! \n"));
RTMP_IO_WRITE32(pAd, MAC_SYS_CTRL, 0x0);
RTMP_IO_WRITE32(pAd, MAC_SYS_CTRL, 0x2);
RTMP_IO_WRITE32(pAd, MAC_SYS_CTRL, 0xc);
// After hard-reset BBP, initialize all BBP values.
NICRestoreBBPValue(pAd);
DBGPRINT(RT_DEBUG_TRACE, ("<--- Asic HardReset BBP !!!!!!!!!!!!!!!!!!!!!!! \n"));
}
VOID AsicResetMAC(
IN PRTMP_ADAPTER pAd)
{
ULONG Data;
DBGPRINT(RT_DEBUG_TRACE, ("---> AsicResetMAC !!!! \n"));
RTMP_IO_READ32(pAd, PBF_SYS_CTRL, &Data);
Data |= 0x4;
RTMP_IO_WRITE32(pAd, PBF_SYS_CTRL, Data);
Data &= 0xfffffffb;
RTMP_IO_WRITE32(pAd, PBF_SYS_CTRL, Data);
DBGPRINT(RT_DEBUG_TRACE, ("<--- AsicResetMAC !!!! \n"));
}
VOID AsicResetPBF(
IN PRTMP_ADAPTER pAd)
{
ULONG Value1, Value2;
ULONG Data;
RTMP_IO_READ32(pAd, TXRXQ_PCNT, &Value1);
RTMP_IO_READ32(pAd, PBF_DBG, &Value2);
Value2 &= 0xff;
	// The sum should equal 0xff, which is the total buffer size.
if ((Value1 + Value2) < 0xff)
{
DBGPRINT(RT_DEBUG_TRACE, ("---> Asic HardReset PBF !!!! \n"));
RTMP_IO_READ32(pAd, PBF_SYS_CTRL, &Data);
Data |= 0x8;
RTMP_IO_WRITE32(pAd, PBF_SYS_CTRL, Data);
Data &= 0xfffffff7;
RTMP_IO_WRITE32(pAd, PBF_SYS_CTRL, Data);
DBGPRINT(RT_DEBUG_TRACE, ("<--- Asic HardReset PBF !!!! \n"));
}
}
#endif /* RT2860 */
VOID RTMPSetAGCInitValue(
IN PRTMP_ADAPTER pAd,
IN UCHAR BandWidth)
{
UCHAR R66 = 0x30;
if (pAd->LatchRfRegs.Channel <= 14)
{ // BG band
R66 = 0x2E + GET_LNA_GAIN(pAd);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R66, R66);
}
else
{ //A band
if (BandWidth == BW_20)
{
R66 = (UCHAR)(0x32 + (GET_LNA_GAIN(pAd)*5)/3);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R66, R66);
}
else
{
R66 = (UCHAR)(0x3A + (GET_LNA_GAIN(pAd)*5)/3);
RTMP_BBP_IO_WRITE8_BY_REG_ID(pAd, BBP_R66, R66);
}
}
}
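/*
	Example of the initial-gain math above (LNA gain value assumed for the
	illustration): in the BG band with GET_LNA_GAIN(pAd) = 0x0A, R66 becomes
	0x2E + 0x0A = 0x38; in the A band the base is 0x32 (BW_20) or 0x3A (BW_40)
	plus 5/3 of the LNA gain.
*/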
VOID AsicTurnOffRFClk(
IN PRTMP_ADAPTER pAd,
IN UCHAR Channel)
{
// RF R2 bit 18 = 0
UINT32 R1 = 0, R2 = 0, R3 = 0;
UCHAR index;
RTMP_RF_REGS *RFRegTable;
	// The RF programming sequence differs between the 3xxx and 2xxx chips
if (IS_RT3090(pAd))
{
		RT30xxLoadRFSleepModeSetup(pAd); // added by johnli: RF power sequence setup, load the RF sleep-mode setup
return;
}
RFRegTable = RF2850RegTable;
switch (pAd->RfIcType)
{
case RFIC_2820:
case RFIC_2850:
case RFIC_2720:
case RFIC_2750:
for (index = 0; index < NUM_OF_2850_CHNL; index++)
{
if (Channel == RFRegTable[index].Channel)
{
R1 = RFRegTable[index].R1 & 0xffffdfff;
R2 = RFRegTable[index].R2 & 0xfffbffff;
R3 = RFRegTable[index].R3 & 0xfff3ffff;
RTMP_RF_IO_WRITE32(pAd, R1);
RTMP_RF_IO_WRITE32(pAd, R2);
					// Program R1 bit13 to 1, R3 bits[18:19] to 0, R2 bit18 to 0.
					// Set RF R2 bit18 = 0, R3 bits[18:19] = 0.
					RTMP_RF_IO_WRITE32(pAd, R3);
					DBGPRINT(RT_DEBUG_TRACE, ("AsicTurnOffRFClk#%d(RF=%d, ) , R2=0x%08x, R3 = 0x%08x \n",
						Channel, pAd->RfIcType, R2, R3));
					break;
}
}
break;
default:
break;
}
}
VOID AsicTurnOnRFClk(
IN PRTMP_ADAPTER pAd,
IN UCHAR Channel)
{
// RF R2 bit 18 = 0
UINT32 R1 = 0, R2 = 0, R3 = 0;
UCHAR index;
RTMP_RF_REGS *RFRegTable;
	// The RF programming sequence differs between the 3xxx and 2xxx chips
if (IS_RT3090(pAd))
return;
RFRegTable = RF2850RegTable;
switch (pAd->RfIcType)
{
case RFIC_2820:
case RFIC_2850:
case RFIC_2720:
case RFIC_2750:
for (index = 0; index < NUM_OF_2850_CHNL; index++)
{
if (Channel == RFRegTable[index].Channel)
{
R3 = pAd->LatchRfRegs.R3;
R3 &= 0xfff3ffff;
R3 |= 0x00080000;
RTMP_RF_IO_WRITE32(pAd, R3);
R1 = RFRegTable[index].R1;
RTMP_RF_IO_WRITE32(pAd, R1);
R2 = RFRegTable[index].R2;
if (pAd->Antenna.field.TxPath == 1)
{
R2 |= 0x4000; // If TXpath is 1, bit 14 = 1;
}
if (pAd->Antenna.field.RxPath == 2)
{
						R2 |= 0x40; // write 1 to turn off one RX path.
}
else if (pAd->Antenna.field.RxPath == 1)
{
						R2 |= 0x20040; // write 1 to turn off the unused RX paths.
}
RTMP_RF_IO_WRITE32(pAd, R2);
break;
}
}
break;
default:
break;
}
DBGPRINT(RT_DEBUG_TRACE, ("AsicTurnOnRFClk#%d(RF=%d, ) , R2=0x%08x\n",
Channel,
pAd->RfIcType,
R2));
}
|
gpl-2.0
|
ac100-ru/old_ac100_kernel
|
drivers/staging/rt2860/common/spectrum.c
|
509
|
45338
|
/*
*************************************************************************
* Ralink Tech Inc.
* 5F., No.36, Taiyuan St., Jhubei City,
* Hsinchu County 302,
* Taiwan, R.O.C.
*
* (c) Copyright 2002-2007, Ralink Technology, Inc.
*
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
*************************************************************************
Module Name:
action.c
Abstract:
Handle association related requests either from WSTA or from local MLME
Revision History:
Who When What
--------- ---------- ----------------------------------------------
Fonchi Wu 2008 created for 802.11h
*/
#include "../rt_config.h"
#include "action.h"
VOID MeasureReqTabInit(
IN PRTMP_ADAPTER pAd)
{
NdisAllocateSpinLock(&pAd->CommonCfg.MeasureReqTabLock);
pAd->CommonCfg.pMeasureReqTab = kmalloc(sizeof(MEASURE_REQ_TAB), GFP_ATOMIC);
if (pAd->CommonCfg.pMeasureReqTab)
NdisZeroMemory(pAd->CommonCfg.pMeasureReqTab, sizeof(MEASURE_REQ_TAB));
else
DBGPRINT(RT_DEBUG_ERROR, ("%s Fail to alloc memory for pAd->CommonCfg.pMeasureReqTab.\n", __func__));
return;
}
VOID MeasureReqTabExit(
IN PRTMP_ADAPTER pAd)
{
NdisFreeSpinLock(pAd->CommonCfg.MeasureReqTabLock);
if (pAd->CommonCfg.pMeasureReqTab)
kfree(pAd->CommonCfg.pMeasureReqTab);
pAd->CommonCfg.pMeasureReqTab = NULL;
return;
}
static PMEASURE_REQ_ENTRY MeasureReqLookUp(
IN PRTMP_ADAPTER pAd,
IN UINT8 DialogToken)
{
UINT HashIdx;
PMEASURE_REQ_TAB pTab = pAd->CommonCfg.pMeasureReqTab;
PMEASURE_REQ_ENTRY pEntry = NULL;
PMEASURE_REQ_ENTRY pPrevEntry = NULL;
if (pTab == NULL)
{
DBGPRINT(RT_DEBUG_ERROR, ("%s: pMeasureReqTab doesn't exist.\n", __func__));
return NULL;
}
RTMP_SEM_LOCK(&pAd->CommonCfg.MeasureReqTabLock);
HashIdx = MQ_DIALOGTOKEN_HASH_INDEX(DialogToken);
pEntry = pTab->Hash[HashIdx];
while (pEntry)
{
if (pEntry->DialogToken == DialogToken)
break;
else
{
pPrevEntry = pEntry;
pEntry = pEntry->pNext;
}
}
RTMP_SEM_UNLOCK(&pAd->CommonCfg.MeasureReqTabLock);
return pEntry;
}
static PMEASURE_REQ_ENTRY MeasureReqInsert(
IN PRTMP_ADAPTER pAd,
IN UINT8 DialogToken)
{
INT i;
ULONG HashIdx;
PMEASURE_REQ_TAB pTab = pAd->CommonCfg.pMeasureReqTab;
PMEASURE_REQ_ENTRY pEntry = NULL, pCurrEntry;
ULONG Now;
if(pTab == NULL)
{
DBGPRINT(RT_DEBUG_ERROR, ("%s: pMeasureReqTab doesn't exist.\n", __func__));
return NULL;
}
pEntry = MeasureReqLookUp(pAd, DialogToken);
if (pEntry == NULL)
{
RTMP_SEM_LOCK(&pAd->CommonCfg.MeasureReqTabLock);
for (i = 0; i < MAX_MEASURE_REQ_TAB_SIZE; i++)
{
NdisGetSystemUpTime(&Now);
pEntry = &pTab->Content[i];
if ((pEntry->Valid == TRUE)
&& RTMP_TIME_AFTER((unsigned long)Now, (unsigned long)(pEntry->lastTime + MQ_REQ_AGE_OUT)))
{
PMEASURE_REQ_ENTRY pPrevEntry = NULL;
ULONG HashIdx = MQ_DIALOGTOKEN_HASH_INDEX(pEntry->DialogToken);
PMEASURE_REQ_ENTRY pProbeEntry = pTab->Hash[HashIdx];
// update Hash list
do
{
if (pProbeEntry == pEntry)
{
if (pPrevEntry == NULL)
{
pTab->Hash[HashIdx] = pEntry->pNext;
}
else
{
pPrevEntry->pNext = pEntry->pNext;
}
break;
}
pPrevEntry = pProbeEntry;
pProbeEntry = pProbeEntry->pNext;
} while (pProbeEntry);
NdisZeroMemory(pEntry, sizeof(MEASURE_REQ_ENTRY));
pTab->Size--;
break;
}
if (pEntry->Valid == FALSE)
break;
}
if (i < MAX_MEASURE_REQ_TAB_SIZE)
{
NdisGetSystemUpTime(&Now);
pEntry->lastTime = Now;
pEntry->Valid = TRUE;
pEntry->DialogToken = DialogToken;
pTab->Size++;
}
else
{
pEntry = NULL;
DBGPRINT(RT_DEBUG_ERROR, ("%s: pMeasureReqTab tab full.\n", __func__));
}
		// add this entry into the hash table
if (pEntry)
{
HashIdx = MQ_DIALOGTOKEN_HASH_INDEX(DialogToken);
if (pTab->Hash[HashIdx] == NULL)
{
pTab->Hash[HashIdx] = pEntry;
}
else
{
pCurrEntry = pTab->Hash[HashIdx];
while (pCurrEntry->pNext != NULL)
pCurrEntry = pCurrEntry->pNext;
pCurrEntry->pNext = pEntry;
}
}
RTMP_SEM_UNLOCK(&pAd->CommonCfg.MeasureReqTabLock);
}
return pEntry;
}
static VOID MeasureReqDelete(
IN PRTMP_ADAPTER pAd,
IN UINT8 DialogToken)
{
PMEASURE_REQ_TAB pTab = pAd->CommonCfg.pMeasureReqTab;
PMEASURE_REQ_ENTRY pEntry = NULL;
if(pTab == NULL)
{
DBGPRINT(RT_DEBUG_ERROR, ("%s: pMeasureReqTab doesn't exist.\n", __func__));
return;
}
// if empty, return
if (pTab->Size == 0)
{
DBGPRINT(RT_DEBUG_ERROR, ("pMeasureReqTab empty.\n"));
return;
}
pEntry = MeasureReqLookUp(pAd, DialogToken);
if (pEntry != NULL)
{
PMEASURE_REQ_ENTRY pPrevEntry = NULL;
ULONG HashIdx = MQ_DIALOGTOKEN_HASH_INDEX(pEntry->DialogToken);
PMEASURE_REQ_ENTRY pProbeEntry = pTab->Hash[HashIdx];
RTMP_SEM_LOCK(&pAd->CommonCfg.MeasureReqTabLock);
// update Hash list
do
{
if (pProbeEntry == pEntry)
{
if (pPrevEntry == NULL)
{
pTab->Hash[HashIdx] = pEntry->pNext;
}
else
{
pPrevEntry->pNext = pEntry->pNext;
}
break;
}
pPrevEntry = pProbeEntry;
pProbeEntry = pProbeEntry->pNext;
} while (pProbeEntry);
NdisZeroMemory(pEntry, sizeof(MEASURE_REQ_ENTRY));
pTab->Size--;
RTMP_SEM_UNLOCK(&pAd->CommonCfg.MeasureReqTabLock);
}
return;
}
VOID TpcReqTabInit(
IN PRTMP_ADAPTER pAd)
{
NdisAllocateSpinLock(&pAd->CommonCfg.TpcReqTabLock);
pAd->CommonCfg.pTpcReqTab = kmalloc(sizeof(TPC_REQ_TAB), GFP_ATOMIC);
if (pAd->CommonCfg.pTpcReqTab)
NdisZeroMemory(pAd->CommonCfg.pTpcReqTab, sizeof(TPC_REQ_TAB));
else
DBGPRINT(RT_DEBUG_ERROR, ("%s Fail to alloc memory for pAd->CommonCfg.pTpcReqTab.\n", __func__));
return;
}
VOID TpcReqTabExit(
IN PRTMP_ADAPTER pAd)
{
NdisFreeSpinLock(pAd->CommonCfg.TpcReqTabLock);
if (pAd->CommonCfg.pTpcReqTab)
kfree(pAd->CommonCfg.pTpcReqTab);
pAd->CommonCfg.pTpcReqTab = NULL;
return;
}
static PTPC_REQ_ENTRY TpcReqLookUp(
IN PRTMP_ADAPTER pAd,
IN UINT8 DialogToken)
{
UINT HashIdx;
PTPC_REQ_TAB pTab = pAd->CommonCfg.pTpcReqTab;
PTPC_REQ_ENTRY pEntry = NULL;
PTPC_REQ_ENTRY pPrevEntry = NULL;
if (pTab == NULL)
{
DBGPRINT(RT_DEBUG_ERROR, ("%s: pTpcReqTab doesn't exist.\n", __func__));
return NULL;
}
RTMP_SEM_LOCK(&pAd->CommonCfg.TpcReqTabLock);
HashIdx = TPC_DIALOGTOKEN_HASH_INDEX(DialogToken);
pEntry = pTab->Hash[HashIdx];
while (pEntry)
{
if (pEntry->DialogToken == DialogToken)
break;
else
{
pPrevEntry = pEntry;
pEntry = pEntry->pNext;
}
}
RTMP_SEM_UNLOCK(&pAd->CommonCfg.TpcReqTabLock);
return pEntry;
}
static PTPC_REQ_ENTRY TpcReqInsert(
IN PRTMP_ADAPTER pAd,
IN UINT8 DialogToken)
{
INT i;
ULONG HashIdx;
PTPC_REQ_TAB pTab = pAd->CommonCfg.pTpcReqTab;
PTPC_REQ_ENTRY pEntry = NULL, pCurrEntry;
ULONG Now;
if(pTab == NULL)
{
DBGPRINT(RT_DEBUG_ERROR, ("%s: pTpcReqTab doesn't exist.\n", __func__));
return NULL;
}
pEntry = TpcReqLookUp(pAd, DialogToken);
if (pEntry == NULL)
{
RTMP_SEM_LOCK(&pAd->CommonCfg.TpcReqTabLock);
for (i = 0; i < MAX_TPC_REQ_TAB_SIZE; i++)
{
NdisGetSystemUpTime(&Now);
pEntry = &pTab->Content[i];
if ((pEntry->Valid == TRUE)
&& RTMP_TIME_AFTER((unsigned long)Now, (unsigned long)(pEntry->lastTime + TPC_REQ_AGE_OUT)))
{
PTPC_REQ_ENTRY pPrevEntry = NULL;
ULONG HashIdx = TPC_DIALOGTOKEN_HASH_INDEX(pEntry->DialogToken);
PTPC_REQ_ENTRY pProbeEntry = pTab->Hash[HashIdx];
// update Hash list
do
{
if (pProbeEntry == pEntry)
{
if (pPrevEntry == NULL)
{
pTab->Hash[HashIdx] = pEntry->pNext;
}
else
{
pPrevEntry->pNext = pEntry->pNext;
}
break;
}
pPrevEntry = pProbeEntry;
pProbeEntry = pProbeEntry->pNext;
} while (pProbeEntry);
NdisZeroMemory(pEntry, sizeof(TPC_REQ_ENTRY));
pTab->Size--;
break;
}
if (pEntry->Valid == FALSE)
break;
}
if (i < MAX_TPC_REQ_TAB_SIZE)
{
NdisGetSystemUpTime(&Now);
pEntry->lastTime = Now;
pEntry->Valid = TRUE;
pEntry->DialogToken = DialogToken;
pTab->Size++;
}
else
{
pEntry = NULL;
DBGPRINT(RT_DEBUG_ERROR, ("%s: pTpcReqTab tab full.\n", __func__));
}
		// add this entry into the hash table
if (pEntry)
{
HashIdx = TPC_DIALOGTOKEN_HASH_INDEX(DialogToken);
if (pTab->Hash[HashIdx] == NULL)
{
pTab->Hash[HashIdx] = pEntry;
}
else
{
pCurrEntry = pTab->Hash[HashIdx];
while (pCurrEntry->pNext != NULL)
pCurrEntry = pCurrEntry->pNext;
pCurrEntry->pNext = pEntry;
}
}
RTMP_SEM_UNLOCK(&pAd->CommonCfg.TpcReqTabLock);
}
return pEntry;
}
static VOID TpcReqDelete(
IN PRTMP_ADAPTER pAd,
IN UINT8 DialogToken)
{
PTPC_REQ_TAB pTab = pAd->CommonCfg.pTpcReqTab;
PTPC_REQ_ENTRY pEntry = NULL;
if(pTab == NULL)
{
DBGPRINT(RT_DEBUG_ERROR, ("%s: pTpcReqTab doesn't exist.\n", __func__));
return;
}
// if empty, return
if (pTab->Size == 0)
{
DBGPRINT(RT_DEBUG_ERROR, ("pTpcReqTab empty.\n"));
return;
}
pEntry = TpcReqLookUp(pAd, DialogToken);
if (pEntry != NULL)
{
PTPC_REQ_ENTRY pPrevEntry = NULL;
ULONG HashIdx = TPC_DIALOGTOKEN_HASH_INDEX(pEntry->DialogToken);
PTPC_REQ_ENTRY pProbeEntry = pTab->Hash[HashIdx];
RTMP_SEM_LOCK(&pAd->CommonCfg.TpcReqTabLock);
// update Hash list
do
{
if (pProbeEntry == pEntry)
{
if (pPrevEntry == NULL)
{
pTab->Hash[HashIdx] = pEntry->pNext;
}
else
{
pPrevEntry->pNext = pEntry->pNext;
}
break;
}
pPrevEntry = pProbeEntry;
pProbeEntry = pProbeEntry->pNext;
} while (pProbeEntry);
NdisZeroMemory(pEntry, sizeof(TPC_REQ_ENTRY));
pTab->Size--;
RTMP_SEM_UNLOCK(&pAd->CommonCfg.TpcReqTabLock);
}
return;
}
/*
==========================================================================
Description:
		Get the current timestamp.
	Parameters:
	Return : Current timestamp.
==========================================================================
*/
static UINT64 GetCurrentTimeStamp(
IN PRTMP_ADAPTER pAd)
{
	// get the current timestamp (not implemented; this stub returns 0).
return 0;
}
/*
==========================================================================
Description:
		Get the current transmit power.
	Parameters:
	Return : Current transmit power.
==========================================================================
*/
static UINT8 GetCurTxPwr(
IN PRTMP_ADAPTER pAd,
IN UINT8 Wcid)
{
return 16; /* 16 dBm */
}
/*
==========================================================================
Description:
Insert Dialog Token into frame.
	Parameters:
1. frame buffer pointer.
2. frame length.
3. Dialog token.
Return : None.
==========================================================================
*/
static VOID InsertDialogToken(
IN PRTMP_ADAPTER pAd,
OUT PUCHAR pFrameBuf,
OUT PULONG pFrameLen,
IN UINT8 DialogToken)
{
ULONG TempLen;
MakeOutgoingFrame(pFrameBuf, &TempLen,
1, &DialogToken,
END_OF_ARGS);
*pFrameLen = *pFrameLen + TempLen;
return;
}
/*
==========================================================================
Description:
Insert TPC Request IE into frame.
	Parameters:
1. frame buffer pointer.
2. frame length.
Return : None.
==========================================================================
*/
static VOID InsertTpcReqIE(
IN PRTMP_ADAPTER pAd,
OUT PUCHAR pFrameBuf,
OUT PULONG pFrameLen)
{
ULONG TempLen;
ULONG Len = 0;
UINT8 ElementID = IE_TPC_REQUEST;
MakeOutgoingFrame(pFrameBuf, &TempLen,
1, &ElementID,
1, &Len,
END_OF_ARGS);
*pFrameLen = *pFrameLen + TempLen;
return;
}
/*
==========================================================================
Description:
Insert TPC Report IE into frame.
	Parameters:
1. frame buffer pointer.
2. frame length.
3. Transmit Power.
4. Link Margin.
Return : None.
==========================================================================
*/
static VOID InsertTpcReportIE(
IN PRTMP_ADAPTER pAd,
OUT PUCHAR pFrameBuf,
OUT PULONG pFrameLen,
IN UINT8 TxPwr,
IN UINT8 LinkMargin)
{
ULONG TempLen;
ULONG Len = sizeof(TPC_REPORT_INFO);
UINT8 ElementID = IE_TPC_REPORT;
TPC_REPORT_INFO TpcReportIE;
TpcReportIE.TxPwr = TxPwr;
TpcReportIE.LinkMargin = LinkMargin;
MakeOutgoingFrame(pFrameBuf, &TempLen,
1, &ElementID,
1, &Len,
Len, &TpcReportIE,
END_OF_ARGS);
*pFrameLen = *pFrameLen + TempLen;
return;
}
/*
==========================================================================
Description:
Insert Channel Switch Announcement IE into frame.
	Parameters:
1. frame buffer pointer.
2. frame length.
3. channel switch announcement mode.
4. new selected channel.
5. channel switch announcement count.
Return : None.
==========================================================================
*/
static VOID InsertChSwAnnIE(
IN PRTMP_ADAPTER pAd,
OUT PUCHAR pFrameBuf,
OUT PULONG pFrameLen,
IN UINT8 ChSwMode,
IN UINT8 NewChannel,
IN UINT8 ChSwCnt)
{
ULONG TempLen;
ULONG Len = sizeof(CH_SW_ANN_INFO);
UINT8 ElementID = IE_CHANNEL_SWITCH_ANNOUNCEMENT;
CH_SW_ANN_INFO ChSwAnnIE;
ChSwAnnIE.ChSwMode = ChSwMode;
ChSwAnnIE.Channel = NewChannel;
ChSwAnnIE.ChSwCnt = ChSwCnt;
MakeOutgoingFrame(pFrameBuf, &TempLen,
1, &ElementID,
1, &Len,
Len, &ChSwAnnIE,
END_OF_ARGS);
*pFrameLen = *pFrameLen + TempLen;
return;
}
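/*
	Resulting on-air element, as assembled by MakeOutgoingFrame above:
	| ElementID | Len = sizeof(CH_SW_ANN_INFO) | ChSwMode | NewChannel | ChSwCnt |
	For example, a mode-1 switch to channel 40 in 5 beacons would serialize as
	0x25 0x03 0x01 0x28 0x05, assuming IE_CHANNEL_SWITCH_ANNOUNCEMENT is the
	802.11h element ID 37 (0x25) and CH_SW_ANN_INFO is the 3-byte body.
*/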
/*
==========================================================================
Description:
Insert Measure Request IE into frame.
	Parameters:
1. frame buffer pointer.
2. frame length.
3. Measure Token.
4. Measure Request Mode.
5. Measure Request Type.
6. Measure Channel.
7. Measure Start time.
8. Measure Duration.
Return : None.
==========================================================================
*/
static VOID InsertMeasureReqIE(
IN PRTMP_ADAPTER pAd,
OUT PUCHAR pFrameBuf,
OUT PULONG pFrameLen,
IN PMEASURE_REQ_INFO pMeasureReqIE)
{
ULONG TempLen;
UINT8 Len = sizeof(MEASURE_REQ_INFO);
UINT8 ElementID = IE_MEASUREMENT_REQUEST;
MakeOutgoingFrame(pFrameBuf, &TempLen,
1, &ElementID,
1, &Len,
Len, pMeasureReqIE,
END_OF_ARGS);
*pFrameLen = *pFrameLen + TempLen;
return;
}
/*
==========================================================================
Description:
Insert Measure Report IE into frame.
	Parameters:
1. frame buffer pointer.
2. frame length.
3. Measure Token.
4. Measure Request Mode.
5. Measure Request Type.
		6. Length of the report information.
		7. Pointer to the report information buffer.
Return : None.
==========================================================================
*/
static VOID InsertMeasureReportIE(
IN PRTMP_ADAPTER pAd,
OUT PUCHAR pFrameBuf,
OUT PULONG pFrameLen,
IN PMEASURE_REPORT_INFO pMeasureReportIE,
IN UINT8 ReportLnfoLen,
IN PUINT8 pReportInfo)
{
ULONG TempLen;
ULONG Len;
UINT8 ElementID = IE_MEASUREMENT_REPORT;
Len = sizeof(MEASURE_REPORT_INFO) + ReportLnfoLen;
MakeOutgoingFrame(pFrameBuf, &TempLen,
1, &ElementID,
1, &Len,
Len, pMeasureReportIE,
END_OF_ARGS);
*pFrameLen = *pFrameLen + TempLen;
if ((ReportLnfoLen > 0) && (pReportInfo != NULL))
{
MakeOutgoingFrame(pFrameBuf + *pFrameLen, &TempLen,
ReportLnfoLen, pReportInfo,
END_OF_ARGS);
*pFrameLen = *pFrameLen + TempLen;
}
return;
}
/*
==========================================================================
Description:
Prepare Measurement request action frame and enqueue it into
		the management queue, waiting for transmission.
	Parameters:
1. the destination mac address of the frame.
Return : None.
==========================================================================
*/
VOID EnqueueMeasurementReq(
IN PRTMP_ADAPTER pAd,
IN PUCHAR pDA,
IN UINT8 MeasureToken,
IN UINT8 MeasureReqMode,
IN UINT8 MeasureReqType,
IN UINT8 MeasureCh,
IN UINT16 MeasureDuration)
{
PUCHAR pOutBuffer = NULL;
NDIS_STATUS NStatus;
ULONG FrameLen;
HEADER_802_11 ActHdr;
MEASURE_REQ_INFO MeasureReqIE;
UINT8 RmReqDialogToken = RandomByte(pAd);
UINT64 MeasureStartTime = GetCurrentTimeStamp(pAd);
// build action frame header.
MgtMacHeaderInit(pAd, &ActHdr, SUBTYPE_ACTION, 0, pDA,
pAd->CurrentAddress);
NStatus = MlmeAllocateMemory(pAd, (PVOID)&pOutBuffer); // Get an unused nonpaged memory block
if(NStatus != NDIS_STATUS_SUCCESS)
{
DBGPRINT(RT_DEBUG_TRACE, ("%s() allocate memory failed \n", __func__));
return;
}
NdisMoveMemory(pOutBuffer, (PCHAR)&ActHdr, sizeof(HEADER_802_11));
FrameLen = sizeof(HEADER_802_11);
InsertActField(pAd, (pOutBuffer + FrameLen), &FrameLen, CATEGORY_SPECTRUM, SPEC_MRQ);
// fill Dialog Token
InsertDialogToken(pAd, (pOutBuffer + FrameLen), &FrameLen, MeasureToken);
// prepare Measurement IE.
NdisZeroMemory(&MeasureReqIE, sizeof(MEASURE_REQ_INFO));
MeasureReqIE.Token = RmReqDialogToken;
MeasureReqIE.ReqMode.word = MeasureReqMode;
MeasureReqIE.ReqType = MeasureReqType;
MeasureReqIE.MeasureReq.ChNum = MeasureCh;
MeasureReqIE.MeasureReq.MeasureStartTime = cpu2le64(MeasureStartTime);
MeasureReqIE.MeasureReq.MeasureDuration = cpu2le16(MeasureDuration);
InsertMeasureReqIE(pAd, (pOutBuffer + FrameLen), &FrameLen, &MeasureReqIE);
MiniportMMRequest(pAd, QID_AC_BE, pOutBuffer, FrameLen);
MlmeFreeMemory(pAd, pOutBuffer);
return;
}
/*
==========================================================================
Description:
Prepare Measurement report action frame and enqueue it into
management queue waiting for transmission.
Parameters:
1. the destination mac address of the frame.
Return : None.
==========================================================================
*/
VOID EnqueueMeasurementRep(
IN PRTMP_ADAPTER pAd,
IN PUCHAR pDA,
IN UINT8 DialogToken,
IN UINT8 MeasureToken,
IN UINT8 MeasureReqMode,
IN UINT8 MeasureReqType,
IN UINT8 ReportInfoLen,
IN PUINT8 pReportInfo)
{
PUCHAR pOutBuffer = NULL;
NDIS_STATUS NStatus;
ULONG FrameLen;
HEADER_802_11 ActHdr;
MEASURE_REPORT_INFO MeasureRepIE;
// build action frame header.
MgtMacHeaderInit(pAd, &ActHdr, SUBTYPE_ACTION, 0, pDA,
pAd->CurrentAddress);
NStatus = MlmeAllocateMemory(pAd, (PVOID)&pOutBuffer); // Get an unused nonpaged memory block
if(NStatus != NDIS_STATUS_SUCCESS)
{
DBGPRINT(RT_DEBUG_TRACE, ("%s() allocate memory failed \n", __func__));
return;
}
NdisMoveMemory(pOutBuffer, (PCHAR)&ActHdr, sizeof(HEADER_802_11));
FrameLen = sizeof(HEADER_802_11);
InsertActField(pAd, (pOutBuffer + FrameLen), &FrameLen, CATEGORY_SPECTRUM, SPEC_MRP);
// fill Dialog Token
InsertDialogToken(pAd, (pOutBuffer + FrameLen), &FrameLen, DialogToken);
// prepare Measurement IE.
NdisZeroMemory(&MeasureRepIE, sizeof(MEASURE_REPORT_INFO));
MeasureRepIE.Token = MeasureToken;
MeasureRepIE.ReportMode.word = MeasureReqMode;
MeasureRepIE.ReportType = MeasureReqType;
InsertMeasureReportIE(pAd, (pOutBuffer + FrameLen), &FrameLen, &MeasureRepIE, ReportInfoLen, pReportInfo);
MiniportMMRequest(pAd, QID_AC_BE, pOutBuffer, FrameLen);
MlmeFreeMemory(pAd, pOutBuffer);
return;
}
/*
==========================================================================
Description:
Prepare TPC Request action frame and enqueue it into
management queue waiting for transmission.
Parameters:
1. the destination mac address of the frame.
Return : None.
==========================================================================
*/
VOID EnqueueTPCReq(
IN PRTMP_ADAPTER pAd,
IN PUCHAR pDA,
IN UCHAR DialogToken)
{
PUCHAR pOutBuffer = NULL;
NDIS_STATUS NStatus;
ULONG FrameLen;
HEADER_802_11 ActHdr;
// build action frame header.
MgtMacHeaderInit(pAd, &ActHdr, SUBTYPE_ACTION, 0, pDA,
pAd->CurrentAddress);
NStatus = MlmeAllocateMemory(pAd, (PVOID)&pOutBuffer); // Get an unused nonpaged memory block
if(NStatus != NDIS_STATUS_SUCCESS)
{
DBGPRINT(RT_DEBUG_TRACE, ("%s() allocate memory failed \n", __func__));
return;
}
NdisMoveMemory(pOutBuffer, (PCHAR)&ActHdr, sizeof(HEADER_802_11));
FrameLen = sizeof(HEADER_802_11);
InsertActField(pAd, (pOutBuffer + FrameLen), &FrameLen, CATEGORY_SPECTRUM, SPEC_TPCRQ);
// fill Dialog Token
InsertDialogToken(pAd, (pOutBuffer + FrameLen), &FrameLen, DialogToken);
// Insert TPC Request IE.
InsertTpcReqIE(pAd, (pOutBuffer + FrameLen), &FrameLen);
MiniportMMRequest(pAd, QID_AC_BE, pOutBuffer, FrameLen);
MlmeFreeMemory(pAd, pOutBuffer);
return;
}
/*
==========================================================================
Description:
Prepare TPC Report action frame and enqueue it into
management queue waiting for transmission.
Parameters:
1. the destination mac address of the frame.
Return : None.
==========================================================================
*/
VOID EnqueueTPCRep(
IN PRTMP_ADAPTER pAd,
IN PUCHAR pDA,
IN UINT8 DialogToken,
IN UINT8 TxPwr,
IN UINT8 LinkMargin)
{
PUCHAR pOutBuffer = NULL;
NDIS_STATUS NStatus;
ULONG FrameLen;
HEADER_802_11 ActHdr;
// build action frame header.
MgtMacHeaderInit(pAd, &ActHdr, SUBTYPE_ACTION, 0, pDA,
pAd->CurrentAddress);
NStatus = MlmeAllocateMemory(pAd, (PVOID)&pOutBuffer); // Get an unused nonpaged memory block
if(NStatus != NDIS_STATUS_SUCCESS)
{
DBGPRINT(RT_DEBUG_TRACE, ("%s() allocate memory failed \n", __func__));
return;
}
NdisMoveMemory(pOutBuffer, (PCHAR)&ActHdr, sizeof(HEADER_802_11));
FrameLen = sizeof(HEADER_802_11);
InsertActField(pAd, (pOutBuffer + FrameLen), &FrameLen, CATEGORY_SPECTRUM, SPEC_TPCRP);
// fill Dialog Token
InsertDialogToken(pAd, (pOutBuffer + FrameLen), &FrameLen, DialogToken);
// Insert TPC Report IE.
InsertTpcReportIE(pAd, (pOutBuffer + FrameLen), &FrameLen, TxPwr, LinkMargin);
MiniportMMRequest(pAd, QID_AC_BE, pOutBuffer, FrameLen);
MlmeFreeMemory(pAd, pOutBuffer);
return;
}
/*
==========================================================================
Description:
Prepare Channel Switch Announcement action frame and enqueue it into
management queue waiting for transmission.
Parameters:
1. the destination mac address of the frame.
2. Channel switch announcement mode.
3. the newly selected channel.
Return : None.
==========================================================================
*/
VOID EnqueueChSwAnn(
IN PRTMP_ADAPTER pAd,
IN PUCHAR pDA,
IN UINT8 ChSwMode,
IN UINT8 NewCh)
{
PUCHAR pOutBuffer = NULL;
NDIS_STATUS NStatus;
ULONG FrameLen;
HEADER_802_11 ActHdr;
// build action frame header.
MgtMacHeaderInit(pAd, &ActHdr, SUBTYPE_ACTION, 0, pDA,
pAd->CurrentAddress);
NStatus = MlmeAllocateMemory(pAd, (PVOID)&pOutBuffer); // Get an unused nonpaged memory block
if(NStatus != NDIS_STATUS_SUCCESS)
{
DBGPRINT(RT_DEBUG_TRACE, ("%s() allocate memory failed \n", __func__));
return;
}
NdisMoveMemory(pOutBuffer, (PCHAR)&ActHdr, sizeof(HEADER_802_11));
FrameLen = sizeof(HEADER_802_11);
InsertActField(pAd, (pOutBuffer + FrameLen), &FrameLen, CATEGORY_SPECTRUM, SPEC_CHANNEL_SWITCH);
InsertChSwAnnIE(pAd, (pOutBuffer + FrameLen), &FrameLen, ChSwMode, NewCh, 0);
MiniportMMRequest(pAd, QID_AC_BE, pOutBuffer, FrameLen);
MlmeFreeMemory(pAd, pOutBuffer);
return;
}
static BOOLEAN DfsRequirementCheck(
IN PRTMP_ADAPTER pAd,
IN UINT8 Channel)
{
BOOLEAN Result = FALSE;
INT i;
do
{
// check whether the DFS procedure is already running;
// make sure the DFS procedure won't be started twice.
if (pAd->CommonCfg.RadarDetect.RDMode != RD_NORMAL_MODE)
{
Result = FALSE;
break;
}
// check that the new channel carried in the Channel Switch Announcement is valid.
for (i=0; i<pAd->ChannelListNum; i++)
{
if ((Channel == pAd->ChannelList[i].Channel)
&&(pAd->ChannelList[i].RemainingTimeForUse == 0))
{
// a radar signal was found in the channel; the channel can't be used for at least 30 minutes.
pAd->ChannelList[i].RemainingTimeForUse = 1800;//30 min = 1800 sec
Result = TRUE;
break;
}
}
} while(FALSE);
return Result;
}
VOID NotifyChSwAnnToPeerAPs(
IN PRTMP_ADAPTER pAd,
IN PUCHAR pRA,
IN PUCHAR pTA,
IN UINT8 ChSwMode,
IN UINT8 Channel)
{
}
static VOID StartDFSProcedure(
IN PRTMP_ADAPTER pAd,
IN UCHAR Channel,
IN UINT8 ChSwMode)
{
// start DFS procedure
pAd->CommonCfg.Channel = Channel;
N_ChannelCheck(pAd);
pAd->CommonCfg.RadarDetect.RDMode = RD_SWITCHING_MODE;
pAd->CommonCfg.RadarDetect.CSCount = 0;
}
/*
==========================================================================
Description:
Channel Switch Announcement action frame sanity check.
Parameters:
1. MLME message containing the received frame
2. message length.
3. Channel switch announcement information buffer.
Return : TRUE if the frame is valid; FALSE otherwise.
==========================================================================
*/
/*
Channel Switch Announcement IE.
+----+-----+-----------+------------+-----------+
| ID | Len |Ch Sw Mode | New Ch Num | Ch Sw Cnt |
+----+-----+-----------+------------+-----------+
1 1 1 1 1
*/
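/*
Note: this and the other Peer*Sanity routines below walk the frame body
as a list of ID/Len/Value information elements, advancing by
(2 + eid_ptr->Len) bytes per element until the end of the message.
*/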
static BOOLEAN PeerChSwAnnSanity(
IN PRTMP_ADAPTER pAd,
IN VOID *pMsg,
IN ULONG MsgLen,
OUT PCH_SW_ANN_INFO pChSwAnnInfo)
{
PFRAME_802_11 Fr = (PFRAME_802_11)pMsg;
PUCHAR pFramePtr = Fr->Octet;
BOOLEAN result = FALSE;
PEID_STRUCT eid_ptr;
// skip 802.11 header.
MsgLen -= sizeof(HEADER_802_11);
// skip category and action code.
pFramePtr += 2;
MsgLen -= 2;
if (pChSwAnnInfo == NULL)
return result;
eid_ptr = (PEID_STRUCT)pFramePtr;
while (((UCHAR*)eid_ptr + eid_ptr->Len + 1) < ((PUCHAR)pFramePtr + MsgLen))
{
switch(eid_ptr->Eid)
{
case IE_CHANNEL_SWITCH_ANNOUNCEMENT:
NdisMoveMemory(&pChSwAnnInfo->ChSwMode, eid_ptr->Octet, 1);
NdisMoveMemory(&pChSwAnnInfo->Channel, eid_ptr->Octet + 1, 1);
NdisMoveMemory(&pChSwAnnInfo->ChSwCnt, eid_ptr->Octet + 2, 1);
result = TRUE;
break;
default:
break;
}
eid_ptr = (PEID_STRUCT)((UCHAR*)eid_ptr + 2 + eid_ptr->Len);
}
return result;
}
/*
==========================================================================
Description:
Measurement request action frame sanity check.
Parameters:
1. MLME message containing the received frame
2. message length.
3. Measurement request information buffer.
Return : TRUE if the frame is valid; FALSE otherwise.
==========================================================================
*/
static BOOLEAN PeerMeasureReqSanity(
IN PRTMP_ADAPTER pAd,
IN VOID *pMsg,
IN ULONG MsgLen,
OUT PUINT8 pDialogToken,
OUT PMEASURE_REQ_INFO pMeasureReqInfo)
{
PFRAME_802_11 Fr = (PFRAME_802_11)pMsg;
PUCHAR pFramePtr = Fr->Octet;
BOOLEAN result = FALSE;
PEID_STRUCT eid_ptr;
PUCHAR ptr;
UINT64 MeasureStartTime;
UINT16 MeasureDuration;
// skip 802.11 header.
MsgLen -= sizeof(HEADER_802_11);
// skip category and action code.
pFramePtr += 2;
MsgLen -= 2;
if (pMeasureReqInfo == NULL)
return result;
NdisMoveMemory(pDialogToken, pFramePtr, 1);
pFramePtr += 1;
MsgLen -= 1;
eid_ptr = (PEID_STRUCT)pFramePtr;
while (((UCHAR*)eid_ptr + eid_ptr->Len + 1) < ((PUCHAR)pFramePtr + MsgLen))
{
switch(eid_ptr->Eid)
{
case IE_MEASUREMENT_REQUEST:
NdisMoveMemory(&pMeasureReqInfo->Token, eid_ptr->Octet, 1);
NdisMoveMemory(&pMeasureReqInfo->ReqMode.word, eid_ptr->Octet + 1, 1);
NdisMoveMemory(&pMeasureReqInfo->ReqType, eid_ptr->Octet + 2, 1);
ptr = eid_ptr->Octet + 3;
NdisMoveMemory(&pMeasureReqInfo->MeasureReq.ChNum, ptr, 1);
NdisMoveMemory(&MeasureStartTime, ptr + 1, 8);
pMeasureReqInfo->MeasureReq.MeasureStartTime = SWAP64(MeasureStartTime);
NdisMoveMemory(&MeasureDuration, ptr + 9, 2);
pMeasureReqInfo->MeasureReq.MeasureDuration = SWAP16(MeasureDuration);
result = TRUE;
break;
default:
break;
}
eid_ptr = (PEID_STRUCT)((UCHAR*)eid_ptr + 2 + eid_ptr->Len);
}
return result;
}
/*
==========================================================================
Description:
Measurement report action frame sanity check.
Parameters:
1. MLME message containing the received frame
2. message length.
3. Measurement report information buffer.
4. basic report information buffer.
Return : TRUE if the frame is valid; FALSE otherwise.
==========================================================================
*/
/*
Measurement Report IE.
+----+-----+-------+-------------+--------------+----------------+
| ID | Len | Token | Report Mode | Measure Type | Measure Report |
+----+-----+-------+-------------+--------------+----------------+
1 1 1 1 1 variable
Basic Report.
+--------+------------+----------+-----+
| Ch Num | Start Time | Duration | Map |
+--------+------------+----------+-----+
1 8 2 1
Map Field Bit Format.
+-----+---------------+---------------------+-------+------------+----------+
| Bss | OFDM Preamble | Unidentified signal | Radar | Unmeasured | Reserved |
+-----+---------------+---------------------+-------+------------+----------+
0 1 2 3 4 5-7
*/
static BOOLEAN PeerMeasureReportSanity(
IN PRTMP_ADAPTER pAd,
IN VOID *pMsg,
IN ULONG MsgLen,
OUT PUINT8 pDialogToken,
OUT PMEASURE_REPORT_INFO pMeasureReportInfo,
OUT PUINT8 pReportBuf)
{
PFRAME_802_11 Fr = (PFRAME_802_11)pMsg;
PUCHAR pFramePtr = Fr->Octet;
BOOLEAN result = FALSE;
PEID_STRUCT eid_ptr;
PUCHAR ptr;
// skip 802.11 header.
MsgLen -= sizeof(HEADER_802_11);
// skip category and action code.
pFramePtr += 2;
MsgLen -= 2;
if (pMeasureReportInfo == NULL)
return result;
NdisMoveMemory(pDialogToken, pFramePtr, 1);
pFramePtr += 1;
MsgLen -= 1;
eid_ptr = (PEID_STRUCT)pFramePtr;
while (((UCHAR*)eid_ptr + eid_ptr->Len + 1) < ((PUCHAR)pFramePtr + MsgLen))
{
switch(eid_ptr->Eid)
{
case IE_MEASUREMENT_REPORT:
NdisMoveMemory(&pMeasureReportInfo->Token, eid_ptr->Octet, 1);
NdisMoveMemory(&pMeasureReportInfo->ReportMode, eid_ptr->Octet + 1, 1);
NdisMoveMemory(&pMeasureReportInfo->ReportType, eid_ptr->Octet + 2, 1);
if (pMeasureReportInfo->ReportType == RM_BASIC)
{
PMEASURE_BASIC_REPORT pReport = (PMEASURE_BASIC_REPORT)pReportBuf;
ptr = eid_ptr->Octet + 3;
NdisMoveMemory(&pReport->ChNum, ptr, 1);
NdisMoveMemory(&pReport->MeasureStartTime, ptr + 1, 8);
NdisMoveMemory(&pReport->MeasureDuration, ptr + 9, 2);
NdisMoveMemory(&pReport->Map, ptr + 11, 1);
}
else if (pMeasureReportInfo->ReportType == RM_CCA)
{
PMEASURE_CCA_REPORT pReport = (PMEASURE_CCA_REPORT)pReportBuf;
ptr = eid_ptr->Octet + 3;
NdisMoveMemory(&pReport->ChNum, ptr, 1);
NdisMoveMemory(&pReport->MeasureStartTime, ptr + 1, 8);
NdisMoveMemory(&pReport->MeasureDuration, ptr + 9, 2);
NdisMoveMemory(&pReport->CCA_Busy_Fraction, ptr + 11, 1);
}
else if (pMeasureReportInfo->ReportType == RM_RPI_HISTOGRAM)
{
PMEASURE_RPI_REPORT pReport = (PMEASURE_RPI_REPORT)pReportBuf;
ptr = eid_ptr->Octet + 3;
NdisMoveMemory(&pReport->ChNum, ptr, 1);
NdisMoveMemory(&pReport->MeasureStartTime, ptr + 1, 8);
NdisMoveMemory(&pReport->MeasureDuration, ptr + 9, 2);
NdisMoveMemory(&pReport->RPI_Density, ptr + 11, 8);
}
result = TRUE;
break;
default:
break;
}
eid_ptr = (PEID_STRUCT)((UCHAR*)eid_ptr + 2 + eid_ptr->Len);
}
return result;
}
/*
==========================================================================
Description:
TPC Request action frame sanity check.
Parameters:
1. MLME message containing the received frame
2. message length.
3. Dialog Token.
Return : TRUE if the frame is valid; FALSE otherwise.
==========================================================================
*/
static BOOLEAN PeerTpcReqSanity(
IN PRTMP_ADAPTER pAd,
IN VOID *pMsg,
IN ULONG MsgLen,
OUT PUINT8 pDialogToken)
{
PFRAME_802_11 Fr = (PFRAME_802_11)pMsg;
PUCHAR pFramePtr = Fr->Octet;
BOOLEAN result = FALSE;
PEID_STRUCT eid_ptr;
MsgLen -= sizeof(HEADER_802_11);
// skip category and action code.
pFramePtr += 2;
MsgLen -= 2;
if (pDialogToken == NULL)
return result;
NdisMoveMemory(pDialogToken, pFramePtr, 1);
pFramePtr += 1;
MsgLen -= 1;
eid_ptr = (PEID_STRUCT)pFramePtr;
while (((UCHAR*)eid_ptr + eid_ptr->Len + 1) < ((PUCHAR)pFramePtr + MsgLen))
{
switch(eid_ptr->Eid)
{
case IE_TPC_REQUEST:
result = TRUE;
break;
default:
break;
}
eid_ptr = (PEID_STRUCT)((UCHAR*)eid_ptr + 2 + eid_ptr->Len);
}
return result;
}
/*
==========================================================================
Description:
TPC Report action frame sanity check.
Parameters:
1. MLME message containing the received frame
2. message length.
3. Dialog Token.
4. TPC Report IE.
Return : TRUE if the frame is valid; FALSE otherwise.
==========================================================================
*/
static BOOLEAN PeerTpcRepSanity(
IN PRTMP_ADAPTER pAd,
IN VOID *pMsg,
IN ULONG MsgLen,
OUT PUINT8 pDialogToken,
OUT PTPC_REPORT_INFO pTpcRepInfo)
{
PFRAME_802_11 Fr = (PFRAME_802_11)pMsg;
PUCHAR pFramePtr = Fr->Octet;
BOOLEAN result = FALSE;
PEID_STRUCT eid_ptr;
MsgLen -= sizeof(HEADER_802_11);
// skip category and action code.
pFramePtr += 2;
MsgLen -= 2;
if (pDialogToken == NULL)
return result;
NdisMoveMemory(pDialogToken, pFramePtr, 1);
pFramePtr += 1;
MsgLen -= 1;
eid_ptr = (PEID_STRUCT)pFramePtr;
while (((UCHAR*)eid_ptr + eid_ptr->Len + 1) < ((PUCHAR)pFramePtr + MsgLen))
{
switch(eid_ptr->Eid)
{
case IE_TPC_REPORT:
NdisMoveMemory(&pTpcRepInfo->TxPwr, eid_ptr->Octet, 1);
NdisMoveMemory(&pTpcRepInfo->LinkMargin, eid_ptr->Octet + 1, 1);
result = TRUE;
break;
default:
break;
}
eid_ptr = (PEID_STRUCT)((UCHAR*)eid_ptr + 2 + eid_ptr->Len);
}
return result;
}
/*
==========================================================================
Description:
Channel Switch Announcement action frame handler.
Parameters:
Elem - MLME message containing the received frame
Return : None.
==========================================================================
*/
static VOID PeerChSwAnnAction(
IN PRTMP_ADAPTER pAd,
IN MLME_QUEUE_ELEM *Elem)
{
CH_SW_ANN_INFO ChSwAnnInfo;
PFRAME_802_11 pFr = (PFRAME_802_11)Elem->Msg;
UCHAR index = 0, Channel = 0, NewChannel = 0;
ULONG Bssidx = 0;
NdisZeroMemory(&ChSwAnnInfo, sizeof(CH_SW_ANN_INFO));
if (! PeerChSwAnnSanity(pAd, Elem->Msg, Elem->MsgLen, &ChSwAnnInfo))
{
DBGPRINT(RT_DEBUG_TRACE, ("Invalid Channel Switch Action Frame.\n"));
return;
}
if (pAd->OpMode == OPMODE_STA)
{
Bssidx = BssTableSearch(&pAd->ScanTab, pFr->Hdr.Addr3, pAd->CommonCfg.Channel);
if (Bssidx == BSS_NOT_FOUND)
{
DBGPRINT(RT_DEBUG_TRACE, ("PeerChSwAnnAction - Bssidx is not found\n"));
return;
}
DBGPRINT(RT_DEBUG_TRACE, ("\n****Bssidx is %d, Channel = %d\n", index, pAd->ScanTab.BssEntry[Bssidx].Channel));
hex_dump("SSID",pAd->ScanTab.BssEntry[Bssidx].Bssid ,6);
Channel = pAd->CommonCfg.Channel;
NewChannel = ChSwAnnInfo.Channel;
if ((pAd->CommonCfg.bIEEE80211H == 1) && (NewChannel != 0) && (Channel != NewChannel))
{
// Switching to channel 1 prevents rescanning the current channel immediately (by auto reconnection).
// In addition, clear the MLME queue and the scan table to discard the RX packets and previous scanning results.
AsicSwitchChannel(pAd, 1, FALSE);
AsicLockChannel(pAd, 1);
LinkDown(pAd, FALSE);
MlmeQueueInit(&pAd->Mlme.Queue);
BssTableInit(&pAd->ScanTab);
RTMPusecDelay(1000000); // delay to prevent the STA from reassociating immediately
// channel sanity check
for (index = 0 ; index < pAd->ChannelListNum; index++)
{
if (pAd->ChannelList[index].Channel == NewChannel)
{
pAd->ScanTab.BssEntry[Bssidx].Channel = NewChannel;
pAd->CommonCfg.Channel = NewChannel;
AsicSwitchChannel(pAd, pAd->CommonCfg.Channel, FALSE);
AsicLockChannel(pAd, pAd->CommonCfg.Channel);
DBGPRINT(RT_DEBUG_TRACE, ("&&&&&&&&&&&&&&&&PeerChSwAnnAction - STA receive channel switch announcement IE (New Channel =%d)\n", NewChannel));
break;
}
}
if (index >= pAd->ChannelListNum)
{
DBGPRINT_ERR(("&&&&&&&&&&&&&&&&&&&&&&&&&&PeerChSwAnnAction(can not find New Channel=%d in ChannelList[%d]\n", pAd->CommonCfg.Channel, pAd->ChannelListNum));
}
}
}
return;
}
/*
==========================================================================
Description:
Measurement Request action frame handler.
Parameters:
Elem - MLME message containing the received frame
Return : None.
==========================================================================
*/
static VOID PeerMeasureReqAction(
IN PRTMP_ADAPTER pAd,
IN MLME_QUEUE_ELEM *Elem)
{
PFRAME_802_11 pFr = (PFRAME_802_11)Elem->Msg;
UINT8 DialogToken;
MEASURE_REQ_INFO MeasureReqInfo;
MEASURE_REPORT_MODE ReportMode;
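// This device does not perform the requested measurement (see the
// note in PeerSpectrumAction), so reply with the Incapable bit set.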
if(PeerMeasureReqSanity(pAd, Elem->Msg, Elem->MsgLen, &DialogToken, &MeasureReqInfo))
{
ReportMode.word = 0;
ReportMode.field.Incapable = 1;
EnqueueMeasurementRep(pAd, pFr->Hdr.Addr2, DialogToken, MeasureReqInfo.Token, ReportMode.word, MeasureReqInfo.ReqType, 0, NULL);
}
return;
}
/*
==========================================================================
Description:
Measurement Report action frame handler.
Parameters:
Elem - MLME message containing the received frame
Return : None.
==========================================================================
*/
static VOID PeerMeasureReportAction(
IN PRTMP_ADAPTER pAd,
IN MLME_QUEUE_ELEM *Elem)
{
MEASURE_REPORT_INFO MeasureReportInfo;
PFRAME_802_11 pFr = (PFRAME_802_11)Elem->Msg;
UINT8 DialogToken;
PUINT8 pMeasureReportInfo;
// if (pAd->CommonCfg.bIEEE80211H != TRUE)
// return;
if ((pMeasureReportInfo = kmalloc(sizeof(MEASURE_RPI_REPORT), GFP_ATOMIC)) == NULL)
{
DBGPRINT(RT_DEBUG_ERROR, ("%s unable to alloc memory for measure report buffer (size=%zu).\n", __func__, sizeof(MEASURE_RPI_REPORT)));
return;
}
NdisZeroMemory(&MeasureReportInfo, sizeof(MEASURE_REPORT_INFO));
NdisZeroMemory(pMeasureReportInfo, sizeof(MEASURE_RPI_REPORT));
if (PeerMeasureReportSanity(pAd, Elem->Msg, Elem->MsgLen, &DialogToken, &MeasureReportInfo, pMeasureReportInfo))
{
do {
PMEASURE_REQ_ENTRY pEntry = NULL;
// Not an autonomous measure report:
// check the dialog token field and drop the frame if the token doesn't match.
if ((DialogToken != 0)
&& ((pEntry = MeasureReqLookUp(pAd, DialogToken)) == NULL))
break;
if (pEntry != NULL)
MeasureReqDelete(pAd, pEntry->DialogToken);
if (MeasureReportInfo.ReportType == RM_BASIC)
{
PMEASURE_BASIC_REPORT pBasicReport = (PMEASURE_BASIC_REPORT)pMeasureReportInfo;
if ((pBasicReport->Map.field.Radar)
&& (DfsRequirementCheck(pAd, pBasicReport->ChNum) == TRUE))
{
NotifyChSwAnnToPeerAPs(pAd, pFr->Hdr.Addr1, pFr->Hdr.Addr2, 1, pBasicReport->ChNum);
StartDFSProcedure(pAd, pBasicReport->ChNum, 1);
}
}
} while (FALSE);
}
else
DBGPRINT(RT_DEBUG_TRACE, ("Invalid Measurement Report Frame.\n"));
kfree(pMeasureReportInfo);
return;
}
/*
==========================================================================
Description:
TPC Request action frame handler.
Parameters:
Elem - MLME message containing the received frame
Return : None.
==========================================================================
*/
static VOID PeerTpcReqAction(
IN PRTMP_ADAPTER pAd,
IN MLME_QUEUE_ELEM *Elem)
{
PFRAME_802_11 pFr = (PFRAME_802_11)Elem->Msg;
PUCHAR pFramePtr = pFr->Octet;
UINT8 DialogToken;
UINT8 TxPwr = GetCurTxPwr(pAd, Elem->Wcid);
UINT8 LinkMargin = 0;
CHAR RealRssi;
// link margin: Ratio of the received signal power to the minimum desired by the station (STA). The
// STA may incorporate rate information and channel conditions, including interference, into its computation
// of link margin.
RealRssi = RTMPMaxRssi(pAd, ConvertToRssi(pAd, Elem->Rssi0, RSSI_0),
ConvertToRssi(pAd, Elem->Rssi1, RSSI_1),
ConvertToRssi(pAd, Elem->Rssi2, RSSI_2));
// skip Category and action code.
pFramePtr += 2;
// Dialog token.
NdisMoveMemory(&DialogToken, pFramePtr, 1);
LinkMargin = (RealRssi / MIN_RCV_PWR);
if (PeerTpcReqSanity(pAd, Elem->Msg, Elem->MsgLen, &DialogToken))
EnqueueTPCRep(pAd, pFr->Hdr.Addr2, DialogToken, TxPwr, LinkMargin);
return;
}
/*
==========================================================================
Description:
TPC Report action frame handler.
Parameters:
Elem - MLME message containing the received frame
Return : None.
==========================================================================
*/
static VOID PeerTpcRepAction(
IN PRTMP_ADAPTER pAd,
IN MLME_QUEUE_ELEM *Elem)
{
UINT8 DialogToken;
TPC_REPORT_INFO TpcRepInfo;
PTPC_REQ_ENTRY pEntry = NULL;
NdisZeroMemory(&TpcRepInfo, sizeof(TPC_REPORT_INFO));
if (PeerTpcRepSanity(pAd, Elem->Msg, Elem->MsgLen, &DialogToken, &TpcRepInfo))
{
if ((pEntry = TpcReqLookUp(pAd, DialogToken)) != NULL)
{
TpcReqDelete(pAd, pEntry->DialogToken);
DBGPRINT(RT_DEBUG_TRACE, ("%s: DialogToken=%x, TxPwr=%d, LinkMargin=%d\n",
__func__, DialogToken, TpcRepInfo.TxPwr, TpcRepInfo.LinkMargin));
}
}
return;
}
/*
==========================================================================
Description:
Spectrum action frame handler, covering channel switch announcement,
measurement report, and measurement request action frames.
Parameters:
Elem - MLME message containing the received frame
Return : None.
==========================================================================
*/
VOID PeerSpectrumAction(
IN PRTMP_ADAPTER pAd,
IN MLME_QUEUE_ELEM *Elem)
{
UCHAR Action = Elem->Msg[LENGTH_802_11+1];
if (pAd->CommonCfg.bIEEE80211H != TRUE)
return;
switch(Action)
{
case SPEC_MRQ:
// the current rt2860 cannot perform the measurements specified in a Measurement Request;
// reject all measurement requests.
PeerMeasureReqAction(pAd, Elem);
break;
case SPEC_MRP:
PeerMeasureReportAction(pAd, Elem);
break;
case SPEC_TPCRQ:
PeerTpcReqAction(pAd, Elem);
break;
case SPEC_TPCRP:
PeerTpcRepAction(pAd, Elem);
break;
case SPEC_CHANNEL_SWITCH:
PeerChSwAnnAction(pAd, Elem);
break;
}
return;
}
/*
==========================================================================
Description:
Trigger a Measurement request to a peer station. The argument is a
'-'-separated string "Aid-ReqType-Ch" with each field parsed as hex
(e.g. "1-0-6": Aid 1, request type 0, channel 6).
Parameters:
arg - pointer to the argument string.
Return : TRUE.
==========================================================================
*/
INT Set_MeasureReq_Proc(
IN PRTMP_ADAPTER pAd,
IN PUCHAR arg)
{
UINT Aid = 1;
UINT ArgIdx;
PUCHAR thisChar;
MEASURE_REQ_MODE MeasureReqMode;
UINT8 MeasureReqToken = RandomByte(pAd);
UINT8 MeasureReqType = RM_BASIC;
UINT8 MeasureCh = 1;
ArgIdx = 1;
while ((thisChar = strsep((char **)&arg, "-")) != NULL)
{
switch(ArgIdx)
{
case 1: // Aid.
Aid = simple_strtol(thisChar, 0, 16);
break;
case 2: // Measurement Request Type.
MeasureReqType = simple_strtol(thisChar, 0, 16);
if (MeasureReqType > 3)
{
DBGPRINT(RT_DEBUG_ERROR, ("%s: unknow MeasureReqType(%d)\n", __func__, MeasureReqType));
return TRUE;
}
break;
case 3: // Measurement channel.
MeasureCh = simple_strtol(thisChar, 0, 16);
break;
}
ArgIdx++;
}
DBGPRINT(RT_DEBUG_TRACE, ("%s::Aid = %d, MeasureReqType=%d MeasureCh=%d\n", __func__, Aid, MeasureReqType, MeasureCh));
if (!VALID_WCID(Aid))
{
DBGPRINT(RT_DEBUG_ERROR, ("%s: unknow sta of Aid(%d)\n", __func__, Aid));
return TRUE;
}
MeasureReqMode.word = 0;
MeasureReqMode.field.Enable = 1;
MeasureReqInsert(pAd, MeasureReqToken);
EnqueueMeasurementReq(pAd, pAd->MacTab.Content[Aid].Addr,
MeasureReqToken, MeasureReqMode.word, MeasureReqType, MeasureCh, 2000);
return TRUE;
}
INT Set_TpcReq_Proc(
IN PRTMP_ADAPTER pAd,
IN PUCHAR arg)
{
UINT Aid;
UINT8 TpcReqToken = RandomByte(pAd);
Aid = simple_strtol(arg, 0, 16);
DBGPRINT(RT_DEBUG_TRACE, ("%s::Aid = %d\n", __func__, Aid));
if (!VALID_WCID(Aid))
{
DBGPRINT(RT_DEBUG_ERROR, ("%s: unknow sta of Aid(%d)\n", __func__, Aid));
return TRUE;
}
TpcReqInsert(pAd, TpcReqToken);
EnqueueTPCReq(pAd, pAd->MacTab.Content[Aid].Addr, TpcReqToken);
return TRUE;
}
|
gpl-2.0
|
AmeriCanAndroid/aca-evo3d-omega-htc-35
|
drivers/gpu/drm/nouveau/nouveau_gem.c
|
765
|
21031
|
/*
* Copyright (C) 2008 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"
#define nouveau_gem_pushbuf_sync(chan) 0
int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
return 0;
}
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
struct nouveau_bo *nvbo = gem->driver_private;
struct ttm_buffer_object *bo = &nvbo->bo;
if (!nvbo)
return;
nvbo->gem = NULL;
if (unlikely(nvbo->cpu_filp))
ttm_bo_synccpu_write_release(bo);
if (unlikely(nvbo->pin_refcnt)) {
nvbo->pin_refcnt = 1;
nouveau_bo_unpin(nvbo);
}
ttm_bo_unref(&bo);
drm_gem_object_release(gem);
kfree(gem);
}
int
nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
int size, int align, uint32_t flags, uint32_t tile_mode,
uint32_t tile_flags, bool no_vm, bool mappable,
struct nouveau_bo **pnvbo)
{
struct nouveau_bo *nvbo;
int ret;
ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
tile_flags, no_vm, mappable, pnvbo);
if (ret)
return ret;
nvbo = *pnvbo;
nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
if (!nvbo->gem) {
nouveau_bo_ref(NULL, pnvbo);
return -ENOMEM;
}
nvbo->bo.persistant_swap_storage = nvbo->gem->filp;
nvbo->gem->driver_private = nvbo;
return 0;
}
static int
nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
{
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
if (nvbo->bo.mem.mem_type == TTM_PL_TT)
rep->domain = NOUVEAU_GEM_DOMAIN_GART;
else
rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
rep->offset = nvbo->bo.offset;
rep->map_handle = nvbo->mappable ? nvbo->bo.addr_space_offset : 0;
rep->tile_mode = nvbo->tile_mode;
rep->tile_flags = nvbo->tile_flags;
return 0;
}
static bool
nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags)
{
switch (tile_flags) {
case 0x0000:
case 0x1800:
case 0x2800:
case 0x4800:
case 0x7000:
case 0x7400:
case 0x7a00:
case 0xe000:
break;
default:
NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
return false;
}
return true;
}
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_nouveau_gem_new *req = data;
struct nouveau_bo *nvbo = NULL;
struct nouveau_channel *chan = NULL;
uint32_t flags = 0;
int ret = 0;
NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
if (req->channel_hint) {
NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
file_priv, chan);
}
if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
flags |= TTM_PL_FLAG_VRAM;
if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
flags |= TTM_PL_FLAG_TT;
if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
flags |= TTM_PL_FLAG_SYSTEM;
if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
return -EINVAL;
ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
req->info.tile_mode, req->info.tile_flags, false,
(req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
&nvbo);
if (ret)
return ret;
ret = nouveau_gem_info(nvbo->gem, &req->info);
if (ret)
goto out;
ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
out:
drm_gem_object_handle_unreference_unlocked(nvbo->gem);
if (ret)
drm_gem_object_unreference_unlocked(nvbo->gem);
return ret;
}
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
uint32_t write_domains, uint32_t valid_domains)
{
struct nouveau_bo *nvbo = gem->driver_private;
struct ttm_buffer_object *bo = &nvbo->bo;
uint32_t domains = valid_domains &
(write_domains ? write_domains : read_domains);
uint32_t pref_flags = 0, valid_flags = 0;
if (!domains)
return -EINVAL;
if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
valid_flags |= TTM_PL_FLAG_VRAM;
if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
valid_flags |= TTM_PL_FLAG_TT;
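/* Prefer keeping the buffer where it already resides if that
 * placement is among the requested domains; otherwise prefer
 * VRAM over GART. */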
if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
bo->mem.mem_type == TTM_PL_VRAM)
pref_flags |= TTM_PL_FLAG_VRAM;
else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
bo->mem.mem_type == TTM_PL_TT)
pref_flags |= TTM_PL_FLAG_TT;
else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
pref_flags |= TTM_PL_FLAG_VRAM;
else
pref_flags |= TTM_PL_FLAG_TT;
nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);
return 0;
}
struct validate_op {
struct list_head vram_list;
struct list_head gart_list;
struct list_head both_list;
};
static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
{
struct list_head *entry, *tmp;
struct nouveau_bo *nvbo;
list_for_each_safe(entry, tmp, list) {
nvbo = list_entry(entry, struct nouveau_bo, entry);
if (likely(fence)) {
struct nouveau_fence *prev_fence;
spin_lock(&nvbo->bo.lock);
prev_fence = nvbo->bo.sync_obj;
nvbo->bo.sync_obj = nouveau_fence_ref(fence);
spin_unlock(&nvbo->bo.lock);
nouveau_fence_unref((void *)&prev_fence);
}
if (unlikely(nvbo->validate_mapped)) {
ttm_bo_kunmap(&nvbo->kmap);
nvbo->validate_mapped = false;
}
list_del(&nvbo->entry);
nvbo->reserved_by = NULL;
ttm_bo_unreserve(&nvbo->bo);
drm_gem_object_unreference(nvbo->gem);
}
}
static void
validate_fini(struct validate_op *op, struct nouveau_fence* fence)
{
validate_fini_list(&op->vram_list, fence);
validate_fini_list(&op->gart_list, fence);
validate_fini_list(&op->both_list, fence);
}
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
struct drm_nouveau_gem_pushbuf_bo *pbbo,
int nr_buffers, struct validate_op *op)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t sequence;
int trycnt = 0;
int ret, i;
sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
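/* Reserve every buffer on the list; on contention (-EAGAIN) drop all
 * reservations, wait for the contended buffer to become unreserved,
 * and retry the whole list from scratch. */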
retry:
if (++trycnt > 100000) {
NV_ERROR(dev, "%s failed and gave up.\n", __func__);
return -EINVAL;
}
for (i = 0; i < nr_buffers; i++) {
struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
struct drm_gem_object *gem;
struct nouveau_bo *nvbo;
gem = drm_gem_object_lookup(dev, file_priv, b->handle);
if (!gem) {
NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
validate_fini(op, NULL);
return -EINVAL;
}
nvbo = gem->driver_private;
if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
NV_ERROR(dev, "multiple instances of buffer %d on "
"validation list\n", b->handle);
validate_fini(op, NULL);
return -EINVAL;
}
ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence);
if (ret) {
validate_fini(op, NULL);
if (ret == -EAGAIN)
ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
drm_gem_object_unreference(gem);
if (ret) {
NV_ERROR(dev, "fail reserve\n");
return ret;
}
goto retry;
}
b->user_priv = (uint64_t)(unsigned long)nvbo;
nvbo->reserved_by = file_priv;
nvbo->pbbo_index = i;
if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
(b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
list_add_tail(&nvbo->entry, &op->both_list);
else
if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
list_add_tail(&nvbo->entry, &op->vram_list);
else
if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
list_add_tail(&nvbo->entry, &op->gart_list);
else {
NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
b->valid_domains);
list_add_tail(&nvbo->entry, &op->both_list);
validate_fini(op, NULL);
return -EINVAL;
}
if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
validate_fini(op, NULL);
if (nvbo->cpu_filp == file_priv) {
NV_ERROR(dev, "bo %p mapped by process trying "
"to validate it!\n", nvbo);
return -EINVAL;
}
ret = ttm_bo_wait_cpu(&nvbo->bo, false);
if (ret) {
NV_ERROR(dev, "fail wait_cpu\n");
return ret;
}
goto retry;
}
}
return 0;
}
static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
(void __force __user *)(uintptr_t)user_pbbo_ptr;
struct drm_device *dev = chan->dev;
struct nouveau_bo *nvbo;
int ret, relocs = 0;
list_for_each_entry(nvbo, list, entry) {
struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
if (prev_fence && nouveau_fence_channel(prev_fence) != chan) {
spin_lock(&nvbo->bo.lock);
ret = ttm_bo_wait(&nvbo->bo, false, false, false);
spin_unlock(&nvbo->bo.lock);
if (unlikely(ret)) {
NV_ERROR(dev, "fail wait other chan\n");
return ret;
}
}
ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
b->write_domains,
b->valid_domains);
if (unlikely(ret)) {
NV_ERROR(dev, "fail set_domain\n");
return ret;
}
nvbo->channel = chan;
ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
false, false, false);
nvbo->channel = NULL;
if (unlikely(ret)) {
NV_ERROR(dev, "fail ttm_validate\n");
return ret;
}
if (nvbo->bo.offset == b->presumed.offset &&
((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
(nvbo->bo.mem.mem_type == TTM_PL_TT &&
b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
continue;
if (nvbo->bo.mem.mem_type == TTM_PL_TT)
b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
else
b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
b->presumed.offset = nvbo->bo.offset;
b->presumed.valid = 0;
relocs++;
if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
&b->presumed, sizeof(b->presumed)))
return -EFAULT;
}
return relocs;
}
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
struct drm_file *file_priv,
struct drm_nouveau_gem_pushbuf_bo *pbbo,
uint64_t user_buffers, int nr_buffers,
struct validate_op *op, int *apply_relocs)
{
struct drm_device *dev = chan->dev;
int ret, relocs = 0;
INIT_LIST_HEAD(&op->vram_list);
INIT_LIST_HEAD(&op->gart_list);
INIT_LIST_HEAD(&op->both_list);
if (nr_buffers == 0)
return 0;
ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
if (unlikely(ret)) {
NV_ERROR(dev, "validate_init\n");
return ret;
}
ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
NV_ERROR(dev, "validate vram_list\n");
validate_fini(op, NULL);
return ret;
}
relocs += ret;
ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
NV_ERROR(dev, "validate gart_list\n");
validate_fini(op, NULL);
return ret;
}
relocs += ret;
ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
NV_ERROR(dev, "validate both_list\n");
validate_fini(op, NULL);
return ret;
}
relocs += ret;
*apply_relocs = relocs;
return 0;
}
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
void *mem;
void __user *userptr = (void __force __user *)(uintptr_t)user;
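/* nmemb * size could overflow in principle; the ioctl entry points
 * bound nr_push/nr_buffers/nr_relocs against the NOUVEAU_GEM_MAX_*
 * limits before calling here. */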
mem = kmalloc(nmemb * size, GFP_KERNEL);
if (!mem)
return ERR_PTR(-ENOMEM);
if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
kfree(mem);
return ERR_PTR(-EFAULT);
}
return mem;
}
static int
nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
struct drm_nouveau_gem_pushbuf *req,
struct drm_nouveau_gem_pushbuf_bo *bo)
{
struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
int ret = 0;
unsigned i;
reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
if (IS_ERR(reloc))
return PTR_ERR(reloc);
for (i = 0; i < req->nr_relocs; i++) {
struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
struct drm_nouveau_gem_pushbuf_bo *b;
struct nouveau_bo *nvbo;
uint32_t data;
if (unlikely(r->bo_index > req->nr_buffers)) {
NV_ERROR(dev, "reloc bo index invalid\n");
ret = -EINVAL;
break;
}
b = &bo[r->bo_index];
if (b->presumed.valid)
continue;
if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
NV_ERROR(dev, "reloc container bo index invalid\n");
ret = -EINVAL;
break;
}
nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
if (unlikely(r->reloc_bo_offset + 4 >
nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
NV_ERROR(dev, "reloc outside of bo\n");
ret = -EINVAL;
break;
}
if (!nvbo->kmap.virtual) {
ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
&nvbo->kmap);
if (ret) {
NV_ERROR(dev, "failed kmap for reloc\n");
break;
}
nvbo->validate_mapped = true;
}
if (r->flags & NOUVEAU_GEM_RELOC_LOW)
data = b->presumed.offset + r->data;
else
if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
data = (b->presumed.offset + r->data) >> 32;
else
data = r->data;
if (r->flags & NOUVEAU_GEM_RELOC_OR) {
if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
data |= r->tor;
else
data |= r->vor;
}
spin_lock(&nvbo->bo.lock);
ret = ttm_bo_wait(&nvbo->bo, false, false, false);
spin_unlock(&nvbo->bo.lock);
if (ret) {
NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
break;
}
nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
}
kfree(reloc);
return ret;
}
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_nouveau_gem_pushbuf *req = data;
struct drm_nouveau_gem_pushbuf_push *push;
struct drm_nouveau_gem_pushbuf_bo *bo;
struct nouveau_channel *chan;
struct validate_op op;
struct nouveau_fence *fence = NULL;
int i, j, ret = 0, do_reloc = 0;
NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
req->vram_available = dev_priv->fb_aper_free;
req->gart_available = dev_priv->gart_info.aper_free;
if (unlikely(req->nr_push == 0))
goto out_next;
if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
req->nr_push, NOUVEAU_GEM_MAX_PUSH);
return -EINVAL;
}
if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
return -EINVAL;
}
if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
return -EINVAL;
}
push = u_memcpya(req->push, req->nr_push, sizeof(*push));
if (IS_ERR(push))
return PTR_ERR(push);
bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
if (IS_ERR(bo)) {
kfree(push);
return PTR_ERR(bo);
}
mutex_lock(&dev->struct_mutex);
/* Validate buffer list */
ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
req->nr_buffers, &op, &do_reloc);
if (ret) {
NV_ERROR(dev, "validate: %d\n", ret);
goto out;
}
/* Apply any relocations that are required */
if (do_reloc) {
ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
if (ret) {
NV_ERROR(dev, "reloc apply: %d\n", ret);
goto out;
}
}
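/* Three submission paths: IB-mode channels go through nv50_dma_push(),
 * NV20+ cards emit call/return pairs into the ring, and older cards
 * patch a jump command into the pushbuf before emitting it. */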
if (chan->dma.ib_max) {
ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
if (ret) {
NV_INFO(dev, "nv50cal_space: %d\n", ret);
goto out;
}
for (i = 0; i < req->nr_push; i++) {
struct nouveau_bo *nvbo = (void *)(unsigned long)
bo[push[i].bo_index].user_priv;
nv50_dma_push(chan, nvbo, push[i].offset,
push[i].length);
}
} else
if (dev_priv->card_type >= NV_20) {
ret = RING_SPACE(chan, req->nr_push * 2);
if (ret) {
NV_ERROR(dev, "cal_space: %d\n", ret);
goto out;
}
for (i = 0; i < req->nr_push; i++) {
struct nouveau_bo *nvbo = (void *)(unsigned long)
bo[push[i].bo_index].user_priv;
struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
push[i].offset) | 2);
OUT_RING(chan, 0);
}
} else {
ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
if (ret) {
NV_ERROR(dev, "jmp_space: %d\n", ret);
goto out;
}
for (i = 0; i < req->nr_push; i++) {
struct nouveau_bo *nvbo = (void *)(unsigned long)
bo[push[i].bo_index].user_priv;
struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
uint32_t cmd;
cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
cmd |= 0x20000000;
if (unlikely(cmd != req->suffix0)) {
if (!nvbo->kmap.virtual) {
ret = ttm_bo_kmap(&nvbo->bo, 0,
nvbo->bo.mem.
num_pages,
&nvbo->kmap);
if (ret) {
WIND_RING(chan);
goto out;
}
nvbo->validate_mapped = true;
}
nouveau_bo_wr32(nvbo, (push[i].offset +
push[i].length - 8) / 4, cmd);
}
OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
push[i].offset) | 0x20000000);
OUT_RING(chan, 0);
for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
OUT_RING(chan, 0);
}
}
ret = nouveau_fence_new(chan, &fence, true);
if (ret) {
NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
goto out;
}
out:
validate_fini(&op, fence);
nouveau_fence_unref((void**)&fence);
mutex_unlock(&dev->struct_mutex);
kfree(bo);
kfree(push);
out_next:
if (chan->dma.ib_max) {
req->suffix0 = 0x00000000;
req->suffix1 = 0x00000000;
} else
if (dev_priv->card_type >= NV_20) {
req->suffix0 = 0x00020000;
req->suffix1 = 0x00000000;
} else {
req->suffix0 = 0x20000000 |
(chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
req->suffix1 = 0x00000000;
}
return ret;
}
static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
uint32_t flags = 0;
if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
flags |= TTM_PL_FLAG_VRAM;
if (domain & NOUVEAU_GEM_DOMAIN_GART)
flags |= TTM_PL_FLAG_TT;
return flags;
}
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_nouveau_gem_cpu_prep *req = data;
struct drm_gem_object *gem;
struct nouveau_bo *nvbo;
bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
int ret = -EINVAL;
NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
gem = drm_gem_object_lookup(dev, file_priv, req->handle);
if (!gem)
return ret;
nvbo = nouveau_gem_object(gem);
if (nvbo->cpu_filp) {
if (nvbo->cpu_filp == file_priv)
goto out;
ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
if (ret)
goto out;
}
if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
spin_lock(&nvbo->bo.lock);
ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
spin_unlock(&nvbo->bo.lock);
} else {
ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
if (ret == 0)
nvbo->cpu_filp = file_priv;
}
out:
drm_gem_object_unreference_unlocked(gem);
return ret;
}
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_nouveau_gem_cpu_prep *req = data;
struct drm_gem_object *gem;
struct nouveau_bo *nvbo;
int ret = -EINVAL;
NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
gem = drm_gem_object_lookup(dev, file_priv, req->handle);
if (!gem)
return ret;
nvbo = nouveau_gem_object(gem);
if (nvbo->cpu_filp != file_priv)
goto out;
nvbo->cpu_filp = NULL;
ttm_bo_synccpu_write_release(&nvbo->bo);
ret = 0;
out:
drm_gem_object_unreference_unlocked(gem);
return ret;
}
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_nouveau_gem_info *req = data;
struct drm_gem_object *gem;
int ret;
NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
gem = drm_gem_object_lookup(dev, file_priv, req->handle);
if (!gem)
return -EINVAL;
ret = nouveau_gem_info(gem, req);
drm_gem_object_unreference_unlocked(gem);
return ret;
}
|
gpl-2.0
|
Validus-Kernel/kernel_htc_flounder
|
arch/arm/mach-omap2/board-omap3evm.c
|
2045
|
20361
|
/*
* linux/arch/arm/mach-omap2/board-omap3evm.c
*
* Copyright (C) 2008 Texas Instruments
*
* Modified from mach-omap2/board-3430sdp.c
*
* Initial code: Syed Mohammed Khasim
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <linux/leds.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
#include <linux/i2c/twl.h>
#include <linux/usb/otg.h>
#include <linux/usb/musb.h>
#include <linux/usb/nop-usb-xceiv.h>
#include <linux/smsc911x.h>
#include <linux/wl12xx.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>
#include <linux/mmc/host.h>
#include <linux/export.h>
#include <linux/usb/phy.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <linux/platform_data/mtd-nand-omap2.h>
#include "common.h"
#include <linux/platform_data/spi-omap2-mcspi.h>
#include <video/omapdss.h>
#include <video/omap-panel-data.h>
#include "soc.h"
#include "mux.h"
#include "sdram-micron-mt46h32m32lf-6.h"
#include "hsmmc.h"
#include "common-board-devices.h"
#include "board-flash.h"
#define NAND_CS 0
#define OMAP3_EVM_TS_GPIO 175
#define OMAP3_EVM_EHCI_VBUS 22
#define OMAP3_EVM_EHCI_SELECT 61
#define OMAP3EVM_ETHR_START 0x2c000000
#define OMAP3EVM_ETHR_SIZE 1024
#define OMAP3EVM_ETHR_ID_REV 0x50
#define OMAP3EVM_ETHR_GPIO_IRQ 176
#define OMAP3EVM_SMSC911X_CS 5
/*
* Eth Reset signal
* 64 = Generation 1 (<=RevD)
* 7 = Generation 2 (>=RevE)
*/
#define OMAP3EVM_GEN1_ETHR_GPIO_RST 64
#define OMAP3EVM_GEN2_ETHR_GPIO_RST 7
/*
* OMAP35x EVM revision
* Run time detection of EVM revision is done by reading Ethernet
* PHY ID -
* GEN_1 = 0x01150000
* GEN_2 = 0x92200000
*/
enum {
OMAP3EVM_BOARD_GEN_1 = 0, /* EVM Rev between A - D */
OMAP3EVM_BOARD_GEN_2, /* EVM Rev >= Rev E */
};
static u8 omap3_evm_version;
static u8 get_omap3_evm_rev(void)
{
return omap3_evm_version;
}
static void __init omap3_evm_get_revision(void)
{
void __iomem *ioaddr;
unsigned int smsc_id;
/* Ethernet PHY ID is stored at ID_REV register */
ioaddr = ioremap_nocache(OMAP3EVM_ETHR_START, SZ_1K);
if (!ioaddr)
return;
smsc_id = readl(ioaddr + OMAP3EVM_ETHR_ID_REV) & 0xFFFF0000;
iounmap(ioaddr);
switch (smsc_id) {
/*SMSC9115 chipset*/
case 0x01150000:
omap3_evm_version = OMAP3EVM_BOARD_GEN_1;
break;
/*SMSC 9220 chipset*/
case 0x92200000:
default:
omap3_evm_version = OMAP3EVM_BOARD_GEN_2;
}
}
#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
#include "gpmc-smsc911x.h"
static struct omap_smsc911x_platform_data smsc911x_cfg = {
.cs = OMAP3EVM_SMSC911X_CS,
.gpio_irq = OMAP3EVM_ETHR_GPIO_IRQ,
.gpio_reset = -EINVAL,
.flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
};
static inline void __init omap3evm_init_smsc911x(void)
{
/* Configure ethernet controller reset gpio */
if (cpu_is_omap3430()) {
if (get_omap3_evm_rev() == OMAP3EVM_BOARD_GEN_1)
smsc911x_cfg.gpio_reset = OMAP3EVM_GEN1_ETHR_GPIO_RST;
else
smsc911x_cfg.gpio_reset = OMAP3EVM_GEN2_ETHR_GPIO_RST;
}
gpmc_smsc911x_init(&smsc911x_cfg);
}
#else
static inline void __init omap3evm_init_smsc911x(void) { return; }
#endif
/*
* OMAP3EVM LCD Panel control signals
*/
#define OMAP3EVM_LCD_PANEL_LR 2
#define OMAP3EVM_LCD_PANEL_UD 3
#define OMAP3EVM_LCD_PANEL_INI 152
#define OMAP3EVM_LCD_PANEL_QVGA 154
#define OMAP3EVM_LCD_PANEL_RESB 155
#define OMAP3EVM_LCD_PANEL_ENVDD 153
#define OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO 210
/*
* OMAP3EVM DVI control signals
*/
#define OMAP3EVM_DVI_PANEL_EN_GPIO 199
static struct panel_sharp_ls037v7dw01_data omap3_evm_lcd_data = {
.resb_gpio = OMAP3EVM_LCD_PANEL_RESB,
.ini_gpio = OMAP3EVM_LCD_PANEL_INI,
.mo_gpio = OMAP3EVM_LCD_PANEL_QVGA,
.lr_gpio = OMAP3EVM_LCD_PANEL_LR,
.ud_gpio = OMAP3EVM_LCD_PANEL_UD,
};
static void __init omap3_evm_display_init(void)
{
int r;
r = gpio_request_one(OMAP3EVM_LCD_PANEL_ENVDD, GPIOF_OUT_INIT_LOW,
"lcd_panel_envdd");
if (r)
pr_err("failed to get lcd_panel_envdd GPIO\n");
r = gpio_request_one(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO,
GPIOF_OUT_INIT_LOW, "lcd_panel_bklight");
if (r)
pr_err("failed to get lcd_panel_bklight GPIO\n");
if (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2)
gpio_set_value_cansleep(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 0);
else
gpio_set_value_cansleep(OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO, 1);
}
static struct omap_dss_device omap3_evm_lcd_device = {
.name = "lcd",
.driver_name = "sharp_ls_panel",
.type = OMAP_DISPLAY_TYPE_DPI,
.phy.dpi.data_lines = 18,
.data = &omap3_evm_lcd_data,
};
static struct omap_dss_device omap3_evm_tv_device = {
.name = "tv",
.driver_name = "venc",
.type = OMAP_DISPLAY_TYPE_VENC,
.phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO,
};
static struct tfp410_platform_data dvi_panel = {
.power_down_gpio = OMAP3EVM_DVI_PANEL_EN_GPIO,
.i2c_bus_num = -1,
};
static struct omap_dss_device omap3_evm_dvi_device = {
.name = "dvi",
.type = OMAP_DISPLAY_TYPE_DPI,
.driver_name = "tfp410",
.data = &dvi_panel,
.phy.dpi.data_lines = 24,
};
static struct omap_dss_device *omap3_evm_dss_devices[] = {
&omap3_evm_lcd_device,
&omap3_evm_tv_device,
&omap3_evm_dvi_device,
};
static struct omap_dss_board_info omap3_evm_dss_data = {
.num_devices = ARRAY_SIZE(omap3_evm_dss_devices),
.devices = omap3_evm_dss_devices,
.default_device = &omap3_evm_lcd_device,
};
static struct regulator_consumer_supply omap3evm_vmmc1_supply[] = {
REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"),
};
static struct regulator_consumer_supply omap3evm_vsim_supply[] = {
REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.0"),
};
/* VMMC1 for MMC1 pins CMD, CLK, DAT0..DAT3 (20 mA, plus card == max 220 mA) */
static struct regulator_init_data omap3evm_vmmc1 = {
.constraints = {
.min_uV = 1850000,
.max_uV = 3150000,
.valid_modes_mask = REGULATOR_MODE_NORMAL
| REGULATOR_MODE_STANDBY,
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = ARRAY_SIZE(omap3evm_vmmc1_supply),
.consumer_supplies = omap3evm_vmmc1_supply,
};
/* VSIM for MMC1 pins DAT4..DAT7 (2 mA, plus card == max 50 mA) */
static struct regulator_init_data omap3evm_vsim = {
.constraints = {
.min_uV = 1800000,
.max_uV = 3000000,
.valid_modes_mask = REGULATOR_MODE_NORMAL
| REGULATOR_MODE_STANDBY,
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE
| REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = ARRAY_SIZE(omap3evm_vsim_supply),
.consumer_supplies = omap3evm_vsim_supply,
};
static struct omap2_hsmmc_info mmc[] = {
{
.mmc = 1,
.caps = MMC_CAP_4_BIT_DATA,
.gpio_cd = -EINVAL,
.gpio_wp = 63,
.deferred = true,
},
#ifdef CONFIG_WILINK_PLATFORM_DATA
{
.name = "wl1271",
.mmc = 2,
.caps = MMC_CAP_4_BIT_DATA | MMC_CAP_POWER_OFF_CARD,
.gpio_wp = -EINVAL,
.gpio_cd = -EINVAL,
.nonremovable = true,
},
#endif
{} /* Terminator */
};
static struct gpio_led gpio_leds[] = {
{
.name = "omap3evm::ledb",
/* normally not visible (board underside) */
.default_trigger = "default-on",
.gpio = -EINVAL, /* gets replaced */
.active_low = true,
},
};
static struct gpio_led_platform_data gpio_led_info = {
.leds = gpio_leds,
.num_leds = ARRAY_SIZE(gpio_leds),
};
static struct platform_device leds_gpio = {
.name = "leds-gpio",
.id = -1,
.dev = {
.platform_data = &gpio_led_info,
},
};
static int omap3evm_twl_gpio_setup(struct device *dev,
unsigned gpio, unsigned ngpio)
{
int r, lcd_bl_en;
/* gpio + 0 is "mmc0_cd" (input/IRQ) */
mmc[0].gpio_cd = gpio + 0;
omap_hsmmc_late_init(mmc);
/*
* Most GPIOs are for USB OTG. Some are mostly sent to
* the P2 connector; notably LEDA for the LCD backlight.
*/
/* TWL4030_GPIO_MAX + 0 == ledA, LCD Backlight control */
lcd_bl_en = get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2 ?
GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
r = gpio_request_one(gpio + TWL4030_GPIO_MAX, lcd_bl_en, "EN_LCD_BKL");
if (r)
printk(KERN_ERR "failed to get/set lcd_bkl gpio\n");
/* gpio + 7 == DVI Enable */
gpio_request_one(gpio + 7, GPIOF_OUT_INIT_LOW, "EN_DVI");
/* TWL4030_GPIO_MAX + 1 == ledB (out, active low LED) */
gpio_leds[0].gpio = gpio + TWL4030_GPIO_MAX + 1;
platform_device_register(&leds_gpio);
/* Enable VBUS switch by setting TWL4030.GPIO2DIR as output
 * for starting the USB transceiver
 */
#ifdef CONFIG_TWL4030_CORE
if (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2) {
u8 val;
twl_i2c_read_u8(TWL4030_MODULE_GPIO, &val, REG_GPIODATADIR1);
val |= 0x04; /* TWL4030.GPIO2DIR BIT at GPIODATADIR1(0x9B) */
twl_i2c_write_u8(TWL4030_MODULE_GPIO, val, REG_GPIODATADIR1);
}
#endif
return 0;
}
static struct twl4030_gpio_platform_data omap3evm_gpio_data = {
.use_leds = true,
.setup = omap3evm_twl_gpio_setup,
};
static uint32_t board_keymap[] = {
KEY(0, 0, KEY_LEFT),
KEY(0, 1, KEY_DOWN),
KEY(0, 2, KEY_ENTER),
KEY(0, 3, KEY_M),
KEY(1, 0, KEY_RIGHT),
KEY(1, 1, KEY_UP),
KEY(1, 2, KEY_I),
KEY(1, 3, KEY_N),
KEY(2, 0, KEY_A),
KEY(2, 1, KEY_E),
KEY(2, 2, KEY_J),
KEY(2, 3, KEY_O),
KEY(3, 0, KEY_B),
KEY(3, 1, KEY_F),
KEY(3, 2, KEY_K),
KEY(3, 3, KEY_P)
};
static struct matrix_keymap_data board_map_data = {
.keymap = board_keymap,
.keymap_size = ARRAY_SIZE(board_keymap),
};
static struct twl4030_keypad_data omap3evm_kp_data = {
.keymap_data = &board_map_data,
.rows = 4,
.cols = 4,
.rep = 1,
};
/* ads7846 on SPI */
static struct regulator_consumer_supply omap3evm_vio_supply[] = {
REGULATOR_SUPPLY("vcc", "spi1.0"),
};
/* VIO for ads7846 */
static struct regulator_init_data omap3evm_vio = {
.constraints = {
.min_uV = 1800000,
.max_uV = 1800000,
.apply_uV = true,
.valid_modes_mask = REGULATOR_MODE_NORMAL
| REGULATOR_MODE_STANDBY,
.valid_ops_mask = REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = ARRAY_SIZE(omap3evm_vio_supply),
.consumer_supplies = omap3evm_vio_supply,
};
#ifdef CONFIG_WILINK_PLATFORM_DATA
#define OMAP3EVM_WLAN_PMENA_GPIO (150)
#define OMAP3EVM_WLAN_IRQ_GPIO (149)
static struct regulator_consumer_supply omap3evm_vmmc2_supply[] = {
REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"),
};
/* VMMC2 for driving the WL12xx module */
static struct regulator_init_data omap3evm_vmmc2 = {
.constraints = {
.valid_ops_mask = REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = ARRAY_SIZE(omap3evm_vmmc2_supply),
.consumer_supplies = omap3evm_vmmc2_supply,
};
static struct fixed_voltage_config omap3evm_vwlan = {
.supply_name = "vwl1271",
.microvolts = 1800000, /* 1.80V */
.gpio = OMAP3EVM_WLAN_PMENA_GPIO,
.startup_delay = 70000, /* 70ms */
.enable_high = 1,
.enabled_at_boot = 0,
.init_data = &omap3evm_vmmc2,
};
static struct platform_device omap3evm_wlan_regulator = {
.name = "reg-fixed-voltage",
.id = 1,
.dev = {
.platform_data = &omap3evm_vwlan,
},
};
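/*
 * Power to the WL12xx is modelled as a fixed 1.8 V regulator whose
 * enable line is the PMENA GPIO: registering this "reg-fixed-voltage"
 * device lets the MMC/regulator core power-cycle the module through
 * the normal VMMC2 supply instead of open-coding GPIO toggling.
 */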
struct wl12xx_platform_data omap3evm_wlan_data __initdata = {
.board_ref_clock = WL12XX_REFCLOCK_38, /* 38.4 MHz */
};
#endif
/* VAUX2 for USB */
static struct regulator_consumer_supply omap3evm_vaux2_supplies[] = {
REGULATOR_SUPPLY("VDD_CSIPHY1", "omap3isp"), /* OMAP ISP */
REGULATOR_SUPPLY("VDD_CSIPHY2", "omap3isp"), /* OMAP ISP */
REGULATOR_SUPPLY("vcc", "nop_usb_xceiv.2"), /* hsusb port 2 */
REGULATOR_SUPPLY("vaux2", NULL),
};
static struct regulator_init_data omap3evm_vaux2 = {
.constraints = {
.min_uV = 2800000,
.max_uV = 2800000,
.apply_uV = true,
.valid_modes_mask = REGULATOR_MODE_NORMAL
| REGULATOR_MODE_STANDBY,
.valid_ops_mask = REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
},
.num_consumer_supplies = ARRAY_SIZE(omap3evm_vaux2_supplies),
.consumer_supplies = omap3evm_vaux2_supplies,
};
static struct twl4030_platform_data omap3evm_twldata = {
/* platform_data for children goes here */
.keypad = &omap3evm_kp_data,
.gpio = &omap3evm_gpio_data,
.vio = &omap3evm_vio,
.vmmc1 = &omap3evm_vmmc1,
.vsim = &omap3evm_vsim,
};
static int __init omap3_evm_i2c_init(void)
{
omap3_pmic_get_config(&omap3evm_twldata,
TWL_COMMON_PDATA_USB | TWL_COMMON_PDATA_MADC |
TWL_COMMON_PDATA_AUDIO,
TWL_COMMON_REGULATOR_VDAC | TWL_COMMON_REGULATOR_VPLL2);
omap3evm_twldata.vdac->constraints.apply_uV = true;
omap3evm_twldata.vpll2->constraints.apply_uV = true;
omap3_pmic_init("twl4030", &omap3evm_twldata);
omap_register_i2c_bus(2, 400, NULL, 0);
omap_register_i2c_bus(3, 400, NULL, 0);
return 0;
}
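/*
 * omap3_pmic_get_config() fills in the common USB/MADC/audio child
 * data and the VDAC/VPLL2 regulator templates shared by OMAP3 boards;
 * only the board-specific entries (keypad, gpio, vio, vmmc1, vsim,
 * and optionally vaux2) are supplied in omap3evm_twldata itself.
 */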
static struct usbhs_phy_data phy_data[] __initdata = {
{
.port = 2,
.reset_gpio = -1, /* set at runtime */
.vcc_gpio = -EINVAL,
},
};
static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
.port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,
};
#ifdef CONFIG_OMAP_MUX
static struct omap_board_mux omap35x_board_mux[] __initdata = {
OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP |
OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW |
OMAP_PIN_OFF_WAKEUPENABLE),
OMAP3_MUX(MCSPI1_CS1, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP |
OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW |
OMAP_PIN_OFF_WAKEUPENABLE),
OMAP3_MUX(SYS_BOOT5, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP |
OMAP_PIN_OFF_NONE),
OMAP3_MUX(GPMC_WAIT2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP |
OMAP_PIN_OFF_NONE),
#ifdef CONFIG_WILINK_PLATFORM_DATA
/* WLAN IRQ - GPIO 149 */
OMAP3_MUX(UART1_RTS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
/* WLAN POWER ENABLE - GPIO 150 */
OMAP3_MUX(UART1_CTS, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
/* MMC2 SDIO pin muxes for WL12xx */
OMAP3_MUX(SDMMC2_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
OMAP3_MUX(SDMMC2_CMD, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
OMAP3_MUX(SDMMC2_DAT0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
OMAP3_MUX(SDMMC2_DAT1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
OMAP3_MUX(SDMMC2_DAT2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
OMAP3_MUX(SDMMC2_DAT3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
#endif
{ .reg_offset = OMAP_MUX_TERMINATOR },
};
static struct omap_board_mux omap36x_board_mux[] __initdata = {
OMAP3_MUX(SYS_NIRQ, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP |
OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW |
OMAP_PIN_OFF_WAKEUPENABLE),
OMAP3_MUX(MCSPI1_CS1, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP |
OMAP_PIN_OFF_INPUT_PULLUP | OMAP_PIN_OFF_OUTPUT_LOW |
OMAP_PIN_OFF_WAKEUPENABLE),
/* AM/DM37x EVM: DSS data bus muxed with sys_boot */
OMAP3_MUX(DSS_DATA18, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
OMAP3_MUX(DSS_DATA19, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
OMAP3_MUX(DSS_DATA20, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
OMAP3_MUX(DSS_DATA21, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
OMAP3_MUX(DSS_DATA22, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
OMAP3_MUX(DSS_DATA23, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
OMAP3_MUX(SYS_BOOT0, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
OMAP3_MUX(SYS_BOOT1, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
OMAP3_MUX(SYS_BOOT3, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
OMAP3_MUX(SYS_BOOT4, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
OMAP3_MUX(SYS_BOOT5, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
OMAP3_MUX(SYS_BOOT6, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),
#ifdef CONFIG_WILINK_PLATFORM_DATA
/* WLAN IRQ - GPIO 149 */
OMAP3_MUX(UART1_RTS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
/* WLAN POWER ENABLE - GPIO 150 */
OMAP3_MUX(UART1_CTS, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
/* MMC2 SDIO pin muxes for WL12xx */
OMAP3_MUX(SDMMC2_CLK, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
OMAP3_MUX(SDMMC2_CMD, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
OMAP3_MUX(SDMMC2_DAT0, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
OMAP3_MUX(SDMMC2_DAT1, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
OMAP3_MUX(SDMMC2_DAT2, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
OMAP3_MUX(SDMMC2_DAT3, OMAP_MUX_MODE0 | OMAP_PIN_INPUT_PULLUP),
#endif
{ .reg_offset = OMAP_MUX_TERMINATOR },
};
#else
#define omap35x_board_mux NULL
#define omap36x_board_mux NULL
#endif
static struct omap_musb_board_data musb_board_data = {
.interface_type = MUSB_INTERFACE_ULPI,
.mode = MUSB_OTG,
.power = 100,
};
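/*
 * .power is the VBUS current budget in mA; the OMAP MUSB glue
 * (usb_musb_init()) converts it into the 2 mA units the MUSB core
 * expects. omap3_evm_init() raises it to 500 mA on Gen-2 boards that
 * can program EXTVBUS.
 */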
static struct gpio omap3_evm_ehci_gpios[] __initdata = {
{ OMAP3_EVM_EHCI_VBUS, GPIOF_OUT_INIT_HIGH, "enable EHCI VBUS" },
{ OMAP3_EVM_EHCI_SELECT, GPIOF_OUT_INIT_LOW, "select EHCI port" },
};
static void __init omap3_evm_wl12xx_init(void)
{
#ifdef CONFIG_WILINK_PLATFORM_DATA
int ret;
/* WL12xx WLAN Init */
omap3evm_wlan_data.irq = gpio_to_irq(OMAP3EVM_WLAN_IRQ_GPIO);
ret = wl12xx_set_platform_data(&omap3evm_wlan_data);
if (ret)
pr_err("error setting wl12xx data: %d\n", ret);
ret = platform_device_register(&omap3evm_wlan_regulator);
if (ret)
pr_err("error registering wl12xx device: %d\n", ret);
#endif
}
static struct regulator_consumer_supply dummy_supplies[] = {
REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
REGULATOR_SUPPLY("vdd33a", "smsc911x.0"),
};
static struct mtd_partition omap3evm_nand_partitions[] = {
/* All the partition sizes are listed in terms of NAND block size */
{
.name = "X-Loader",
.offset = 0,
.size = 4*(SZ_128K),
.mask_flags = MTD_WRITEABLE
},
{
.name = "U-Boot",
.offset = MTDPART_OFS_APPEND,
.size = 14*(SZ_128K),
.mask_flags = MTD_WRITEABLE
},
{
.name = "U-Boot Env",
.offset = MTDPART_OFS_APPEND,
.size = 2*(SZ_128K)
},
{
.name = "Kernel",
.offset = MTDPART_OFS_APPEND,
.size = 40*(SZ_128K)
},
{
.name = "File system",
.size = MTDPART_SIZ_FULL,
.offset = MTDPART_OFS_APPEND,
},
};
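/*
 * With a 128 KiB NAND erase block this yields: X-Loader 512 KiB at
 * offset 0, U-Boot 1.75 MiB, environment 256 KiB, kernel 5 MiB, and
 * the remainder of the device for the file system. The first two
 * partitions are marked read-only via MTD_WRITEABLE in mask_flags.
 */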
static void __init omap3_evm_init(void)
{
struct omap_board_mux *obm;
omap3_evm_get_revision();
regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
obm = (cpu_is_omap3630()) ? omap36x_board_mux : omap35x_board_mux;
omap3_mux_init(obm, OMAP_PACKAGE_CBB);
omap_mux_init_gpio(63, OMAP_PIN_INPUT);
omap_hsmmc_init(mmc);
if (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2)
omap3evm_twldata.vaux2 = &omap3evm_vaux2;
omap3_evm_i2c_init();
omap_display_init(&omap3_evm_dss_data);
omap_serial_init();
omap_sdrc_init(mt46h32m32lf6_sdrc_params, NULL);
/* OMAP3EVM uses ISP1504 phy and so register nop transceiver */
usb_nop_xceiv_register();
if (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2) {
/* enable EHCI VBUS using GPIO22 */
omap_mux_init_gpio(OMAP3_EVM_EHCI_VBUS, OMAP_PIN_INPUT_PULLUP);
/* Select EHCI port on main board */
omap_mux_init_gpio(OMAP3_EVM_EHCI_SELECT,
OMAP_PIN_INPUT_PULLUP);
gpio_request_array(omap3_evm_ehci_gpios,
ARRAY_SIZE(omap3_evm_ehci_gpios));
/* setup EHCI phy reset config */
omap_mux_init_gpio(21, OMAP_PIN_INPUT_PULLUP);
phy_data[0].reset_gpio = 21;
/* EVM REV >= E can supply 500mA with EXTVBUS programming */
musb_board_data.power = 500;
musb_board_data.extvbus = 1;
} else {
/* setup EHCI phy reset on MDC */
omap_mux_init_gpio(135, OMAP_PIN_OUTPUT);
phy_data[0].reset_gpio = 135;
}
usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");
usb_musb_init(&musb_board_data);
usbhs_init_phys(phy_data, ARRAY_SIZE(phy_data));
usbhs_init(&usbhs_bdata);
board_nand_init(omap3evm_nand_partitions,
ARRAY_SIZE(omap3evm_nand_partitions), NAND_CS,
NAND_BUSWIDTH_16, NULL);
omap_ads7846_init(1, OMAP3_EVM_TS_GPIO, 310, NULL);
omap3evm_init_smsc911x();
omap3_evm_display_init();
omap3_evm_wl12xx_init();
omap_twl4030_audio_init("omap3evm", NULL);
}
MACHINE_START(OMAP3EVM, "OMAP3 EVM")
/* Maintainer: Syed Mohammed Khasim - Texas Instruments */
.atag_offset = 0x100,
.reserve = omap_reserve,
.map_io = omap3_map_io,
.init_early = omap35xx_init_early,
.init_irq = omap3_init_irq,
.handle_irq = omap3_intc_handle_irq,
.init_machine = omap3_evm_init,
.init_late = omap35xx_init_late,
.init_time = omap3_sync32k_timer_init,
.restart = omap3xxx_restart,
MACHINE_END
|
gpl-2.0
|
jrior001/android_kernel_asus_moorefield
|
arch/arm/mach-ux500/usb.c
|
2045
|
4023
|
/*
* Copyright (C) ST-Ericsson SA 2011
*
* Author: Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com>
* License terms: GNU General Public License (GPL) version 2
*/
#include <linux/platform_device.h>
#include <linux/usb/musb.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/usb-musb-ux500.h>
#include <linux/platform_data/dma-ste-dma40.h>
#include "db8500-regs.h"
#define MUSB_DMA40_RX_CH { \
.mode = STEDMA40_MODE_LOGICAL, \
.dir = STEDMA40_PERIPH_TO_MEM, \
.dst_dev_type = STEDMA40_DEV_DST_MEMORY, \
.src_info.data_width = STEDMA40_WORD_WIDTH, \
.dst_info.data_width = STEDMA40_WORD_WIDTH, \
.src_info.psize = STEDMA40_PSIZE_LOG_16, \
.dst_info.psize = STEDMA40_PSIZE_LOG_16, \
}
#define MUSB_DMA40_TX_CH { \
.mode = STEDMA40_MODE_LOGICAL, \
.dir = STEDMA40_MEM_TO_PERIPH, \
.src_dev_type = STEDMA40_DEV_SRC_MEMORY, \
.src_info.data_width = STEDMA40_WORD_WIDTH, \
.dst_info.data_width = STEDMA40_WORD_WIDTH, \
.src_info.psize = STEDMA40_PSIZE_LOG_16, \
.dst_info.psize = STEDMA40_PSIZE_LOG_16, \
}
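/*
 * Each MUSB endpoint gets a logical DMA40 channel: transfers use
 * 32-bit (word) accesses with a burst (psize) of 16 elements on both
 * sides. Only the direction and the memory-side device type differ
 * between the RX (periph-to-mem) and TX (mem-to-periph) templates;
 * the peripheral-side device types are filled in later per SoC.
 */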
static struct stedma40_chan_cfg musb_dma_rx_ch[UX500_MUSB_DMA_NUM_RX_CHANNELS]
= {
MUSB_DMA40_RX_CH,
MUSB_DMA40_RX_CH,
MUSB_DMA40_RX_CH,
MUSB_DMA40_RX_CH,
MUSB_DMA40_RX_CH,
MUSB_DMA40_RX_CH,
MUSB_DMA40_RX_CH,
MUSB_DMA40_RX_CH
};
static struct stedma40_chan_cfg musb_dma_tx_ch[UX500_MUSB_DMA_NUM_TX_CHANNELS]
= {
MUSB_DMA40_TX_CH,
MUSB_DMA40_TX_CH,
MUSB_DMA40_TX_CH,
MUSB_DMA40_TX_CH,
MUSB_DMA40_TX_CH,
MUSB_DMA40_TX_CH,
MUSB_DMA40_TX_CH,
MUSB_DMA40_TX_CH,
};
static void *ux500_dma_rx_param_array[UX500_MUSB_DMA_NUM_RX_CHANNELS] = {
&musb_dma_rx_ch[0],
&musb_dma_rx_ch[1],
&musb_dma_rx_ch[2],
&musb_dma_rx_ch[3],
&musb_dma_rx_ch[4],
&musb_dma_rx_ch[5],
&musb_dma_rx_ch[6],
&musb_dma_rx_ch[7]
};
static void *ux500_dma_tx_param_array[UX500_MUSB_DMA_NUM_TX_CHANNELS] = {
&musb_dma_tx_ch[0],
&musb_dma_tx_ch[1],
&musb_dma_tx_ch[2],
&musb_dma_tx_ch[3],
&musb_dma_tx_ch[4],
&musb_dma_tx_ch[5],
&musb_dma_tx_ch[6],
&musb_dma_tx_ch[7]
};
static struct ux500_musb_board_data musb_board_data = {
.dma_rx_param_array = ux500_dma_rx_param_array,
.dma_tx_param_array = ux500_dma_tx_param_array,
.num_rx_channels = UX500_MUSB_DMA_NUM_RX_CHANNELS,
.num_tx_channels = UX500_MUSB_DMA_NUM_TX_CHANNELS,
.dma_filter = stedma40_filter,
};
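/*
 * The MUSB ux500 DMA glue hands each entry of these param arrays to
 * the DMA engine core together with stedma40_filter(), so every
 * endpoint is bound to a DMA40 channel configured from its
 * stedma40_chan_cfg above.
 */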
static u64 ux500_musb_dmamask = DMA_BIT_MASK(32);
static struct musb_hdrc_config musb_hdrc_config = {
.multipoint = true,
.dyn_fifo = true,
.num_eps = 16,
.ram_bits = 16,
};
static struct musb_hdrc_platform_data musb_platform_data = {
.mode = MUSB_OTG,
.config = &musb_hdrc_config,
.board_data = &musb_board_data,
};
static struct resource usb_resources[] = {
[0] = {
.name = "usb-mem",
.flags = IORESOURCE_MEM,
},
[1] = {
.name = "mc", /* hard-coded in musb */
.flags = IORESOURCE_IRQ,
},
};
struct platform_device ux500_musb_device = {
.name = "musb-ux500",
.id = 0,
.dev = {
.platform_data = &musb_platform_data,
.dma_mask = &ux500_musb_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
.num_resources = ARRAY_SIZE(usb_resources),
.resource = usb_resources,
};
static inline void ux500_usb_dma_update_rx_ch_config(int *src_dev_type)
{
u32 idx;
for (idx = 0; idx < UX500_MUSB_DMA_NUM_RX_CHANNELS; idx++)
musb_dma_rx_ch[idx].src_dev_type = src_dev_type[idx];
}
static inline void ux500_usb_dma_update_tx_ch_config(int *dst_dev_type)
{
u32 idx;
for (idx = 0; idx < UX500_MUSB_DMA_NUM_TX_CHANNELS; idx++)
musb_dma_tx_ch[idx].dst_dev_type = dst_dev_type[idx];
}
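/*
 * The RX/TX device types identify the USB endpoints on the DMA40
 * event lines and differ between SoC variants, so the platform code
 * passes them in at runtime instead of hard-coding them here.
 */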
void ux500_add_usb(struct device *parent, resource_size_t base, int irq,
int *dma_rx_cfg, int *dma_tx_cfg)
{
ux500_musb_device.resource[0].start = base;
ux500_musb_device.resource[0].end = base + SZ_64K - 1;
ux500_musb_device.resource[1].start = irq;
ux500_musb_device.resource[1].end = irq;
ux500_usb_dma_update_rx_ch_config(dma_rx_cfg);
ux500_usb_dma_update_tx_ch_config(dma_tx_cfg);
ux500_musb_device.dev.parent = parent;
platform_device_register(&ux500_musb_device);
}
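/*
 * Usage sketch (hedged; names taken from the DB8500 support code): an
 * SoC init path would register the controller roughly as
 *
 *	ux500_add_usb(parent, U8500_USBOTG_BASE, IRQ_DB8500_USBOTG,
 *		      usb_db8500_rx_dma_cfg, usb_db8500_tx_dma_cfg);
 *
 * where the two arrays supply one DMA40 device type per RX/TX channel.
 */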
|
gpl-2.0
|