Dataset schema:
code_text: stringlengths 604 to 999k
repo_name: stringlengths 4 to 100
file_path: stringlengths 4 to 873
language: stringclasses, 23 values
license: stringclasses, 15 values
size: int32, 1.02k to 999k
# # subunit.sh: shell functions to report test status via the subunit protocol. # Copyright (C) 2006 Robert Collins <robertc@robertcollins.net> # Copyright (C) 2008 Jelmer Vernooij <jelmer@samba.org> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # subunit_start_test () { # emit the current protocol start-marker for test $1 echo "test: $1" } subunit_pass_test () { # emit the current protocol test passed marker for test $1 echo "success: $1" } subunit_fail_test () { # emit the current protocol fail-marker for test $1, and emit stdin as # the error text. # we use stdin because the failure message can be arbitrarily long, and this # makes it convenient to write in scripts (using <<END syntax. echo "failure: $1 [" cat - echo "]" } subunit_error_test () { # emit the current protocol error-marker for test $1, and emit stdin as # the error text. # we use stdin because the failure message can be arbitrarily long, and this # makes it convenient to write in scripts (using <<END syntax. echo "error: $1 [" cat - echo "]" } testit () { name="$1" shift cmdline="$*" subunit_start_test "$name" output=`$cmdline 2>&1` status=$? if [ x$status = x0 ]; then subunit_pass_test "$name" else echo "$output" | subunit_fail_test "$name" fi return $status } testit_expect_failure () { name="$1" shift cmdline="$*" subunit_start_test "$name" output=`$cmdline 2>&1` status=$? if [ x$status = x0 ]; then echo "$output" | subunit_fail_test "$name" else subunit_pass_test "$name" fi return $status }
repo_name: artemh/asuswrt-merlin
file_path: release/src/router/samba-3.5.8/testprogs/blackbox/subunit.sh
language: Shell
license: gpl-2.0
size: 2,230
/*
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * Woodside Networks, Inc proprietary. All rights reserved.
 * $File: //depot/software/projects/feature_branches/gen5_phase1/os/linux/classic/ap/apps/ssm/auth8021x/ani8021xSuppRsnFsm.c $
 *
 * Contains definitions for the RSN EAPOL-Key FSM on the
 * supplicant side. This is based on 802.11i.
 *
 * Author:      Mayank D. Upadhyay
 * Date:        19-December-2002
 * History:-
 * Date         Modified by     Modification Information
 * ------------------------------------------------------
 *
 */
#include "bapRsnSsmServices.h"
#include "bapRsnSsmEapol.h"
#include "bapRsnErrors.h"
#include "bapRsn8021xSuppRsnFsm.h"
#include "vos_utils.h"
#include "bapRsnTxRx.h"
#include "btampFsm.h"

// The different states that this FSM transitions through
#define INITIALIZE       0
#define AUTHENTICATION   1
#define GOT_PMK          2
#define STA_KEY_START    3
#define STA_KEY_SET      4
#define KEY_UPDATE       5
#define REKEY_MSG        6
#define GROUP_KEY_SET    7
#define NUM_STATES (GROUP_KEY_SET + 1)

static tSuppRsnFsmConsts suppConsts = { 2000, 3 };  //timeout, retry limit

int gReadToSetKey;

/**************************************
 * Static functions in this module
 **************************************/
static int zeroOutPtk(tSuppRsnFsm *fsm);
static int checkMic(tSuppRsnFsm *fsm, tAniEapolKeyAvailEventData *data,
                    v_BOOL_t pwKeyExchange);
static int checkInfoElement(tSuppRsnFsm *fsm, tAniEapolKeyAvailEventData *data);
static int checkPeerReplayCounter(tSuppRsnFsm *fsm,
                                  tAniEapolKeyAvailEventData *data,
                                  v_BOOL_t *retransmit,
                                  v_BOOL_t actualMicFlag,
                                  v_BOOL_t reTxMicFlag);
static int derivePtk(tSuppRsnFsm *fsm, tAniEapolKeyAvailEventData *data);
static int checkTransition(tSuppRsnFsm *fsm, void *arg);
static int gotoStateInit(tSuppRsnFsm *fsm);
static int suppRsnRxFrameHandler( v_PVOID_t pvosGCtx, vos_pkt_t *pPacket );
static int suppRsnTxCompleteHandler( v_PVOID_t pvosGCtx, vos_pkt_t *pPacket,
                                     VOS_STATUS retStatus );

/*************************
 * Internal Functions
 *************************/
int suppRsnAuthStartEventHandler(tSuppRsnFsm *fsm);

/*************************
 * The exported functions
 *************************/

/**
 * suppRsnFsmInit
 *
 * FUNCTION:
 * Initializes the constants and the callbacks needed by this FSM
 * module.
 *
 * @param cb callbacks to the various procedures needed by this FSM
 *
 * @return ANI_OK if the operation succeeds
 */
int
suppRsnFsmInit(void)
{
    // TODO: Read the constants in from config
    // consts = constsIn;
    suppConsts.timeoutPeriod = 2000;
    suppConsts.maxTries = 3;

    return ANI_OK;
}

/**
 * suppRsnFsmCreate
 *
 * FUNCTION
 * Allocates and initializes the state of an RSN key FSM instance for
 * the given BP context.
 *
 * @param ctx the BP context whose instance is being created
 * @param pskBased pass in eANI_BOOLEAN_TRUE if this BP is to be
 * authenticated based on a pre-shared key as opposed to EAP.
 *
 * @return ANI_OK if the operation succeeds
 */
int
suppRsnFsmCreate(tBtampContext *ctx)
{
    int retVal = ANI_OK;
    tSuppRsnFsm *fsm = &ctx->uFsm.suppFsm;

    // First, clear everything out
    vos_mem_zero( fsm, sizeof(tSuppRsnFsm));

    if( !VOS_IS_STATUS_SUCCESS( bapRsnRegisterTxRxCallbacks( suppRsnTxCompleteHandler,
                                                             suppRsnRxFrameHandler ) ) )
    {
        return ANI_ERROR;
    }

    if( !VOS_IS_STATUS_SUCCESS( bapRsnRegisterRxCallback( ctx->pvosGCtx ) ) )
    {
        return ANI_ERROR;
    }

    // Allocate the supplicant context
    fsm->suppCtx = (tSuppContext *)vos_mem_malloc( sizeof(tSuppContext) );
    if (fsm->suppCtx == NULL)
    {
        retVal = ANI_E_MALLOC_FAILED;
        VOS_ASSERT( 0 );
        goto error;
    }
    // Clear out the supplicant context
    vos_mem_zero( fsm->suppCtx, sizeof(tSuppContext));

    fsm->ctx = ctx;
    //Only support CCMP
    fsm->suppCtx->pwCipherType = eCSR_ENCRYPT_TYPE_AES;

    retVal = aniAsfPacketAllocateExplicit(&fsm->lastEapol,
                                          RSN_MAX_PACKET_SIZE,
                                          EAPOL_TX_HEADER_SIZE);
    if (retVal != ANI_OK)
    {
        VOS_ASSERT( 0 );
        goto error;
    }

    aniAsfPacketAllocate(&fsm->suppCtx->pmk);
    if (fsm->suppCtx->pmk == NULL)
    {
        retVal = ANI_E_MALLOC_FAILED;
        VOS_ASSERT( 0 );
        goto error;
    }

    fsm->suppCtx->ieAp = NULL;

    fsm->cryptHandle = 0;
    if( !VOS_IS_STATUS_SUCCESS( vos_crypto_init( &fsm->cryptHandle ) ) )
    {
        retVal = ANI_E_FAILED;
        VOS_ASSERT( 0 );
    }

    fsm->currentState = INITIALIZE;
    gotoStateInit(fsm);

    suppRsnFsmProcessEvent( fsm, RSN_FSM_AUTH_START, NULL );

    return ANI_OK;

error:
    suppRsnFsmFree( ctx );

    return retVal;
}

/**
 * suppRsnFsmFree
 *
 * FUNCTION
 * Frees a previously allocated RSN Key FSM in a BP context. If the
 * RSN Key FSM is not yet allocated, then this is an error.
 *
 * @param ctx the BP context whose FSM instance is to be freed
 *
 * @return ANI_OK if the operation succeeds
 */
int
suppRsnFsmFree(tBtampContext *ctx)
{
    tSuppRsnFsm *fsm;

    fsm = &ctx->uFsm.suppFsm;

    if( fsm->cryptHandle )
    {
        vos_crypto_deinit( fsm->cryptHandle );
    }

    bapRsnClearTxRxCallbacks();

    if (fsm->lastEapol)
        aniAsfPacketFree(fsm->lastEapol);

    if( fsm->suppCtx )
    {
        if ( fsm->suppCtx->pmk )
        {
            aniAsfPacketFree(fsm->suppCtx->pmk);
        }
        vos_mem_free( fsm->suppCtx );
    }

    // Finally, clear everything out
    vos_mem_zero( fsm, sizeof(tSuppRsnFsm));

    return ANI_OK;
}

/**
 * suppRsnFsmProcessEvent
 *
 * FUNCTION
 * Passes an event to the RSN key FSM instance for immediate processing.
 *
 * @param fsm the RSN Key FSM instance
 * @param eventId the AAG event to process
 * @param arg an optional argument for this event
 *
 * @return ANI_OK if the operation succeeds
 */
int
suppRsnFsmProcessEvent(tSuppRsnFsm *fsm, tRsnFsmEvent eventId, void *arg)
{
    switch (eventId)
    {
    case RSN_FSM_TIMER_EXPIRED:
        // Proceed straight to checkTransition
        break;
    case RSN_FSM_AUTH_START:
        fsm->authReq = eANI_BOOLEAN_TRUE;
        suppRsnAuthStartEventHandler(fsm);
        break;
    case RSN_FSM_EAPOL_FRAME_AVAILABLE:
        fsm->eapolAvail = eANI_BOOLEAN_TRUE;
        break;
    case RSN_FSM_INTEG_FAILED:
        fsm->integFailed = eANI_BOOLEAN_TRUE;
        break;
    default:
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "Supp unknown event for SuppFsm: %d\n", eventId);
        VOS_ASSERT( 0 );
        return ANI_E_ILLEGAL_ARG;
        break;
    }

    checkTransition(fsm, arg);

    return ANI_OK;
}

int
suppRsnAuthStartEventHandler(tSuppRsnFsm *fsm)
{
    // Copy required info
    vos_mem_copy( &fsm->suppCtx->authMac, fsm->ctx->peer_mac_addr, 6);
    vos_mem_copy( &fsm->suppCtx->suppMac, fsm->ctx->self_mac_addr, 6);
    aniAsfPacketAppendBuffer( fsm->suppCtx->pmk,
                              fsm->ctx->key_material, fsm->ctx->key_length);

    return ANI_OK;
}

/***********************
 * The static functions
 ***********************/
static int
gotoStateInit(tSuppRsnFsm *fsm)
{
    fsm->currentState = INITIALIZE;

    fsm->authReq = eANI_BOOLEAN_FALSE;
    fsm->eapolAvail = eANI_BOOLEAN_FALSE;
    fsm->integFailed = eANI_BOOLEAN_FALSE;
    fsm->pmkAvail = eANI_BOOLEAN_FALSE;

    // Create two replay counters... one for our requests, and another
    // for the STA's requests. Initialize the first one randomly.
    aniSsmReplayCtrCreate(fsm->cryptHandle, &fsm->localReplayCtr,
                          ANI_EAPOL_KEY_RSN_RSC_SIZE, 0);
    aniSsmReplayCtrCreate(fsm->cryptHandle, &fsm->peerReplayCtr,
                          ANI_EAPOL_KEY_RSN_RSC_SIZE, 0);

    return ANI_OK;
}

static int
gotoStateAuthentication(tSuppRsnFsm *fsm)
{
    fsm->currentState = AUTHENTICATION;

    if( VOS_IS_STATUS_SUCCESS( vos_rand_get_bytes( fsm->cryptHandle,
                                   fsm->sNonce, ANI_EAPOL_KEY_RSN_NONCE_SIZE ) ) )
    {
        zeroOutPtk(fsm);
        // TODO: Zero out all GTK's
        fsm->authReq = eANI_BOOLEAN_FALSE;
        /////getPmk(fsm->suppCtx);
    }
    else
    {
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "Supp fail to random number\n" );
        return ANI_ERROR;
    }

    return ANI_OK;
}

static int
gotoStateGotPmk(tSuppRsnFsm *fsm)
{
    fsm->currentState = GOT_PMK;

    return ANI_OK;
}

static int
gotoStateStaKeyStart(tSuppRsnFsm *fsm, tAniEapolKeyAvailEventData *data,
                     v_BOOL_t retransmit)
{
    int retVal;
    tAniEapolRsnKeyDesc txDesc;
    tAniEapolRsnKeyDesc *rxDesc;
    static v_U8_t btampRSNIE[] = { 0x30, 0x14, 0x01, 0x00, 0x00, 0x0f,
                                   0xac, 0x04, 0x01, 0x00, 0x00, 0x0f,
                                   0xac, 0x04, 0x01, 0x00, 0x00, 0x0f,
                                   0xac, 0x02, 0x00, 0x00 };

    fsm->currentState = STA_KEY_START;

    // Create a new EAPOL frame if we don't have to retransmit
    // if (!retransmit)
    //{
    rxDesc = data->keyDesc;
    if( NULL == rxDesc)
    {
        return ANI_E_NULL_VALUE;
    }
    aniAsfPacketEmptyExplicit( fsm->lastEapol, EAPOL_TX_HEADER_SIZE );
    retVal = derivePtk(fsm, data);
    if( !ANI_IS_STATUS_SUCCESS( retVal ) )
    {
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "Supp derivePtk failed with code %d!\n", retVal);
        return retVal;
    }

    vos_mem_zero( &txDesc, sizeof(txDesc) );

    // The Key Information bits...
    if (fsm->suppCtx->pwCipherType == eCSR_ENCRYPT_TYPE_AES)
    {
        txDesc.info.keyDescVers = ANI_EAPOL_KEY_DESC_VERS_AES;
    }
    txDesc.info.unicastFlag = eANI_BOOLEAN_TRUE;
    txDesc.info.micFlag = eANI_BOOLEAN_TRUE;
    txDesc.keyLen = 0; //RSN_80211_KEY_LEN;

    // Send back the same replayCtr that the authenticator sent
    vos_mem_copy(txDesc.replayCounter, rxDesc->replayCounter,
                 sizeof(txDesc.replayCounter));

    vos_mem_copy(txDesc.keyNonce, fsm->sNonce, sizeof(txDesc.keyNonce));
    txDesc.keyDataLen = sizeof(btampRSNIE); //aniAsfPacketGetBytes(fsm->suppCtx->ieBp,
                                            //&txDesc.keyData);
    txDesc.keyData = btampRSNIE;

    retVal = aniEapolWriteKey(fsm->cryptHandle,
                              fsm->lastEapol,
                              fsm->suppCtx->authMac,
                              fsm->suppCtx->suppMac,
                              ANI_EAPOL_KEY_DESC_TYPE_RSN_NEW,
                              &txDesc,
                              fsm->suppCtx->ptk,
                              CSR_AES_KEY_LEN);
    if( !ANI_IS_STATUS_SUCCESS( retVal ) )
    {
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "Supp gotoStateStaKeyStart fail to write key %d\n", retVal);
        return retVal;
    }
    //}

    if( VOS_IS_STATUS_SUCCESS( bapRsnSendEapolFrame( fsm->ctx->pvosGCtx,
                                                     fsm->lastEapol ) ) )
    {
        retVal = ANI_OK;
    }
    else
    {
        retVal = ANI_ERROR;
    }

    return retVal;
}

static int
gotoStateStaKeySet(tSuppRsnFsm *fsm, tAniEapolKeyAvailEventData *data,
                   v_BOOL_t retransmit)
{
    int retVal = 0;
    tAniEapolRsnKeyDesc txDesc;
    tAniEapolRsnKeyDesc *rxDesc = NULL;

    fsm->currentState = STA_KEY_SET;

    if (data == NULL)
    {
        // We don't need to do anything
        return ANI_OK;
    }

    // Create a new EAPOL frame if we don't have to retransmit
    if (!retransmit)
    {
        // First check the IE that the AP sent
        retVal = checkInfoElement(fsm, data);
        if (retVal != ANI_OK)
        {
            //FIX_RSN aagSetSuppFailureAndCleanup(fsm->suppCtx);
            // FSM does not exist after this...
            return retVal;
        }

        // Create a new EAPOL frame
        rxDesc = data->keyDesc;
        if( NULL == rxDesc )
            return ANI_E_NULL_VALUE;

        aniAsfPacketEmptyExplicit(fsm->lastEapol, EAPOL_TX_HEADER_SIZE );

        vos_mem_zero( &txDesc, sizeof(txDesc) );

        // The Key Information bits...
        if (fsm->suppCtx->pwCipherType == eCSR_ENCRYPT_TYPE_AES)
        {
            txDesc.info.keyDescVers = ANI_EAPOL_KEY_DESC_VERS_AES;
        }

        txDesc.info.unicastFlag = eANI_BOOLEAN_TRUE;
        txDesc.info.micFlag = eANI_BOOLEAN_TRUE;
        txDesc.info.secureFlag = eANI_BOOLEAN_TRUE;
        txDesc.keyLen = 0; //RSN_80211_KEY_LEN;

        // Send back the same replayCtr that the authenticator sent
        vos_mem_copy(txDesc.replayCounter, rxDesc->replayCounter,
                     sizeof(txDesc.replayCounter));

        retVal = aniEapolWriteKey(fsm->cryptHandle,
                                  fsm->lastEapol,
                                  fsm->suppCtx->authMac,
                                  fsm->suppCtx->suppMac,
                                  ANI_EAPOL_KEY_DESC_TYPE_RSN_NEW,
                                  &txDesc,
                                  fsm->suppCtx->ptk,
                                  CSR_AES_KEY_LEN);
        if( !ANI_IS_STATUS_SUCCESS( retVal ) )
        {
            return retVal;
        }
    }

    gReadToSetKey = BAP_SET_RSN_KEY;
    if( !VOS_IS_STATUS_SUCCESS( bapRsnSendEapolFrame( fsm->ctx->pvosGCtx,
                                                      fsm->lastEapol ) ) )
    {
        /* making it global to access in bapTxRx file */
#if 0
        tCsrRoamSetKey setKeyInfo;

        vos_mem_zero( &setKeyInfo, sizeof( tCsrRoamSetKey ) );
        setKeyInfo.encType = eCSR_ENCRYPT_TYPE_AES;
        setKeyInfo.keyDirection = eSIR_TX_RX;
        vos_mem_copy( setKeyInfo.peerMac, fsm->suppCtx->authMac, sizeof( tAniMacAddr ) );
        setKeyInfo.paeRole = 0; //this is a supplicant
        setKeyInfo.keyId = 0;   //always
        setKeyInfo.keyLength = CSR_AES_KEY_LEN;
        vos_mem_copy( setKeyInfo.Key,
                      (v_U8_t *)fsm->suppCtx->ptk + (2 * CSR_AES_KEY_LEN ),
                      CSR_AES_KEY_LEN );
        //fsm->suppCtx->ptk contains the 3 16-byte keys. We need the last one.
        /*
         * We will move the Set key to the EAPOL Completion handler. We found
         * a race condition between sending the EAPOL frame and setting the key.
         */
        if( !VOS_IS_STATUS_SUCCESS( bapSetKey( fsm->ctx->pvosGCtx, &setKeyInfo ) ) )
        {
            VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                       " Supp: gotoStateStaKeySet fail to set key\n" );
            retVal = ANI_ERROR;
        }
#endif
        gReadToSetKey = BAP_RESET_RSN_KEY;
        retVal = ANI_ERROR;
    }

    return retVal;
}

static int
gotoStateGroupKeySet(tSuppRsnFsm *fsm, tAniEapolKeyAvailEventData *data)
{
    int retVal;
    tAniEapolRsnKeyDesc txDesc;
    tAniEapolRsnKeyDesc *rxDesc;
    int groupKeyLen;

    fsm->currentState = GROUP_KEY_SET;

    do
    {
        rxDesc = (tAniEapolRsnKeyDesc *) data->keyDesc;
        if( NULL == rxDesc)
        {
            retVal = ANI_E_NULL_VALUE;
            break;
        }

        if (rxDesc->keyDataLen == 0 || rxDesc->keyData == NULL)
        {
            VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                       "Supp: AP sent no group key in group EAPOL-Key message!\n" );
            retVal = ANI_E_ILLEGAL_ARG;
            break;
        }

        if ( rxDesc->info.keyDescVers == ANI_EAPOL_KEY_DESC_VERS_AES )
        {
            groupKeyLen = rxDesc->keyDataLen - ANI_SSM_AES_KEY_WRAP_BLOCK_SIZE;
            if( groupKeyLen <= 0 )
            {
                VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                           "Supp: AP sent GTK too short\n" );
                retVal = ANI_E_ILLEGAL_ARG;
                break;
            }
        }
        else
        {
            VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                       "Supp: AP sent unsupported keyDescVer %d!\n",
                       rxDesc->info.keyDescVers );
            retVal = ANI_E_ILLEGAL_ARG;
            break;
        }

        // Always create a new EAPOL frame
        aniAsfPacketEmptyExplicit( fsm->lastEapol, EAPOL_TX_HEADER_SIZE );

        vos_mem_zero( &txDesc, sizeof(txDesc) );

        // The Key Information bits...
        if (fsm->suppCtx->grpCipherType == eCSR_ENCRYPT_TYPE_AES)
        {
            txDesc.info.keyDescVers = ANI_EAPOL_KEY_DESC_VERS_AES;
        }
        txDesc.info.unicastFlag = eANI_BOOLEAN_FALSE;
        txDesc.info.keyId = rxDesc->info.keyId;
        txDesc.info.micFlag = eANI_BOOLEAN_TRUE;
        txDesc.info.secureFlag = eANI_BOOLEAN_TRUE;
        txDesc.keyLen = RSN_80211_KEY_LEN;

        // Send back the same replayCtr that the authenticator sent
        vos_mem_copy(txDesc.replayCounter, rxDesc->replayCounter,
                     sizeof(txDesc.replayCounter));

        retVal = aniEapolWriteKey(fsm->cryptHandle,
                                  fsm->lastEapol,
                                  fsm->suppCtx->authMac,
                                  fsm->suppCtx->suppMac,
                                  ANI_EAPOL_KEY_DESC_TYPE_RSN_NEW,
                                  &txDesc,
                                  fsm->suppCtx->ptk,
                                  CSR_AES_KEY_LEN);
        if( !ANI_IS_STATUS_SUCCESS( retVal ) )
            break;

        if( !VOS_IS_STATUS_SUCCESS( bapRsnSendEapolFrame( fsm->ctx->pvosGCtx,
                                                          fsm->lastEapol ) ) )
        {
            retVal = ANI_ERROR;
            VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                       "Supp could not send eapol. Disconnect\n" );
            break;
        }

        //FIX_RSN there is no need to set GTK
        retVal = setGtk(fsm->suppCtx, rxDesc->keyRecvSeqCounter);

        // This is never retransmitted
        aniAsfPacketEmptyExplicit( fsm->lastEapol, EAPOL_TX_HEADER_SIZE );

        checkTransition(fsm, NULL); // UCT rule
    } while( 0 );

    return retVal;
}

static int
gotoStateKeyUpdate(tSuppRsnFsm *fsm, tSirMicFailureInfo *micFailureInfo)
{
    //we don't update keys
    bapSuppDisconnect( fsm->ctx );

    return ANI_OK;
}

static int
gotoStateRekeyMsg(tSuppRsnFsm *fsm, tSirMicFailureInfo *micFailureInfo)
{
    //We don't support rekey, simply disconnect
    bapSuppDisconnect( fsm->ctx );

    return ANI_OK;
}

static int
zeroOutPtk(tSuppRsnFsm *fsm)
{
    return ANI_OK;
}

static int
derivePtk(tSuppRsnFsm *fsm, tAniEapolKeyAvailEventData *data)
{
    v_U32_t prfLen;
    tAniEapolRsnKeyDesc *rxDesc;

    switch (fsm->suppCtx->pwCipherType)
    {
    case eCSR_ENCRYPT_TYPE_AES:
        prfLen = AAG_RSN_PTK_PRF_LEN_CCMP;
        fsm->suppCtx->pwKeyLen = AAG_RSN_KEY_MATERIAL_LEN_CCMP;
        break;
    default:
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "Cannot generate PTK for BP for invalid algorithm %d\n",
                   fsm->suppCtx->pwCipherType);
        return ANI_E_ILLEGAL_ARG;
        break;
    };

    rxDesc = (tAniEapolRsnKeyDesc *) data->keyDesc;
    return aagPtkPrf(fsm->cryptHandle,
                     fsm->suppCtx->ptk,
                     prfLen,
                     fsm->suppCtx->pmk,
                     fsm->suppCtx->authMac,
                     fsm->suppCtx->suppMac,
                     rxDesc->keyNonce,
                     fsm->sNonce);
}

static int
checkMic(tSuppRsnFsm *fsm, tAniEapolKeyAvailEventData *data,
         v_BOOL_t pwKeyExchange)
{
    int retVal;

    retVal = aniEapolKeyCheckMic(fsm->cryptHandle,
                                 data->eapolFrame,
                                 ANI_EAPOL_KEY_DESC_TYPE_RSN_NEW,
                                 data->keyDesc,
                                 fsm->suppCtx->ptk,
                                 CSR_AES_KEY_LEN);

    return retVal;
}

static int
checkInfoElement(tSuppRsnFsm *fsm, tAniEapolKeyAvailEventData *data)
{
    tAniEapolRsnKeyDesc *desc;
    v_U8_t *ieApBytes;
    int ieApLen;

    desc = (tAniEapolRsnKeyDesc *) data->keyDesc;
    if( NULL == desc )
    {
        return ANI_E_NULL_VALUE;
    }

    ieApLen = aniAsfPacketGetBytes(fsm->suppCtx->ieAp, &ieApBytes);
    if( ANI_IS_STATUS_SUCCESS( ieApLen ) )
    {
        if ((desc->keyDataLen != ieApLen) ||
            ( vos_mem_compare(desc->keyData, ieApBytes, ieApLen) ))
        {
            // TODO: Send a fault here
            VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                       "Supp AP sent inconsistent RSN IE!\n" );
            return ANI_E_FAILED;
        }
    }

    return ANI_OK;
}

static int
checkPeerReplayCounter(tSuppRsnFsm *fsm, tAniEapolKeyAvailEventData *data,
                       v_BOOL_t *retransmit, v_BOOL_t actualMicFlag,
                       v_BOOL_t reTxMicFlag)
{
    int retVal = ANI_OK;
    int cmp;
    tAniEapolRsnKeyDesc *rxDesc;

    rxDesc = data->keyDesc;
    if( NULL == rxDesc )
    {
        return ANI_E_NULL_VALUE;
    }

    *retransmit = eANI_BOOLEAN_FALSE;

    cmp = aniSsmReplayCtrCmp(fsm->peerReplayCtr, rxDesc->replayCounter);

    // The AP should send us a replay counter greater than or equal to
    // the last one it sent
    /* Unless we are forgiving with this we will have interop issues with
       some vendors like CSR */
    if (cmp > 0)
    {
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "BP got old EAPOL replay counter from AP" );
        retVal = ANI_E_REPLAY_CHECK_FAILED;
    }
    else if (cmp <= 0)
    {
        if ( actualMicFlag == reTxMicFlag )
        {
            *retransmit = eANI_BOOLEAN_TRUE;
        }
    }

    return retVal;
}

static int
checkTransition(tSuppRsnFsm *fsm, void *arg)
{
    tAniEapolKeyAvailEventData *data;
    tAniEapolRsnKeyDesc *rxDesc;
    v_BOOL_t retransmit;
    int retVal;

    if (fsm->authReq)
    {
        gotoStateAuthentication(fsm);
        return ANI_OK;
    }

    switch (fsm->currentState)
    {
    case INITIALIZE:
        break;
    case AUTHENTICATION:
        gotoStateGotPmk(fsm);
        checkTransition(fsm, arg);
        break;
    case GOT_PMK:
        if (fsm->eapolAvail)
        {
            fsm->eapolAvail = eANI_BOOLEAN_FALSE;
            data = (tAniEapolKeyAvailEventData *) arg;
            rxDesc = (tAniEapolRsnKeyDesc *) data->keyDesc;
            if (rxDesc->info.ackFlag)
            {
                aniSsmReplayCtrUpdate(fsm->peerReplayCtr,
                                      rxDesc->replayCounter);
                // Going from one state to another cannot be a retransmit
                retVal = gotoStateStaKeyStart(fsm, data, eANI_BOOLEAN_FALSE);
            }
        }
        break;
    case STA_KEY_START:
        if (fsm->eapolAvail)
        {
            fsm->eapolAvail = eANI_BOOLEAN_FALSE;
            data = (tAniEapolKeyAvailEventData *) arg;
            rxDesc = (tAniEapolRsnKeyDesc *) data->keyDesc;
            if (rxDesc->info.ackFlag)
            {
                retVal = checkPeerReplayCounter( fsm, data, &retransmit,
                             rxDesc->info.micFlag,
                             0); // MIC not set means check for re-Tx M1.
                if (retVal != ANI_OK)
                    return ANI_OK; // Caller should not fail
                if (retransmit)
                {
                    VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                               "Resending EAPOL-Key Msg2 from "
                               "supplicant to AP" );
                    retVal = gotoStateStaKeyStart(fsm, data, eANI_BOOLEAN_TRUE);
                }
                else
                {
                    retVal = checkMic(fsm, data, rxDesc->info.unicastFlag);
                    if (retVal != ANI_OK)
                    {
                        bapSuppDisconnect( fsm->ctx );
                        return retVal;
                    }
                    aniSsmReplayCtrUpdate(fsm->peerReplayCtr,
                                          rxDesc->replayCounter);
                    gotoStateStaKeySet(fsm, data, eANI_BOOLEAN_FALSE);
                }
            }
        }
        break;
    case STA_KEY_SET:
        if (fsm->eapolAvail)
        {
            fsm->eapolAvail = eANI_BOOLEAN_FALSE;
            data = (tAniEapolKeyAvailEventData *) arg;
            rxDesc = (tAniEapolRsnKeyDesc *) data->keyDesc;
            retVal = checkPeerReplayCounter( fsm, data, &retransmit,
                         rxDesc->info.micFlag,
                         1); // MIC set means check for re-Tx M3.
            if (retVal != ANI_OK)
                return ANI_OK; // Caller should not fail
            if (!retransmit)
            {
                retVal = checkMic(fsm, data, rxDesc->info.unicastFlag);
                if (retVal != ANI_OK)
                {
                    bapSuppDisconnect( fsm->ctx );
                    return retVal;
                }
                aniSsmReplayCtrUpdate(fsm->peerReplayCtr,
                                      rxDesc->replayCounter);
            }
            if (rxDesc->info.unicastFlag)
            {
                /*
                 * Handle pairwise key message... in this state
                 * pairwise key messages can only be for retransmissions.
                 */
                if (retransmit)
                {
                    VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                               "Resending EAPOL-Key Msg4 from "
                               "supplicant \n" );
                    retVal = gotoStateStaKeySet(fsm, data, eANI_BOOLEAN_TRUE);
                }
            }
            else
            {
                /*
                 * Handle group key message... with group key messages,
                 * the replay counter has to change on
                 * retransmissions.
                 */
                if (!retransmit)
                {
                    retVal = gotoStateGroupKeySet(fsm, data);
                    if( !ANI_IS_STATUS_SUCCESS( retVal ) )
                    {
                        bapSuppDisconnect( fsm->ctx );
                        return retVal;
                    }
                }
            }
        }
        else
        {
            if (fsm->integFailed)
            {
                gotoStateKeyUpdate(fsm, arg);
            }
        }
        break;
    case GROUP_KEY_SET:
        gotoStateStaKeySet(fsm, NULL, eANI_BOOLEAN_FALSE);
        break;
    case KEY_UPDATE:
        gotoStateRekeyMsg(fsm, arg);
        break;
    default:
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "Illegal state for SuppRsnFsm: %d", fsm->currentState);
        VOS_ASSERT( 0 );
        return ANI_E_FAILED;
    }

    return ANI_OK;
}

static int
suppEapolKeyHandler( tSuppRsnFsm *fsm,
                     tAniPacket *eapolFrame,
                     tAniMacAddr suppMac)
{
    int retVal;

    int descType;
    void *keyDesc;
    tAniEapolRsnKeyDesc *rsnDesc;
    tAniEapolKeyAvailEventData data;

    do
    {
        retVal = aniEapolParseKey(eapolFrame, &descType, &keyDesc);
        if( !ANI_IS_STATUS_SUCCESS( retVal ) )
        {
            return retVal;
        }

        if (descType == ANI_EAPOL_KEY_DESC_TYPE_RSN_NEW)
        {
            rsnDesc = (tAniEapolRsnKeyDesc *) keyDesc;
            /*
             * Pass on the event to the RSN FSM irrespective of whether it is
             * pairwise or not.
             */
            data.keyDesc = keyDesc;
            data.eapolFrame = eapolFrame;
            retVal = suppRsnFsmProcessEvent(fsm,
                                            RSN_FSM_EAPOL_FRAME_AVAILABLE,
                                            &data);
        }
        else
        {
            VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                       "Supp: Got unexpected 802.1x RC4 Key message \n" );
            retVal = ANI_E_FAILED;
            break;
        }
    } while( 0 );

    aniEapolKeyFreeDesc(descType, keyDesc);
    return retVal;
}

//
// This function always assumes the incoming vos_packet is an 802_3 frame.
static int
suppRsnRxFrameHandler( v_PVOID_t pvosGCtx, vos_pkt_t *pPacket )
{
    int retVal = ANI_ERROR;
    tAniPacket *pAniPacket;
    tBtampContext *ctx;
    tSuppRsnFsm *fsm;

    /* Validate params */
    if ((pvosGCtx == NULL) || (NULL == pPacket))
    {
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "param is NULL in %s", __func__);
        return retVal;
    }

    ctx = (tBtampContext *)VOS_GET_BAP_CB( pvosGCtx );
    if (NULL == ctx)
    {
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "ctx is NULL in %s", __func__);
        return retVal;
    }

    fsm = &ctx->uFsm.suppFsm;
    if (NULL == fsm)
    {
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "fsm is NULL in %s", __func__);
        return retVal;
    }

    do
    {
        //TODO: We need to synchronize this. For now, use the simplest form:
        //drop any packet that arrives while another is being processed.
        if( fsm->fReceiving )
        {
            VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                       " ******suppRsnRxFrameHandler receive eapol packet while processing. Drop the new comer\n" );
            break;
        }

        fsm->fReceiving = VOS_TRUE;
        retVal = bapRsnFormPktFromVosPkt( &pAniPacket, pPacket );
        if( !ANI_IS_STATUS_SUCCESS( retVal ) )
            break;

        //Now we can process the eapol frame
        //handler will free the pAniPacket
        bapRsnEapolHandler( fsm, pAniPacket, VOS_FALSE );
    } while( 0 );

    fsm->fReceiving = VOS_FALSE;
    vos_pkt_return_packet( pPacket );

    return retVal;
}

static int
suppRsnTxCompleteHandler( v_PVOID_t pvosGCtx, vos_pkt_t *pPacket,
                          VOS_STATUS retStatus )
{
    tBtampContext *ctx = (tBtampContext *)VOS_GET_BAP_CB( pvosGCtx );
    tAuthRsnFsm *fsm;

    vos_pkt_return_packet( pPacket );
    if (pvosGCtx == NULL)
    {
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "param is NULL in %s", __func__);
        return ANI_ERROR;
    }

    if (NULL == ctx)
    {
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "ctx is NULL in %s", __func__);
        return ANI_ERROR;
    }

    fsm = &ctx->uFsm.authFsm;
    if (NULL == fsm)
    {
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "fsm is NULL in %s", __func__);
        return ANI_ERROR;
    }

    //Synchronization needed
    if(!VOS_IS_STATUS_SUCCESS( retStatus ) )
    {
        //This is bad.
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "Supp: TL Tx complete with error %d current state is %d\n",
                   retStatus, fsm->currentState );
        if( fsm->numTries <= suppConsts.maxTries )
        {
            //retransmit
            fsm->numTries++;
            if( !VOS_IS_STATUS_SUCCESS( bapRsnSendEapolFrame( fsm->ctx->pvosGCtx,
                                                              fsm->lastEapol ) ) )
            {
                bapSuppDisconnect( fsm->ctx->pvosGCtx );
            }
        }
        else
        {
            bapSuppDisconnect( fsm->ctx->pvosGCtx );
        }
    }

    return ANI_OK;
}

/**
 * suppEapolHandler
 *
 * Handles an incoming EAPOL frame on the supplicant side.
 *
 * @param eapolFrame the packet containing the EAPOL frame, with the
 * head of the packet still at the start of the EAPOL frame
 * @param dstMac the dstMac pointing inside the frame
 * @param srcMac the srcMac pointing inside the frame
 * @param type the type pointing inside the frame at the type field
 *
 * @return ANI_OK if the operation succeeds
 */
void
suppEapolHandler( tSuppRsnFsm *fsm, tAniPacket *eapolFrame,
                  tAniMacAddr dstMac, tAniMacAddr srcMac, v_U8_t *type )
{
    switch (*type)
    {
    case ANI_EAPOL_TYPE_PACKET:
        // Ignore EAP because it is only WPA2-PSK
        break;
    case ANI_EAPOL_TYPE_KEY:
        suppEapolKeyHandler( fsm, eapolFrame, dstMac );
        break;
    case ANI_EAPOL_TYPE_ASF_ALERT:
    default:
        VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR,
                   "Supp: EAPOL type not implemented: %.2x\n", *type);
        break;
    }
}
repo_name: BrateloSlava/kernel_apq8064
file_path: drivers/staging/prima/CORE/BAP/src/bapRsn8021xSuppRsnFsm.c
language: C
license: gpl-2.0
size: 34,570
<?php
/**
 * BackPress Styles enqueue.
 *
 * These classes were refactored from the WordPress WP_Scripts and WordPress
 * script enqueue API.
 *
 * @package BackPress
 * @since r74
 */

/**
 * BackPress Styles enqueue class.
 *
 * @package BackPress
 * @uses WP_Dependencies
 * @since r74
 */
class WP_Styles extends WP_Dependencies {
    var $base_url;
    var $content_url;
    var $default_version;
    var $text_direction = 'ltr';
    var $concat = '';
    var $concat_version = '';
    var $do_concat = false;
    var $print_html = '';
    var $print_code = '';
    var $default_dirs;

    function __construct() {
        /**
         * Fires when the WP_Styles instance is initialized.
         *
         * @since 2.6.0
         *
         * @param WP_Styles &$this WP_Styles instance, passed by reference.
         */
        do_action_ref_array( 'wp_default_styles', array(&$this) );
    }

    function do_item( $handle ) {
        if ( !parent::do_item($handle) )
            return false;

        $obj = $this->registered[$handle];
        if ( null === $obj->ver )
            $ver = '';
        else
            $ver = $obj->ver ? $obj->ver : $this->default_version;

        if ( isset($this->args[$handle]) )
            $ver = $ver ? $ver . '&amp;' . $this->args[$handle] : $this->args[$handle];

        if ( $this->do_concat ) {
            if ( $this->in_default_dir($obj->src) && !isset($obj->extra['conditional']) && !isset($obj->extra['alt']) ) {
                $this->concat .= "$handle,";
                $this->concat_version .= "$handle$ver";

                $this->print_code .= $this->print_inline_style( $handle, false );

                return true;
            }
        }

        if ( isset($obj->args) )
            $media = esc_attr( $obj->args );
        else
            $media = 'all';

        $href = $this->_css_href( $obj->src, $ver, $handle );
        if ( empty( $href ) ) {
            // Turns out there is nothing to print.
            return true;
        }
        $rel = isset($obj->extra['alt']) && $obj->extra['alt'] ? 'alternate stylesheet' : 'stylesheet';
        $title = isset($obj->extra['title']) ? "title='" . esc_attr( $obj->extra['title'] ) . "'" : '';

        /**
         * Filter the HTML link tag of an enqueued style.
         *
         * @since 2.6.0
         *
         * @param string         The link tag for the enqueued style.
         * @param string $handle The style's registered handle.
         */
        $tag = apply_filters( 'style_loader_tag', "<link rel='$rel' id='$handle-css' $title href='$href' type='text/css' media='$media' />\n", $handle );

        if ( 'rtl' === $this->text_direction && isset($obj->extra['rtl']) && $obj->extra['rtl'] ) {
            if ( is_bool( $obj->extra['rtl'] ) || 'replace' === $obj->extra['rtl'] ) {
                $suffix = isset( $obj->extra['suffix'] ) ? $obj->extra['suffix'] : '';
                $rtl_href = str_replace( "{$suffix}.css", "-rtl{$suffix}.css", $this->_css_href( $obj->src , $ver, "$handle-rtl" ));
            } else {
                $rtl_href = $this->_css_href( $obj->extra['rtl'], $ver, "$handle-rtl" );
            }

            /**
             * Filter the right-to-left (RTL) HTML link tag of an enqueued style.
             *
             * @since 2.6.0
             *
             * @param string $rtl_style The right to left link tag for the enqueued style.
             * @param string $handle    The style's registered handle.
             */
            $rtl_tag = apply_filters( 'style_loader_tag', "<link rel='$rel' id='$handle-rtl-css' $title href='$rtl_href' type='text/css' media='$media' />\n", $handle );

            if ( $obj->extra['rtl'] === 'replace' ) {
                $tag = $rtl_tag;
            } else {
                $tag .= $rtl_tag;
            }
        }

        if ( isset($obj->extra['conditional']) && $obj->extra['conditional'] ) {
            $tag = "<!--[if {$obj->extra['conditional']}]>\n" . $tag . "<![endif]-->\n";
        }

        if ( $this->do_concat ) {
            $this->print_html .= $tag;
            if ( $inline_style = $this->print_inline_style( $handle, false ) )
                $this->print_html .= sprintf( "<style type='text/css'>\n%s\n</style>\n", $inline_style );
        } else {
            echo $tag;
            $this->print_inline_style( $handle );
        }

        return true;
    }

    function add_inline_style( $handle, $code ) {
        if ( !$code )
            return false;

        $after = $this->get_data( $handle, 'after' );
        if ( !$after )
            $after = array();

        $after[] = $code;
        return $this->add_data( $handle, 'after', $after );
    }

    function print_inline_style( $handle, $echo = true ) {
        $output = $this->get_data( $handle, 'after' );

        if ( empty( $output ) )
            return false;

        $output = implode( "\n", $output );

        if ( !$echo )
            return $output;

        echo "<style type='text/css'>\n";
        echo "$output\n";
        echo "</style>\n";

        return true;
    }

    function all_deps( $handles, $recursion = false, $group = false ) {
        $r = parent::all_deps( $handles, $recursion );
        if ( !$recursion ) {
            /**
             * Filter the array of enqueued styles before processing for output.
             *
             * @since 2.6.0
             *
             * @param array $to_do The list of enqueued styles about to be processed.
             */
            $this->to_do = apply_filters( 'print_styles_array', $this->to_do );
        }
        return $r;
    }

    function _css_href( $src, $ver, $handle ) {
        if ( !is_bool($src) && !preg_match('|^(https?:)?//|', $src) && ! ( $this->content_url && 0 === strpos($src, $this->content_url) ) ) {
            $src = $this->base_url . $src;
        }

        if ( !empty($ver) )
            $src = add_query_arg('ver', $ver, $src);

        /**
         * Filter an enqueued style's fully-qualified URL.
         *
         * @since 2.6.0
         *
         * @param string $src    The source URL of the enqueued style.
         * @param string $handle The style's registered handle.
         */
        $src = apply_filters( 'style_loader_src', $src, $handle );
        return esc_url( $src );
    }

    function in_default_dir($src) {
        if ( ! $this->default_dirs )
            return true;

        foreach ( (array) $this->default_dirs as $test ) {
            if ( 0 === strpos($src, $test) )
                return true;
        }
        return false;
    }

    function do_footer_items() { // HTML 5 allows styles in the body, grab late enqueued items and output them in the footer.
        $this->do_items(false, 1);
        return $this->done;
    }

    function reset() {
        $this->do_concat = false;
        $this->concat = '';
        $this->concat_version = '';
        $this->print_html = '';
    }
}
repo_name: kaluli/Wordpress-Site
file_path: original/wp-includes/class.wp-styles.php
language: PHP
license: gpl-2.0
size: 5,853
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/err.h>
#include <linux/uuid.h>
#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "print-tree.h"

/*
 * Read a root item from the tree. In case we detect a root item smaller than
 * sizeof(root_item), we know it's an old version of the root structure and
 * initialize all new fields to zero. The same happens if we detect mismatching
 * generation numbers as then we know the root was once mounted with an older
 * kernel that was not aware of the root item structure change.
 */
static void btrfs_read_root_item(struct extent_buffer *eb, int slot,
                                 struct btrfs_root_item *item)
{
    uuid_le uuid;
    int len;
    int need_reset = 0;

    len = btrfs_item_size_nr(eb, slot);
    read_extent_buffer(eb, item, btrfs_item_ptr_offset(eb, slot),
                       min_t(int, len, (int)sizeof(*item)));
    if (len < sizeof(*item))
        need_reset = 1;
    if (!need_reset && btrfs_root_generation(item)
        != btrfs_root_generation_v2(item)) {
        if (btrfs_root_generation_v2(item) != 0) {
            printk(KERN_WARNING "BTRFS: mismatching "
                   "generation and generation_v2 "
                   "found in root item. This root "
                   "was probably mounted with an "
                   "older kernel. Resetting all "
                   "new fields.\n");
        }
        need_reset = 1;
    }
    if (need_reset) {
        memset(&item->generation_v2, 0,
               sizeof(*item) - offsetof(struct btrfs_root_item,
                                        generation_v2));

        uuid_le_gen(&uuid);
        memcpy(item->uuid, uuid.b, BTRFS_UUID_SIZE);
    }
}

/*
 * btrfs_find_root - lookup the root by the key.
 * root: the root of the root tree
 * search_key: the key to search
 * path: the path we search
 * root_item: the root item of the tree we look for
 * root_key: the real key of the tree we look for
 *
 * If ->offset of 'search_key' is -1ULL, it means we are not sure of the
 * offset of the search key, just lookup the root with the highest offset for
 * a given objectid.
 *
 * If we find something return 0, otherwise > 0, < 0 on error.
 */
int btrfs_find_root(struct btrfs_root *root, struct btrfs_key *search_key,
                    struct btrfs_path *path, struct btrfs_root_item *root_item,
                    struct btrfs_key *root_key)
{
    struct btrfs_key found_key;
    struct extent_buffer *l;
    int ret;
    int slot;

    ret = btrfs_search_slot(NULL, root, search_key, path, 0, 0);
    if (ret < 0)
        return ret;

    if (search_key->offset != -1ULL) {  /* the search key is exact */
        if (ret > 0)
            goto out;
    } else {
        BUG_ON(ret == 0);               /* Logical error */
        if (path->slots[0] == 0)
            goto out;
        path->slots[0]--;
        ret = 0;
    }

    l = path->nodes[0];
    slot = path->slots[0];

    btrfs_item_key_to_cpu(l, &found_key, slot);
    if (found_key.objectid != search_key->objectid ||
        found_key.type != BTRFS_ROOT_ITEM_KEY) {
        ret = 1;
        goto out;
    }

    if (root_item)
        btrfs_read_root_item(l, slot, root_item);
    if (root_key)
        memcpy(root_key, &found_key, sizeof(found_key));
out:
    btrfs_release_path(path);
    return ret;
}

void btrfs_set_root_node(struct btrfs_root_item *item,
                         struct extent_buffer *node)
{
    btrfs_set_root_bytenr(item, node->start);
    btrfs_set_root_level(item, btrfs_header_level(node));
    btrfs_set_root_generation(item, btrfs_header_generation(node));
}

/*
 * copy the data in 'item' into the btree
 */
int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
                      *root, struct btrfs_key *key, struct btrfs_root_item
                      *item)
{
    struct btrfs_path *path;
    struct extent_buffer *l;
    int ret;
    int slot;
    unsigned long ptr;
    int old_len;

    path = btrfs_alloc_path();
    if (!path)
        return -ENOMEM;

    ret = btrfs_search_slot(trans, root, key, path, 0, 1);
    if (ret < 0) {
        btrfs_abort_transaction(trans, root, ret);
        goto out;
    }

    if (ret != 0) {
        btrfs_print_leaf(root, path->nodes[0]);
        btrfs_crit(root->fs_info, "unable to update root key %llu %u %llu",
                   key->objectid, key->type, key->offset);
        BUG_ON(1);
    }

    l = path->nodes[0];
    slot = path->slots[0];
    ptr = btrfs_item_ptr_offset(l, slot);
    old_len = btrfs_item_size_nr(l, slot);

    /*
     * If this is the first time we update the root item which originated
     * from an older kernel, we need to enlarge the item size to make room
     * for the added fields.
     */
    if (old_len < sizeof(*item)) {
        btrfs_release_path(path);
        ret = btrfs_search_slot(trans, root, key, path, -1, 1);
        if (ret < 0) {
            btrfs_abort_transaction(trans, root, ret);
            goto out;
        }

        ret = btrfs_del_item(trans, root, path);
        if (ret < 0) {
            btrfs_abort_transaction(trans, root, ret);
            goto out;
        }
        btrfs_release_path(path);
        ret = btrfs_insert_empty_item(trans, root, path,
                                      key, sizeof(*item));
        if (ret < 0) {
            btrfs_abort_transaction(trans, root, ret);
            goto out;
        }
        l = path->nodes[0];
        slot = path->slots[0];
        ptr = btrfs_item_ptr_offset(l, slot);
    }

    /*
     * Update generation_v2 so at the next mount we know the new root
     * fields are valid.
     */
    btrfs_set_root_generation_v2(item, btrfs_root_generation(item));

    write_extent_buffer(l, item, ptr, sizeof(*item));
    btrfs_mark_buffer_dirty(path->nodes[0]);
out:
    btrfs_free_path(path);
    return ret;
}

int btrfs_insert_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                      struct btrfs_key *key, struct btrfs_root_item *item)
{
    /*
     * Make sure generation v1 and v2 match. See update_root for details.
     */
    btrfs_set_root_generation_v2(item, btrfs_root_generation(item));
    return btrfs_insert_item(trans, root, key, item, sizeof(*item));
}

int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
{
    struct extent_buffer *leaf;
    struct btrfs_path *path;
    struct btrfs_key key;
    struct btrfs_key root_key;
    struct btrfs_root *root;
    int err = 0;
    int ret;
    bool can_recover = true;

    if (tree_root->fs_info->sb->s_flags & MS_RDONLY)
        can_recover = false;

    path = btrfs_alloc_path();
    if (!path)
        return -ENOMEM;

    key.objectid = BTRFS_ORPHAN_OBJECTID;
    key.type = BTRFS_ORPHAN_ITEM_KEY;
    key.offset = 0;

    root_key.type = BTRFS_ROOT_ITEM_KEY;
    root_key.offset = (u64)-1;

    while (1) {
        ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
        if (ret < 0) {
            err = ret;
            break;
        }

        leaf = path->nodes[0];
        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
            ret = btrfs_next_leaf(tree_root, path);
            if (ret < 0)
                err = ret;
            if (ret != 0)
                break;
            leaf = path->nodes[0];
        }

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        btrfs_release_path(path);

        if (key.objectid != BTRFS_ORPHAN_OBJECTID ||
            key.type != BTRFS_ORPHAN_ITEM_KEY)
            break;

        root_key.objectid = key.offset;
        key.offset++;

        root = btrfs_read_fs_root(tree_root, &root_key);
        err = PTR_ERR_OR_ZERO(root);
        if (err && err != -ENOENT) {
            break;
        } else if (err == -ENOENT) {
            struct btrfs_trans_handle *trans;

            btrfs_release_path(path);

            trans = btrfs_join_transaction(tree_root);
            if (IS_ERR(trans)) {
                err = PTR_ERR(trans);
                btrfs_error(tree_root->fs_info, err,
                            "Failed to start trans to delete "
                            "orphan item");
                break;
            }
            err = btrfs_del_orphan_item(trans, tree_root,
                                        root_key.objectid);
            btrfs_end_transaction(trans, tree_root);
            if (err) {
                btrfs_error(tree_root->fs_info, err,
                            "Failed to delete root orphan "
                            "item");
                break;
            }
            continue;
        }

        err = btrfs_init_fs_root(root);
        if (err) {
            btrfs_free_fs_root(root);
            break;
        }

        set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);

        err = btrfs_insert_fs_root(root->fs_info, root);
        if (err) {
            BUG_ON(err == -EEXIST);
            btrfs_free_fs_root(root);
            break;
        }

        if (btrfs_root_refs(&root->root_item) == 0)
            btrfs_add_dead_root(root);
    }

    btrfs_free_path(path);
    return err;
}

/* drop the root item for 'key' from 'root' */
int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                   struct btrfs_key *key)
{
    struct btrfs_path *path;
    int ret;

    path = btrfs_alloc_path();
    if (!path)
        return -ENOMEM;
    ret = btrfs_search_slot(trans, root, key, path, -1, 1);
    if (ret < 0)
        goto out;

    BUG_ON(ret != 0);

    ret = btrfs_del_item(trans, root, path);
out:
    btrfs_free_path(path);
    return ret;
}

int btrfs_del_root_ref(struct btrfs_trans_handle *trans,
                       struct btrfs_root *tree_root,
                       u64 root_id, u64 ref_id, u64 dirid, u64 *sequence,
                       const char *name, int name_len)
{
    struct btrfs_path *path;
    struct btrfs_root_ref *ref;
    struct extent_buffer *leaf;
    struct btrfs_key key;
    unsigned long ptr;
    int err = 0;
    int ret;

    path = btrfs_alloc_path();
    if (!path)
        return -ENOMEM;

    key.objectid = root_id;
    key.type = BTRFS_ROOT_BACKREF_KEY;
    key.offset = ref_id;
again:
    ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
    BUG_ON(ret < 0);
    if (ret == 0) {
        leaf = path->nodes[0];
        ref = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_root_ref);

        WARN_ON(btrfs_root_ref_dirid(leaf, ref) != dirid);
        WARN_ON(btrfs_root_ref_name_len(leaf, ref) != name_len);
        ptr = (unsigned long)(ref + 1);
        WARN_ON(memcmp_extent_buffer(leaf, name, ptr, name_len));
        *sequence = btrfs_root_ref_sequence(leaf, ref);

        ret = btrfs_del_item(trans, tree_root, path);
        if (ret) {
            err = ret;
            goto out;
        }
    } else
        err = -ENOENT;

    if (key.type == BTRFS_ROOT_BACKREF_KEY) {
        btrfs_release_path(path);
        key.objectid = ref_id;
        key.type = BTRFS_ROOT_REF_KEY;
        key.offset = root_id;
        goto again;
    }

out:
    btrfs_free_path(path);
    return err;
}

/*
 * add a btrfs_root_ref item. type is either BTRFS_ROOT_REF_KEY
 * or BTRFS_ROOT_BACKREF_KEY.
 *
 * The dirid, sequence, name and name_len refer to the directory entry
 * that is referencing the root.
 *
 * For a forward ref, the root_id is the id of the tree referencing
 * the root and ref_id is the id of the subvol or snapshot.
 *
 * For a back ref the root_id is the id of the subvol or snapshot and
 * ref_id is the id of the tree referencing it.
 *
 * Will return 0, -ENOMEM, or anything from the CoW path
 */
int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
                       struct btrfs_root *tree_root,
                       u64 root_id, u64 ref_id, u64 dirid, u64 sequence,
                       const char *name, int name_len)
{
    struct btrfs_key key;
    int ret;
    struct btrfs_path *path;
    struct btrfs_root_ref *ref;
    struct extent_buffer *leaf;
    unsigned long ptr;

    path = btrfs_alloc_path();
    if (!path)
        return -ENOMEM;

    key.objectid = root_id;
    key.type = BTRFS_ROOT_BACKREF_KEY;
    key.offset = ref_id;
again:
    ret = btrfs_insert_empty_item(trans, tree_root, path, &key,
                                  sizeof(*ref) + name_len);
    if (ret) {
        btrfs_abort_transaction(trans, tree_root, ret);
        btrfs_free_path(path);
        return ret;
    }

    leaf = path->nodes[0];
    ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
    btrfs_set_root_ref_dirid(leaf, ref, dirid);
    btrfs_set_root_ref_sequence(leaf, ref, sequence);
    btrfs_set_root_ref_name_len(leaf, ref, name_len);
    ptr = (unsigned long)(ref + 1);
    write_extent_buffer(leaf, name, ptr, name_len);
    btrfs_mark_buffer_dirty(leaf);

    if (key.type == BTRFS_ROOT_BACKREF_KEY) {
        btrfs_release_path(path);
        key.objectid = ref_id;
        key.type = BTRFS_ROOT_REF_KEY;
        key.offset = root_id;
        goto again;
    }

    btrfs_free_path(path);
    return 0;
}

/*
 * Old btrfs forgets to init root_item->flags and root_item->byte_limit
 * for subvolumes. To work around this problem, we steal a bit from
 * root_item->inode_item->flags, and use it to indicate if those fields
 * have been properly initialized.
 */
void btrfs_check_and_init_root_item(struct btrfs_root_item *root_item)
{
    u64 inode_flags = btrfs_stack_inode_flags(&root_item->inode);

    if (!(inode_flags & BTRFS_INODE_ROOT_ITEM_INIT)) {
        inode_flags |= BTRFS_INODE_ROOT_ITEM_INIT;
        btrfs_set_stack_inode_flags(&root_item->inode, inode_flags);
        btrfs_set_root_flags(root_item, 0);
        btrfs_set_root_limit(root_item, 0);
    }
}

void btrfs_update_root_times(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root)
{
    struct btrfs_root_item *item = &root->root_item;
    struct timespec ct = CURRENT_TIME;

    spin_lock(&root->root_item_lock);
    btrfs_set_root_ctransid(item, trans->transid);
    btrfs_set_stack_timespec_sec(&item->ctime, ct.tv_sec);
    btrfs_set_stack_timespec_nsec(&item->ctime, ct.tv_nsec);
    spin_unlock(&root->root_item_lock);
}
repo_name: radiohap/prd
file_path: fs/btrfs/root-tree.c
language: C
license: gpl-2.0
size: 12,885
/*
 * Copyright IBM Corp. 2011
 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/pgtable.h>

static void change_page_attr(unsigned long addr, int numpages,
                             pte_t (*set) (pte_t))
{
    pte_t *ptep, pte;
    pmd_t *pmdp;
    pud_t *pudp;
    pgd_t *pgdp;
    int i;

    for (i = 0; i < numpages; i++) {
        pgdp = pgd_offset(&init_mm, addr);
        pudp = pud_offset(pgdp, addr);
        pmdp = pmd_offset(pudp, addr);
        if (pmd_huge(*pmdp)) {
            WARN_ON_ONCE(1);
            continue;
        }
        ptep = pte_offset_kernel(pmdp, addr);

        pte = *ptep;
        pte = set(pte);
        __ptep_ipte(addr, ptep);
        *ptep = pte;
        addr += PAGE_SIZE;
    }
}

int set_memory_ro(unsigned long addr, int numpages)
{
    change_page_attr(addr, numpages, pte_wrprotect);
    return 0;
}
EXPORT_SYMBOL_GPL(set_memory_ro);

int set_memory_rw(unsigned long addr, int numpages)
{
    change_page_attr(addr, numpages, pte_mkwrite);
    return 0;
}
EXPORT_SYMBOL_GPL(set_memory_rw);

/* not possible */
int set_memory_nx(unsigned long addr, int numpages)
{
    return 0;
}
EXPORT_SYMBOL_GPL(set_memory_nx);

int set_memory_x(unsigned long addr, int numpages)
{
    return 0;
}
repo_name: EPDCenter/android_kernel_woxter_nimbus_98q
file_path: arch/s390/mm/pageattr.c
language: C
license: gpl-2.0
size: 1,184
/*
 * linux/fs/fat/misc.c
 *
 * Written 1992,1993 by Werner Almesberger
 * 22/11/2000 - Fixed fat_date_unix2dos for dates earlier than 01/01/1980
 *              and date_dos2unix for date==0 by Igor Zhbanov(bsg@uniyar.ac.ru)
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/time.h>
#include "fat.h"

/*
 * fat_fs_error reports a file system problem that might indicate fat data
 * corruption/inconsistency. Depending on the 'errors' mount option, either
 * panic() is called, or an error message is printed and nothing else is done,
 * or the filesystem is remounted read-only (default behavior).
 * In case the file system is remounted read-only, it can be made writable
 * again by remounting it.
 */
void __fat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
{
    struct fat_mount_options *opts = &MSDOS_SB(sb)->options;
    va_list args;
    struct va_format vaf;

    if (report) {
        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;
        printk(KERN_ERR "FAT-fs (%s): error, %pV\n", sb->s_id, &vaf);
        va_end(args);
    }

    if (opts->errors == FAT_ERRORS_PANIC)
        panic("FAT-fs (%s): fs panic from previous error\n", sb->s_id);
    else if (opts->errors == FAT_ERRORS_RO && !(sb->s_flags & MS_RDONLY)) {
        sb->s_flags |= MS_RDONLY;
        printk(KERN_ERR "FAT-fs (%s): Filesystem has been "
               "set read-only\n", sb->s_id);
    }
}
EXPORT_SYMBOL_GPL(__fat_fs_error);

/**
 * fat_msg() - print preformatted FAT specific messages. Everything that is
 * not fat_fs_error() should be fat_msg().
 */
void fat_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
    struct va_format vaf;
    va_list args;

    va_start(args, fmt);
    vaf.fmt = fmt;
    vaf.va = &args;
    printk("%sFAT-fs (%s): %pV\n", level, sb->s_id, &vaf);
    va_end(args);
}

/* Flushes the number of free clusters on FAT32 */
/* XXX: Need to write one per FSINFO block. Currently only writes 1 */
int fat_clusters_flush(struct super_block *sb)
{
    struct msdos_sb_info *sbi = MSDOS_SB(sb);
    struct buffer_head *bh;
    struct fat_boot_fsinfo *fsinfo;

    if (sbi->fat_bits != 32)
        return 0;

    bh = sb_bread(sb, sbi->fsinfo_sector);
    if (bh == NULL) {
        fat_msg(sb, KERN_ERR, "bread failed in fat_clusters_flush");
        return -EIO;
    }

    fsinfo = (struct fat_boot_fsinfo *)bh->b_data;
    /* Sanity check */
    if (!IS_FSINFO(fsinfo)) {
        fat_msg(sb, KERN_ERR, "Invalid FSINFO signature: "
                "0x%08x, 0x%08x (sector = %lu)",
                le32_to_cpu(fsinfo->signature1),
                le32_to_cpu(fsinfo->signature2),
                sbi->fsinfo_sector);
    } else {
        if (sbi->free_clusters != -1)
            fsinfo->free_clusters = cpu_to_le32(sbi->free_clusters);
        if (sbi->prev_free != -1)
            fsinfo->next_cluster = cpu_to_le32(sbi->prev_free);
        mark_buffer_dirty(bh);
    }
    brelse(bh);

    return 0;
}

/*
 * fat_chain_add() adds a new cluster to the chain of clusters represented
 * by inode.
 */
int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster)
{
    struct super_block *sb = inode->i_sb;
    struct msdos_sb_info *sbi = MSDOS_SB(sb);
    int ret, new_fclus, last;

    /*
     * We must locate the last cluster of the file to add this new
     * one (new_dclus) to the end of the link list (the FAT).
     */
    last = new_fclus = 0;
    if (MSDOS_I(inode)->i_start) {
        int fclus, dclus;

        ret = fat_get_cluster(inode, FAT_ENT_EOF, &fclus, &dclus);
        if (ret < 0)
            return ret;
        new_fclus = fclus + 1;
        last = dclus;
    }

    /* add new one to the last of the cluster chain */
    if (last) {
        struct fat_entry fatent;

        fatent_init(&fatent);
        ret = fat_ent_read(inode, &fatent, last);
        if (ret >= 0) {
            int wait = inode_needs_sync(inode);
            ret = fat_ent_write(inode, &fatent, new_dclus, wait);
            fatent_brelse(&fatent);
        }
        if (ret < 0)
            return ret;
        // fat_cache_add(inode, new_fclus, new_dclus);
    } else {
        MSDOS_I(inode)->i_start = new_dclus;
        MSDOS_I(inode)->i_logstart = new_dclus;
        /*
         * Since generic_write_sync() synchronizes regular files later,
         * we sync here only directories.
         */
        if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode)) {
            ret = fat_sync_inode(inode);
            if (ret)
                return ret;
        } else
            mark_inode_dirty(inode);
    }
    if (new_fclus != (inode->i_blocks >> (sbi->cluster_bits - 9))) {
        fat_fs_error(sb, "clusters badly computed (%d != %llu)",
                     new_fclus,
                     (llu)(inode->i_blocks >> (sbi->cluster_bits - 9)));
        fat_cache_inval_inode(inode);
    }
    inode->i_blocks += nr_cluster << (sbi->cluster_bits - 9);

    return 0;
}

extern struct timezone sys_tz;

/*
 * The epoch of FAT timestamp is 1980.
 *     :  bits :     value
 * date:  0 -  4: day   (1 -  31)
 * date:  5 -  8: month (1 -  12)
 * date:  9 - 15: year  (0 - 127) from 1980
 * time:  0 -  4: sec   (0 -  29) 2sec counts
 * time:  5 - 10: min   (0 -  59)
 * time: 11 - 15: hour  (0 -  23)
 */
#define SECS_PER_MIN    60
#define SECS_PER_HOUR   (60 * 60)
#define SECS_PER_DAY    (SECS_PER_HOUR * 24)
/* days between 1.1.70 and 1.1.80 (2 leap days) */
#define DAYS_DELTA      (365 * 10 + 2)
/* 120 (2100 - 1980) isn't leap year */
#define YEAR_2100       120
#define IS_LEAP_YEAR(y) (!((y) & 3) && (y) != YEAR_2100)

/* Linear day numbers of the respective 1sts in non-leap years. */
static time_t days_in_year[] = {
    /* Jan  Feb  Mar  Apr  May  Jun  Jul  Aug  Sep  Oct  Nov  Dec */
    0,  0,  31,  59,  90, 120, 151, 181, 212, 243, 273, 304, 334, 0, 0, 0,
};

/* Convert a FAT time/date pair to a UNIX date (seconds since 1 1 70). */
void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec *ts,
                       __le16 __time, __le16 __date, u8 time_cs)
{
    u16 time = le16_to_cpu(__time), date = le16_to_cpu(__date);
    time_t second, day, leap_day, month, year;

    year  = date >> 9;
    month = max(1, (date >> 5) & 0xf);
    day   = max(1, date & 0x1f) - 1;

    leap_day = (year + 3) / 4;
    if (year > YEAR_2100)           /* 2100 isn't leap year */
        leap_day--;
    if (IS_LEAP_YEAR(year) && month > 2)
        leap_day++;

    second =  (time & 0x1f) << 1;
    second += ((time >> 5) & 0x3f) * SECS_PER_MIN;
    second += (time >> 11) * SECS_PER_HOUR;
    second += (year * 365 + leap_day
               + days_in_year[month] + day
               + DAYS_DELTA) * SECS_PER_DAY;

    if (!sbi->options.tz_utc)
        second += sys_tz.tz_minuteswest * SECS_PER_MIN;

    if (time_cs) {
        ts->tv_sec = second + (time_cs / 100);
        ts->tv_nsec = (time_cs % 100) * 10000000;
    } else {
        ts->tv_sec = second;
        ts->tv_nsec = 0;
    }
}

/* Convert linear UNIX date to a FAT time/date pair. */
void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec *ts,
                       __le16 *time, __le16 *date, u8 *time_cs)
{
    struct tm tm;
    time_to_tm(ts->tv_sec, sbi->options.tz_utc ? 0 :
               -sys_tz.tz_minuteswest * 60, &tm);

    /* FAT can only support year between 1980 to 2107 */
    if (tm.tm_year < 1980 - 1900) {
        *time = 0;
        *date = cpu_to_le16((0 << 9) | (1 << 5) | 1);
        if (time_cs)
            *time_cs = 0;
        return;
    }

    if (tm.tm_year > 2107 - 1900) {
        *time = cpu_to_le16((23 << 11) | (59 << 5) | 29);
        *date = cpu_to_le16((127 << 9) | (12 << 5) | 31);
        if (time_cs)
            *time_cs = 199;
        return;
    }

    /* from 1900 -> from 1980 */
    tm.tm_year -= 80;
    /* 0~11 -> 1~12 */
    tm.tm_mon++;
    /* 0~59 -> 0~29(2sec counts) */
    tm.tm_sec >>= 1;

    *time = cpu_to_le16(tm.tm_hour << 11 | tm.tm_min << 5 | tm.tm_sec);
    *date = cpu_to_le16(tm.tm_year << 9 | tm.tm_mon << 5 | tm.tm_mday);
    if (time_cs)
        *time_cs = (ts->tv_sec & 1) * 100 + ts->tv_nsec / 10000000;
}
EXPORT_SYMBOL_GPL(fat_time_unix2fat);

int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs)
{
    int i, err = 0;

    for (i = 0; i < nr_bhs; i++)
        write_dirty_buffer(bhs[i], WRITE);

    for (i = 0; i < nr_bhs; i++) {
        wait_on_buffer(bhs[i]);
        if (!err && !buffer_uptodate(bhs[i]))
            err = -EIO;
    }
    return err;
}
repo_name: anikolop/gt-p51xx_stock_kernel
file_path: fs/fat/misc.c
language: C
license: gpl-2.0
size: 7,646
/* pinnacle-color.h - Keytable for pinnacle_color Remote Controller
 *
 * keymap imported from ir-keymaps.c
 *
 * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <media/rc-map.h>

static struct rc_map_table pinnacle_color[] = {
    { 0x59, KEY_MUTE },
    { 0x4a, KEY_POWER },
    { 0x18, KEY_TEXT },
    { 0x26, KEY_TV },
    { 0x3d, KEY_PRINT },
    { 0x48, KEY_RED },
    { 0x04, KEY_GREEN },
    { 0x11, KEY_YELLOW },
    { 0x00, KEY_BLUE },
    { 0x2d, KEY_VOLUMEUP },
    { 0x1e, KEY_VOLUMEDOWN },
    { 0x49, KEY_MENU },
    { 0x16, KEY_CHANNELUP },
    { 0x17, KEY_CHANNELDOWN },
    { 0x20, KEY_UP },
    { 0x21, KEY_DOWN },
    { 0x22, KEY_LEFT },
    { 0x23, KEY_RIGHT },
    { 0x0d, KEY_SELECT },
    { 0x08, KEY_BACK },
    { 0x07, KEY_REFRESH },
    { 0x2f, KEY_ZOOM },
    { 0x29, KEY_RECORD },
    { 0x4b, KEY_PAUSE },
    { 0x4d, KEY_REWIND },
    { 0x2e, KEY_PLAY },
    { 0x4e, KEY_FORWARD },
    { 0x53, KEY_PREVIOUS },
    { 0x4c, KEY_STOP },
    { 0x54, KEY_NEXT },
    { 0x69, KEY_0 },
    { 0x6a, KEY_1 },
    { 0x6b, KEY_2 },
    { 0x6c, KEY_3 },
    { 0x6d, KEY_4 },
    { 0x6e, KEY_5 },
    { 0x6f, KEY_6 },
    { 0x70, KEY_7 },
    { 0x71, KEY_8 },
    { 0x72, KEY_9 },
    { 0x74, KEY_CHANNEL },
    { 0x0a, KEY_BACKSPACE },
};

static struct rc_map_list pinnacle_color_map = {
    .map = {
        .scan    = pinnacle_color,
        .size    = ARRAY_SIZE(pinnacle_color),
        .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */
        .name    = RC_MAP_PINNACLE_COLOR,
    }
};

static int __init init_rc_map_pinnacle_color(void)
{
    return rc_map_register(&pinnacle_color_map);
}

static void __exit exit_rc_map_pinnacle_color(void)
{
    rc_map_unregister(&pinnacle_color_map);
}

module_init(init_rc_map_pinnacle_color)
module_exit(exit_rc_map_pinnacle_color)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
repo_name: halaszk/SM-V700
file_path: drivers/media/rc/keymaps/rc-pinnacle-color.c
language: C
license: gpl-2.0
size: 2,017
<?php
/**
 * The template for displaying posts in the Chat post format
 *
 * @package WordPress
 * @subpackage Twenty_Thirteen
 * @since Twenty Thirteen 1.0
 */
?>

<article id="post-<?php the_ID(); ?>" <?php post_class(); ?>>
    <header class="entry-header">
        <?php if ( is_single() ) : ?>
        <h1 class="entry-title"><?php the_title(); ?></h1>
        <?php else : ?>
        <h1 class="entry-title">
            <a href="<?php the_permalink(); ?>" rel="bookmark"><?php the_title(); ?></a>
        </h1>
        <?php endif; // is_single() ?>
    </header><!-- .entry-header -->

    <div class="entry-content">
        <?php the_content(); ?>
        <?php wp_link_pages( array( 'before' => '<div class="page-links"><span class="page-links-title">' . __( 'Pages:', 'twentythirteen' ) . '</span>', 'after' => '</div>', 'link_before' => '<span>', 'link_after' => '</span>' ) ); ?>
    </div><!-- .entry-content -->

    <footer class="entry-meta">
        <?php twentythirteen_entry_meta(); ?>
        <?php edit_post_link( __( 'Edit', 'twentythirteen' ), '<span class="edit-link">', '</span>' ); ?>
    </footer><!-- .entry-meta -->
</article><!-- #post -->
Mashpy/academica-pro-demo
wp-content/themes/twentythirteen/content-chat.php
PHP
gpl-2.0
1,085
/* * The Serio abstraction module * * Copyright (c) 1999-2004 Vojtech Pavlik * Copyright (c) 2004 Dmitry Torokhov * Copyright (c) 2003 Daniele Bellucci */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/stddef.h> #include <linux/module.h> #include <linux/serio.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/mutex.h> MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION("Serio abstraction core"); MODULE_LICENSE("GPL"); /* * serio_mutex protects entire serio subsystem and is taken every time * serio port or driver registered or unregistered. */ static DEFINE_MUTEX(serio_mutex); static LIST_HEAD(serio_list); static struct bus_type serio_bus; static void serio_add_port(struct serio *serio); static int serio_reconnect_port(struct serio *serio); static void serio_disconnect_port(struct serio *serio); static void serio_reconnect_subtree(struct serio *serio); static void serio_attach_driver(struct serio_driver *drv); static int serio_connect_driver(struct serio *serio, struct serio_driver *drv) { int retval; mutex_lock(&serio->drv_mutex); retval = drv->connect(serio, drv); mutex_unlock(&serio->drv_mutex); return retval; } static int serio_reconnect_driver(struct serio *serio) { int retval = -1; mutex_lock(&serio->drv_mutex); if (serio->drv && serio->drv->reconnect) retval = serio->drv->reconnect(serio); mutex_unlock(&serio->drv_mutex); return retval; } static void serio_disconnect_driver(struct serio *serio) { mutex_lock(&serio->drv_mutex); if (serio->drv) serio->drv->disconnect(serio); mutex_unlock(&serio->drv_mutex); } static int serio_match_port(const struct serio_device_id *ids, struct serio *serio) { while (ids->type || ids->proto) { if ((ids->type == SERIO_ANY || ids->type == serio->id.type) && (ids->proto == SERIO_ANY || ids->proto == serio->id.proto) && (ids->extra == SERIO_ANY || ids->extra == serio->id.extra) && (ids->id == SERIO_ANY || ids->id == serio->id.id)) return 1; ids++; } return 0; } /* * Basic serio -> driver core mappings */ static int serio_bind_driver(struct serio *serio, struct serio_driver *drv) { int error; if (serio_match_port(drv->id_table, serio)) { serio->dev.driver = &drv->driver; if (serio_connect_driver(serio, drv)) { serio->dev.driver = NULL; return -ENODEV; } error = device_bind_driver(&serio->dev); if (error) { dev_warn(&serio->dev, "device_bind_driver() failed for %s (%s) and %s, error: %d\n", serio->phys, serio->name, drv->description, error); serio_disconnect_driver(serio); serio->dev.driver = NULL; return error; } } return 0; } static void serio_find_driver(struct 
serio *serio) { int error; error = device_attach(&serio->dev); if (error < 0) dev_warn(&serio->dev, "device_attach() failed for %s (%s), error: %d\n", serio->phys, serio->name, error); } /* * Serio event processing. */ enum serio_event_type { SERIO_RESCAN_PORT, SERIO_RECONNECT_PORT, SERIO_RECONNECT_SUBTREE, SERIO_REGISTER_PORT, SERIO_ATTACH_DRIVER, }; struct serio_event { enum serio_event_type type; void *object; struct module *owner; struct list_head node; }; static DEFINE_SPINLOCK(serio_event_lock); /* protects serio_event_list */ static LIST_HEAD(serio_event_list); static struct serio_event *serio_get_event(void) { struct serio_event *event = NULL; unsigned long flags; spin_lock_irqsave(&serio_event_lock, flags); if (!list_empty(&serio_event_list)) { event = list_first_entry(&serio_event_list, struct serio_event, node); list_del_init(&event->node); } spin_unlock_irqrestore(&serio_event_lock, flags); return event; } static void serio_free_event(struct serio_event *event) { module_put(event->owner); kfree(event); } static void serio_remove_duplicate_events(void *object, enum serio_event_type type) { struct serio_event *e, *next; unsigned long flags; spin_lock_irqsave(&serio_event_lock, flags); list_for_each_entry_safe(e, next, &serio_event_list, node) { if (object == e->object) { /* * If this event is of different type we should not * look further - we only suppress duplicate events * that were sent back-to-back. */ if (type != e->type) break; list_del_init(&e->node); serio_free_event(e); } } spin_unlock_irqrestore(&serio_event_lock, flags); } static void serio_handle_event(struct work_struct *work) { struct serio_event *event; mutex_lock(&serio_mutex); while ((event = serio_get_event())) { switch (event->type) { case SERIO_REGISTER_PORT: serio_add_port(event->object); break; case SERIO_RECONNECT_PORT: serio_reconnect_port(event->object); break; case SERIO_RESCAN_PORT: serio_disconnect_port(event->object); serio_find_driver(event->object); break; case SERIO_RECONNECT_SUBTREE: serio_reconnect_subtree(event->object); break; case SERIO_ATTACH_DRIVER: serio_attach_driver(event->object); break; } serio_remove_duplicate_events(event->object, event->type); serio_free_event(event); } mutex_unlock(&serio_mutex); } static DECLARE_WORK(serio_event_work, serio_handle_event); static int serio_queue_event(void *object, struct module *owner, enum serio_event_type event_type) { unsigned long flags; struct serio_event *event; int retval = 0; spin_lock_irqsave(&serio_event_lock, flags); /* * Scan event list for the other events for the same serio port, * starting with the most recent one. If event is the same we * do not need to add a new one. If event is of different type we * need to add this event and should not look further because * we need to preserve sequence of distinct events. 
*/ list_for_each_entry_reverse(event, &serio_event_list, node) { if (event->object == object) { if (event->type == event_type) goto out; break; } } event = kmalloc(sizeof(struct serio_event), GFP_ATOMIC); if (!event) { pr_err("Not enough memory to queue event %d\n", event_type); retval = -ENOMEM; goto out; } if (!try_module_get(owner)) { pr_warning("Can't get module reference, dropping event %d\n", event_type); kfree(event); retval = -EINVAL; goto out; } event->type = event_type; event->object = object; event->owner = owner; list_add_tail(&event->node, &serio_event_list); queue_work(system_long_wq, &serio_event_work); out: spin_unlock_irqrestore(&serio_event_lock, flags); return retval; } /* * Remove all events that have been submitted for a given * object, be it serio port or driver. */ static void serio_remove_pending_events(void *object) { struct serio_event *event, *next; unsigned long flags; spin_lock_irqsave(&serio_event_lock, flags); list_for_each_entry_safe(event, next, &serio_event_list, node) { if (event->object == object) { list_del_init(&event->node); serio_free_event(event); } } spin_unlock_irqrestore(&serio_event_lock, flags); } /* * Locate child serio port (if any) that has not been fully registered yet. * * Children are registered by driver's connect() handler so there can't be a * grandchild pending registration together with a child. */ static struct serio *serio_get_pending_child(struct serio *parent) { struct serio_event *event; struct serio *serio, *child = NULL; unsigned long flags; spin_lock_irqsave(&serio_event_lock, flags); list_for_each_entry(event, &serio_event_list, node) { if (event->type == SERIO_REGISTER_PORT) { serio = event->object; if (serio->parent == parent) { child = serio; break; } } } spin_unlock_irqrestore(&serio_event_lock, flags); return child; } /* * Serio port operations */ static ssize_t serio_show_description(struct device *dev, struct device_attribute *attr, char *buf) { struct serio *serio = to_serio_port(dev); return sprintf(buf, "%s\n", serio->name); } static ssize_t serio_show_modalias(struct device *dev, struct device_attribute *attr, char *buf) { struct serio *serio = to_serio_port(dev); return sprintf(buf, "serio:ty%02Xpr%02Xid%02Xex%02X\n", serio->id.type, serio->id.proto, serio->id.id, serio->id.extra); } static ssize_t serio_show_id_type(struct device *dev, struct device_attribute *attr, char *buf) { struct serio *serio = to_serio_port(dev); return sprintf(buf, "%02x\n", serio->id.type); } static ssize_t serio_show_id_proto(struct device *dev, struct device_attribute *attr, char *buf) { struct serio *serio = to_serio_port(dev); return sprintf(buf, "%02x\n", serio->id.proto); } static ssize_t serio_show_id_id(struct device *dev, struct device_attribute *attr, char *buf) { struct serio *serio = to_serio_port(dev); return sprintf(buf, "%02x\n", serio->id.id); } static ssize_t serio_show_id_extra(struct device *dev, struct device_attribute *attr, char *buf) { struct serio *serio = to_serio_port(dev); return sprintf(buf, "%02x\n", serio->id.extra); } static DEVICE_ATTR(type, S_IRUGO, serio_show_id_type, NULL); static DEVICE_ATTR(proto, S_IRUGO, serio_show_id_proto, NULL); static DEVICE_ATTR(id, S_IRUGO, serio_show_id_id, NULL); static DEVICE_ATTR(extra, S_IRUGO, serio_show_id_extra, NULL); static struct attribute *serio_device_id_attrs[] = { &dev_attr_type.attr, &dev_attr_proto.attr, &dev_attr_id.attr, &dev_attr_extra.attr, NULL }; static struct attribute_group serio_id_attr_group = { .name = "id", .attrs = serio_device_id_attrs, }; 
static const struct attribute_group *serio_device_attr_groups[] = { &serio_id_attr_group, NULL }; static ssize_t serio_rebind_driver(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct serio *serio = to_serio_port(dev); struct device_driver *drv; int error; error = mutex_lock_interruptible(&serio_mutex); if (error) return error; if (!strncmp(buf, "none", count)) { serio_disconnect_port(serio); } else if (!strncmp(buf, "reconnect", count)) { serio_reconnect_subtree(serio); } else if (!strncmp(buf, "rescan", count)) { serio_disconnect_port(serio); serio_find_driver(serio); serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT); } else if ((drv = driver_find(buf, &serio_bus)) != NULL) { serio_disconnect_port(serio); error = serio_bind_driver(serio, to_serio_driver(drv)); serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT); } else { error = -EINVAL; } mutex_unlock(&serio_mutex); return error ? error : count; } static ssize_t serio_show_bind_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct serio *serio = to_serio_port(dev); return sprintf(buf, "%s\n", serio->manual_bind ? "manual" : "auto"); } static ssize_t serio_set_bind_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct serio *serio = to_serio_port(dev); int retval; retval = count; if (!strncmp(buf, "manual", count)) { serio->manual_bind = true; } else if (!strncmp(buf, "auto", count)) { serio->manual_bind = false; } else { retval = -EINVAL; } return retval; } static struct device_attribute serio_device_attrs[] = { __ATTR(description, S_IRUGO, serio_show_description, NULL), __ATTR(modalias, S_IRUGO, serio_show_modalias, NULL), __ATTR(drvctl, S_IWUSR, NULL, serio_rebind_driver), __ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode), __ATTR_NULL }; static void serio_release_port(struct device *dev) { struct serio *serio = to_serio_port(dev); kfree(serio); module_put(THIS_MODULE); } /* * Prepare serio port for registration. */ static void serio_init_port(struct serio *serio) { static atomic_t serio_no = ATOMIC_INIT(0); __module_get(THIS_MODULE); INIT_LIST_HEAD(&serio->node); INIT_LIST_HEAD(&serio->child_node); INIT_LIST_HEAD(&serio->children); spin_lock_init(&serio->lock); mutex_init(&serio->drv_mutex); device_initialize(&serio->dev); dev_set_name(&serio->dev, "serio%ld", (long)atomic_inc_return(&serio_no) - 1); serio->dev.bus = &serio_bus; serio->dev.release = serio_release_port; serio->dev.groups = serio_device_attr_groups; if (serio->parent) { serio->dev.parent = &serio->parent->dev; serio->depth = serio->parent->depth + 1; } else serio->depth = 0; lockdep_set_subclass(&serio->lock, serio->depth); } /* * Complete serio port registration. * Driver core will attempt to find appropriate driver for the port. 
*/ static void serio_add_port(struct serio *serio) { struct serio *parent = serio->parent; int error; if (parent) { serio_pause_rx(parent); list_add_tail(&serio->child_node, &parent->children); serio_continue_rx(parent); } list_add_tail(&serio->node, &serio_list); if (serio->start) serio->start(serio); error = device_add(&serio->dev); if (error) dev_err(&serio->dev, "device_add() failed for %s (%s), error: %d\n", serio->phys, serio->name, error); } /* * serio_destroy_port() completes unregistration process and removes * port from the system */ static void serio_destroy_port(struct serio *serio) { struct serio *child; while ((child = serio_get_pending_child(serio)) != NULL) { serio_remove_pending_events(child); put_device(&child->dev); } if (serio->stop) serio->stop(serio); if (serio->parent) { serio_pause_rx(serio->parent); list_del_init(&serio->child_node); serio_continue_rx(serio->parent); serio->parent = NULL; } if (device_is_registered(&serio->dev)) device_del(&serio->dev); list_del_init(&serio->node); serio_remove_pending_events(serio); put_device(&serio->dev); } /* * Reconnect serio port (re-initialize attached device). * If reconnect fails (old device is no longer attached or * there was no device to begin with) we do full rescan in * hope of finding a driver for the port. */ static int serio_reconnect_port(struct serio *serio) { int error = serio_reconnect_driver(serio); if (error) { serio_disconnect_port(serio); serio_find_driver(serio); } return error; } /* * Reconnect serio port and all its children (re-initialize attached * devices). */ static void serio_reconnect_subtree(struct serio *root) { struct serio *s = root; int error; do { error = serio_reconnect_port(s); if (!error) { /* * Reconnect was successful, move on to do the * first child. */ if (!list_empty(&s->children)) { s = list_first_entry(&s->children, struct serio, child_node); continue; } } /* * Either it was a leaf node or reconnect failed and it * became a leaf node. Continue reconnecting starting with * the next sibling of the parent node. */ while (s != root) { struct serio *parent = s->parent; if (!list_is_last(&s->child_node, &parent->children)) { s = list_entry(s->child_node.next, struct serio, child_node); break; } s = parent; } } while (s != root); } /* * serio_disconnect_port() unbinds a port from its driver. As a side effect * all children ports are unbound and destroyed. */ static void serio_disconnect_port(struct serio *serio) { struct serio *s = serio; /* * Children ports should be disconnected and destroyed * first; we travel the tree in depth-first order. */ while (!list_empty(&serio->children)) { /* Locate a leaf */ while (!list_empty(&s->children)) s = list_first_entry(&s->children, struct serio, child_node); /* * Prune this leaf node unless it is the one we * started with. */ if (s != serio) { struct serio *parent = s->parent; device_release_driver(&s->dev); serio_destroy_port(s); s = parent; } } /* * OK, no children left, now disconnect this port. */ device_release_driver(&serio->dev); } void serio_rescan(struct serio *serio) { serio_queue_event(serio, NULL, SERIO_RESCAN_PORT); } EXPORT_SYMBOL(serio_rescan); void serio_reconnect(struct serio *serio) { serio_queue_event(serio, NULL, SERIO_RECONNECT_SUBTREE); } EXPORT_SYMBOL(serio_reconnect); /* * Submits register request to kseriod for subsequent execution. * Note that port registration is always asynchronous. 
*/ void __serio_register_port(struct serio *serio, struct module *owner) { serio_init_port(serio); serio_queue_event(serio, owner, SERIO_REGISTER_PORT); } EXPORT_SYMBOL(__serio_register_port); /* * Synchronously unregisters serio port. */ void serio_unregister_port(struct serio *serio) { mutex_lock(&serio_mutex); serio_disconnect_port(serio); serio_destroy_port(serio); mutex_unlock(&serio_mutex); } EXPORT_SYMBOL(serio_unregister_port); /* * Safely unregisters children ports if they are present. */ void serio_unregister_child_port(struct serio *serio) { struct serio *s, *next; mutex_lock(&serio_mutex); list_for_each_entry_safe(s, next, &serio->children, child_node) { serio_disconnect_port(s); serio_destroy_port(s); } mutex_unlock(&serio_mutex); } EXPORT_SYMBOL(serio_unregister_child_port); /* * Serio driver operations */ static ssize_t serio_driver_show_description(struct device_driver *drv, char *buf) { struct serio_driver *driver = to_serio_driver(drv); return sprintf(buf, "%s\n", driver->description ? driver->description : "(none)"); } static ssize_t serio_driver_show_bind_mode(struct device_driver *drv, char *buf) { struct serio_driver *serio_drv = to_serio_driver(drv); return sprintf(buf, "%s\n", serio_drv->manual_bind ? "manual" : "auto"); } static ssize_t serio_driver_set_bind_mode(struct device_driver *drv, const char *buf, size_t count) { struct serio_driver *serio_drv = to_serio_driver(drv); int retval; retval = count; if (!strncmp(buf, "manual", count)) { serio_drv->manual_bind = true; } else if (!strncmp(buf, "auto", count)) { serio_drv->manual_bind = false; } else { retval = -EINVAL; } return retval; } static struct driver_attribute serio_driver_attrs[] = { __ATTR(description, S_IRUGO, serio_driver_show_description, NULL), __ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_driver_show_bind_mode, serio_driver_set_bind_mode), __ATTR_NULL }; static int serio_driver_probe(struct device *dev) { struct serio *serio = to_serio_port(dev); struct serio_driver *drv = to_serio_driver(dev->driver); return serio_connect_driver(serio, drv); } static int serio_driver_remove(struct device *dev) { struct serio *serio = to_serio_port(dev); serio_disconnect_driver(serio); return 0; } static void serio_cleanup(struct serio *serio) { mutex_lock(&serio->drv_mutex); if (serio->drv && serio->drv->cleanup) serio->drv->cleanup(serio); mutex_unlock(&serio->drv_mutex); } static void serio_shutdown(struct device *dev) { struct serio *serio = to_serio_port(dev); serio_cleanup(serio); } static void serio_attach_driver(struct serio_driver *drv) { int error; error = driver_attach(&drv->driver); if (error) pr_warning("driver_attach() failed for %s with error %d\n", drv->driver.name, error); } int __serio_register_driver(struct serio_driver *drv, struct module *owner, const char *mod_name) { bool manual_bind = drv->manual_bind; int error; drv->driver.bus = &serio_bus; drv->driver.owner = owner; drv->driver.mod_name = mod_name; /* * Temporarily disable automatic binding because probing * takes long time and we are better off doing it in kseriod */ drv->manual_bind = true; error = driver_register(&drv->driver); if (error) { pr_err("driver_register() failed for %s, error: %d\n", drv->driver.name, error); return error; } /* * Restore original bind mode and let kseriod bind the * driver to free ports */ if (!manual_bind) { drv->manual_bind = false; error = serio_queue_event(drv, NULL, SERIO_ATTACH_DRIVER); if (error) { driver_unregister(&drv->driver); return error; } } return 0; } EXPORT_SYMBOL(__serio_register_driver); void 
serio_unregister_driver(struct serio_driver *drv) { struct serio *serio; mutex_lock(&serio_mutex); drv->manual_bind = true; /* so serio_find_driver ignores it */ serio_remove_pending_events(drv); start_over: list_for_each_entry(serio, &serio_list, node) { if (serio->drv == drv) { serio_disconnect_port(serio); serio_find_driver(serio); /* we could've deleted some ports, restart */ goto start_over; } } driver_unregister(&drv->driver); mutex_unlock(&serio_mutex); } EXPORT_SYMBOL(serio_unregister_driver); static void serio_set_drv(struct serio *serio, struct serio_driver *drv) { serio_pause_rx(serio); serio->drv = drv; serio_continue_rx(serio); } static int serio_bus_match(struct device *dev, struct device_driver *drv) { struct serio *serio = to_serio_port(dev); struct serio_driver *serio_drv = to_serio_driver(drv); if (serio->manual_bind || serio_drv->manual_bind) return 0; return serio_match_port(serio_drv->id_table, serio); } #ifdef CONFIG_HOTPLUG #define SERIO_ADD_UEVENT_VAR(fmt, val...) \ do { \ int err = add_uevent_var(env, fmt, val); \ if (err) \ return err; \ } while (0) static int serio_uevent(struct device *dev, struct kobj_uevent_env *env) { struct serio *serio; if (!dev) return -ENODEV; serio = to_serio_port(dev); SERIO_ADD_UEVENT_VAR("SERIO_TYPE=%02x", serio->id.type); SERIO_ADD_UEVENT_VAR("SERIO_PROTO=%02x", serio->id.proto); SERIO_ADD_UEVENT_VAR("SERIO_ID=%02x", serio->id.id); SERIO_ADD_UEVENT_VAR("SERIO_EXTRA=%02x", serio->id.extra); SERIO_ADD_UEVENT_VAR("MODALIAS=serio:ty%02Xpr%02Xid%02Xex%02X", serio->id.type, serio->id.proto, serio->id.id, serio->id.extra); return 0; } #undef SERIO_ADD_UEVENT_VAR #else static int serio_uevent(struct device *dev, struct kobj_uevent_env *env) { return -ENODEV; } #endif /* CONFIG_HOTPLUG */ #ifdef CONFIG_PM static int serio_suspend(struct device *dev) { struct serio *serio = to_serio_port(dev); serio_cleanup(serio); return 0; } static int serio_resume(struct device *dev) { struct serio *serio = to_serio_port(dev); /* * Driver reconnect can take a while, so better let kseriod * deal with it. 
*/ serio_queue_event(serio, NULL, SERIO_RECONNECT_PORT); return 0; } static const struct dev_pm_ops serio_pm_ops = { .suspend = serio_suspend, .resume = serio_resume, .poweroff = serio_suspend, .restore = serio_resume, }; #endif /* CONFIG_PM */ /* called from serio_driver->connect/disconnect methods under serio_mutex */ int serio_open(struct serio *serio, struct serio_driver *drv) { serio_set_drv(serio, drv); if (serio->open && serio->open(serio)) { serio_set_drv(serio, NULL); return -1; } return 0; } EXPORT_SYMBOL(serio_open); /* called from serio_driver->connect/disconnect methods under serio_mutex */ void serio_close(struct serio *serio) { if (serio->close) serio->close(serio); serio_set_drv(serio, NULL); } EXPORT_SYMBOL(serio_close); irqreturn_t serio_interrupt(struct serio *serio, unsigned char data, unsigned int dfl) { unsigned long flags; irqreturn_t ret = IRQ_NONE; spin_lock_irqsave(&serio->lock, flags); if (likely(serio->drv)) { ret = serio->drv->interrupt(serio, data, dfl); } else if (!dfl && device_is_registered(&serio->dev)) { serio_rescan(serio); ret = IRQ_HANDLED; } spin_unlock_irqrestore(&serio->lock, flags); return ret; } EXPORT_SYMBOL(serio_interrupt); static struct bus_type serio_bus = { .name = "serio", .dev_attrs = serio_device_attrs, .drv_attrs = serio_driver_attrs, .match = serio_bus_match, .uevent = serio_uevent, .probe = serio_driver_probe, .remove = serio_driver_remove, .shutdown = serio_shutdown, #ifdef CONFIG_PM .pm = &serio_pm_ops, #endif }; static int __init serio_init(void) { int error; error = bus_register(&serio_bus); if (error) { pr_err("Failed to register serio bus, error: %d\n", error); return error; } return 0; } static void __exit serio_exit(void) { bus_unregister(&serio_bus); /* * There should not be any outstanding events but work may * still be scheduled so simply cancel it. */ cancel_work_sync(&serio_event_work); } subsys_initcall(serio_init); module_exit(serio_exit);
redglasses/android_kernel_lge_g3-V20f
drivers/input/serio/serio.c
C
gpl-2.0
24,975
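serio_queue_event() above suppresses only back-to-back duplicates per object: it scans the pending list newest-first and stops at the first event for the same object, so ordering between distinct event types is preserved. A standalone sketch of exactly that rule, assuming a toy array-backed queue with invented names and no capacity check (demo only).

#include <stdio.h>
#include <stdbool.h>

enum ev_type { EV_RESCAN, EV_RECONNECT };

struct event { void *object; enum ev_type type; };

static struct event queue[16];
static int queued;

static bool queue_event(void *object, enum ev_type type)
{
	/* scan newest-first, mirroring list_for_each_entry_reverse() */
	for (int i = queued - 1; i >= 0; i--) {
		if (queue[i].object != object)
			continue;
		if (queue[i].type == type)
			return false;	/* back-to-back duplicate: drop */
		break;			/* different type: keep ordering */
	}
	queue[queued].object = object;
	queue[queued].type = type;
	queued++;
	return true;
}

int main(void)
{
	int port;

	queue_event(&port, EV_RESCAN);
	queue_event(&port, EV_RESCAN);		/* suppressed */
	queue_event(&port, EV_RECONNECT);	/* kept */
	queue_event(&port, EV_RESCAN);		/* kept: reconnect intervenes */
	printf("queued %d of 4 events\n", queued);	/* prints 3 */
	return 0;
}

The break on a type mismatch is the essential detail: a rescan queued after an intervening reconnect must still be kept, which is why the scan cannot simply search the whole list for a matching type.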
/****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved. ** ** This copyrighted material is made available to anyone wishing to use, ** modify, copy, or redistribute it subject to the terms and conditions ** of the GNU General Public License v.2. ** ******************************************************************************* ******************************************************************************/ #include "dlm_internal.h" #include "lockspace.h" #include "member.h" #include "dir.h" #include "ast.h" #include "recover.h" #include "lowcomms.h" #include "lock.h" #include "requestqueue.h" #include "recoverd.h" /* If the start for which we're re-enabling locking (seq) has been superseded by a newer stop (ls_recover_seq), we need to leave locking disabled. We suspend dlm_recv threads here to avoid the race where dlm_recv a) sees locking stopped and b) adds a message to the requestqueue, but dlm_recoverd enables locking and clears the requestqueue between a and b. */ static int enable_locking(struct dlm_ls *ls, uint64_t seq) { int error = -EINTR; down_write(&ls->ls_recv_active); spin_lock(&ls->ls_recover_lock); if (ls->ls_recover_seq == seq) { set_bit(LSFL_RUNNING, &ls->ls_flags); /* unblocks processes waiting to enter the dlm */ up_write(&ls->ls_in_recovery); error = 0; } spin_unlock(&ls->ls_recover_lock); up_write(&ls->ls_recv_active); return error; } static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv) { unsigned long start; int error, neg = 0; log_debug(ls, "dlm_recover %llx", (unsigned long long)rv->seq); mutex_lock(&ls->ls_recoverd_active); dlm_callback_suspend(ls); /* * Free non-master tossed rsb's. Master rsb's are kept on toss * list and put on root list to be included in resdir recovery. */ dlm_clear_toss_list(ls); /* * This list of root rsb's will be the basis of most of the recovery * routines. */ dlm_create_root_list(ls); /* * Add or remove nodes from the lockspace's ls_nodes list. */ error = dlm_recover_members(ls, rv, &neg); if (error) { log_debug(ls, "dlm_recover_members error %d", error); goto fail; } dlm_set_recover_status(ls, DLM_RS_NODES); error = dlm_recover_members_wait(ls); if (error) { log_debug(ls, "dlm_recover_members_wait error %d", error); goto fail; } start = jiffies; /* * Rebuild our own share of the directory by collecting from all other * nodes their master rsb names that hash to us. */ error = dlm_recover_directory(ls); if (error) { log_debug(ls, "dlm_recover_directory error %d", error); goto fail; } dlm_set_recover_status(ls, DLM_RS_DIR); error = dlm_recover_directory_wait(ls); if (error) { log_debug(ls, "dlm_recover_directory_wait error %d", error); goto fail; } /* * We may have outstanding operations that are waiting for a reply from * a failed node. Mark these to be resent after recovery. Unlock and * cancel ops can just be completed. */ dlm_recover_waiters_pre(ls); error = dlm_recovery_stopped(ls); if (error) goto fail; if (neg || dlm_no_directory(ls)) { /* * Clear lkb's for departed nodes. */ dlm_purge_locks(ls); /* * Get new master nodeid's for rsb's that were mastered on * departed nodes. */ error = dlm_recover_masters(ls); if (error) { log_debug(ls, "dlm_recover_masters error %d", error); goto fail; } /* * Send our locks on remastered rsb's to the new masters. 
*/ error = dlm_recover_locks(ls); if (error) { log_debug(ls, "dlm_recover_locks error %d", error); goto fail; } dlm_set_recover_status(ls, DLM_RS_LOCKS); error = dlm_recover_locks_wait(ls); if (error) { log_debug(ls, "dlm_recover_locks_wait error %d", error); goto fail; } /* * Finalize state in master rsb's now that all locks can be * checked. This includes conversion resolution and lvb * settings. */ dlm_recover_rsbs(ls); } else { /* * Other lockspace members may be going through the "neg" steps * while also adding us to the lockspace, in which case they'll * be doing the recover_locks (RS_LOCKS) barrier. */ dlm_set_recover_status(ls, DLM_RS_LOCKS); error = dlm_recover_locks_wait(ls); if (error) { log_debug(ls, "dlm_recover_locks_wait error %d", error); goto fail; } } dlm_release_root_list(ls); /* * Purge directory-related requests that are saved in requestqueue. * All dir requests from before recovery are invalid now due to the dir * rebuild and will be resent by the requesting nodes. */ dlm_purge_requestqueue(ls); dlm_set_recover_status(ls, DLM_RS_DONE); error = dlm_recover_done_wait(ls); if (error) { log_debug(ls, "dlm_recover_done_wait error %d", error); goto fail; } dlm_clear_members_gone(ls); dlm_adjust_timeouts(ls); dlm_callback_resume(ls); error = enable_locking(ls, rv->seq); if (error) { log_debug(ls, "enable_locking error %d", error); goto fail; } error = dlm_process_requestqueue(ls); if (error) { log_debug(ls, "dlm_process_requestqueue error %d", error); goto fail; } error = dlm_recover_waiters_post(ls); if (error) { log_debug(ls, "dlm_recover_waiters_post error %d", error); goto fail; } dlm_grant_after_purge(ls); log_debug(ls, "dlm_recover %llx generation %u done: %u ms", (unsigned long long)rv->seq, ls->ls_generation, jiffies_to_msecs(jiffies - start)); mutex_unlock(&ls->ls_recoverd_active); dlm_lsop_recover_done(ls); return 0; fail: dlm_release_root_list(ls); log_debug(ls, "dlm_recover %llx error %d", (unsigned long long)rv->seq, error); mutex_unlock(&ls->ls_recoverd_active); return error; } /* The dlm_ls_start() that created the rv we take here may already have been stopped via dlm_ls_stop(); in that case we need to leave the RECOVERY_STOP flag set. 
*/ static void do_ls_recovery(struct dlm_ls *ls) { struct dlm_recover *rv = NULL; spin_lock(&ls->ls_recover_lock); rv = ls->ls_recover_args; ls->ls_recover_args = NULL; if (rv && ls->ls_recover_seq == rv->seq) clear_bit(LSFL_RECOVERY_STOP, &ls->ls_flags); spin_unlock(&ls->ls_recover_lock); if (rv) { ls_recover(ls, rv); kfree(rv->nodes); kfree(rv); } } static int dlm_recoverd(void *arg) { struct dlm_ls *ls; ls = dlm_find_lockspace_local(arg); if (!ls) { log_print("dlm_recoverd: no lockspace %p", arg); return -1; } while (!kthread_should_stop()) { set_current_state(TASK_INTERRUPTIBLE); if (!test_bit(LSFL_WORK, &ls->ls_flags)) schedule(); set_current_state(TASK_RUNNING); if (test_and_clear_bit(LSFL_WORK, &ls->ls_flags)) do_ls_recovery(ls); } dlm_put_lockspace(ls); return 0; } void dlm_recoverd_kick(struct dlm_ls *ls) { set_bit(LSFL_WORK, &ls->ls_flags); wake_up_process(ls->ls_recoverd_task); } int dlm_recoverd_start(struct dlm_ls *ls) { struct task_struct *p; int error = 0; p = kthread_run(dlm_recoverd, ls, "dlm_recoverd"); if (IS_ERR(p)) error = PTR_ERR(p); else ls->ls_recoverd_task = p; return error; } void dlm_recoverd_stop(struct dlm_ls *ls) { kthread_stop(ls->ls_recoverd_task); } void dlm_recoverd_suspend(struct dlm_ls *ls) { wake_up(&ls->ls_wait_general); mutex_lock(&ls->ls_recoverd_active); } void dlm_recoverd_resume(struct dlm_ls *ls) { mutex_unlock(&ls->ls_recoverd_active); }
xXminiWHOOPERxX/xXminiWHOOPERxX-Kernel-for-M4-MLG-
fs/dlm/recoverd.c
C
gpl-2.0
7,567
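dlm_recoverd_kick() above wakes the recovery thread by setting LSFL_WORK and calling wake_up_process(); the thread sleeps only while no work bit is pending. A rough userspace pthread analogue of that kick pattern, with invented names, a condition variable standing in for set_current_state()/schedule(), and (unlike kthread_should_stop()) pending work drained before the stop flag is honored, to keep the demo deterministic.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool work_pending, should_stop;

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	for (;;) {
		if (work_pending) {
			work_pending = false;	/* test_and_clear_bit(LSFL_WORK) analogue */
			pthread_mutex_unlock(&lock);
			puts("running one recovery pass");
			pthread_mutex_lock(&lock);
			continue;
		}
		if (should_stop)
			break;
		pthread_cond_wait(&cond, &lock);	/* schedule() analogue */
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

/* dlm_recoverd_kick() analogue: flag work, wake the sleeper. */
static void kick(void)
{
	pthread_mutex_lock(&lock);
	work_pending = true;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	kick();
	/* stop the worker, loosely mirroring kthread_stop() */
	pthread_mutex_lock(&lock);
	should_stop = true;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}

Build with -pthread; the flag-plus-wake split matters because a kick arriving while the worker is mid-pass must not be lost, which the boolean (like the kernel's work bit) guarantees.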
/* * linux/drivers/input/keyboard/omap-keypad.c * * OMAP Keypad Driver * * Copyright (C) 2003 Nokia Corporation * Written by Timo Teräs <ext-timo.teras@nokia.com> * * Added support for H2 & H3 Keypad * Copyright (C) 2004 Texas Instruments * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/types.h> #include <linux/input.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/mutex.h> #include <linux/errno.h> #include <linux/slab.h> #include <asm/gpio.h> #include <plat/keypad.h> #include <plat/menelaus.h> #include <asm/irq.h> #include <mach/hardware.h> #include <asm/io.h> #include <plat/mux.h> #undef NEW_BOARD_LEARNING_MODE static void omap_kp_tasklet(unsigned long); static void omap_kp_timer(unsigned long); static unsigned char keypad_state[8]; static DEFINE_MUTEX(kp_enable_mutex); static int kp_enable = 1; static int kp_cur_group = -1; struct omap_kp { struct input_dev *input; struct timer_list timer; int irq; unsigned int rows; unsigned int cols; unsigned long delay; unsigned int debounce; }; static DECLARE_TASKLET_DISABLED(kp_tasklet, omap_kp_tasklet, 0); static unsigned int *row_gpios; static unsigned int *col_gpios; #ifdef CONFIG_ARCH_OMAP2 static void set_col_gpio_val(struct omap_kp *omap_kp, u8 value) { int col; for (col = 0; col < omap_kp->cols; col++) gpio_set_value(col_gpios[col], value & (1 << col)); } static u8 get_row_gpio_val(struct omap_kp *omap_kp) { int row; u8 value = 0; for (row = 0; row < omap_kp->rows; row++) { if (gpio_get_value(row_gpios[row])) value |= (1 << row); } return value; } #else #define set_col_gpio_val(x, y) do {} while (0) #define get_row_gpio_val(x) 0 #endif static irqreturn_t omap_kp_interrupt(int irq, void *dev_id) { struct omap_kp *omap_kp = dev_id; /* disable keyboard interrupt and schedule for handling */ if (cpu_is_omap24xx()) { int i; for (i = 0; i < omap_kp->rows; i++) { int gpio_irq = gpio_to_irq(row_gpios[i]); /* * The interrupt which we're currently handling should * be disabled _nosync() to avoid deadlocks waiting * for this handler to complete. All others should * be disabled the regular way for SMP safety. 
*/ if (gpio_irq == irq) disable_irq_nosync(gpio_irq); else disable_irq(gpio_irq); } } else /* disable keyboard interrupt and schedule for handling */ omap_writew(1, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT); tasklet_schedule(&kp_tasklet); return IRQ_HANDLED; } static void omap_kp_timer(unsigned long data) { tasklet_schedule(&kp_tasklet); } static void omap_kp_scan_keypad(struct omap_kp *omap_kp, unsigned char *state) { int col = 0; /* read the keypad status */ if (cpu_is_omap24xx()) { /* read the keypad status */ for (col = 0; col < omap_kp->cols; col++) { set_col_gpio_val(omap_kp, ~(1 << col)); state[col] = ~(get_row_gpio_val(omap_kp)) & 0xff; } set_col_gpio_val(omap_kp, 0); } else { /* disable keyboard interrupt and schedule for handling */ omap_writew(1, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT); /* read the keypad status */ omap_writew(0xff, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBC); for (col = 0; col < omap_kp->cols; col++) { omap_writew(~(1 << col) & 0xff, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBC); udelay(omap_kp->delay); state[col] = ~omap_readw(OMAP1_MPUIO_BASE + OMAP_MPUIO_KBR_LATCH) & 0xff; } omap_writew(0x00, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBC); udelay(2); } } static void omap_kp_tasklet(unsigned long data) { struct omap_kp *omap_kp_data = (struct omap_kp *) data; unsigned short *keycodes = omap_kp_data->input->keycode; unsigned int row_shift = get_count_order(omap_kp_data->cols); unsigned char new_state[8], changed, key_down = 0; int col, row; int spurious = 0; /* check for any changes */ omap_kp_scan_keypad(omap_kp_data, new_state); /* check for changes and print those */ for (col = 0; col < omap_kp_data->cols; col++) { changed = new_state[col] ^ keypad_state[col]; key_down |= new_state[col]; if (changed == 0) continue; for (row = 0; row < omap_kp_data->rows; row++) { int key; if (!(changed & (1 << row))) continue; #ifdef NEW_BOARD_LEARNING_MODE printk(KERN_INFO "omap-keypad: key %d-%d %s\n", col, row, (new_state[col] & (1 << row)) ? 
"pressed" : "released"); #else key = keycodes[MATRIX_SCAN_CODE(row, col, row_shift)]; if (key < 0) { printk(KERN_WARNING "omap-keypad: Spurious key event %d-%d\n", col, row); /* We scan again after a couple of seconds */ spurious = 1; continue; } if (!(kp_cur_group == (key & GROUP_MASK) || kp_cur_group == -1)) continue; kp_cur_group = key & GROUP_MASK; input_report_key(omap_kp_data->input, key & ~GROUP_MASK, new_state[col] & (1 << row)); #endif } } input_sync(omap_kp_data->input); memcpy(keypad_state, new_state, sizeof(keypad_state)); if (key_down) { int delay = HZ / 20; /* some key is pressed - keep irq disabled and use timer * to poll the keypad */ if (spurious) delay = 2 * HZ; mod_timer(&omap_kp_data->timer, jiffies + delay); } else { /* enable interrupts */ if (cpu_is_omap24xx()) { int i; for (i = 0; i < omap_kp_data->rows; i++) enable_irq(gpio_to_irq(row_gpios[i])); } else { omap_writew(0, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT); kp_cur_group = -1; } } } static ssize_t omap_kp_enable_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%u\n", kp_enable); } static ssize_t omap_kp_enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int state; if (sscanf(buf, "%u", &state) != 1) return -EINVAL; if ((state != 1) && (state != 0)) return -EINVAL; mutex_lock(&kp_enable_mutex); if (state != kp_enable) { if (state) enable_irq(INT_KEYBOARD); else disable_irq(INT_KEYBOARD); kp_enable = state; } mutex_unlock(&kp_enable_mutex); return strnlen(buf, count); } static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, omap_kp_enable_show, omap_kp_enable_store); #ifdef CONFIG_PM static int omap_kp_suspend(struct platform_device *dev, pm_message_t state) { /* Nothing yet */ return 0; } static int omap_kp_resume(struct platform_device *dev) { /* Nothing yet */ return 0; } #else #define omap_kp_suspend NULL #define omap_kp_resume NULL #endif static int __devinit omap_kp_probe(struct platform_device *pdev) { struct omap_kp *omap_kp; struct input_dev *input_dev; struct omap_kp_platform_data *pdata = pdev->dev.platform_data; int i, col_idx, row_idx, irq_idx, ret; unsigned int row_shift, keycodemax; if (!pdata->rows || !pdata->cols || !pdata->keymap_data) { printk(KERN_ERR "No rows, cols or keymap_data from pdata\n"); return -EINVAL; } row_shift = get_count_order(pdata->cols); keycodemax = pdata->rows << row_shift; omap_kp = kzalloc(sizeof(struct omap_kp) + keycodemax * sizeof(unsigned short), GFP_KERNEL); input_dev = input_allocate_device(); if (!omap_kp || !input_dev) { kfree(omap_kp); input_free_device(input_dev); return -ENOMEM; } platform_set_drvdata(pdev, omap_kp); omap_kp->input = input_dev; /* Disable the interrupt for the MPUIO keyboard */ if (!cpu_is_omap24xx()) omap_writew(1, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT); input_dev->keycode = &omap_kp[1]; input_dev->keycodesize = sizeof(unsigned short); input_dev->keycodemax = keycodemax; if (pdata->rep) __set_bit(EV_REP, input_dev->evbit); if (pdata->delay) omap_kp->delay = pdata->delay; if (pdata->row_gpios && pdata->col_gpios) { row_gpios = pdata->row_gpios; col_gpios = pdata->col_gpios; } omap_kp->rows = pdata->rows; omap_kp->cols = pdata->cols; if (cpu_is_omap24xx()) { /* Cols: outputs */ for (col_idx = 0; col_idx < omap_kp->cols; col_idx++) { if (gpio_request(col_gpios[col_idx], "omap_kp_col") < 0) { printk(KERN_ERR "Failed to request" "GPIO%d for keypad\n", col_gpios[col_idx]); goto err1; } gpio_direction_output(col_gpios[col_idx], 0); } /* Rows: inputs */ for (row_idx = 0; 
row_idx < omap_kp->rows; row_idx++) { if (gpio_request(row_gpios[row_idx], "omap_kp_row") < 0) { printk(KERN_ERR "Failed to request" "GPIO%d for keypad\n", row_gpios[row_idx]); goto err2; } gpio_direction_input(row_gpios[row_idx]); } } else { col_idx = 0; row_idx = 0; } setup_timer(&omap_kp->timer, omap_kp_timer, (unsigned long)omap_kp); /* get the irq and init timer*/ tasklet_enable(&kp_tasklet); kp_tasklet.data = (unsigned long) omap_kp; ret = device_create_file(&pdev->dev, &dev_attr_enable); if (ret < 0) goto err2; /* setup input device */ __set_bit(EV_KEY, input_dev->evbit); matrix_keypad_build_keymap(pdata->keymap_data, row_shift, input_dev->keycode, input_dev->keybit); input_dev->name = "omap-keypad"; input_dev->phys = "omap-keypad/input0"; input_dev->dev.parent = &pdev->dev; input_dev->id.bustype = BUS_HOST; input_dev->id.vendor = 0x0001; input_dev->id.product = 0x0001; input_dev->id.version = 0x0100; ret = input_register_device(omap_kp->input); if (ret < 0) { printk(KERN_ERR "Unable to register omap-keypad input device\n"); goto err3; } if (pdata->dbounce) omap_writew(0xff, OMAP1_MPUIO_BASE + OMAP_MPUIO_GPIO_DEBOUNCING); /* scan current status and enable interrupt */ omap_kp_scan_keypad(omap_kp, keypad_state); if (!cpu_is_omap24xx()) { omap_kp->irq = platform_get_irq(pdev, 0); if (omap_kp->irq >= 0) { if (request_irq(omap_kp->irq, omap_kp_interrupt, 0, "omap-keypad", omap_kp) < 0) goto err4; } omap_writew(0, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT); } else { for (irq_idx = 0; irq_idx < omap_kp->rows; irq_idx++) { if (request_irq(gpio_to_irq(row_gpios[irq_idx]), omap_kp_interrupt, IRQF_TRIGGER_FALLING, "omap-keypad", omap_kp) < 0) goto err5; } } return 0; err5: for (i = irq_idx - 1; i >=0; i--) free_irq(row_gpios[i], omap_kp); err4: input_unregister_device(omap_kp->input); input_dev = NULL; err3: device_remove_file(&pdev->dev, &dev_attr_enable); err2: for (i = row_idx - 1; i >=0; i--) gpio_free(row_gpios[i]); err1: for (i = col_idx - 1; i >=0; i--) gpio_free(col_gpios[i]); kfree(omap_kp); input_free_device(input_dev); return -EINVAL; } static int __devexit omap_kp_remove(struct platform_device *pdev) { struct omap_kp *omap_kp = platform_get_drvdata(pdev); /* disable keypad interrupt handling */ tasklet_disable(&kp_tasklet); if (cpu_is_omap24xx()) { int i; for (i = 0; i < omap_kp->cols; i++) gpio_free(col_gpios[i]); for (i = 0; i < omap_kp->rows; i++) { gpio_free(row_gpios[i]); free_irq(gpio_to_irq(row_gpios[i]), omap_kp); } } else { omap_writew(1, OMAP1_MPUIO_BASE + OMAP_MPUIO_KBD_MASKIT); free_irq(omap_kp->irq, omap_kp); } del_timer_sync(&omap_kp->timer); tasklet_kill(&kp_tasklet); /* unregister everything */ input_unregister_device(omap_kp->input); kfree(omap_kp); return 0; } static struct platform_driver omap_kp_driver = { .probe = omap_kp_probe, .remove = __devexit_p(omap_kp_remove), .suspend = omap_kp_suspend, .resume = omap_kp_resume, .driver = { .name = "omap-keypad", .owner = THIS_MODULE, }, }; module_platform_driver(omap_kp_driver); MODULE_AUTHOR("Timo Teräs"); MODULE_DESCRIPTION("OMAP Keypad Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:omap-keypad");
charles1018/kernel_msm
drivers/input/keyboard/omap-keypad.c
C
gpl-2.0
12,312
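omap_kp_tasklet() above detects key transitions by XOR-ing each freshly scanned column bitmask against the previous scan and reporting every changed row bit as a press or release. A self-contained sketch of just that diffing step, with illustrative matrix dimensions and without the GPIO/MPUIO readout that produces the bitmasks.

#include <stdio.h>

#define COLS 3
#define ROWS 4

static unsigned char prev_state[COLS];

static void report_changes(const unsigned char *new_state)
{
	for (int col = 0; col < COLS; col++) {
		/* each set bit marks a row whose state flipped */
		unsigned char changed = new_state[col] ^ prev_state[col];

		for (int row = 0; row < ROWS; row++) {
			if (!(changed & (1 << row)))
				continue;
			printf("key %d-%d %s\n", col, row,
			       (new_state[col] & (1 << row)) ?
			       "pressed" : "released");
		}
		prev_state[col] = new_state[col];
	}
}

int main(void)
{
	unsigned char scan1[COLS] = { 0x01, 0x00, 0x00 }; /* col0/row0 down */
	unsigned char scan2[COLS] = { 0x00, 0x04, 0x00 }; /* released; col1/row2 down */

	report_changes(scan1);	/* key 0-0 pressed */
	report_changes(scan2);	/* key 0-0 released, key 1-2 pressed */
	return 0;
}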
/* * i2c tv tuner chip device driver * controls all those simple 4-control-bytes style tuners. * * This "tuner-simple" module was split apart from the original "tuner" module. */ #include <linux/delay.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <media/tuner.h> #include <media/v4l2-common.h> #include <media/tuner-types.h> #include "tuner-i2c.h" #include "tuner-simple.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "enable verbose debug messages"); #define TUNER_SIMPLE_MAX 64 static unsigned int simple_devcount; static int offset; module_param(offset, int, 0664); MODULE_PARM_DESC(offset, "Allows to specify an offset for tuner"); static unsigned int atv_input[TUNER_SIMPLE_MAX] = \ { [0 ... (TUNER_SIMPLE_MAX-1)] = 0 }; static unsigned int dtv_input[TUNER_SIMPLE_MAX] = \ { [0 ... (TUNER_SIMPLE_MAX-1)] = 0 }; module_param_array(atv_input, int, NULL, 0644); module_param_array(dtv_input, int, NULL, 0644); MODULE_PARM_DESC(atv_input, "specify atv rf input, 0 for autoselect"); MODULE_PARM_DESC(dtv_input, "specify dtv rf input, 0 for autoselect"); /* ---------------------------------------------------------------------- */ /* tv standard selection for Temic 4046 FM5 this value takes the low bits of control byte 2 from datasheet Rev.01, Feb.00 standard BG I L L2 D picture IF 38.9 38.9 38.9 33.95 38.9 sound 1 33.4 32.9 32.4 40.45 32.4 sound 2 33.16 NICAM 33.05 32.348 33.05 33.05 */ #define TEMIC_SET_PAL_I 0x05 #define TEMIC_SET_PAL_DK 0x09 #define TEMIC_SET_PAL_L 0x0a /* SECAM ? */ #define TEMIC_SET_PAL_L2 0x0b /* change IF ! */ #define TEMIC_SET_PAL_BG 0x0c /* tv tuner system standard selection for Philips FQ1216ME this value takes the low bits of control byte 2 from datasheet "1999 Nov 16" (supersedes "1999 Mar 23") standard BG DK I L L` picture carrier 38.90 38.90 38.90 38.90 33.95 colour 34.47 34.47 34.47 34.47 38.38 sound 1 33.40 32.40 32.90 32.40 40.45 sound 2 33.16 - - - - NICAM 33.05 33.05 32.35 33.05 39.80 */ #define PHILIPS_SET_PAL_I 0x01 /* Bit 2 always zero !*/ #define PHILIPS_SET_PAL_BGDK 0x09 #define PHILIPS_SET_PAL_L2 0x0a #define PHILIPS_SET_PAL_L 0x0b /* system switching for Philips FI1216MF MK2 from datasheet "1996 Jul 09", standard BG L L' picture carrier 38.90 38.90 33.95 colour 34.47 34.37 38.38 sound 1 33.40 32.40 40.45 sound 2 33.16 - - NICAM 33.05 33.05 39.80 */ #define PHILIPS_MF_SET_STD_BG 0x01 /* Bit 2 must be zero, Bit 3 is system output */ #define PHILIPS_MF_SET_STD_L 0x03 /* Used on Secam France */ #define PHILIPS_MF_SET_STD_LC 0x02 /* Used on SECAM L' */ /* Control byte */ #define TUNER_RATIO_MASK 0x06 /* Bit cb1:cb2 */ #define TUNER_RATIO_SELECT_50 0x00 #define TUNER_RATIO_SELECT_32 0x02 #define TUNER_RATIO_SELECT_166 0x04 #define TUNER_RATIO_SELECT_62 0x06 #define TUNER_CHARGE_PUMP 0x40 /* Bit cb6 */ /* Status byte */ #define TUNER_POR 0x80 #define TUNER_FL 0x40 #define TUNER_MODE 0x38 #define TUNER_AFC 0x07 #define TUNER_SIGNAL 0x07 #define TUNER_STEREO 0x10 #define TUNER_PLL_LOCKED 0x40 #define TUNER_STEREO_MK3 0x04 static DEFINE_MUTEX(tuner_simple_list_mutex); static LIST_HEAD(hybrid_tuner_instance_list); struct tuner_simple_priv { unsigned int nr; u16 last_div; struct tuner_i2c_props i2c_props; struct list_head hybrid_tuner_instance_list; unsigned int type; struct tunertype *tun; u32 frequency; u32 bandwidth; }; /* ---------------------------------------------------------------------- */ static int tuner_read_status(struct dvb_frontend *fe) { struct tuner_simple_priv *priv = fe->tuner_priv; unsigned char byte; if (1 
!= tuner_i2c_xfer_recv(&priv->i2c_props, &byte, 1)) return 0; return byte; } static inline int tuner_signal(const int status) { return (status & TUNER_SIGNAL) << 13; } static inline int tuner_stereo(const int type, const int status) { switch (type) { case TUNER_PHILIPS_FM1216ME_MK3: case TUNER_PHILIPS_FM1236_MK3: case TUNER_PHILIPS_FM1256_IH3: case TUNER_LG_NTSC_TAPE: case TUNER_TCL_MF02GIP_5N: return ((status & TUNER_SIGNAL) == TUNER_STEREO_MK3); case TUNER_PHILIPS_FM1216MK5: return status | TUNER_STEREO; default: return status & TUNER_STEREO; } } static inline int tuner_islocked(const int status) { return (status & TUNER_FL); } static inline int tuner_afcstatus(const int status) { return (status & TUNER_AFC) - 2; } static int simple_get_status(struct dvb_frontend *fe, u32 *status) { struct tuner_simple_priv *priv = fe->tuner_priv; int tuner_status; if (priv->i2c_props.adap == NULL) return -EINVAL; tuner_status = tuner_read_status(fe); *status = 0; if (tuner_islocked(tuner_status)) *status = TUNER_STATUS_LOCKED; if (tuner_stereo(priv->type, tuner_status)) *status |= TUNER_STATUS_STEREO; tuner_dbg("AFC Status: %d\n", tuner_afcstatus(tuner_status)); return 0; } static int simple_get_rf_strength(struct dvb_frontend *fe, u16 *strength) { struct tuner_simple_priv *priv = fe->tuner_priv; int signal; if (priv->i2c_props.adap == NULL) return -EINVAL; signal = tuner_signal(tuner_read_status(fe)); *strength = signal; tuner_dbg("Signal strength: %d\n", signal); return 0; } /* ---------------------------------------------------------------------- */ static inline char *tuner_param_name(enum param_type type) { char *name; switch (type) { case TUNER_PARAM_TYPE_RADIO: name = "radio"; break; case TUNER_PARAM_TYPE_PAL: name = "pal"; break; case TUNER_PARAM_TYPE_SECAM: name = "secam"; break; case TUNER_PARAM_TYPE_NTSC: name = "ntsc"; break; case TUNER_PARAM_TYPE_DIGITAL: name = "digital"; break; default: name = "unknown"; break; } return name; } static struct tuner_params *simple_tuner_params(struct dvb_frontend *fe, enum param_type desired_type) { struct tuner_simple_priv *priv = fe->tuner_priv; struct tunertype *tun = priv->tun; int i; for (i = 0; i < tun->count; i++) if (desired_type == tun->params[i].type) break; /* use default tuner params if desired_type not available */ if (i == tun->count) { tuner_dbg("desired params (%s) undefined for tuner %d\n", tuner_param_name(desired_type), priv->type); i = 0; } tuner_dbg("using tuner params #%d (%s)\n", i, tuner_param_name(tun->params[i].type)); return &tun->params[i]; } static int simple_config_lookup(struct dvb_frontend *fe, struct tuner_params *t_params, unsigned *frequency, u8 *config, u8 *cb) { struct tuner_simple_priv *priv = fe->tuner_priv; int i; for (i = 0; i < t_params->count; i++) { if (*frequency > t_params->ranges[i].limit) continue; break; } if (i == t_params->count) { tuner_dbg("frequency out of range (%d > %d)\n", *frequency, t_params->ranges[i - 1].limit); *frequency = t_params->ranges[--i].limit; } *config = t_params->ranges[i].config; *cb = t_params->ranges[i].cb; tuner_dbg("freq = %d.%02d (%d), range = %d, " "config = 0x%02x, cb = 0x%02x\n", *frequency / 16, *frequency % 16 * 100 / 16, *frequency, i, *config, *cb); return i; } /* ---------------------------------------------------------------------- */ static void simple_set_rf_input(struct dvb_frontend *fe, u8 *config, u8 *cb, unsigned int rf) { struct tuner_simple_priv *priv = fe->tuner_priv; switch (priv->type) { case TUNER_PHILIPS_TUV1236D: switch (rf) { case 1: *cb |= 0x08; break; 
default: *cb &= ~0x08; break; } break; case TUNER_PHILIPS_FCV1236D: switch (rf) { case 1: *cb |= 0x01; break; default: *cb &= ~0x01; break; } break; default: break; } } static int simple_std_setup(struct dvb_frontend *fe, struct analog_parameters *params, u8 *config, u8 *cb) { struct tuner_simple_priv *priv = fe->tuner_priv; int rc; /* tv norm specific stuff for multi-norm tuners */ switch (priv->type) { case TUNER_PHILIPS_SECAM: /* FI1216MF */ /* 0x01 -> ??? no change ??? */ /* 0x02 -> PAL BDGHI / SECAM L */ /* 0x04 -> ??? PAL others / SECAM others ??? */ *cb &= ~0x03; if (params->std & V4L2_STD_SECAM_L) /* also valid for V4L2_STD_SECAM */ *cb |= PHILIPS_MF_SET_STD_L; else if (params->std & V4L2_STD_SECAM_LC) *cb |= PHILIPS_MF_SET_STD_LC; else /* V4L2_STD_B|V4L2_STD_GH */ *cb |= PHILIPS_MF_SET_STD_BG; break; case TUNER_TEMIC_4046FM5: *cb &= ~0x0f; if (params->std & V4L2_STD_PAL_BG) { *cb |= TEMIC_SET_PAL_BG; } else if (params->std & V4L2_STD_PAL_I) { *cb |= TEMIC_SET_PAL_I; } else if (params->std & V4L2_STD_PAL_DK) { *cb |= TEMIC_SET_PAL_DK; } else if (params->std & V4L2_STD_SECAM_L) { *cb |= TEMIC_SET_PAL_L; } break; case TUNER_PHILIPS_FQ1216ME: *cb &= ~0x0f; if (params->std & (V4L2_STD_PAL_BG|V4L2_STD_PAL_DK)) { *cb |= PHILIPS_SET_PAL_BGDK; } else if (params->std & V4L2_STD_PAL_I) { *cb |= PHILIPS_SET_PAL_I; } else if (params->std & V4L2_STD_SECAM_L) { *cb |= PHILIPS_SET_PAL_L; } break; case TUNER_PHILIPS_FCV1236D: /* 0x00 -> ATSC antenna input 1 */ /* 0x01 -> ATSC antenna input 2 */ /* 0x02 -> NTSC antenna input 1 */ /* 0x03 -> NTSC antenna input 2 */ *cb &= ~0x03; if (!(params->std & V4L2_STD_ATSC)) *cb |= 2; break; case TUNER_MICROTUNE_4042FI5: /* Set the charge pump for fast tuning */ *config |= TUNER_CHARGE_PUMP; break; case TUNER_PHILIPS_TUV1236D: { struct tuner_i2c_props i2c = priv->i2c_props; /* 0x40 -> ATSC antenna input 1 */ /* 0x48 -> ATSC antenna input 2 */ /* 0x00 -> NTSC antenna input 1 */ /* 0x08 -> NTSC antenna input 2 */ u8 buffer[4] = { 0x14, 0x00, 0x17, 0x00}; *cb &= ~0x40; if (params->std & V4L2_STD_ATSC) { *cb |= 0x40; buffer[1] = 0x04; } /* set to the correct mode (analog or digital) */ i2c.addr = 0x0a; rc = tuner_i2c_xfer_send(&i2c, &buffer[0], 2); if (2 != rc) tuner_warn("i2c i/o error: rc == %d " "(should be 2)\n", rc); rc = tuner_i2c_xfer_send(&i2c, &buffer[2], 2); if (2 != rc) tuner_warn("i2c i/o error: rc == %d " "(should be 2)\n", rc); break; } } if (atv_input[priv->nr]) simple_set_rf_input(fe, config, cb, atv_input[priv->nr]); return 0; } static int simple_set_aux_byte(struct dvb_frontend *fe, u8 config, u8 aux) { struct tuner_simple_priv *priv = fe->tuner_priv; int rc; u8 buffer[2]; buffer[0] = (config & ~0x38) | 0x18; buffer[1] = aux; tuner_dbg("setting aux byte: 0x%02x 0x%02x\n", buffer[0], buffer[1]); rc = tuner_i2c_xfer_send(&priv->i2c_props, buffer, 2); if (2 != rc) tuner_warn("i2c i/o error: rc == %d (should be 2)\n", rc); return rc == 2 ? 
0 : rc; } static int simple_post_tune(struct dvb_frontend *fe, u8 *buffer, u16 div, u8 config, u8 cb) { struct tuner_simple_priv *priv = fe->tuner_priv; int rc; switch (priv->type) { case TUNER_LG_TDVS_H06XF: simple_set_aux_byte(fe, config, 0x20); break; case TUNER_PHILIPS_FQ1216LME_MK3: simple_set_aux_byte(fe, config, 0x60); /* External AGC */ break; case TUNER_MICROTUNE_4042FI5: { /* FIXME - this may also work for other tuners */ unsigned long timeout = jiffies + msecs_to_jiffies(1); u8 status_byte = 0; /* Wait until the PLL locks */ for (;;) { if (time_after(jiffies, timeout)) return 0; rc = tuner_i2c_xfer_recv(&priv->i2c_props, &status_byte, 1); if (1 != rc) { tuner_warn("i2c i/o read error: rc == %d " "(should be 1)\n", rc); break; } if (status_byte & TUNER_PLL_LOCKED) break; udelay(10); } /* Set the charge pump for optimized phase noise figure */ config &= ~TUNER_CHARGE_PUMP; buffer[0] = (div>>8) & 0x7f; buffer[1] = div & 0xff; buffer[2] = config; buffer[3] = cb; tuner_dbg("tv 0x%02x 0x%02x 0x%02x 0x%02x\n", buffer[0], buffer[1], buffer[2], buffer[3]); rc = tuner_i2c_xfer_send(&priv->i2c_props, buffer, 4); if (4 != rc) tuner_warn("i2c i/o error: rc == %d " "(should be 4)\n", rc); break; } } return 0; } static int simple_radio_bandswitch(struct dvb_frontend *fe, u8 *buffer) { struct tuner_simple_priv *priv = fe->tuner_priv; switch (priv->type) { case TUNER_TENA_9533_DI: case TUNER_YMEC_TVF_5533MF: tuner_dbg("This tuner doesn't have FM. " "Most cards have a TEA5767 for FM\n"); return 0; case TUNER_PHILIPS_FM1216ME_MK3: case TUNER_PHILIPS_FM1236_MK3: case TUNER_PHILIPS_FMD1216ME_MK3: case TUNER_PHILIPS_FMD1216MEX_MK3: case TUNER_LG_NTSC_TAPE: case TUNER_PHILIPS_FM1256_IH3: case TUNER_TCL_MF02GIP_5N: buffer[3] = 0x19; break; case TUNER_PHILIPS_FM1216MK5: buffer[2] = 0x88; buffer[3] = 0x09; break; case TUNER_TNF_5335MF: buffer[3] = 0x11; break; case TUNER_LG_PAL_FM: buffer[3] = 0xa5; break; case TUNER_THOMSON_DTT761X: buffer[3] = 0x39; break; case TUNER_PHILIPS_FQ1216LME_MK3: case TUNER_PHILIPS_FQ1236_MK5: tuner_err("This tuner doesn't have FM\n"); /* Set the low band for sanity, since it covers 88-108 MHz */ buffer[3] = 0x01; break; case TUNER_MICROTUNE_4049FM5: default: buffer[3] = 0xa4; break; } return 0; } /* ---------------------------------------------------------------------- */ static int simple_set_tv_freq(struct dvb_frontend *fe, struct analog_parameters *params) { struct tuner_simple_priv *priv = fe->tuner_priv; u8 config, cb; u16 div; u8 buffer[4]; int rc, IFPCoff, i; enum param_type desired_type; struct tuner_params *t_params; /* IFPCoff = Video Intermediate Frequency - Vif: 940 =16*58.75 NTSC/J (Japan) 732 =16*45.75 M/N STD 704 =16*44 ATSC (at DVB code) 632 =16*39.50 I U.K. 
622.4=16*38.90 B/G D/K I, L STD 592 =16*37.00 D China 590 =16.36.875 B Australia 543.2=16*33.95 L' STD 171.2=16*10.70 FM Radio (at set_radio_freq) */ if (params->std == V4L2_STD_NTSC_M_JP) { IFPCoff = 940; desired_type = TUNER_PARAM_TYPE_NTSC; } else if ((params->std & V4L2_STD_MN) && !(params->std & ~V4L2_STD_MN)) { IFPCoff = 732; desired_type = TUNER_PARAM_TYPE_NTSC; } else if (params->std == V4L2_STD_SECAM_LC) { IFPCoff = 543; desired_type = TUNER_PARAM_TYPE_SECAM; } else { IFPCoff = 623; desired_type = TUNER_PARAM_TYPE_PAL; } t_params = simple_tuner_params(fe, desired_type); i = simple_config_lookup(fe, t_params, &params->frequency, &config, &cb); div = params->frequency + IFPCoff + offset; tuner_dbg("Freq= %d.%02d MHz, V_IF=%d.%02d MHz, " "Offset=%d.%02d MHz, div=%0d\n", params->frequency / 16, params->frequency % 16 * 100 / 16, IFPCoff / 16, IFPCoff % 16 * 100 / 16, offset / 16, offset % 16 * 100 / 16, div); /* tv norm specific stuff for multi-norm tuners */ simple_std_setup(fe, params, &config, &cb); if (t_params->cb_first_if_lower_freq && div < priv->last_div) { buffer[0] = config; buffer[1] = cb; buffer[2] = (div>>8) & 0x7f; buffer[3] = div & 0xff; } else { buffer[0] = (div>>8) & 0x7f; buffer[1] = div & 0xff; buffer[2] = config; buffer[3] = cb; } priv->last_div = div; if (t_params->has_tda9887) { struct v4l2_priv_tun_config tda9887_cfg; int tda_config = 0; int is_secam_l = (params->std & (V4L2_STD_SECAM_L | V4L2_STD_SECAM_LC)) && !(params->std & ~(V4L2_STD_SECAM_L | V4L2_STD_SECAM_LC)); tda9887_cfg.tuner = TUNER_TDA9887; tda9887_cfg.priv = &tda_config; if (params->std == V4L2_STD_SECAM_LC) { if (t_params->port1_active ^ t_params->port1_invert_for_secam_lc) tda_config |= TDA9887_PORT1_ACTIVE; if (t_params->port2_active ^ t_params->port2_invert_for_secam_lc) tda_config |= TDA9887_PORT2_ACTIVE; } else { if (t_params->port1_active) tda_config |= TDA9887_PORT1_ACTIVE; if (t_params->port2_active) tda_config |= TDA9887_PORT2_ACTIVE; } if (t_params->intercarrier_mode) tda_config |= TDA9887_INTERCARRIER; if (is_secam_l) { if (i == 0 && t_params->default_top_secam_low) tda_config |= TDA9887_TOP(t_params->default_top_secam_low); else if (i == 1 && t_params->default_top_secam_mid) tda_config |= TDA9887_TOP(t_params->default_top_secam_mid); else if (t_params->default_top_secam_high) tda_config |= TDA9887_TOP(t_params->default_top_secam_high); } else { if (i == 0 && t_params->default_top_low) tda_config |= TDA9887_TOP(t_params->default_top_low); else if (i == 1 && t_params->default_top_mid) tda_config |= TDA9887_TOP(t_params->default_top_mid); else if (t_params->default_top_high) tda_config |= TDA9887_TOP(t_params->default_top_high); } if (t_params->default_pll_gating_18) tda_config |= TDA9887_GATING_18; i2c_clients_command(priv->i2c_props.adap, TUNER_SET_CONFIG, &tda9887_cfg); } tuner_dbg("tv 0x%02x 0x%02x 0x%02x 0x%02x\n", buffer[0], buffer[1], buffer[2], buffer[3]); rc = tuner_i2c_xfer_send(&priv->i2c_props, buffer, 4); if (4 != rc) tuner_warn("i2c i/o error: rc == %d (should be 4)\n", rc); simple_post_tune(fe, &buffer[0], div, config, cb); return 0; } static int simple_set_radio_freq(struct dvb_frontend *fe, struct analog_parameters *params) { struct tunertype *tun; struct tuner_simple_priv *priv = fe->tuner_priv; u8 buffer[4]; u16 div; int rc, j; struct tuner_params *t_params; unsigned int freq = params->frequency; tun = priv->tun; for (j = tun->count-1; j > 0; j--) if (tun->params[j].type == TUNER_PARAM_TYPE_RADIO) break; /* default t_params (j=0) will be used if desired type wasn't found 
*/ t_params = &tun->params[j]; /* Select Radio 1st IF used */ switch (t_params->radio_if) { case 0: /* 10.7 MHz */ freq += (unsigned int)(10.7*16000); break; case 1: /* 33.3 MHz */ freq += (unsigned int)(33.3*16000); break; case 2: /* 41.3 MHz */ freq += (unsigned int)(41.3*16000); break; default: tuner_warn("Unsupported radio_if value %d\n", t_params->radio_if); return 0; } buffer[2] = (t_params->ranges[0].config & ~TUNER_RATIO_MASK) | TUNER_RATIO_SELECT_50; /* 50 kHz step */ /* Bandswitch byte */ simple_radio_bandswitch(fe, &buffer[0]); /* Convert from 1/16 kHz V4L steps to 1/20 MHz (=50 kHz) PLL steps freq * (1 MHz / 16000 V4L steps) * (20 PLL steps / 1 MHz) = freq * (1/800) */ div = (freq + 400) / 800; if (t_params->cb_first_if_lower_freq && div < priv->last_div) { buffer[0] = buffer[2]; buffer[1] = buffer[3]; buffer[2] = (div>>8) & 0x7f; buffer[3] = div & 0xff; } else { buffer[0] = (div>>8) & 0x7f; buffer[1] = div & 0xff; } tuner_dbg("radio 0x%02x 0x%02x 0x%02x 0x%02x\n", buffer[0], buffer[1], buffer[2], buffer[3]); priv->last_div = div; if (t_params->has_tda9887) { int config = 0; struct v4l2_priv_tun_config tda9887_cfg; tda9887_cfg.tuner = TUNER_TDA9887; tda9887_cfg.priv = &config; if (t_params->port1_active && !t_params->port1_fm_high_sensitivity) config |= TDA9887_PORT1_ACTIVE; if (t_params->port2_active && !t_params->port2_fm_high_sensitivity) config |= TDA9887_PORT2_ACTIVE; if (t_params->intercarrier_mode) config |= TDA9887_INTERCARRIER; /* if (t_params->port1_set_for_fm_mono) config &= ~TDA9887_PORT1_ACTIVE;*/ if (t_params->fm_gain_normal) config |= TDA9887_GAIN_NORMAL; if (t_params->radio_if == 2) config |= TDA9887_RIF_41_3; i2c_clients_command(priv->i2c_props.adap, TUNER_SET_CONFIG, &tda9887_cfg); } rc = tuner_i2c_xfer_send(&priv->i2c_props, buffer, 4); if (4 != rc) tuner_warn("i2c i/o error: rc == %d (should be 4)\n", rc); /* Write AUX byte */ switch (priv->type) { case TUNER_PHILIPS_FM1216ME_MK3: buffer[2] = 0x98; buffer[3] = 0x20; /* set TOP AGC */ rc = tuner_i2c_xfer_send(&priv->i2c_props, buffer, 4); if (4 != rc) tuner_warn("i2c i/o error: rc == %d (should be 4)\n", rc); break; } return 0; } static int simple_set_params(struct dvb_frontend *fe, struct analog_parameters *params) { struct tuner_simple_priv *priv = fe->tuner_priv; int ret = -EINVAL; if (priv->i2c_props.adap == NULL) return -EINVAL; switch (params->mode) { case V4L2_TUNER_RADIO: ret = simple_set_radio_freq(fe, params); priv->frequency = params->frequency * 125 / 2; break; case V4L2_TUNER_ANALOG_TV: case V4L2_TUNER_DIGITAL_TV: ret = simple_set_tv_freq(fe, params); priv->frequency = params->frequency * 62500; break; } priv->bandwidth = 0; return ret; } static void simple_set_dvb(struct dvb_frontend *fe, u8 *buf, const u32 delsys, const u32 frequency, const u32 bandwidth) { struct tuner_simple_priv *priv = fe->tuner_priv; switch (priv->type) { case TUNER_PHILIPS_FMD1216ME_MK3: case TUNER_PHILIPS_FMD1216MEX_MK3: if (bandwidth == 8000000 && frequency >= 158870000) buf[3] |= 0x08; break; case TUNER_PHILIPS_TD1316: /* determine band */ buf[3] |= (frequency < 161000000) ? 1 : (frequency < 444000000) ?
2 : 4; /* setup PLL filter */ if (bandwidth == 8000000) buf[3] |= 1 << 3; break; case TUNER_PHILIPS_TUV1236D: case TUNER_PHILIPS_FCV1236D: { unsigned int new_rf; if (dtv_input[priv->nr]) new_rf = dtv_input[priv->nr]; else switch (delsys) { case SYS_DVBC_ANNEX_B: new_rf = 1; break; case SYS_ATSC: default: new_rf = 0; break; } simple_set_rf_input(fe, &buf[2], &buf[3], new_rf); break; } default: break; } } static u32 simple_dvb_configure(struct dvb_frontend *fe, u8 *buf, const u32 delsys, const u32 freq, const u32 bw) { /* This function returns the tuned frequency on success, 0 on error */ struct tuner_simple_priv *priv = fe->tuner_priv; struct tunertype *tun = priv->tun; static struct tuner_params *t_params; u8 config, cb; u32 div; int ret; u32 frequency = freq / 62500; if (!tun->stepsize) { /* tuner-core was loaded before the digital tuner was * configured and somehow picked the wrong tuner type */ tuner_err("attempt to treat tuner %d (%s) as digital tuner " "without stepsize defined.\n", priv->type, priv->tun->name); return 0; /* failure */ } t_params = simple_tuner_params(fe, TUNER_PARAM_TYPE_DIGITAL); ret = simple_config_lookup(fe, t_params, &frequency, &config, &cb); if (ret < 0) return 0; /* failure */ div = ((frequency + t_params->iffreq) * 62500 + offset + tun->stepsize/2) / tun->stepsize; buf[0] = div >> 8; buf[1] = div & 0xff; buf[2] = config; buf[3] = cb; simple_set_dvb(fe, buf, delsys, freq, bw); tuner_dbg("%s: div=%d | buf=0x%02x,0x%02x,0x%02x,0x%02x\n", tun->name, div, buf[0], buf[1], buf[2], buf[3]); /* calculate the frequency we set it to */ return (div * tun->stepsize) - t_params->iffreq; } static int simple_dvb_calc_regs(struct dvb_frontend *fe, u8 *buf, int buf_len) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; u32 delsys = c->delivery_system; u32 bw = c->bandwidth_hz; struct tuner_simple_priv *priv = fe->tuner_priv; u32 frequency; if (buf_len < 5) return -EINVAL; frequency = simple_dvb_configure(fe, buf+1, delsys, c->frequency, bw); if (frequency == 0) return -EINVAL; buf[0] = priv->i2c_props.addr; priv->frequency = frequency; priv->bandwidth = c->bandwidth_hz; return 5; } static int simple_dvb_set_params(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; u32 delsys = c->delivery_system; u32 bw = c->bandwidth_hz; u32 freq = c->frequency; struct tuner_simple_priv *priv = fe->tuner_priv; u32 frequency; u32 prev_freq, prev_bw; int ret; u8 buf[5]; if (priv->i2c_props.adap == NULL) return -EINVAL; prev_freq = priv->frequency; prev_bw = priv->bandwidth; frequency = simple_dvb_configure(fe, buf+1, delsys, freq, bw); if (frequency == 0) return -EINVAL; buf[0] = priv->i2c_props.addr; priv->frequency = frequency; priv->bandwidth = bw; /* put analog demod in standby when tuning digital */ if (fe->ops.analog_ops.standby) fe->ops.analog_ops.standby(fe); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); /* buf[0] contains the i2c address, but * * we already have it in i2c_props.addr */ ret = tuner_i2c_xfer_send(&priv->i2c_props, buf+1, 4); if (ret != 4) goto fail; return 0; fail: /* calc_regs sets frequency and bandwidth. 
if we failed, unset them */ priv->frequency = prev_freq; priv->bandwidth = prev_bw; return ret; } static int simple_init(struct dvb_frontend *fe) { struct tuner_simple_priv *priv = fe->tuner_priv; if (priv->i2c_props.adap == NULL) return -EINVAL; if (priv->tun->initdata) { int ret; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); ret = tuner_i2c_xfer_send(&priv->i2c_props, priv->tun->initdata + 1, priv->tun->initdata[0]); if (ret != priv->tun->initdata[0]) return ret; } return 0; } static int simple_sleep(struct dvb_frontend *fe) { struct tuner_simple_priv *priv = fe->tuner_priv; if (priv->i2c_props.adap == NULL) return -EINVAL; if (priv->tun->sleepdata) { int ret; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); ret = tuner_i2c_xfer_send(&priv->i2c_props, priv->tun->sleepdata + 1, priv->tun->sleepdata[0]); if (ret != priv->tun->sleepdata[0]) return ret; } return 0; } static int simple_release(struct dvb_frontend *fe) { struct tuner_simple_priv *priv = fe->tuner_priv; mutex_lock(&tuner_simple_list_mutex); if (priv) hybrid_tuner_release_state(priv); mutex_unlock(&tuner_simple_list_mutex); fe->tuner_priv = NULL; return 0; } static int simple_get_frequency(struct dvb_frontend *fe, u32 *frequency) { struct tuner_simple_priv *priv = fe->tuner_priv; *frequency = priv->frequency; return 0; } static int simple_get_bandwidth(struct dvb_frontend *fe, u32 *bandwidth) { struct tuner_simple_priv *priv = fe->tuner_priv; *bandwidth = priv->bandwidth; return 0; } static struct dvb_tuner_ops simple_tuner_ops = { .init = simple_init, .sleep = simple_sleep, .set_analog_params = simple_set_params, .set_params = simple_dvb_set_params, .calc_regs = simple_dvb_calc_regs, .release = simple_release, .get_frequency = simple_get_frequency, .get_bandwidth = simple_get_bandwidth, .get_status = simple_get_status, .get_rf_strength = simple_get_rf_strength, }; struct dvb_frontend *simple_tuner_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c_adap, u8 i2c_addr, unsigned int type) { struct tuner_simple_priv *priv = NULL; int instance; if (type >= tuner_count) { printk(KERN_WARNING "%s: invalid tuner type: %d (max: %d)\n", __func__, type, tuner_count-1); return NULL; } /* If i2c_adap is set, check that the tuner is at the correct address. * Otherwise, if i2c_adap is NULL, the tuner will be programmed directly * by the digital demod via calc_regs. */ if (i2c_adap != NULL) { u8 b[1]; struct i2c_msg msg = { .addr = i2c_addr, .flags = I2C_M_RD, .buf = b, .len = 1, }; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); if (1 != i2c_transfer(i2c_adap, &msg, 1)) printk(KERN_WARNING "tuner-simple %d-%04x: " "unable to probe %s, proceeding anyway.", i2c_adapter_id(i2c_adap), i2c_addr, tuners[type].name); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } mutex_lock(&tuner_simple_list_mutex); instance = hybrid_tuner_request_state(struct tuner_simple_priv, priv, hybrid_tuner_instance_list, i2c_adap, i2c_addr, "tuner-simple"); switch (instance) { case 0: mutex_unlock(&tuner_simple_list_mutex); return NULL; case 1: fe->tuner_priv = priv; priv->type = type; priv->tun = &tuners[type]; priv->nr = simple_devcount++; break; default: fe->tuner_priv = priv; break; } mutex_unlock(&tuner_simple_list_mutex); memcpy(&fe->ops.tuner_ops, &simple_tuner_ops, sizeof(struct dvb_tuner_ops)); if (type != priv->type) tuner_warn("couldn't set type to %d. 
Using %d (%s) instead\n", type, priv->type, priv->tun->name); else tuner_info("type set to %d (%s)\n", priv->type, priv->tun->name); if ((debug) || ((atv_input[priv->nr] > 0) || (dtv_input[priv->nr] > 0))) { if (0 == atv_input[priv->nr]) tuner_info("tuner %d atv rf input will be " "autoselected\n", priv->nr); else tuner_info("tuner %d atv rf input will be " "set to input %d (insmod option)\n", priv->nr, atv_input[priv->nr]); if (0 == dtv_input[priv->nr]) tuner_info("tuner %d dtv rf input will be " "autoselected\n", priv->nr); else tuner_info("tuner %d dtv rf input will be " "set to input %d (insmod option)\n", priv->nr, dtv_input[priv->nr]); } strlcpy(fe->ops.tuner_ops.info.name, priv->tun->name, sizeof(fe->ops.tuner_ops.info.name)); return fe; } EXPORT_SYMBOL_GPL(simple_tuner_attach); MODULE_DESCRIPTION("Simple 4-control-bytes style tuner driver"); MODULE_AUTHOR("Ralph Metzler, Gerd Knorr, Gunther Mayer"); MODULE_LICENSE("GPL"); /* * Overrides for Emacs so that we follow Linus's tabbing style. * --------------------------------------------------------------------------- * Local variables: * c-basic-offset: 8 * End: */
computersforpeace/UBIFS-backports
drivers/media/common/tuners/tuner-simple.c
C
gpl-2.0
29,469
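A standalone aside, not part of the record above or of the driver: the unit conversions in simple_set_params() (frequency * 62500 for TV, * 125 / 2 for radio) follow from the V4L2 convention, visible in the file's own debug output, that analog TV frequencies are passed in 1/16 MHz (62.5 kHz) steps and radio frequencies in 1/16 kHz (62.5 Hz) steps. A minimal C sketch of the same arithmetic, with example frequencies chosen here purely for illustration:

#include <stdio.h>
#include <stdint.h>

/* TV: 1/16 MHz (62.5 kHz) units -> Hz, mirroring "params->frequency * 62500". */
static uint32_t tv_units_to_hz(uint32_t units)    { return units * 62500u; }

/* Radio: 1/16 kHz (62.5 Hz) units -> Hz, mirroring "params->frequency * 125 / 2". */
static uint32_t radio_units_to_hz(uint32_t units) { return units * 125u / 2u; }

int main(void)
{
	uint32_t tv = 16 * 471;     /* 471.00 MHz expressed in 62.5 kHz steps */
	uint32_t fm = 16 * 100000;  /* 100.0 MHz (100000 kHz) in 62.5 Hz steps */

	printf("tv:    %u units -> %u Hz\n", tv, tv_units_to_hz(tv));    /* 471000000 */
	printf("radio: %u units -> %u Hz\n", fm, radio_units_to_hz(fm)); /* 100000000 */
	return 0;
}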
/* sun3lance.c: Ethernet driver for SUN3 Lance chip */ /* Sun3 Lance ethernet driver, by Sam Creasey (sammy@users.qual.net). This driver is a part of the linux kernel, and is thus distributed under the GNU General Public License. The values used in LANCE_OBIO and LANCE_IRQ seem to be empirically true for the correct IRQ and address of the lance registers. They have not been widely tested, however. What we probably need is a "proper" way to search for a device in the sun3's prom, but, alas, linux has no such thing. This driver is largely based on atarilance.c, by Roman Hodek. Other sources of inspiration were the NetBSD sun3 am7990 driver, and the linux sparc lance driver (sunlance.c). There are more assumptions made throughout this driver, it almost certainly still needs work, but it does work at least for RARP/BOOTP and mounting the root NFS filesystem. */ static char *version = "sun3lance.c: v1.2 1/12/2001 Sam Creasey (sammy@sammy.net)\n"; #include <linux/module.h> #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/bitops.h> #include <asm/cacheflush.h> #include <asm/setup.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/dvma.h> #include <asm/idprom.h> #include <asm/machines.h> #ifdef CONFIG_SUN3 #include <asm/sun3mmu.h> #else #include <asm/sun3xprom.h> #endif /* sun3/60 addr/irq for the lance chip. If your sun is different, change this. */ #define LANCE_OBIO 0x120000 #define LANCE_IRQ IRQ_AUTO_3 /* Debug level: * 0 = silent, print only serious errors * 1 = normal, print error messages * 2 = debug, print debug infos * 3 = debug, print even more debug infos (packet data) */ #define LANCE_DEBUG 0 #ifdef LANCE_DEBUG static int lance_debug = LANCE_DEBUG; #else static int lance_debug = 1; #endif module_param(lance_debug, int, 0); MODULE_PARM_DESC(lance_debug, "SUN3 Lance debug level (0-3)"); MODULE_LICENSE("GPL"); #define DPRINTK(n,a) \ do { \ if (lance_debug >= n) \ printk a; \ } while( 0 ) /* we're only using 32k of memory, so we use 4 TX buffers and 16 RX buffers. These values are expressed as log2. */ #define TX_LOG_RING_SIZE 3 #define RX_LOG_RING_SIZE 5 /* These are the derived values */ #define TX_RING_SIZE (1 << TX_LOG_RING_SIZE) #define TX_RING_LEN_BITS (TX_LOG_RING_SIZE << 5) #define TX_RING_MOD_MASK (TX_RING_SIZE - 1) #define RX_RING_SIZE (1 << RX_LOG_RING_SIZE) #define RX_RING_LEN_BITS (RX_LOG_RING_SIZE << 5) #define RX_RING_MOD_MASK (RX_RING_SIZE - 1) /* Definitions for packet buffer access: */ #define PKT_BUF_SZ 1544 /* Get the address of a packet buffer corresponding to a given buffer head */ #define PKTBUF_ADDR(head) (void *)((unsigned long)(MEM) | (head)->base) /* The LANCE Rx and Tx ring descriptors. */ struct lance_rx_head { unsigned short base; /* Low word of base addr */ volatile unsigned char flag; unsigned char base_hi; /* High word of base addr (unused) */ short buf_length; /* This length is 2s complement! */ volatile short msg_length; /* This length is "normal". */ }; struct lance_tx_head { unsigned short base; /* Low word of base addr */ volatile unsigned char flag; unsigned char base_hi; /* High word of base addr (unused) */ short length; /* Length is 2s complement! */ volatile short misc; }; /* The LANCE initialization block, described in databook. 
*/ struct lance_init_block { unsigned short mode; /* Pre-set mode */ unsigned char hwaddr[6]; /* Physical ethernet address */ unsigned int filter[2]; /* Multicast filter (unused). */ /* Receive and transmit ring base, along with length bits. */ unsigned short rdra; unsigned short rlen; unsigned short tdra; unsigned short tlen; unsigned short pad[4]; /* is this needed? */ }; /* The whole layout of the Lance shared memory */ struct lance_memory { struct lance_init_block init; struct lance_tx_head tx_head[TX_RING_SIZE]; struct lance_rx_head rx_head[RX_RING_SIZE]; char rx_data[RX_RING_SIZE][PKT_BUF_SZ]; char tx_data[TX_RING_SIZE][PKT_BUF_SZ]; }; /* The driver's private device structure */ struct lance_private { volatile unsigned short *iobase; struct lance_memory *mem; int new_rx, new_tx; /* The next free ring entry */ int old_tx, old_rx; /* ring entry to be processed */ /* These two must be longs for set_bit() */ long tx_full; long lock; }; /* I/O register access macros */ #define MEM lp->mem #define DREG lp->iobase[0] #define AREG lp->iobase[1] #define REGA(a) (*( AREG = (a), &DREG )) /* Definitions for the Lance */ /* tx_head flags */ #define TMD1_ENP 0x01 /* end of packet */ #define TMD1_STP 0x02 /* start of packet */ #define TMD1_DEF 0x04 /* deferred */ #define TMD1_ONE 0x08 /* one retry needed */ #define TMD1_MORE 0x10 /* more than one retry needed */ #define TMD1_ERR 0x40 /* error summary */ #define TMD1_OWN 0x80 /* ownership (set: chip owns) */ #define TMD1_OWN_CHIP TMD1_OWN #define TMD1_OWN_HOST 0 /* tx_head misc field */ #define TMD3_TDR 0x03FF /* Time Domain Reflectometry counter */ #define TMD3_RTRY 0x0400 /* failed after 16 retries */ #define TMD3_LCAR 0x0800 /* carrier lost */ #define TMD3_LCOL 0x1000 /* late collision */ #define TMD3_UFLO 0x4000 /* underflow (late memory) */ #define TMD3_BUFF 0x8000 /* buffering error (no ENP) */ /* rx_head flags */ #define RMD1_ENP 0x01 /* end of packet */ #define RMD1_STP 0x02 /* start of packet */ #define RMD1_BUFF 0x04 /* buffer error */ #define RMD1_CRC 0x08 /* CRC error */ #define RMD1_OFLO 0x10 /* overflow */ #define RMD1_FRAM 0x20 /* framing error */ #define RMD1_ERR 0x40 /* error summary */ #define RMD1_OWN 0x80 /* ownership (set: chip owns) */ #define RMD1_OWN_CHIP RMD1_OWN #define RMD1_OWN_HOST 0 /* register names */ #define CSR0 0 /* mode/status */ #define CSR1 1 /* init block addr (low) */ #define CSR2 2 /* init block addr (high) */ #define CSR3 3 /* misc */ #define CSR8 8 /* address filter */ #define CSR15 15 /* promiscuous mode */ /* CSR0 */ /* (R=readable, W=writeable, S=set on write, C=clear on write) */ #define CSR0_INIT 0x0001 /* initialize (RS) */ #define CSR0_STRT 0x0002 /* start (RS) */ #define CSR0_STOP 0x0004 /* stop (RS) */ #define CSR0_TDMD 0x0008 /* transmit demand (RS) */ #define CSR0_TXON 0x0010 /* transmitter on (R) */ #define CSR0_RXON 0x0020 /* receiver on (R) */ #define CSR0_INEA 0x0040 /* interrupt enable (RW) */ #define CSR0_INTR 0x0080 /* interrupt active (R) */ #define CSR0_IDON 0x0100 /* initialization done (RC) */ #define CSR0_TINT 0x0200 /* transmitter interrupt (RC) */ #define CSR0_RINT 0x0400 /* receiver interrupt (RC) */ #define CSR0_MERR 0x0800 /* memory error (RC) */ #define CSR0_MISS 0x1000 /* missed frame (RC) */ #define CSR0_CERR 0x2000 /* carrier error (no heartbeat :-) (RC) */ #define CSR0_BABL 0x4000 /* babble: tx-ed too many bits (RC) */ #define CSR0_ERR 0x8000 /* error (RC) */ /* CSR3 */ #define CSR3_BCON 0x0001 /* byte control */ #define CSR3_ACON 0x0002 /* ALE control */ #define CSR3_BSWP
0x0004 /* byte swap (1=big endian) */ /***************************** Prototypes *****************************/ static int lance_probe( struct net_device *dev); static int lance_open( struct net_device *dev ); static void lance_init_ring( struct net_device *dev ); static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ); static irqreturn_t lance_interrupt( int irq, void *dev_id); static int lance_rx( struct net_device *dev ); static int lance_close( struct net_device *dev ); static void set_multicast_list( struct net_device *dev ); /************************* End of Prototypes **************************/ struct net_device * __init sun3lance_probe(int unit) { struct net_device *dev; static int found; int err = -ENODEV; if (!MACH_IS_SUN3 && !MACH_IS_SUN3X) return ERR_PTR(-ENODEV); /* check that this machine has an onboard lance */ switch(idprom->id_machtype) { case SM_SUN3|SM_3_50: case SM_SUN3|SM_3_60: case SM_SUN3X|SM_3_80: /* these machines have lance */ break; default: return ERR_PTR(-ENODEV); } if (found) return ERR_PTR(-ENODEV); dev = alloc_etherdev(sizeof(struct lance_private)); if (!dev) return ERR_PTR(-ENOMEM); if (unit >= 0) { sprintf(dev->name, "eth%d", unit); netdev_boot_setup_check(dev); } if (!lance_probe(dev)) goto out; err = register_netdev(dev); if (err) goto out1; found = 1; return dev; out1: #ifdef CONFIG_SUN3 iounmap((void __iomem *)dev->base_addr); #endif out: free_netdev(dev); return ERR_PTR(err); } static const struct net_device_ops lance_netdev_ops = { .ndo_open = lance_open, .ndo_stop = lance_close, .ndo_start_xmit = lance_start_xmit, .ndo_set_rx_mode = set_multicast_list, .ndo_set_mac_address = NULL, .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, }; static int __init lance_probe( struct net_device *dev) { unsigned long ioaddr; struct lance_private *lp; int i; static int did_version; volatile unsigned short *ioaddr_probe; unsigned short tmp1, tmp2; #ifdef CONFIG_SUN3 ioaddr = (unsigned long)ioremap(LANCE_OBIO, PAGE_SIZE); if (!ioaddr) return 0; #else ioaddr = SUN3X_LANCE; #endif /* test to see if there's really a lance here */ /* (CSR0_INIT shouldn't be readable) */ ioaddr_probe = (volatile unsigned short *)ioaddr; tmp1 = ioaddr_probe[0]; tmp2 = ioaddr_probe[1]; ioaddr_probe[1] = CSR0; ioaddr_probe[0] = CSR0_INIT | CSR0_STOP; if(ioaddr_probe[0] != CSR0_STOP) { ioaddr_probe[0] = tmp1; ioaddr_probe[1] = tmp2; #ifdef CONFIG_SUN3 iounmap((void __iomem *)ioaddr); #endif return 0; } lp = netdev_priv(dev); /* XXX - leak? 
*/ MEM = dvma_malloc_align(sizeof(struct lance_memory), 0x10000); if (MEM == NULL) { #ifdef CONFIG_SUN3 iounmap((void __iomem *)ioaddr); #endif printk(KERN_WARNING "SUN3 Lance couldn't allocate DVMA memory\n"); return 0; } lp->iobase = (volatile unsigned short *)ioaddr; dev->base_addr = (unsigned long)ioaddr; /* informational only */ REGA(CSR0) = CSR0_STOP; if (request_irq(LANCE_IRQ, lance_interrupt, IRQF_DISABLED, "SUN3 Lance", dev) < 0) { #ifdef CONFIG_SUN3 iounmap((void __iomem *)ioaddr); #endif dvma_free((void *)MEM); printk(KERN_WARNING "SUN3 Lance unable to allocate IRQ\n"); return 0; } dev->irq = (unsigned short)LANCE_IRQ; printk("%s: SUN3 Lance at io %#lx, mem %#lx, irq %d, hwaddr ", dev->name, (unsigned long)ioaddr, (unsigned long)MEM, dev->irq); /* copy in the ethernet address from the prom */ for(i = 0; i < 6 ; i++) dev->dev_addr[i] = idprom->id_ethaddr[i]; /* tell the card its ethernet address, bytes swapped */ MEM->init.hwaddr[0] = dev->dev_addr[1]; MEM->init.hwaddr[1] = dev->dev_addr[0]; MEM->init.hwaddr[2] = dev->dev_addr[3]; MEM->init.hwaddr[3] = dev->dev_addr[2]; MEM->init.hwaddr[4] = dev->dev_addr[5]; MEM->init.hwaddr[5] = dev->dev_addr[4]; printk("%pM\n", dev->dev_addr); MEM->init.mode = 0x0000; MEM->init.filter[0] = 0x00000000; MEM->init.filter[1] = 0x00000000; MEM->init.rdra = dvma_vtob(MEM->rx_head); MEM->init.rlen = (RX_LOG_RING_SIZE << 13) | (dvma_vtob(MEM->rx_head) >> 16); MEM->init.tdra = dvma_vtob(MEM->tx_head); MEM->init.tlen = (TX_LOG_RING_SIZE << 13) | (dvma_vtob(MEM->tx_head) >> 16); DPRINTK(2, ("initaddr: %08lx rx_ring: %08lx tx_ring: %08lx\n", dvma_vtob(&(MEM->init)), dvma_vtob(MEM->rx_head), (dvma_vtob(MEM->tx_head)))); if (did_version++ == 0) printk( version ); dev->netdev_ops = &lance_netdev_ops; // KLUDGE -- REMOVE ME set_bit(__LINK_STATE_PRESENT, &dev->state); return 1; } static int lance_open( struct net_device *dev ) { struct lance_private *lp = netdev_priv(dev); int i; DPRINTK( 2, ( "%s: lance_open()\n", dev->name )); REGA(CSR0) = CSR0_STOP; lance_init_ring(dev); /* From now on, AREG is kept to point to CSR0 */ REGA(CSR0) = CSR0_INIT; i = 1000000; while (--i > 0) if (DREG & CSR0_IDON) break; if (i <= 0 || (DREG & CSR0_ERR)) { DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n", dev->name, i, DREG )); DREG = CSR0_STOP; return -EIO; } DREG = CSR0_IDON | CSR0_STRT | CSR0_INEA; netif_start_queue(dev); DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG )); return 0; } /* Initialize the LANCE Rx and Tx rings. 
*/ static void lance_init_ring( struct net_device *dev ) { struct lance_private *lp = netdev_priv(dev); int i; lp->lock = 0; lp->tx_full = 0; lp->new_rx = lp->new_tx = 0; lp->old_rx = lp->old_tx = 0; for( i = 0; i < TX_RING_SIZE; i++ ) { MEM->tx_head[i].base = dvma_vtob(MEM->tx_data[i]); MEM->tx_head[i].flag = 0; MEM->tx_head[i].base_hi = (dvma_vtob(MEM->tx_data[i])) >>16; MEM->tx_head[i].length = 0; MEM->tx_head[i].misc = 0; } for( i = 0; i < RX_RING_SIZE; i++ ) { MEM->rx_head[i].base = dvma_vtob(MEM->rx_data[i]); MEM->rx_head[i].flag = RMD1_OWN_CHIP; MEM->rx_head[i].base_hi = (dvma_vtob(MEM->rx_data[i])) >> 16; MEM->rx_head[i].buf_length = -PKT_BUF_SZ | 0xf000; MEM->rx_head[i].msg_length = 0; } /* tell the card its ethernet address, bytes swapped */ MEM->init.hwaddr[0] = dev->dev_addr[1]; MEM->init.hwaddr[1] = dev->dev_addr[0]; MEM->init.hwaddr[2] = dev->dev_addr[3]; MEM->init.hwaddr[3] = dev->dev_addr[2]; MEM->init.hwaddr[4] = dev->dev_addr[5]; MEM->init.hwaddr[5] = dev->dev_addr[4]; MEM->init.mode = 0x0000; MEM->init.filter[0] = 0x00000000; MEM->init.filter[1] = 0x00000000; MEM->init.rdra = dvma_vtob(MEM->rx_head); MEM->init.rlen = (RX_LOG_RING_SIZE << 13) | (dvma_vtob(MEM->rx_head) >> 16); MEM->init.tdra = dvma_vtob(MEM->tx_head); MEM->init.tlen = (TX_LOG_RING_SIZE << 13) | (dvma_vtob(MEM->tx_head) >> 16); /* tell the lance the address of its init block */ REGA(CSR1) = dvma_vtob(&(MEM->init)); REGA(CSR2) = dvma_vtob(&(MEM->init)) >> 16; #ifdef CONFIG_SUN3X REGA(CSR3) = CSR3_BSWP | CSR3_ACON | CSR3_BCON; #else REGA(CSR3) = CSR3_BSWP; #endif } static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev ) { struct lance_private *lp = netdev_priv(dev); int entry, len; struct lance_tx_head *head; unsigned long flags; DPRINTK( 1, ( "%s: transmit start.\n", dev->name)); /* Transmitter timeout, serious problems. */ if (netif_queue_stopped(dev)) { int tickssofar = jiffies - dev_trans_start(dev); if (tickssofar < HZ/5) return NETDEV_TX_BUSY; DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n", dev->name, DREG )); DREG = CSR0_STOP; /* * Always set BSWP after a STOP as STOP puts it back into * little endian mode. */ REGA(CSR3) = CSR3_BSWP; dev->stats.tx_errors++; if(lance_debug >= 2) { int i; printk("Ring data: old_tx %d new_tx %d%s new_rx %d\n", lp->old_tx, lp->new_tx, lp->tx_full ? " (full)" : "", lp->new_rx ); for( i = 0 ; i < RX_RING_SIZE; i++ ) printk( "rx #%d: base=%04x blen=%04x mlen=%04x\n", i, MEM->rx_head[i].base, -MEM->rx_head[i].buf_length, MEM->rx_head[i].msg_length); for( i = 0 ; i < TX_RING_SIZE; i++ ) printk("tx #%d: base=%04x len=%04x misc=%04x\n", i, MEM->tx_head[i].base, -MEM->tx_head[i].length, MEM->tx_head[i].misc ); } lance_init_ring(dev); REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT; netif_start_queue(dev); return NETDEV_TX_OK; } /* Block a timer-based transmit from overlapping with us by stopping the queue for a bit. This could better be done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */ netif_stop_queue(dev); if (test_and_set_bit( 0, (void*)&lp->lock ) != 0) { printk( "%s: tx queue lock!\n", dev->name); /* don't clear dev->tbusy flag. */ return NETDEV_TX_BUSY; } AREG = CSR0; DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name, DREG )); #ifdef CONFIG_SUN3X /* this weirdness doesn't appear on sun3... 
*/ if(!(DREG & CSR0_INIT)) { DPRINTK( 1, ("INIT not set, reinitializing...\n")); REGA( CSR0 ) = CSR0_STOP; lance_init_ring(dev); REGA( CSR0 ) = CSR0_INIT | CSR0_STRT; } #endif /* Fill in a Tx ring entry */ #if 0 if (lance_debug >= 2) { printk( "%s: TX pkt %d type 0x%04x" " from %s to %s" " data at 0x%08x len %d\n", dev->name, lp->new_tx, ((u_short *)skb->data)[6], DEV_ADDR(&skb->data[6]), DEV_ADDR(skb->data), (int)skb->data, (int)skb->len ); } #endif /* We're not prepared for the int until the last flags are set/reset. * And the int may happen already after setting the OWN_CHIP... */ local_irq_save(flags); /* Mask to ring buffer boundary. */ entry = lp->new_tx; head = &(MEM->tx_head[entry]); /* Caution: the write order is important here, set the "ownership" bits * last. */ /* the sun3's lance needs it's buffer padded to the minimum size */ len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN; // head->length = -len; head->length = (-len) | 0xf000; head->misc = 0; skb_copy_from_linear_data(skb, PKTBUF_ADDR(head), skb->len); if (len != skb->len) memset(PKTBUF_ADDR(head) + skb->len, 0, len-skb->len); head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP; lp->new_tx = (lp->new_tx + 1) & TX_RING_MOD_MASK; dev->stats.tx_bytes += skb->len; /* Trigger an immediate send poll. */ REGA(CSR0) = CSR0_INEA | CSR0_TDMD | CSR0_STRT; AREG = CSR0; DPRINTK( 2, ( "%s: lance_start_xmit() exiting, csr0 %4.4x.\n", dev->name, DREG )); dev_kfree_skb(skb); lp->lock = 0; if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) == TMD1_OWN_HOST) netif_start_queue(dev); local_irq_restore(flags); return NETDEV_TX_OK; } /* The LANCE interrupt handler. */ static irqreturn_t lance_interrupt( int irq, void *dev_id) { struct net_device *dev = dev_id; struct lance_private *lp = netdev_priv(dev); int csr0; static int in_interrupt; if (dev == NULL) { DPRINTK( 1, ( "lance_interrupt(): invalid dev_id\n" )); return IRQ_NONE; } if (in_interrupt) DPRINTK( 2, ( "%s: Re-entering the interrupt handler.\n", dev->name )); in_interrupt = 1; still_more: flush_cache_all(); AREG = CSR0; csr0 = DREG; /* ack interrupts */ DREG = csr0 & (CSR0_TINT | CSR0_RINT | CSR0_IDON); /* clear errors */ if(csr0 & CSR0_ERR) DREG = CSR0_BABL | CSR0_MERR | CSR0_CERR | CSR0_MISS; DPRINTK( 2, ( "%s: interrupt csr0=%04x new csr=%04x.\n", dev->name, csr0, DREG )); if (csr0 & CSR0_TINT) { /* Tx-done interrupt */ int old_tx = lp->old_tx; // if(lance_debug >= 3) { // int i; // // printk("%s: tx int\n", dev->name); // // for(i = 0; i < TX_RING_SIZE; i++) // printk("ring %d flag=%04x\n", i, // MEM->tx_head[i].flag); // } while( old_tx != lp->new_tx) { struct lance_tx_head *head = &(MEM->tx_head[old_tx]); DPRINTK(3, ("on tx_ring %d\n", old_tx)); if (head->flag & TMD1_OWN_CHIP) break; /* It still hasn't been Txed */ if (head->flag & TMD1_ERR) { int status = head->misc; dev->stats.tx_errors++; if (status & TMD3_RTRY) dev->stats.tx_aborted_errors++; if (status & TMD3_LCAR) dev->stats.tx_carrier_errors++; if (status & TMD3_LCOL) dev->stats.tx_window_errors++; if (status & (TMD3_UFLO | TMD3_BUFF)) { dev->stats.tx_fifo_errors++; printk("%s: Tx FIFO error\n", dev->name); REGA(CSR0) = CSR0_STOP; REGA(CSR3) = CSR3_BSWP; lance_init_ring(dev); REGA(CSR0) = CSR0_STRT | CSR0_INEA; return IRQ_HANDLED; } } else if(head->flag & (TMD1_ENP | TMD1_STP)) { head->flag &= ~(TMD1_ENP | TMD1_STP); if(head->flag & (TMD1_ONE | TMD1_MORE)) dev->stats.collisions++; dev->stats.tx_packets++; DPRINTK(3, ("cleared tx ring %d\n", old_tx)); } old_tx = (old_tx +1) & TX_RING_MOD_MASK; } lp->old_tx = 
old_tx; } if (netif_queue_stopped(dev)) { /* The ring is no longer full, clear tbusy. */ netif_start_queue(dev); netif_wake_queue(dev); } if (csr0 & CSR0_RINT) /* Rx interrupt */ lance_rx( dev ); /* Log misc errors. */ if (csr0 & CSR0_BABL) dev->stats.tx_errors++; /* Tx babble. */ if (csr0 & CSR0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. */ if (csr0 & CSR0_MERR) { DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), " "status %04x.\n", dev->name, csr0 )); /* Restart the chip. */ REGA(CSR0) = CSR0_STOP; REGA(CSR3) = CSR3_BSWP; lance_init_ring(dev); REGA(CSR0) = CSR0_STRT | CSR0_INEA; } /* Clear any other interrupt, and set interrupt enable. */ // DREG = CSR0_BABL | CSR0_CERR | CSR0_MISS | CSR0_MERR | // CSR0_IDON | CSR0_INEA; REGA(CSR0) = CSR0_INEA; if(DREG & (CSR0_RINT | CSR0_TINT)) { DPRINTK(2, ("restarting interrupt, csr0=%#04x\n", DREG)); goto still_more; } DPRINTK( 2, ( "%s: exiting interrupt, csr0=%#04x.\n", dev->name, DREG )); in_interrupt = 0; return IRQ_HANDLED; } /* get packet, toss into skbuff */ static int lance_rx( struct net_device *dev ) { struct lance_private *lp = netdev_priv(dev); int entry = lp->new_rx; /* If we own the next entry, it's a new packet. Send it up. */ while( (MEM->rx_head[entry].flag & RMD1_OWN) == RMD1_OWN_HOST ) { struct lance_rx_head *head = &(MEM->rx_head[entry]); int status = head->flag; if (status != (RMD1_ENP|RMD1_STP)) { /* There was an error. */ /* There is a tricky error noted by John Murphy, <murf@perftech.com> to Russ Nelson: Even with full-sized buffers it's possible for a jabber packet to use two buffers, with only the last correctly noting the error. */ if (status & RMD1_ENP) /* Only count a general error at the */ dev->stats.rx_errors++; /* end of a packet.*/ if (status & RMD1_FRAM) dev->stats.rx_frame_errors++; if (status & RMD1_OFLO) dev->stats.rx_over_errors++; if (status & RMD1_CRC) dev->stats.rx_crc_errors++; if (status & RMD1_BUFF) dev->stats.rx_fifo_errors++; head->flag &= (RMD1_ENP|RMD1_STP); } else { /* Malloc up new buffer, compatible with net-3. 
*/ // short pkt_len = head->msg_length;// & 0xfff; short pkt_len = (head->msg_length & 0xfff) - 4; struct sk_buff *skb; if (pkt_len < 60) { printk( "%s: Runt packet!\n", dev->name ); dev->stats.rx_errors++; } else { skb = netdev_alloc_skb(dev, pkt_len + 2); if (skb == NULL) { DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n", dev->name )); dev->stats.rx_dropped++; head->msg_length = 0; head->flag |= RMD1_OWN_CHIP; lp->new_rx = (lp->new_rx+1) & RX_RING_MOD_MASK; break; /* descriptor is back with the chip; skb is NULL, so don't touch it below */ } #if 0 if (lance_debug >= 3) { u_char *data = PKTBUF_ADDR(head); printk("%s: RX pkt %d type 0x%04x" " from %pM to %pM", dev->name, lp->new_tx, ((u_short *)data)[6], &data[6], data); printk(" data %02x %02x %02x %02x %02x %02x %02x %02x " "len %d at %08x\n", data[15], data[16], data[17], data[18], data[19], data[20], data[21], data[22], pkt_len, data); } #endif if (lance_debug >= 3) { u_char *data = PKTBUF_ADDR(head); printk( "%s: RX pkt %d type 0x%04x len %d\n ", dev->name, entry, ((u_short *)data)[6], pkt_len); } skb_reserve( skb, 2 ); /* 16 byte align */ skb_put( skb, pkt_len ); /* Make room */ skb_copy_to_linear_data(skb, PKTBUF_ADDR(head), pkt_len); skb->protocol = eth_type_trans( skb, dev ); netif_rx( skb ); dev->stats.rx_packets++; dev->stats.rx_bytes += pkt_len; } } // head->buf_length = -PKT_BUF_SZ | 0xf000; head->msg_length = 0; head->flag = RMD1_OWN_CHIP; entry = lp->new_rx = (lp->new_rx +1) & RX_RING_MOD_MASK; } /* From lance.c (Donald Becker): */ /* We should check that at least two ring entries are free. If not, we should free one and mark stats->rx_dropped++. */ return 0; } static int lance_close( struct net_device *dev ) { struct lance_private *lp = netdev_priv(dev); netif_stop_queue(dev); AREG = CSR0; DPRINTK( 2, ( "%s: Shutting down ethercard, status was %2.2x.\n", dev->name, DREG )); /* We stop the LANCE here -- it occasionally polls memory if we don't. */ DREG = CSR0_STOP; return 0; } /* Set or clear the multicast filter for this adaptor. num_addrs == -1 Promiscuous mode, receive all packets num_addrs == 0 Normal mode, clear multicast list num_addrs > 0 Multicast mode, receive normal and MC packets, and do best-effort filtering. */ /* completely untested on a sun3 */ static void set_multicast_list( struct net_device *dev ) { struct lance_private *lp = netdev_priv(dev); if(netif_queue_stopped(dev)) /* Only possible if board is already started */ return; /* We take the simple way out and always enable promiscuous mode. */ DREG = CSR0_STOP; /* Temporarily stop the lance. */ if (dev->flags & IFF_PROMISC) { /* Log any net taps. */ DPRINTK( 3, ( "%s: Promiscuous mode enabled.\n", dev->name )); REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */ } else { short multicast_table[4]; int num_addrs = netdev_mc_count(dev); int i; /* We don't use the multicast table, but rely on upper-layer * filtering. */ memset( multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table) ); for( i = 0; i < 4; i++ ) REGA( CSR8+i ) = multicast_table[i]; REGA( CSR15 ) = 0; /* Unset promiscuous mode */ } /* * Always set BSWP after a STOP as STOP puts it back into * little endian mode. 
*/ REGA( CSR3 ) = CSR3_BSWP; /* Resume normal operation and reset AREG to CSR0 */ REGA( CSR0 ) = CSR0_IDON | CSR0_INEA | CSR0_STRT; } #ifdef MODULE static struct net_device *sun3lance_dev; int __init init_module(void) { sun3lance_dev = sun3lance_probe(-1); if (IS_ERR(sun3lance_dev)) return PTR_ERR(sun3lance_dev); return 0; } void __exit cleanup_module(void) { unregister_netdev(sun3lance_dev); #ifdef CONFIG_SUN3 iounmap((void __iomem *)sun3lance_dev->base_addr); #endif free_netdev(sun3lance_dev); } #endif /* MODULE */
MoKee/android_kernel_motorola_msm8960dt-common
drivers/net/ethernet/amd/sun3lance.c
C
gpl-2.0
26,355
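An aside, not part of the record above: the driver keeps both ring sizes as powers of two (TX_LOG_RING_SIZE, RX_LOG_RING_SIZE) precisely so that advancing an index reduces to `(i + 1) & RING_MOD_MASK`, a single AND, instead of a modulo or a compare-and-reset. A self-contained sketch of that wrap, reusing the driver's TX constants:

#include <stdio.h>

#define TX_LOG_RING_SIZE 3
#define TX_RING_SIZE     (1 << TX_LOG_RING_SIZE)   /* 8 entries */
#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)        /* 0x07 */

int main(void)
{
	int i, idx = 0;

	/* Walk the ring a little more than twice; the AND makes 7 wrap to 0. */
	for (i = 0; i < 2 * TX_RING_SIZE + 2; i++) {
		printf("%d ", idx);
		idx = (idx + 1) & TX_RING_MOD_MASK;
	}
	printf("\n"); /* 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 */
	return 0;
}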
/* * Coldfire generic GPIO support * * (C) Copyright 2009, Steven King <sfking@fdwdc.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <asm/coldfire.h> #include <asm/mcfsim.h> #include <asm/mcfgpio.h> static struct mcf_gpio_chip mcf_gpio_chips[] = { { .gpio_chip = { .label = "NQ", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value, .base = 1, .ngpio = 7, }, .pddr = (void __iomem *)MCFEPORT_EPDDR, .podr = (void __iomem *)MCFEPORT_EPDR, .ppdr = (void __iomem *)MCFEPORT_EPPDR, }, { .gpio_chip = { .label = "TA", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 8, .ngpio = 4, }, .pddr = (void __iomem *)MCFGPTA_GPTDDR, .podr = (void __iomem *)MCFGPTA_GPTPORT, .ppdr = (void __iomem *)MCFGPTA_GPTPORT, }, { .gpio_chip = { .label = "TB", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 16, .ngpio = 4, }, .pddr = (void __iomem *)MCFGPTB_GPTDDR, .podr = (void __iomem *)MCFGPTB_GPTPORT, .ppdr = (void __iomem *)MCFGPTB_GPTPORT, }, { .gpio_chip = { .label = "QA", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 24, .ngpio = 4, }, .pddr = (void __iomem *)MCFQADC_DDRQA, .podr = (void __iomem *)MCFQADC_PORTQA, .ppdr = (void __iomem *)MCFQADC_PORTQA, }, { .gpio_chip = { .label = "QB", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 32, .ngpio = 4, }, .pddr = (void __iomem *)MCFQADC_DDRQB, .podr = (void __iomem *)MCFQADC_PORTQB, .ppdr = (void __iomem *)MCFQADC_PORTQB, }, { .gpio_chip = { .label = "A", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 40, .ngpio = 8, }, .pddr = (void __iomem *)MCFGPIO_DDRA, .podr = (void __iomem *)MCFGPIO_PORTA, .ppdr = (void __iomem *)MCFGPIO_PORTAP, .setr = (void __iomem *)MCFGPIO_SETA, .clrr = (void __iomem *)MCFGPIO_CLRA, }, { .gpio_chip = { .label = "B", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 48, .ngpio = 8, }, .pddr = (void __iomem *)MCFGPIO_DDRB, .podr = (void __iomem *)MCFGPIO_PORTB, .ppdr = (void __iomem *)MCFGPIO_PORTBP, .setr = (void __iomem *)MCFGPIO_SETB, .clrr = (void __iomem *)MCFGPIO_CLRB, }, {
.gpio_chip = { .label = "C", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 56, .ngpio = 8, }, .pddr = (void __iomem *)MCFGPIO_DDRC, .podr = (void __iomem *)MCFGPIO_PORTC, .ppdr = (void __iomem *)MCFGPIO_PORTCP, .setr = (void __iomem *)MCFGPIO_SETC, .clrr = (void __iomem *)MCFGPIO_CLRC, }, { .gpio_chip = { .label = "D", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 64, .ngpio = 8, }, .pddr = (void __iomem *)MCFGPIO_DDRD, .podr = (void __iomem *)MCFGPIO_PORTD, .ppdr = (void __iomem *)MCFGPIO_PORTDP, .setr = (void __iomem *)MCFGPIO_SETD, .clrr = (void __iomem *)MCFGPIO_CLRD, }, { .gpio_chip = { .label = "E", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 72, .ngpio = 8, }, .pddr = (void __iomem *)MCFGPIO_DDRE, .podr = (void __iomem *)MCFGPIO_PORTE, .ppdr = (void __iomem *)MCFGPIO_PORTEP, .setr = (void __iomem *)MCFGPIO_SETE, .clrr = (void __iomem *)MCFGPIO_CLRE, }, { .gpio_chip = { .label = "F", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 80, .ngpio = 8, }, .pddr = (void __iomem *)MCFGPIO_DDRF, .podr = (void __iomem *)MCFGPIO_PORTF, .ppdr = (void __iomem *)MCFGPIO_PORTFP, .setr = (void __iomem *)MCFGPIO_SETF, .clrr = (void __iomem *)MCFGPIO_CLRF, }, { .gpio_chip = { .label = "G", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 88, .ngpio = 8, }, .pddr = (void __iomem *)MCFGPIO_DDRG, .podr = (void __iomem *)MCFGPIO_PORTG, .ppdr = (void __iomem *)MCFGPIO_PORTGP, .setr = (void __iomem *)MCFGPIO_SETG, .clrr = (void __iomem *)MCFGPIO_CLRG, }, { .gpio_chip = { .label = "H", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 96, .ngpio = 8, }, .pddr = (void __iomem *)MCFGPIO_DDRH, .podr = (void __iomem *)MCFGPIO_PORTH, .ppdr = (void __iomem *)MCFGPIO_PORTHP, .setr = (void __iomem *)MCFGPIO_SETH, .clrr = (void __iomem *)MCFGPIO_CLRH, }, { .gpio_chip = { .label = "J", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 104, .ngpio = 8, }, .pddr = (void __iomem *)MCFGPIO_DDRJ, .podr = (void __iomem *)MCFGPIO_PORTJ, .ppdr = (void __iomem *)MCFGPIO_PORTJP, .setr = (void __iomem *)MCFGPIO_SETJ, .clrr = (void __iomem *)MCFGPIO_CLRJ, }, { .gpio_chip = { .label = "DD", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 112, .ngpio = 8, }, .pddr = (void __iomem *)MCFGPIO_DDRDD, .podr = (void __iomem *)MCFGPIO_PORTDD, .ppdr = (void 
__iomem *)MCFGPIO_PORTDDP, .setr = (void __iomem *)MCFGPIO_SETDD, .clrr = (void __iomem *)MCFGPIO_CLRDD, }, { .gpio_chip = { .label = "EH", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 120, .ngpio = 8, }, .pddr = (void __iomem *)MCFGPIO_DDREH, .podr = (void __iomem *)MCFGPIO_PORTEH, .ppdr = (void __iomem *)MCFGPIO_PORTEHP, .setr = (void __iomem *)MCFGPIO_SETEH, .clrr = (void __iomem *)MCFGPIO_CLREH, }, { .gpio_chip = { .label = "EL", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 128, .ngpio = 8, }, .pddr = (void __iomem *)MCFGPIO_DDREL, .podr = (void __iomem *)MCFGPIO_PORTEL, .ppdr = (void __iomem *)MCFGPIO_PORTELP, .setr = (void __iomem *)MCFGPIO_SETEL, .clrr = (void __iomem *)MCFGPIO_CLREL, }, { .gpio_chip = { .label = "AS", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 136, .ngpio = 6, }, .pddr = (void __iomem *)MCFGPIO_DDRAS, .podr = (void __iomem *)MCFGPIO_PORTAS, .ppdr = (void __iomem *)MCFGPIO_PORTASP, .setr = (void __iomem *)MCFGPIO_SETAS, .clrr = (void __iomem *)MCFGPIO_CLRAS, }, { .gpio_chip = { .label = "QS", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 144, .ngpio = 7, }, .pddr = (void __iomem *)MCFGPIO_DDRQS, .podr = (void __iomem *)MCFGPIO_PORTQS, .ppdr = (void __iomem *)MCFGPIO_PORTQSP, .setr = (void __iomem *)MCFGPIO_SETQS, .clrr = (void __iomem *)MCFGPIO_CLRQS, }, { .gpio_chip = { .label = "SD", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 152, .ngpio = 6, }, .pddr = (void __iomem *)MCFGPIO_DDRSD, .podr = (void __iomem *)MCFGPIO_PORTSD, .ppdr = (void __iomem *)MCFGPIO_PORTSDP, .setr = (void __iomem *)MCFGPIO_SETSD, .clrr = (void __iomem *)MCFGPIO_CLRSD, }, { .gpio_chip = { .label = "TC", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 160, .ngpio = 4, }, .pddr = (void __iomem *)MCFGPIO_DDRTC, .podr = (void __iomem *)MCFGPIO_PORTTC, .ppdr = (void __iomem *)MCFGPIO_PORTTCP, .setr = (void __iomem *)MCFGPIO_SETTC, .clrr = (void __iomem *)MCFGPIO_CLRTC, }, { .gpio_chip = { .label = "TD", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 168, .ngpio = 4, }, .pddr = (void __iomem *)MCFGPIO_DDRTD, .podr = (void __iomem *)MCFGPIO_PORTTD, .ppdr = (void __iomem *)MCFGPIO_PORTTDP, .setr = (void __iomem *)MCFGPIO_SETTD, .clrr = (void __iomem *)MCFGPIO_CLRTD, }, { .gpio_chip = { .label = "UA", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = 
mcf_gpio_get_value, .set = mcf_gpio_set_value_fast, .base = 176, .ngpio = 4, }, .pddr = (void __iomem *)MCFGPIO_DDRUA, .podr = (void __iomem *)MCFGPIO_PORTUA, .ppdr = (void __iomem *)MCFGPIO_PORTUAP, .setr = (void __iomem *)MCFGPIO_SETUA, .clrr = (void __iomem *)MCFGPIO_CLRUA, }, }; static int __init mcf_gpio_init(void) { unsigned i = 0; while (i < ARRAY_SIZE(mcf_gpio_chips)) (void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]); return 0; } core_initcall(mcf_gpio_init);
broodplank/samsung-kernel-jfltexx
arch/m68k/platform/528x/gpio.c
C
gpl-2.0
12,775
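An aside, not part of the record above: the table mixes two .set callbacks. Banks that also declare .setr/.clrr use mcf_gpio_set_value_fast(), which flips one pin with a single store, while plain banks need a read-modify-write of the output data register. The sketch below models that difference in portable C under stated assumptions: the register is just a variable here, and the semantics (ones written to a SET register set pins, zeros written to a CLR register clear pins) are my reading of the ColdFire convention, not taken from this file:

#include <stdio.h>
#include <stdint.h>

static uint8_t podr; /* stand-in for a memory-mapped output data register */

/* Slow path: read-modify-write, as a bank without SET/CLR registers needs
 * (and which requires locking if other pins can change concurrently). */
static void gpio_set_value(unsigned int bit, int on)
{
	if (on)
		podr |= (uint8_t)(1u << bit);
	else
		podr &= (uint8_t)~(1u << bit);
}

/* Hardware behavior of the dedicated registers, modeled in software:
 * ones written to SET turn pins on, zeros written to CLR turn pins off,
 * and all other pins are left untouched. */
static void write_setr(uint8_t v) { podr |= v; }
static void write_clrr(uint8_t v) { podr &= v; }

/* Fast path: one store, atomic with respect to the bank's other pins. */
static void gpio_set_value_fast(unsigned int bit, int on)
{
	if (on)
		write_setr((uint8_t)(1u << bit));
	else
		write_clrr((uint8_t)~(1u << bit));
}

int main(void)
{
	gpio_set_value(3, 1);       /* podr = 0x08 */
	gpio_set_value_fast(0, 1);  /* podr = 0x09 */
	gpio_set_value_fast(3, 0);  /* podr = 0x01 */
	printf("podr = 0x%02x\n", podr);
	return 0;
}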
/* * Coldfire generic GPIO support * * (C) Copyright 2009, Steven King <sfking@fdwdc.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <asm/coldfire.h> #include <asm/mcfsim.h> #include <asm/mcfgpio.h> static struct mcf_gpio_chip mcf_gpio_chips[] = { { .gpio_chip = { .label = "GPIO0", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value, .ngpio = 32, }, .pddr = (void __iomem *) MCFSIM2_GPIOENABLE, .podr = (void __iomem *) MCFSIM2_GPIOWRITE, .ppdr = (void __iomem *) MCFSIM2_GPIOREAD, }, { .gpio_chip = { .label = "GPIO1", .request = mcf_gpio_request, .free = mcf_gpio_free, .direction_input = mcf_gpio_direction_input, .direction_output = mcf_gpio_direction_output, .get = mcf_gpio_get_value, .set = mcf_gpio_set_value, .base = 32, .ngpio = 32, }, .pddr = (void __iomem *) MCFSIM2_GPIO1ENABLE, .podr = (void __iomem *) MCFSIM2_GPIO1WRITE, .ppdr = (void __iomem *) MCFSIM2_GPIO1READ, }, }; static int __init mcf_gpio_init(void) { unsigned i = 0; while (i < ARRAY_SIZE(mcf_gpio_chips)) (void)gpiochip_add((struct gpio_chip *)&mcf_gpio_chips[i++]); return 0; } core_initcall(mcf_gpio_init);
crimsonthunder/morfic_n5
arch/m68k/platform/5249/gpio.c
C
gpl-2.0
1,841
/* * Ftrace support for Microblaze. * * Copyright (C) 2009 Michal Simek <monstr@monstr.eu> * Copyright (C) 2009 PetaLogix * * Based on MIPS and PowerPC ftrace code * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <asm/cacheflush.h> #include <linux/ftrace.h> #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* * Hook the return address and push it in the stack of return addrs * in current thread info. */ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) { unsigned long old; int faulted, err; struct ftrace_graph_ent trace; unsigned long return_hooker = (unsigned long) &return_to_handler; if (unlikely(atomic_read(&current->tracing_graph_pause))) return; /* * Protect against fault, even if it shouldn't * happen. This tool is too much intrusive to * ignore such a protection. */ asm volatile(" 1: lwi %0, %2, 0; \ 2: swi %3, %2, 0; \ addik %1, r0, 0; \ 3: \ .section .fixup, \"ax\"; \ 4: brid 3b; \ addik %1, r0, 1; \ .previous; \ .section __ex_table,\"a\"; \ .word 1b,4b; \ .word 2b,4b; \ .previous;" \ : "=&r" (old), "=r" (faulted) : "r" (parent), "r" (return_hooker) ); flush_dcache_range((u32)parent, (u32)parent + 4); flush_icache_range((u32)parent, (u32)parent + 4); if (unlikely(faulted)) { ftrace_graph_stop(); WARN_ON(1); return; } err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0); if (err == -EBUSY) { *parent = old; return; } trace.func = self_addr; /* Only trace if the calling function expects to */ if (!ftrace_graph_entry(&trace)) { current->curr_ret_stack--; *parent = old; } } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #ifdef CONFIG_DYNAMIC_FTRACE /* save value to addr - it is safe to do it in asm */ static int ftrace_modify_code(unsigned long addr, unsigned int value) { int faulted = 0; __asm__ __volatile__(" 1: swi %2, %1, 0; \ addik %0, r0, 0; \ 2: \ .section .fixup, \"ax\"; \ 3: brid 2b; \ addik %0, r0, 1; \ .previous; \ .section __ex_table,\"a\"; \ .word 1b,3b; \ .previous;" \ : "=r" (faulted) : "r" (addr), "r" (value) ); if (unlikely(faulted)) return -EFAULT; flush_dcache_range(addr, addr + 4); flush_icache_range(addr, addr + 4); return 0; } #define MICROBLAZE_NOP 0x80000000 #define MICROBLAZE_BRI 0xb800000C static unsigned int recorded; /* whether the original code was saved or not */ static unsigned int imm; /* saving whole imm instruction */ /* There are two approaches how to solve the ftrace_make_nop function - look below */ #undef USE_FTRACE_NOP #ifdef USE_FTRACE_NOP static unsigned int bralid; /* saving whole bralid instruction */ #endif int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { /* we have this part of code which we are working with * b000c000 imm -16384 * b9fc8e30 bralid r15, -29136 // c0008e30 <_mcount> * 80000000 or r0, r0, r0 * * The first solution (!USE_FTRACE_NOP - could be called the branch solution) * b000c000 bri 12 (0xC - jump to any other instruction) * b9fc8e30 bralid r15, -29136 // c0008e30 <_mcount> * 80000000 or r0, r0, r0 * any other instruction * * The second solution (USE_FTRACE_NOP) - no jump just nops * 80000000 or r0, r0, r0 * 80000000 or r0, r0, r0 * 80000000 or r0, r0, r0 */ int ret = 0; if (recorded == 0) { recorded = 1; imm = *(unsigned int *)rec->ip; pr_debug("%s: imm:0x%x\n", __func__, imm); #ifdef USE_FTRACE_NOP bralid = *(unsigned int *)(rec->ip + 4); pr_debug("%s: bralid 0x%x\n", __func__, bralid); #endif /* USE_FTRACE_NOP */ } #ifdef USE_FTRACE_NOP ret = ftrace_modify_code(rec->ip, 
MICROBLAZE_NOP); ret += ftrace_modify_code(rec->ip + 4, MICROBLAZE_NOP); #else /* USE_FTRACE_NOP */ ret = ftrace_modify_code(rec->ip, MICROBLAZE_BRI); #endif /* USE_FTRACE_NOP */ return ret; } /* ftrace_make_nop is expected to have been called before this function */ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { int ret; pr_debug("%s: addr:0x%x, rec->ip: 0x%x, imm:0x%x\n", __func__, (unsigned int)addr, (unsigned int)rec->ip, imm); ret = ftrace_modify_code(rec->ip, imm); #ifdef USE_FTRACE_NOP pr_debug("%s: bralid:0x%x\n", __func__, bralid); ret += ftrace_modify_code(rec->ip + 4, bralid); #endif /* USE_FTRACE_NOP */ return ret; } int __init ftrace_dyn_arch_init(void *data) { /* The return code is returned via data */ *(unsigned long *)data = 0; return 0; } int ftrace_update_ftrace_func(ftrace_func_t func) { unsigned long ip = (unsigned long)(&ftrace_call); unsigned int upper = (unsigned int)func; unsigned int lower = (unsigned int)func; int ret = 0; /* create proper saving of func to the ftrace_call location */ upper = 0xb0000000 + (upper >> 16); /* imm func_upper */ lower = 0x32800000 + (lower & 0xFFFF); /* addik r20, r0, func_lower */ pr_debug("%s: func=0x%x, ip=0x%x, upper=0x%x, lower=0x%x\n", __func__, (unsigned int)func, (unsigned int)ip, upper, lower); /* save upper and lower code */ ret = ftrace_modify_code(ip, upper); ret += ftrace_modify_code(ip + 4, lower); /* We just need to replace the rtsd r15, 8 with NOP */ ret += ftrace_modify_code((unsigned long)&ftrace_caller, MICROBLAZE_NOP); return ret; } #ifdef CONFIG_FUNCTION_GRAPH_TRACER unsigned int old_jump; /* saving place for jump instruction */ int ftrace_enable_ftrace_graph_caller(void) { unsigned int ret; unsigned long ip = (unsigned long)(&ftrace_call_graph); old_jump = *(unsigned int *)ip; /* save jump over instruction */ ret = ftrace_modify_code(ip, MICROBLAZE_NOP); pr_debug("%s: Replace instruction: 0x%x\n", __func__, old_jump); return ret; } int ftrace_disable_ftrace_graph_caller(void) { unsigned int ret; unsigned long ip = (unsigned long)(&ftrace_call_graph); ret = ftrace_modify_code(ip, old_jump); pr_debug("%s\n", __func__); return ret; } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #endif /* CONFIG_DYNAMIC_FTRACE */
qkdxorjs1002/nov_kernel_razr
arch/microblaze/kernel/ftrace.c
C
gpl-2.0
6,208
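An aside, not part of the record above: ftrace_update_ftrace_func() encodes the handler address as two MicroBlaze instructions, an `imm` carrying the upper 16 bits (opcode base 0xb0000000) and an `addik r20, r0, lower16` (opcode base 0x32800000); the CPU concatenates the IMM register with the next instruction's 16-bit immediate to rebuild the full 32-bit constant. A standalone C check of that split and reassembly, using the example address that appears in the file's own comments:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t func  = 0xc0008e30u;                     /* example address from the comments */
	uint32_t upper = 0xb0000000u + (func >> 16);      /* imm 0xc000 */
	uint32_t lower = 0x32800000u + (func & 0xFFFFu);  /* addik r20, r0, 0x8e30 */

	/* Model the IMM:imm16 concatenation performed by the CPU. */
	uint32_t rebuilt = ((upper & 0xFFFFu) << 16) | (lower & 0xFFFFu);

	printf("upper=0x%08x lower=0x%08x rebuilt=0x%08x\n", upper, lower, rebuilt);
	return rebuilt == func ? 0 : 1; /* rebuilt must equal func */
}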
/* * Ftrace support for Microblaze. * * Copyright (C) 2009 Michal Simek <monstr@monstr.eu> * Copyright (C) 2009 PetaLogix * * Based on MIPS and PowerPC ftrace code * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <asm/cacheflush.h> #include <linux/ftrace.h> #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* * Hook the return address and push it in the stack of return addrs * in current thread info. */ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) { unsigned long old; int faulted, err; struct ftrace_graph_ent trace; unsigned long return_hooker = (unsigned long) &return_to_handler; if (unlikely(atomic_read(&current->tracing_graph_pause))) return; /* * Protect against fault, even if it shouldn't * happen. This tool is too much intrusive to * ignore such a protection. */ asm volatile(" 1: lwi %0, %2, 0; \ 2: swi %3, %2, 0; \ addik %1, r0, 0; \ 3: \ .section .fixup, \"ax\"; \ 4: brid 3b; \ addik %1, r0, 1; \ .previous; \ .section __ex_table,\"a\"; \ .word 1b,4b; \ .word 2b,4b; \ .previous;" \ : "=&r" (old), "=r" (faulted) : "r" (parent), "r" (return_hooker) ); flush_dcache_range((u32)parent, (u32)parent + 4); flush_icache_range((u32)parent, (u32)parent + 4); if (unlikely(faulted)) { ftrace_graph_stop(); WARN_ON(1); return; } err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0); if (err == -EBUSY) { *parent = old; return; } trace.func = self_addr; /* Only trace if the calling function expects to */ if (!ftrace_graph_entry(&trace)) { current->curr_ret_stack--; *parent = old; } } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #ifdef CONFIG_DYNAMIC_FTRACE /* save value to addr - it is save to do it in asm */ static int ftrace_modify_code(unsigned long addr, unsigned int value) { int faulted = 0; __asm__ __volatile__(" 1: swi %2, %1, 0; \ addik %0, r0, 0; \ 2: \ .section .fixup, \"ax\"; \ 3: brid 2b; \ addik %0, r0, 1; \ .previous; \ .section __ex_table,\"a\"; \ .word 1b,3b; \ .previous;" \ : "=r" (faulted) : "r" (addr), "r" (value) ); if (unlikely(faulted)) return -EFAULT; flush_dcache_range(addr, addr + 4); flush_icache_range(addr, addr + 4); return 0; } #define MICROBLAZE_NOP 0x80000000 #define MICROBLAZE_BRI 0xb800000C static unsigned int recorded; /* if save was or not */ static unsigned int imm; /* saving whole imm instruction */ /* There are two approaches howto solve ftrace_make nop function - look below */ #undef USE_FTRACE_NOP #ifdef USE_FTRACE_NOP static unsigned int bralid; /* saving whole bralid instruction */ #endif int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { /* we have this part of code which we are working with * b000c000 imm -16384 * b9fc8e30 bralid r15, -29136 // c0008e30 <_mcount> * 80000000 or r0, r0, r0 * * The first solution (!USE_FTRACE_NOP-could be called branch solution) * b000c000 bri 12 (0xC - jump to any other instruction) * b9fc8e30 bralid r15, -29136 // c0008e30 <_mcount> * 80000000 or r0, r0, r0 * any other instruction * * The second solution (USE_FTRACE_NOP) - no jump just nops * 80000000 or r0, r0, r0 * 80000000 or r0, r0, r0 * 80000000 or r0, r0, r0 */ int ret = 0; if (recorded == 0) { recorded = 1; imm = *(unsigned int *)rec->ip; pr_debug("%s: imm:0x%x\n", __func__, imm); #ifdef USE_FTRACE_NOP bralid = *(unsigned int *)(rec->ip + 4); pr_debug("%s: bralid 0x%x\n", __func__, bralid); #endif /* USE_FTRACE_NOP */ } #ifdef USE_FTRACE_NOP ret = ftrace_modify_code(rec->ip, 
MICROBLAZE_NOP); ret += ftrace_modify_code(rec->ip + 4, MICROBLAZE_NOP); #else /* USE_FTRACE_NOP */ ret = ftrace_modify_code(rec->ip, MICROBLAZE_BRI); #endif /* USE_FTRACE_NOP */ return ret; } /* ftrace_make_nop is expected to have been called before this function */ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { int ret; pr_debug("%s: addr:0x%x, rec->ip: 0x%x, imm:0x%x\n", __func__, (unsigned int)addr, (unsigned int)rec->ip, imm); ret = ftrace_modify_code(rec->ip, imm); #ifdef USE_FTRACE_NOP pr_debug("%s: bralid:0x%x\n", __func__, bralid); ret += ftrace_modify_code(rec->ip + 4, bralid); #endif /* USE_FTRACE_NOP */ return ret; } int __init ftrace_dyn_arch_init(void *data) { /* The return code is returned via data */ *(unsigned long *)data = 0; return 0; } int ftrace_update_ftrace_func(ftrace_func_t func) { unsigned long ip = (unsigned long)(&ftrace_call); unsigned int upper = (unsigned int)func; unsigned int lower = (unsigned int)func; int ret = 0; /* encode the imm/addik pair that loads func into r20 at the ftrace_call site */ upper = 0xb0000000 + (upper >> 16); /* imm func_upper */ lower = 0x32800000 + (lower & 0xFFFF); /* addik r20, r0, func_lower */ pr_debug("%s: func=0x%x, ip=0x%x, upper=0x%x, lower=0x%x\n", __func__, (unsigned int)func, (unsigned int)ip, upper, lower); /* save upper and lower code */ ret = ftrace_modify_code(ip, upper); ret += ftrace_modify_code(ip + 4, lower); /* We just need to replace the rtsd r15, 8 with NOP */ ret += ftrace_modify_code((unsigned long)&ftrace_caller, MICROBLAZE_NOP); return ret; } #ifdef CONFIG_FUNCTION_GRAPH_TRACER unsigned int old_jump; /* saved copy of the jump instruction */ int ftrace_enable_ftrace_graph_caller(void) { unsigned int ret; unsigned long ip = (unsigned long)(&ftrace_call_graph); old_jump = *(unsigned int *)ip; /* save the jump-over instruction */ ret = ftrace_modify_code(ip, MICROBLAZE_NOP); pr_debug("%s: Replace instruction: 0x%x\n", __func__, old_jump); return ret; } int ftrace_disable_ftrace_graph_caller(void) { unsigned int ret; unsigned long ip = (unsigned long)(&ftrace_call_graph); ret = ftrace_modify_code(ip, old_jump); pr_debug("%s\n", __func__); return ret; } #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #endif /* CONFIG_DYNAMIC_FTRACE */
bilalliberty/depricated-kernel-villec2--3.4-
arch/microblaze/kernel/ftrace.c
C
gpl-2.0
6,208
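The ftrace_update_ftrace_func() in the row above patches the ftrace_call slot by splitting the 32-bit handler address into an imm upper-half instruction (opcode base 0xb0000000) and an addik r20 lower-half instruction (opcode base 0x32800000). The user-space sketch below illustrates just that split; encode_imm_addik() is a hypothetical name, and only the two opcode constants and the example _mcount address from the row above are assumed:

#include <stdio.h>

/* Stand-alone rendering of the imm/addik encoding used by
 * ftrace_update_ftrace_func() above; assumes 32-bit unsigned int,
 * as the Microblaze code itself does. */
static void encode_imm_addik(unsigned int func, unsigned int out[2])
{
	out[0] = 0xb0000000 + (func >> 16);    /* imm   func_upper          */
	out[1] = 0x32800000 + (func & 0xFFFF); /* addik r20, r0, func_lower */
}

int main(void)
{
	unsigned int words[2];

	/* _mcount address taken from the listing in ftrace_make_nop() */
	encode_imm_addik(0xc0008e30, words);
	printf("imm:   0x%08x\naddik: 0x%08x\n", words[0], words[1]);
	return 0;
}

For the example address this prints 0xb000c000 for the imm word, matching the "b000c000 imm -16384" line in the ftrace_make_nop() comment.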
<?php /** * SimplePie * * A PHP-Based RSS and Atom Feed Framework. * Takes the hard work out of managing a complete RSS/Atom solution. * * Copyright (c) 2004-2012, Ryan Parman, Geoffrey Sneddon, Ryan McCue, and contributors * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are * permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, this list * of conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * * Neither the name of the SimplePie Team nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS * AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * @package SimplePie * @version 1.3.1 * @copyright 2004-2012 Ryan Parman, Geoffrey Sneddon, Ryan McCue * @author Ryan Parman * @author Geoffrey Sneddon * @author Ryan McCue * @link http://simplepie.org/ SimplePie * @license http://www.opensource.org/licenses/bsd-license.php BSD License */ /** * Manages all author-related data * * Used by {@see SimplePie_Item::get_author()} and {@see SimplePie::get_authors()} * * This class can be overloaded with {@see SimplePie::set_author_class()} * * @package SimplePie * @subpackage API */ class SimplePie_Author { /** * Author's name * * @var string * @see get_name() */ var $name; /** * Author's link * * @var string * @see get_link() */ var $link; /** * Author's email address * * @var string * @see get_email() */ var $email; /** * Constructor, used to input the data * * @param string $name * @param string $link * @param string $email */ public function __construct($name = null, $link = null, $email = null) { $this->name = $name; $this->link = $link; $this->email = $email; } /** * String-ified version * * @return string */ public function __toString() { // There is no $this->data here return md5(serialize($this)); } /** * Author's name * * @return string|null */ public function get_name() { if ($this->name !== null) { return $this->name; } else { return null; } } /** * Author's link * * @return string|null */ public function get_link() { if ($this->link !== null) { return $this->link; } else { return null; } } /** * Author's email address * * @return string|null */ public function get_email() { if ($this->email !== null) { return $this->email; } else { return null; } } }
cuongnd/banhangonline88_joomla
libraries/vendor/simplepie/simplepie/library/SimplePie/Author.php
PHP
gpl-2.0
3,592
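SimplePie_Author above is a plain value object: three nullable fields assigned once in the constructor and exposed through getters that return null when unset. As a rough cross-language sketch (hypothetical names, nothing assumed about the SimplePie API beyond the row above), the same shape in C looks like:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical C analog of the SimplePie_Author value object:
 * nullable fields set at construction, read through getters. */
struct author {
	char *name;
	char *link;
	char *email;
};

static struct author *author_new(const char *name, const char *link,
				 const char *email)
{
	struct author *a = calloc(1, sizeof(*a));

	if (!a)
		return NULL;
	a->name = name ? strdup(name) : NULL;
	a->link = link ? strdup(link) : NULL;
	a->email = email ? strdup(email) : NULL;
	return a;
}

/* Mirrors get_name(): NULL when the field was never set. */
static const char *author_name(const struct author *a) { return a->name; }

int main(void)
{
	struct author *a = author_new("Ryan", NULL, NULL);

	if (!a)
		return 1;
	printf("%s\n", author_name(a) ? author_name(a) : "(unset)");
	free(a->name); free(a->link); free(a->email); free(a);
	return 0;
}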
/* * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ #include <linux/list_sort.h> #include <linux/libnvdimm.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/ndctl.h> #include <linux/sysfs.h> #include <linux/delay.h> #include <linux/list.h> #include <linux/acpi.h> #include <linux/sort.h> #include <linux/pmem.h> #include <linux/io.h> #include <linux/nd.h> #include <asm/cacheflush.h> #include "nfit.h" /* * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is * irrelevant. */ #include <linux/io-64-nonatomic-hi-lo.h> static bool force_enable_dimms; module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status"); static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT; module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds"); /* after three payloads of overflow, it's dead jim */ static unsigned int scrub_overflow_abort = 3; module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(scrub_overflow_abort, "Number of times we overflow ARS results before abort"); static bool disable_vendor_specific; module_param(disable_vendor_specific, bool, S_IRUGO); MODULE_PARM_DESC(disable_vendor_specific, "Limit commands to the publicly specified set\n"); LIST_HEAD(acpi_descs); DEFINE_MUTEX(acpi_desc_lock); static struct workqueue_struct *nfit_wq; struct nfit_table_prev { struct list_head spas; struct list_head memdevs; struct list_head dcrs; struct list_head bdws; struct list_head idts; struct list_head flushes; }; static u8 nfit_uuid[NFIT_UUID_MAX][16]; const u8 *to_nfit_uuid(enum nfit_uuids id) { return nfit_uuid[id]; } EXPORT_SYMBOL(to_nfit_uuid); static struct acpi_nfit_desc *to_acpi_nfit_desc( struct nvdimm_bus_descriptor *nd_desc) { return container_of(nd_desc, struct acpi_nfit_desc, nd_desc); } static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc) { struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; /* * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct * acpi_device. 
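* Any other provider (a test harness, for example) is not backed by * an acpi_device, which is why the check below returns NULL for it.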
*/ if (!nd_desc->provider_name || strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0) return NULL; return to_acpi_device(acpi_desc->dev); } static int xlat_status(void *buf, unsigned int cmd) { struct nd_cmd_clear_error *clear_err; struct nd_cmd_ars_status *ars_status; struct nd_cmd_ars_start *ars_start; struct nd_cmd_ars_cap *ars_cap; u16 flags; switch (cmd) { case ND_CMD_ARS_CAP: ars_cap = buf; if ((ars_cap->status & 0xffff) == NFIT_ARS_CAP_NONE) return -ENOTTY; /* Command failed */ if (ars_cap->status & 0xffff) return -EIO; /* No supported scan types for this range */ flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE; if ((ars_cap->status >> 16 & flags) == 0) return -ENOTTY; break; case ND_CMD_ARS_START: ars_start = buf; /* ARS is in progress */ if ((ars_start->status & 0xffff) == NFIT_ARS_START_BUSY) return -EBUSY; /* Command failed */ if (ars_start->status & 0xffff) return -EIO; break; case ND_CMD_ARS_STATUS: ars_status = buf; /* Command failed */ if (ars_status->status & 0xffff) return -EIO; /* Check extended status (Upper two bytes) */ if (ars_status->status == NFIT_ARS_STATUS_DONE) return 0; /* ARS is in progress */ if (ars_status->status == NFIT_ARS_STATUS_BUSY) return -EBUSY; /* No ARS performed for the current boot */ if (ars_status->status == NFIT_ARS_STATUS_NONE) return -EAGAIN; /* * ARS interrupted, either we overflowed or some other * agent wants the scan to stop. If we didn't overflow * then just continue with the returned results. */ if (ars_status->status == NFIT_ARS_STATUS_INTR) { if (ars_status->flags & NFIT_ARS_F_OVERFLOW) return -ENOSPC; return 0; } /* Unknown status */ if (ars_status->status >> 16) return -EIO; break; case ND_CMD_CLEAR_ERROR: clear_err = buf; if (clear_err->status & 0xffff) return -EIO; if (!clear_err->cleared) return -EIO; if (clear_err->length > clear_err->cleared) return clear_err->cleared; break; default: break; } return 0; } static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc) { struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); union acpi_object in_obj, in_buf, *out_obj; const struct nd_cmd_desc *desc = NULL; struct device *dev = acpi_desc->dev; struct nd_cmd_pkg *call_pkg = NULL; const char *cmd_name, *dimm_name; unsigned long cmd_mask, dsm_mask; acpi_handle handle; unsigned int func; const u8 *uuid; u32 offset; int rc, i; func = cmd; if (cmd == ND_CMD_CALL) { call_pkg = buf; func = call_pkg->nd_command; } if (nvdimm) { struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); struct acpi_device *adev = nfit_mem->adev; if (!adev) return -ENOTTY; if (call_pkg && nfit_mem->family != call_pkg->nd_family) return -ENOTTY; dimm_name = nvdimm_name(nvdimm); cmd_name = nvdimm_cmd_name(cmd); cmd_mask = nvdimm_cmd_mask(nvdimm); dsm_mask = nfit_mem->dsm_mask; desc = nd_cmd_dimm_desc(cmd); uuid = to_nfit_uuid(nfit_mem->family); handle = adev->handle; } else { struct acpi_device *adev = to_acpi_dev(acpi_desc); cmd_name = nvdimm_bus_cmd_name(cmd); cmd_mask = nd_desc->cmd_mask; dsm_mask = cmd_mask; desc = nd_cmd_bus_desc(cmd); uuid = to_nfit_uuid(NFIT_DEV_BUS); handle = adev->handle; dimm_name = "bus"; } if (!desc || (cmd && (desc->out_num + desc->in_num == 0))) return -ENOTTY; if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask)) return -ENOTTY; in_obj.type = ACPI_TYPE_PACKAGE; in_obj.package.count = 1; in_obj.package.elements = &in_buf; in_buf.type = ACPI_TYPE_BUFFER; in_buf.buffer.pointer = buf; in_buf.buffer.length = 0; /* libnvdimm has already 
validated the input envelope */ for (i = 0; i < desc->in_num; i++) in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc, i, buf); if (call_pkg) { /* skip over package wrapper */ in_buf.buffer.pointer = (void *) &call_pkg->nd_payload; in_buf.buffer.length = call_pkg->nd_size_in; } if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) { dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n", __func__, dimm_name, cmd, func, in_buf.buffer.length); print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4, in_buf.buffer.pointer, min_t(u32, 256, in_buf.buffer.length), true); } out_obj = acpi_evaluate_dsm(handle, uuid, 1, func, &in_obj); if (!out_obj) { dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name, cmd_name); return -EINVAL; } if (call_pkg) { call_pkg->nd_fw_size = out_obj->buffer.length; memcpy(call_pkg->nd_payload + call_pkg->nd_size_in, out_obj->buffer.pointer, min(call_pkg->nd_fw_size, call_pkg->nd_size_out)); ACPI_FREE(out_obj); /* * Need to support FW function w/o known size in advance. * Caller can determine required size based upon nd_fw_size. * If we return an error (like elsewhere) then caller wouldn't * be able to rely upon data returned to make calculation. */ return 0; } if (out_obj->package.type != ACPI_TYPE_BUFFER) { dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n", __func__, dimm_name, cmd_name, out_obj->type); rc = -EINVAL; goto out; } if (IS_ENABLED(CONFIG_ACPI_NFIT_DEBUG)) { dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__, dimm_name, cmd_name, out_obj->buffer.length); print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4, out_obj->buffer.pointer, min_t(u32, 128, out_obj->buffer.length), true); } for (i = 0, offset = 0; i < desc->out_num; i++) { u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf, (u32 *) out_obj->buffer.pointer); if (offset + out_size > out_obj->buffer.length) { dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n", __func__, dimm_name, cmd_name, i); break; } if (in_buf.buffer.length + offset + out_size > buf_len) { dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n", __func__, dimm_name, cmd_name, i); rc = -ENXIO; goto out; } memcpy(buf + in_buf.buffer.length + offset, out_obj->buffer.pointer + offset, out_size); offset += out_size; } if (offset + in_buf.buffer.length < buf_len) { if (i >= 1) { /* * status valid, return the number of bytes left * unfilled in the output buffer */ rc = buf_len - offset - in_buf.buffer.length; if (cmd_rc) *cmd_rc = xlat_status(buf, cmd); } else { dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n", __func__, dimm_name, cmd_name, buf_len, offset); rc = -ENXIO; } } else { rc = 0; if (cmd_rc) *cmd_rc = xlat_status(buf, cmd); } out: ACPI_FREE(out_obj); return rc; } static const char *spa_type_name(u16 type) { static const char *to_name[] = { [NFIT_SPA_VOLATILE] = "volatile", [NFIT_SPA_PM] = "pmem", [NFIT_SPA_DCR] = "dimm-control-region", [NFIT_SPA_BDW] = "block-data-window", [NFIT_SPA_VDISK] = "volatile-disk", [NFIT_SPA_VCD] = "volatile-cd", [NFIT_SPA_PDISK] = "persistent-disk", [NFIT_SPA_PCD] = "persistent-cd", }; if (type > NFIT_SPA_PCD) return "unknown"; return to_name[type]; } int nfit_spa_type(struct acpi_nfit_system_address *spa) { int i; for (i = 0; i < NFIT_UUID_MAX; i++) if (memcmp(to_nfit_uuid(i), spa->range_guid, 16) == 0) return i; return -1; } static bool add_spa(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_system_address *spa) { struct device *dev = acpi_desc->dev; struct nfit_spa *nfit_spa; if 
(spa->header.length != sizeof(*spa)) return false; list_for_each_entry(nfit_spa, &prev->spas, list) { if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) { list_move_tail(&nfit_spa->list, &acpi_desc->spas); return true; } } nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa), GFP_KERNEL); if (!nfit_spa) return false; INIT_LIST_HEAD(&nfit_spa->list); memcpy(nfit_spa->spa, spa, sizeof(*spa)); list_add_tail(&nfit_spa->list, &acpi_desc->spas); dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__, spa->range_index, spa_type_name(nfit_spa_type(spa))); return true; } static bool add_memdev(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_memory_map *memdev) { struct device *dev = acpi_desc->dev; struct nfit_memdev *nfit_memdev; if (memdev->header.length != sizeof(*memdev)) return false; list_for_each_entry(nfit_memdev, &prev->memdevs, list) if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) { list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs); return true; } nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev), GFP_KERNEL); if (!nfit_memdev) return false; INIT_LIST_HEAD(&nfit_memdev->list); memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev)); list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs); dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d\n", __func__, memdev->device_handle, memdev->range_index, memdev->region_index); return true; } /* * An implementation may provide a truncated control region if no block windows * are defined. */ static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr) { if (dcr->header.length < offsetof(struct acpi_nfit_control_region, window_size)) return 0; if (dcr->windows) return sizeof(*dcr); return offsetof(struct acpi_nfit_control_region, window_size); } static bool add_dcr(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_control_region *dcr) { struct device *dev = acpi_desc->dev; struct nfit_dcr *nfit_dcr; if (!sizeof_dcr(dcr)) return false; list_for_each_entry(nfit_dcr, &prev->dcrs, list) if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) { list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs); return true; } nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr), GFP_KERNEL); if (!nfit_dcr) return false; INIT_LIST_HEAD(&nfit_dcr->list); memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)); list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs); dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__, dcr->region_index, dcr->windows); return true; } static bool add_bdw(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_data_region *bdw) { struct device *dev = acpi_desc->dev; struct nfit_bdw *nfit_bdw; if (bdw->header.length != sizeof(*bdw)) return false; list_for_each_entry(nfit_bdw, &prev->bdws, list) if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) { list_move_tail(&nfit_bdw->list, &acpi_desc->bdws); return true; } nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw), GFP_KERNEL); if (!nfit_bdw) return false; INIT_LIST_HEAD(&nfit_bdw->list); memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw)); list_add_tail(&nfit_bdw->list, &acpi_desc->bdws); dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__, bdw->region_index, bdw->windows); return true; } static size_t sizeof_idt(struct acpi_nfit_interleave *idt) { if (idt->header.length < sizeof(*idt)) return 0; return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1); } static bool add_idt(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct 
acpi_nfit_interleave *idt) { struct device *dev = acpi_desc->dev; struct nfit_idt *nfit_idt; if (!sizeof_idt(idt)) return false; list_for_each_entry(nfit_idt, &prev->idts, list) { if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt)) continue; if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) { list_move_tail(&nfit_idt->list, &acpi_desc->idts); return true; } } nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt), GFP_KERNEL); if (!nfit_idt) return false; INIT_LIST_HEAD(&nfit_idt->list); memcpy(nfit_idt->idt, idt, sizeof_idt(idt)); list_add_tail(&nfit_idt->list, &acpi_desc->idts); dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__, idt->interleave_index, idt->line_count); return true; } static size_t sizeof_flush(struct acpi_nfit_flush_address *flush) { if (flush->header.length < sizeof(*flush)) return 0; return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1); } static bool add_flush(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, struct acpi_nfit_flush_address *flush) { struct device *dev = acpi_desc->dev; struct nfit_flush *nfit_flush; if (!sizeof_flush(flush)) return false; list_for_each_entry(nfit_flush, &prev->flushes, list) { if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush)) continue; if (memcmp(nfit_flush->flush, flush, sizeof_flush(flush)) == 0) { list_move_tail(&nfit_flush->list, &acpi_desc->flushes); return true; } } nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush) + sizeof_flush(flush), GFP_KERNEL); if (!nfit_flush) return false; INIT_LIST_HEAD(&nfit_flush->list); memcpy(nfit_flush->flush, flush, sizeof_flush(flush)); list_add_tail(&nfit_flush->list, &acpi_desc->flushes); dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__, flush->device_handle, flush->hint_count); return true; } static void *add_table(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev, void *table, const void *end) { struct device *dev = acpi_desc->dev; struct acpi_nfit_header *hdr; void *err = ERR_PTR(-ENOMEM); if (table >= end) return NULL; hdr = table; if (!hdr->length) { dev_warn(dev, "found a zero length table '%d' parsing nfit\n", hdr->type); return NULL; } switch (hdr->type) { case ACPI_NFIT_TYPE_SYSTEM_ADDRESS: if (!add_spa(acpi_desc, prev, table)) return err; break; case ACPI_NFIT_TYPE_MEMORY_MAP: if (!add_memdev(acpi_desc, prev, table)) return err; break; case ACPI_NFIT_TYPE_CONTROL_REGION: if (!add_dcr(acpi_desc, prev, table)) return err; break; case ACPI_NFIT_TYPE_DATA_REGION: if (!add_bdw(acpi_desc, prev, table)) return err; break; case ACPI_NFIT_TYPE_INTERLEAVE: if (!add_idt(acpi_desc, prev, table)) return err; break; case ACPI_NFIT_TYPE_FLUSH_ADDRESS: if (!add_flush(acpi_desc, prev, table)) return err; break; case ACPI_NFIT_TYPE_SMBIOS: dev_dbg(dev, "%s: smbios\n", __func__); break; default: dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type); break; } return table + hdr->length; } static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc, struct nfit_mem *nfit_mem) { u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle; u16 dcr = nfit_mem->dcr->region_index; struct nfit_spa *nfit_spa; list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { u16 range_index = nfit_spa->spa->range_index; int type = nfit_spa_type(nfit_spa->spa); struct nfit_memdev *nfit_memdev; if (type != NFIT_SPA_BDW) continue; list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { if (nfit_memdev->memdev->range_index != range_index) continue; if (nfit_memdev->memdev->device_handle != device_handle) continue; if 
(nfit_memdev->memdev->region_index != dcr) continue; nfit_mem->spa_bdw = nfit_spa->spa; return; } } dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n", nfit_mem->spa_dcr->range_index); nfit_mem->bdw = NULL; } static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc, struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa) { u16 dcr = __to_nfit_memdev(nfit_mem)->region_index; struct nfit_memdev *nfit_memdev; struct nfit_bdw *nfit_bdw; struct nfit_idt *nfit_idt; u16 idt_idx, range_index; list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) { if (nfit_bdw->bdw->region_index != dcr) continue; nfit_mem->bdw = nfit_bdw->bdw; break; } if (!nfit_mem->bdw) return; nfit_mem_find_spa_bdw(acpi_desc, nfit_mem); if (!nfit_mem->spa_bdw) return; range_index = nfit_mem->spa_bdw->range_index; list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { if (nfit_memdev->memdev->range_index != range_index || nfit_memdev->memdev->region_index != dcr) continue; nfit_mem->memdev_bdw = nfit_memdev->memdev; idt_idx = nfit_memdev->memdev->interleave_index; list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { if (nfit_idt->idt->interleave_index != idt_idx) continue; nfit_mem->idt_bdw = nfit_idt->idt; break; } break; } } static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, struct acpi_nfit_system_address *spa) { struct nfit_mem *nfit_mem, *found; struct nfit_memdev *nfit_memdev; int type = nfit_spa_type(spa); switch (type) { case NFIT_SPA_DCR: case NFIT_SPA_PM: break; default: return 0; } list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { struct nfit_flush *nfit_flush; struct nfit_dcr *nfit_dcr; u32 device_handle; u16 dcr; if (nfit_memdev->memdev->range_index != spa->range_index) continue; found = NULL; dcr = nfit_memdev->memdev->region_index; device_handle = nfit_memdev->memdev->device_handle; list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle) { found = nfit_mem; break; } if (found) nfit_mem = found; else { nfit_mem = devm_kzalloc(acpi_desc->dev, sizeof(*nfit_mem), GFP_KERNEL); if (!nfit_mem) return -ENOMEM; INIT_LIST_HEAD(&nfit_mem->list); nfit_mem->acpi_desc = acpi_desc; list_add(&nfit_mem->list, &acpi_desc->dimms); } list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { if (nfit_dcr->dcr->region_index != dcr) continue; /* * Record the control region for the dimm. For * the ACPI 6.1 case, where there are separate * control regions for the pmem vs blk * interfaces, be sure to record the extended * blk details. 
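* In practice: keep the first DCR seen for this region index, but * let a DCR that defines block windows replace one that does not.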
*/ if (!nfit_mem->dcr) nfit_mem->dcr = nfit_dcr->dcr; else if (nfit_mem->dcr->windows == 0 && nfit_dcr->dcr->windows) nfit_mem->dcr = nfit_dcr->dcr; break; } list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) { struct acpi_nfit_flush_address *flush; u16 i; if (nfit_flush->flush->device_handle != device_handle) continue; nfit_mem->nfit_flush = nfit_flush; flush = nfit_flush->flush; nfit_mem->flush_wpq = devm_kzalloc(acpi_desc->dev, flush->hint_count * sizeof(struct resource), GFP_KERNEL); if (!nfit_mem->flush_wpq) return -ENOMEM; for (i = 0; i < flush->hint_count; i++) { struct resource *res = &nfit_mem->flush_wpq[i]; res->start = flush->hint_address[i]; res->end = res->start + 8 - 1; } break; } if (dcr && !nfit_mem->dcr) { dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n", spa->range_index, dcr); return -ENODEV; } if (type == NFIT_SPA_DCR) { struct nfit_idt *nfit_idt; u16 idt_idx; /* multiple dimms may share a SPA when interleaved */ nfit_mem->spa_dcr = spa; nfit_mem->memdev_dcr = nfit_memdev->memdev; idt_idx = nfit_memdev->memdev->interleave_index; list_for_each_entry(nfit_idt, &acpi_desc->idts, list) { if (nfit_idt->idt->interleave_index != idt_idx) continue; nfit_mem->idt_dcr = nfit_idt->idt; break; } nfit_mem_init_bdw(acpi_desc, nfit_mem, spa); } else { /* * A single dimm may belong to multiple SPA-PM * ranges, record at least one in addition to * any SPA-DCR range. */ nfit_mem->memdev_pmem = nfit_memdev->memdev; } } return 0; } static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b) { struct nfit_mem *a = container_of(_a, typeof(*a), list); struct nfit_mem *b = container_of(_b, typeof(*b), list); u32 handleA, handleB; handleA = __to_nfit_memdev(a)->device_handle; handleB = __to_nfit_memdev(b)->device_handle; if (handleA < handleB) return -1; else if (handleA > handleB) return 1; return 0; } static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc) { struct nfit_spa *nfit_spa; /* * For each SPA-DCR or SPA-PMEM address range find its * corresponding MEMDEV(s). From each MEMDEV find the * corresponding DCR. Then, if we're operating on a SPA-DCR, * try to find a SPA-BDW and a corresponding BDW that references * the DCR. Throw it all into an nfit_mem object. Note, that * BDWs are optional. */ list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { int rc; rc = nfit_mem_dcr_init(acpi_desc, nfit_spa->spa); if (rc) return rc; } list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp); return 0; } static ssize_t revision_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision); } static DEVICE_ATTR_RO(revision); /* * This shows the number of full Address Range Scrubs that have been * completed since driver load time. Userspace can wait on this using * select/poll etc. A '+' at the end indicates an ARS is in progress */ static ssize_t scrub_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvdimm_bus_descriptor *nd_desc; ssize_t rc = -ENXIO; device_lock(dev); nd_desc = dev_get_drvdata(dev); if (nd_desc) { struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); rc = sprintf(buf, "%d%s", acpi_desc->scrub_count, (work_busy(&acpi_desc->work)) ? 
"+\n" : "\n"); } device_unlock(dev); return rc; } static ssize_t scrub_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct nvdimm_bus_descriptor *nd_desc; ssize_t rc; long val; rc = kstrtol(buf, 0, &val); if (rc) return rc; if (val != 1) return -EINVAL; device_lock(dev); nd_desc = dev_get_drvdata(dev); if (nd_desc) { struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); rc = acpi_nfit_ars_rescan(acpi_desc); } device_unlock(dev); if (rc) return rc; return size; } static DEVICE_ATTR_RW(scrub); static bool ars_supported(struct nvdimm_bus *nvdimm_bus) { struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START | 1 << ND_CMD_ARS_STATUS; return (nd_desc->cmd_mask & mask) == mask; } static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n) { struct device *dev = container_of(kobj, struct device, kobj); struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus)) return 0; return a->mode; } static struct attribute *acpi_nfit_attributes[] = { &dev_attr_revision.attr, &dev_attr_scrub.attr, NULL, }; static struct attribute_group acpi_nfit_attribute_group = { .name = "nfit", .attrs = acpi_nfit_attributes, .is_visible = nfit_visible, }; static const struct attribute_group *acpi_nfit_attribute_groups[] = { &nvdimm_bus_attribute_group, &acpi_nfit_attribute_group, NULL, }; static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev) { struct nvdimm *nvdimm = to_nvdimm(dev); struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); return __to_nfit_memdev(nfit_mem); } static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev) { struct nvdimm *nvdimm = to_nvdimm(dev); struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); return nfit_mem->dcr; } static ssize_t handle_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); return sprintf(buf, "%#x\n", memdev->device_handle); } static DEVICE_ATTR_RO(handle); static ssize_t phys_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev); return sprintf(buf, "%#x\n", memdev->physical_id); } static DEVICE_ATTR_RO(phys_id); static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id)); } static DEVICE_ATTR_RO(vendor); static ssize_t rev_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id)); } static DEVICE_ATTR_RO(rev_id); static ssize_t device_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id)); } static DEVICE_ATTR_RO(device); static ssize_t subsystem_vendor_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id)); } static DEVICE_ATTR_RO(subsystem_vendor); static ssize_t subsystem_rev_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); return sprintf(buf, "0x%04x\n", 
be16_to_cpu(dcr->subsystem_revision_id)); } static DEVICE_ATTR_RO(subsystem_rev_id); static ssize_t subsystem_device_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id)); } static DEVICE_ATTR_RO(subsystem_device); static int num_nvdimm_formats(struct nvdimm *nvdimm) { struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); int formats = 0; if (nfit_mem->memdev_pmem) formats++; if (nfit_mem->memdev_bdw) formats++; return formats; } static ssize_t format_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code)); } static DEVICE_ATTR_RO(format); static ssize_t format1_show(struct device *dev, struct device_attribute *attr, char *buf) { u32 handle; ssize_t rc = -ENXIO; struct nfit_mem *nfit_mem; struct nfit_memdev *nfit_memdev; struct acpi_nfit_desc *acpi_desc; struct nvdimm *nvdimm = to_nvdimm(dev); struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); nfit_mem = nvdimm_provider_data(nvdimm); acpi_desc = nfit_mem->acpi_desc; handle = to_nfit_memdev(dev)->device_handle; /* assumes DIMMs have at most 2 published interface codes */ mutex_lock(&acpi_desc->init_mutex); list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; struct nfit_dcr *nfit_dcr; if (memdev->device_handle != handle) continue; list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) { if (nfit_dcr->dcr->region_index != memdev->region_index) continue; if (nfit_dcr->dcr->code == dcr->code) continue; rc = sprintf(buf, "0x%04x\n", le16_to_cpu(nfit_dcr->dcr->code)); break; } if (rc != -ENXIO) break; } mutex_unlock(&acpi_desc->init_mutex); return rc; } static DEVICE_ATTR_RO(format1); static ssize_t formats_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvdimm *nvdimm = to_nvdimm(dev); return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm)); } static DEVICE_ATTR_RO(formats); static ssize_t serial_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number)); } static DEVICE_ATTR_RO(serial); static ssize_t family_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvdimm *nvdimm = to_nvdimm(dev); struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); if (nfit_mem->family < 0) return -ENXIO; return sprintf(buf, "%d\n", nfit_mem->family); } static DEVICE_ATTR_RO(family); static ssize_t dsm_mask_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nvdimm *nvdimm = to_nvdimm(dev); struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); if (nfit_mem->family < 0) return -ENXIO; return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask); } static DEVICE_ATTR_RO(dsm_mask); static ssize_t flags_show(struct device *dev, struct device_attribute *attr, char *buf) { u16 flags = to_nfit_memdev(dev)->flags; return sprintf(buf, "%s%s%s%s%s\n", flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "", flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "", flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "", flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "", flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ?
"smart_event " : ""); } static DEVICE_ATTR_RO(flags); static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev); if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID) return sprintf(buf, "%04x-%02x-%04x-%08x\n", be16_to_cpu(dcr->vendor_id), dcr->manufacturing_location, be16_to_cpu(dcr->manufacturing_date), be32_to_cpu(dcr->serial_number)); else return sprintf(buf, "%04x-%08x\n", be16_to_cpu(dcr->vendor_id), be32_to_cpu(dcr->serial_number)); } static DEVICE_ATTR_RO(id); static struct attribute *acpi_nfit_dimm_attributes[] = { &dev_attr_handle.attr, &dev_attr_phys_id.attr, &dev_attr_vendor.attr, &dev_attr_device.attr, &dev_attr_rev_id.attr, &dev_attr_subsystem_vendor.attr, &dev_attr_subsystem_device.attr, &dev_attr_subsystem_rev_id.attr, &dev_attr_format.attr, &dev_attr_formats.attr, &dev_attr_format1.attr, &dev_attr_serial.attr, &dev_attr_flags.attr, &dev_attr_id.attr, &dev_attr_family.attr, &dev_attr_dsm_mask.attr, NULL, }; static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj, struct attribute *a, int n) { struct device *dev = container_of(kobj, struct device, kobj); struct nvdimm *nvdimm = to_nvdimm(dev); if (!to_nfit_dcr(dev)) return 0; if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1) return 0; return a->mode; } static struct attribute_group acpi_nfit_dimm_attribute_group = { .name = "nfit", .attrs = acpi_nfit_dimm_attributes, .is_visible = acpi_nfit_dimm_attr_visible, }; static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = { &nvdimm_attribute_group, &nd_device_attribute_group, &acpi_nfit_dimm_attribute_group, NULL, }; static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc, u32 device_handle) { struct nfit_mem *nfit_mem; list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle) return nfit_mem->nvdimm; return NULL; } static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, struct nfit_mem *nfit_mem, u32 device_handle) { struct acpi_device *adev, *adev_dimm; struct device *dev = acpi_desc->dev; unsigned long dsm_mask; const u8 *uuid; int i; /* nfit test assumes 1:1 relationship between commands and dsms */ nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en; nfit_mem->family = NVDIMM_FAMILY_INTEL; adev = to_acpi_dev(acpi_desc); if (!adev) return 0; adev_dimm = acpi_find_child_device(adev, device_handle, false); nfit_mem->adev = adev_dimm; if (!adev_dimm) { dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n", device_handle); return force_enable_dimms ? 0 : -ENODEV; } /* * Until standardization materializes we need to consider 4 * different command sets. Note, that checking for function0 (bit0) * tells us if any commands are reachable through this uuid. 
*/ for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_MSFT; i++) if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) break; /* limit the supported commands to those that are publicly documented */ nfit_mem->family = i; if (nfit_mem->family == NVDIMM_FAMILY_INTEL) { dsm_mask = 0x3fe; if (disable_vendor_specific) dsm_mask &= ~(1 << ND_CMD_VENDOR); } else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) { dsm_mask = 0x1c3c76; } else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) { dsm_mask = 0x1fe; if (disable_vendor_specific) dsm_mask &= ~(1 << 8); } else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) { dsm_mask = 0xffffffff; } else { dev_dbg(dev, "unknown dimm command family\n"); nfit_mem->family = -1; /* DSMs are optional, continue loading the driver... */ return 0; } uuid = to_nfit_uuid(nfit_mem->family); for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i)) set_bit(i, &nfit_mem->dsm_mask); return 0; } static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) { struct nfit_mem *nfit_mem; int dimm_count = 0; list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) { struct acpi_nfit_flush_address *flush; unsigned long flags = 0, cmd_mask; struct nvdimm *nvdimm; u32 device_handle; u16 mem_flags; int rc; device_handle = __to_nfit_memdev(nfit_mem)->device_handle; nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle); if (nvdimm) { dimm_count++; continue; } if (nfit_mem->bdw && nfit_mem->memdev_pmem) flags |= NDD_ALIASING; mem_flags = __to_nfit_memdev(nfit_mem)->flags; if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED) flags |= NDD_UNARMED; rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle); if (rc) continue; /* * TODO: provide translation for non-NVDIMM_FAMILY_INTEL * devices (i.e. from nd_cmd to acpi_dsm) to standardize the * userspace interface. */ cmd_mask = 1UL << ND_CMD_CALL; if (nfit_mem->family == NVDIMM_FAMILY_INTEL) cmd_mask |= nfit_mem->dsm_mask; flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush : NULL; nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem, acpi_nfit_dimm_attribute_groups, flags, cmd_mask, flush ? flush->hint_count : 0, nfit_mem->flush_wpq); if (!nvdimm) return -ENOMEM; nfit_mem->nvdimm = nvdimm; dimm_count++; if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0) continue; dev_info(acpi_desc->dev, "%s flags:%s%s%s%s\n", nvdimm_name(nvdimm), mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "", mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"", mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "", mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? 
" not_armed" : ""); } return nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count); } static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) { struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; const u8 *uuid = to_nfit_uuid(NFIT_DEV_BUS); struct acpi_device *adev; int i; nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en; adev = to_acpi_dev(acpi_desc); if (!adev) return; for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++) if (acpi_check_dsm(adev->handle, uuid, 1, 1ULL << i)) set_bit(i, &nd_desc->cmd_mask); } static ssize_t range_index_show(struct device *dev, struct device_attribute *attr, char *buf) { struct nd_region *nd_region = to_nd_region(dev); struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region); return sprintf(buf, "%d\n", nfit_spa->spa->range_index); } static DEVICE_ATTR_RO(range_index); static struct attribute *acpi_nfit_region_attributes[] = { &dev_attr_range_index.attr, NULL, }; static struct attribute_group acpi_nfit_region_attribute_group = { .name = "nfit", .attrs = acpi_nfit_region_attributes, }; static const struct attribute_group *acpi_nfit_region_attribute_groups[] = { &nd_region_attribute_group, &nd_mapping_attribute_group, &nd_device_attribute_group, &nd_numa_attribute_group, &acpi_nfit_region_attribute_group, NULL, }; /* enough info to uniquely specify an interleave set */ struct nfit_set_info { struct nfit_set_info_map { u64 region_offset; u32 serial_number; u32 pad; } mapping[0]; }; static size_t sizeof_nfit_set_info(int num_mappings) { return sizeof(struct nfit_set_info) + num_mappings * sizeof(struct nfit_set_info_map); } static int cmp_map(const void *m0, const void *m1) { const struct nfit_set_info_map *map0 = m0; const struct nfit_set_info_map *map1 = m1; return memcmp(&map0->region_offset, &map1->region_offset, sizeof(u64)); } /* Retrieve the nth entry referencing this spa */ static struct acpi_nfit_memory_map *memdev_from_spa( struct acpi_nfit_desc *acpi_desc, u16 range_index, int n) { struct nfit_memdev *nfit_memdev; list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) if (nfit_memdev->memdev->range_index == range_index) if (n-- == 0) return nfit_memdev->memdev; return NULL; } static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, struct nd_region_desc *ndr_desc, struct acpi_nfit_system_address *spa) { int i, spa_type = nfit_spa_type(spa); struct device *dev = acpi_desc->dev; struct nd_interleave_set *nd_set; u16 nr = ndr_desc->num_mappings; struct nfit_set_info *info; if (spa_type == NFIT_SPA_PM || spa_type == NFIT_SPA_VOLATILE) /* pass */; else return 0; nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); if (!nd_set) return -ENOMEM; info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL); if (!info) return -ENOMEM; for (i = 0; i < nr; i++) { struct nd_mapping *nd_mapping = &ndr_desc->nd_mapping[i]; struct nfit_set_info_map *map = &info->mapping[i]; struct nvdimm *nvdimm = nd_mapping->nvdimm; struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc, spa->range_index, i); if (!memdev || !nfit_mem->dcr) { dev_err(dev, "%s: failed to find DCR\n", __func__); return -ENODEV; } map->region_offset = memdev->region_offset; map->serial_number = nfit_mem->dcr->serial_number; } sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), cmp_map, NULL); nd_set->cookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); ndr_desc->nd_set = nd_set; devm_kfree(dev, info); return 0; } static u64 to_interleave_offset(u64 offset, 
struct nfit_blk_mmio *mmio) { struct acpi_nfit_interleave *idt = mmio->idt; u32 sub_line_offset, line_index, line_offset; u64 line_no, table_skip_count, table_offset; line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset); table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index); line_offset = idt->line_offset[line_index] * mmio->line_size; table_offset = table_skip_count * mmio->table_size; return mmio->base_offset + line_offset + table_offset + sub_line_offset; } static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw) { struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; u64 offset = nfit_blk->stat_offset + mmio->size * bw; const u32 STATUS_MASK = 0x80000037; if (mmio->num_lines) offset = to_interleave_offset(offset, mmio); return readl(mmio->addr.base + offset) & STATUS_MASK; } static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw, resource_size_t dpa, unsigned int len, unsigned int write) { u64 cmd, offset; struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR]; enum { BCW_OFFSET_MASK = (1ULL << 48)-1, BCW_LEN_SHIFT = 48, BCW_LEN_MASK = (1ULL << 8) - 1, BCW_CMD_SHIFT = 56, }; cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK; len = len >> L1_CACHE_SHIFT; cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT; cmd |= ((u64) write) << BCW_CMD_SHIFT; offset = nfit_blk->cmd_offset + mmio->size * bw; if (mmio->num_lines) offset = to_interleave_offset(offset, mmio); writeq(cmd, mmio->addr.base + offset); nvdimm_flush(nfit_blk->nd_region); if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH) readq(mmio->addr.base + offset); } static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk, resource_size_t dpa, void *iobuf, size_t len, int rw, unsigned int lane) { struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; unsigned int copied = 0; u64 base_offset; int rc; base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES + lane * mmio->size; write_blk_ctl(nfit_blk, lane, dpa, len, rw); while (len) { unsigned int c; u64 offset; if (mmio->num_lines) { u32 line_offset; offset = to_interleave_offset(base_offset + copied, mmio); div_u64_rem(offset, mmio->line_size, &line_offset); c = min_t(size_t, len, mmio->line_size - line_offset); } else { offset = base_offset + nfit_blk->bdw_offset; c = len; } if (rw) memcpy_to_pmem(mmio->addr.aperture + offset, iobuf + copied, c); else { if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH) mmio_flush_range((void __force *) mmio->addr.aperture + offset, c); memcpy_from_pmem(iobuf + copied, mmio->addr.aperture + offset, c); } copied += c; len -= c; } if (rw) nvdimm_flush(nfit_blk->nd_region); rc = read_blk_stat(nfit_blk, lane) ? 
-EIO : 0; return rc; } static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr, resource_size_t dpa, void *iobuf, u64 len, int rw) { struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr); struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW]; struct nd_region *nd_region = nfit_blk->nd_region; unsigned int lane, copied = 0; int rc = 0; lane = nd_region_acquire_lane(nd_region); while (len) { u64 c = min(len, mmio->size); rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied, iobuf + copied, c, rw, lane); if (rc) break; copied += c; len -= c; } nd_region_release_lane(nd_region, lane); return rc; } static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio, struct acpi_nfit_interleave *idt, u16 interleave_ways) { if (idt) { mmio->num_lines = idt->line_count; mmio->line_size = idt->line_size; if (interleave_ways == 0) return -ENXIO; mmio->table_size = mmio->num_lines * interleave_ways * mmio->line_size; } return 0; } static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, struct nfit_blk *nfit_blk) { struct nd_cmd_dimm_flags flags; int rc; memset(&flags, 0, sizeof(flags)); rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags, sizeof(flags), NULL); if (rc >= 0 && flags.status == 0) nfit_blk->dimm_flags = flags.flags; else if (rc == -ENOTTY) { /* fall back to a conservative default */ nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH; rc = 0; } else rc = -ENXIO; return rc; } static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus, struct device *dev) { struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); struct nd_blk_region *ndbr = to_nd_blk_region(dev); struct nfit_blk_mmio *mmio; struct nfit_blk *nfit_blk; struct nfit_mem *nfit_mem; struct nvdimm *nvdimm; int rc; nvdimm = nd_blk_region_to_dimm(ndbr); nfit_mem = nvdimm_provider_data(nvdimm); if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) { dev_dbg(dev, "%s: missing%s%s%s\n", __func__, nfit_mem ? "" : " nfit_mem", (nfit_mem && nfit_mem->dcr) ? "" : " dcr", (nfit_mem && nfit_mem->bdw) ? 
"" : " bdw"); return -ENXIO; } nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL); if (!nfit_blk) return -ENOMEM; nd_blk_region_set_provider_data(ndbr, nfit_blk); nfit_blk->nd_region = to_nd_region(dev); /* map block aperture memory */ nfit_blk->bdw_offset = nfit_mem->bdw->offset; mmio = &nfit_blk->mmio[BDW]; mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address, nfit_mem->spa_bdw->length, ARCH_MEMREMAP_PMEM); if (!mmio->addr.base) { dev_dbg(dev, "%s: %s failed to map bdw\n", __func__, nvdimm_name(nvdimm)); return -ENOMEM; } mmio->size = nfit_mem->bdw->size; mmio->base_offset = nfit_mem->memdev_bdw->region_offset; mmio->idt = nfit_mem->idt_bdw; mmio->spa = nfit_mem->spa_bdw; rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw, nfit_mem->memdev_bdw->interleave_ways); if (rc) { dev_dbg(dev, "%s: %s failed to init bdw interleave\n", __func__, nvdimm_name(nvdimm)); return rc; } /* map block control memory */ nfit_blk->cmd_offset = nfit_mem->dcr->command_offset; nfit_blk->stat_offset = nfit_mem->dcr->status_offset; mmio = &nfit_blk->mmio[DCR]; mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address, nfit_mem->spa_dcr->length); if (!mmio->addr.base) { dev_dbg(dev, "%s: %s failed to map dcr\n", __func__, nvdimm_name(nvdimm)); return -ENOMEM; } mmio->size = nfit_mem->dcr->window_size; mmio->base_offset = nfit_mem->memdev_dcr->region_offset; mmio->idt = nfit_mem->idt_dcr; mmio->spa = nfit_mem->spa_dcr; rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr, nfit_mem->memdev_dcr->interleave_ways); if (rc) { dev_dbg(dev, "%s: %s failed to init dcr interleave\n", __func__, nvdimm_name(nvdimm)); return rc; } rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk); if (rc < 0) { dev_dbg(dev, "%s: %s failed get DIMM flags\n", __func__, nvdimm_name(nvdimm)); return rc; } if (nvdimm_has_flush(nfit_blk->nd_region) < 0) dev_warn(dev, "unable to guarantee persistence of writes\n"); if (mmio->line_size == 0) return 0; if ((u32) nfit_blk->cmd_offset % mmio->line_size + 8 > mmio->line_size) { dev_dbg(dev, "cmd_offset crosses interleave boundary\n"); return -ENXIO; } else if ((u32) nfit_blk->stat_offset % mmio->line_size + 8 > mmio->line_size) { dev_dbg(dev, "stat_offset crosses interleave boundary\n"); return -ENXIO; } return 0; } static int ars_get_cap(struct acpi_nfit_desc *acpi_desc, struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa) { struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; struct acpi_nfit_system_address *spa = nfit_spa->spa; int cmd_rc, rc; cmd->address = spa->address; cmd->length = spa->length; rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd, sizeof(*cmd), &cmd_rc); if (rc < 0) return rc; return cmd_rc; } static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa) { int rc; int cmd_rc; struct nd_cmd_ars_start ars_start; struct acpi_nfit_system_address *spa = nfit_spa->spa; struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; memset(&ars_start, 0, sizeof(ars_start)); ars_start.address = spa->address; ars_start.length = spa->length; if (nfit_spa_type(spa) == NFIT_SPA_PM) ars_start.type = ND_ARS_PERSISTENT; else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) ars_start.type = ND_ARS_VOLATILE; else return -ENOTTY; rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, sizeof(ars_start), &cmd_rc); if (rc < 0) return rc; return cmd_rc; } static int ars_continue(struct acpi_nfit_desc *acpi_desc) { int rc, cmd_rc; struct nd_cmd_ars_start ars_start; struct nvdimm_bus_descriptor *nd_desc = 
&acpi_desc->nd_desc; struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; memset(&ars_start, 0, sizeof(ars_start)); ars_start.address = ars_status->restart_address; ars_start.length = ars_status->restart_length; ars_start.type = ars_status->type; rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start, sizeof(ars_start), &cmd_rc); if (rc < 0) return rc; return cmd_rc; } static int ars_get_status(struct acpi_nfit_desc *acpi_desc) { struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status; int rc, cmd_rc; rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status, acpi_desc->ars_status_size, &cmd_rc); if (rc < 0) return rc; return cmd_rc; } static int ars_status_process_records(struct nvdimm_bus *nvdimm_bus, struct nd_cmd_ars_status *ars_status) { int rc; u32 i; for (i = 0; i < ars_status->num_records; i++) { rc = nvdimm_bus_add_poison(nvdimm_bus, ars_status->records[i].err_address, ars_status->records[i].length); if (rc) return rc; } return 0; } static void acpi_nfit_remove_resource(void *data) { struct resource *res = data; remove_resource(res); } static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc, struct nd_region_desc *ndr_desc) { struct resource *res, *nd_res = ndr_desc->res; int is_pmem, ret; /* No operation if the region is already registered as PMEM */ is_pmem = region_intersects(nd_res->start, resource_size(nd_res), IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY); if (is_pmem == REGION_INTERSECTS) return 0; res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL); if (!res) return -ENOMEM; res->name = "Persistent Memory"; res->start = nd_res->start; res->end = nd_res->end; res->flags = IORESOURCE_MEM; res->desc = IORES_DESC_PERSISTENT_MEMORY; ret = insert_resource(&iomem_resource, res); if (ret) return ret; ret = devm_add_action_or_reset(acpi_desc->dev, acpi_nfit_remove_resource, res); if (ret) return ret; return 0; } static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc, struct nd_mapping *nd_mapping, struct nd_region_desc *ndr_desc, struct acpi_nfit_memory_map *memdev, struct nfit_spa *nfit_spa) { struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, memdev->device_handle); struct acpi_nfit_system_address *spa = nfit_spa->spa; struct nd_blk_region_desc *ndbr_desc; struct nfit_mem *nfit_mem; int blk_valid = 0; if (!nvdimm) { dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n", spa->range_index, memdev->device_handle); return -ENODEV; } nd_mapping->nvdimm = nvdimm; switch (nfit_spa_type(spa)) { case NFIT_SPA_PM: case NFIT_SPA_VOLATILE: nd_mapping->start = memdev->address; nd_mapping->size = memdev->region_size; break; case NFIT_SPA_DCR: nfit_mem = nvdimm_provider_data(nvdimm); if (!nfit_mem || !nfit_mem->bdw) { dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n", spa->range_index, nvdimm_name(nvdimm)); } else { nd_mapping->size = nfit_mem->bdw->capacity; nd_mapping->start = nfit_mem->bdw->start_address; ndr_desc->num_lanes = nfit_mem->bdw->windows; blk_valid = 1; } ndr_desc->nd_mapping = nd_mapping; ndr_desc->num_mappings = blk_valid; ndbr_desc = to_blk_region_desc(ndr_desc); ndbr_desc->enable = acpi_nfit_blk_region_enable; ndbr_desc->do_io = acpi_desc->blk_do_io; nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus, ndr_desc); if (!nfit_spa->nd_region) return -ENOMEM; break; } return 0; } static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa) { return (nfit_spa_type(spa) == NFIT_SPA_VDISK || nfit_spa_type(spa) == NFIT_SPA_VCD 
|| nfit_spa_type(spa) == NFIT_SPA_PDISK || nfit_spa_type(spa) == NFIT_SPA_PCD); } static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa) { static struct nd_mapping nd_mappings[ND_MAX_MAPPINGS]; struct acpi_nfit_system_address *spa = nfit_spa->spa; struct nd_blk_region_desc ndbr_desc; struct nd_region_desc *ndr_desc; struct nfit_memdev *nfit_memdev; struct nvdimm_bus *nvdimm_bus; struct resource res; int count = 0, rc; if (nfit_spa->nd_region) return 0; if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) { dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n", __func__); return 0; } memset(&res, 0, sizeof(res)); memset(&nd_mappings, 0, sizeof(nd_mappings)); memset(&ndbr_desc, 0, sizeof(ndbr_desc)); res.start = spa->address; res.end = res.start + spa->length - 1; ndr_desc = &ndbr_desc.ndr_desc; ndr_desc->res = &res; ndr_desc->provider_data = nfit_spa; ndr_desc->attr_groups = acpi_nfit_region_attribute_groups; if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) ndr_desc->numa_node = acpi_map_pxm_to_online_node( spa->proximity_domain); else ndr_desc->numa_node = NUMA_NO_NODE; list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev; struct nd_mapping *nd_mapping; if (memdev->range_index != spa->range_index) continue; if (count >= ND_MAX_MAPPINGS) { dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n", spa->range_index, ND_MAX_MAPPINGS); return -ENXIO; } nd_mapping = &nd_mappings[count++]; rc = acpi_nfit_init_mapping(acpi_desc, nd_mapping, ndr_desc, memdev, nfit_spa); if (rc) goto out; } ndr_desc->nd_mapping = nd_mappings; ndr_desc->num_mappings = count; rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa); if (rc) goto out; nvdimm_bus = acpi_desc->nvdimm_bus; if (nfit_spa_type(spa) == NFIT_SPA_PM) { rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc); if (rc) { dev_warn(acpi_desc->dev, "failed to insert pmem resource to iomem: %d\n", rc); goto out; } nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, ndr_desc); if (!nfit_spa->nd_region) rc = -ENOMEM; } else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE) { nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus, ndr_desc); if (!nfit_spa->nd_region) rc = -ENOMEM; } else if (nfit_spa_is_virtual(spa)) { nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus, ndr_desc); if (!nfit_spa->nd_region) rc = -ENOMEM; } out: if (rc) dev_err(acpi_desc->dev, "failed to register spa range %d\n", nfit_spa->spa->range_index); return rc; } static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc, u32 max_ars) { struct device *dev = acpi_desc->dev; struct nd_cmd_ars_status *ars_status; if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) { memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size); return 0; } if (acpi_desc->ars_status) devm_kfree(dev, acpi_desc->ars_status); acpi_desc->ars_status = NULL; ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL); if (!ars_status) return -ENOMEM; acpi_desc->ars_status = ars_status; acpi_desc->ars_status_size = max_ars; return 0; } static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa) { struct acpi_nfit_system_address *spa = nfit_spa->spa; int rc; if (!nfit_spa->max_ars) { struct nd_cmd_ars_cap ars_cap; memset(&ars_cap, 0, sizeof(ars_cap)); rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa); if (rc < 0) return rc; nfit_spa->max_ars = ars_cap.max_ars_out; nfit_spa->clear_err_unit = ars_cap.clear_err_unit; /* check that the 
supported scrub types match the spa type */ if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE && ((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0) return -ENOTTY; else if (nfit_spa_type(spa) == NFIT_SPA_PM && ((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0) return -ENOTTY; } if (ars_status_alloc(acpi_desc, nfit_spa->max_ars)) return -ENOMEM; rc = ars_get_status(acpi_desc); if (rc < 0 && rc != -ENOSPC) return rc; if (ars_status_process_records(acpi_desc->nvdimm_bus, acpi_desc->ars_status)) return -ENOMEM; return 0; } static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa) { struct acpi_nfit_system_address *spa = nfit_spa->spa; unsigned int overflow_retry = scrub_overflow_abort; u64 init_ars_start = 0, init_ars_len = 0; struct device *dev = acpi_desc->dev; unsigned int tmo = scrub_timeout; int rc; if (!nfit_spa->ars_required || !nfit_spa->nd_region) return; rc = ars_start(acpi_desc, nfit_spa); /* * If we timed out the initial scan we'll still be busy here, * and will wait another timeout before giving up permanently. */ if (rc < 0 && rc != -EBUSY) return; do { u64 ars_start, ars_len; if (acpi_desc->cancel) break; rc = acpi_nfit_query_poison(acpi_desc, nfit_spa); if (rc == -ENOTTY) break; if (rc == -EBUSY && !tmo) { dev_warn(dev, "range %d ars timeout, aborting\n", spa->range_index); break; } if (rc == -EBUSY) { /* * Note, entries may be appended to the list * while the lock is dropped, but the workqueue * being active prevents entries being deleted / * freed. */ mutex_unlock(&acpi_desc->init_mutex); ssleep(1); tmo--; mutex_lock(&acpi_desc->init_mutex); continue; } /* we got some results, but there are more pending... */ if (rc == -ENOSPC && overflow_retry--) { if (!init_ars_len) { init_ars_len = acpi_desc->ars_status->length; init_ars_start = acpi_desc->ars_status->address; } rc = ars_continue(acpi_desc); } if (rc < 0) { dev_warn(dev, "range %d ars continuation failed\n", spa->range_index); break; } if (init_ars_len) { ars_start = init_ars_start; ars_len = init_ars_len; } else { ars_start = acpi_desc->ars_status->address; ars_len = acpi_desc->ars_status->length; } dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n", spa->range_index, ars_start, ars_len); /* notify the region about new poison entries */ nvdimm_region_notify(nfit_spa->nd_region, NVDIMM_REVALIDATE_POISON); break; } while (1); } static void acpi_nfit_scrub(struct work_struct *work) { struct device *dev; u64 init_scrub_length = 0; struct nfit_spa *nfit_spa; u64 init_scrub_address = 0; bool init_ars_done = false; struct acpi_nfit_desc *acpi_desc; unsigned int tmo = scrub_timeout; unsigned int overflow_retry = scrub_overflow_abort; acpi_desc = container_of(work, typeof(*acpi_desc), work); dev = acpi_desc->dev; /* * We scrub in 2 phases. The first phase waits for any platform * firmware initiated scrubs to complete and then we go search for the * affected spa regions to mark them scanned. In the second phase we * initiate a directed scrub for every range that was not scrubbed in * phase 1. If we're called for a 'rescan', we harmlessly pass through * the first phase, but really only care about running phase 2, where * regions can be notified of new poison. 
*/ /* process platform firmware initiated scrubs */ retry: mutex_lock(&acpi_desc->init_mutex); list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { struct nd_cmd_ars_status *ars_status; struct acpi_nfit_system_address *spa; u64 ars_start, ars_len; int rc; if (acpi_desc->cancel) break; if (nfit_spa->nd_region) continue; if (init_ars_done) { /* * No need to re-query, we're now just * reconciling all the ranges covered by the * initial scrub */ rc = 0; } else rc = acpi_nfit_query_poison(acpi_desc, nfit_spa); if (rc == -ENOTTY) { /* no ars capability, just register spa and move on */ acpi_nfit_register_region(acpi_desc, nfit_spa); continue; } if (rc == -EBUSY && !tmo) { /* fallthrough to directed scrub in phase 2 */ dev_warn(dev, "timeout awaiting ars results, continuing...\n"); break; } else if (rc == -EBUSY) { mutex_unlock(&acpi_desc->init_mutex); ssleep(1); tmo--; goto retry; } /* we got some results, but there are more pending... */ if (rc == -ENOSPC && overflow_retry--) { ars_status = acpi_desc->ars_status; /* * Record the original scrub range, so that we * can recall all the ranges impacted by the * initial scrub. */ if (!init_scrub_length) { init_scrub_length = ars_status->length; init_scrub_address = ars_status->address; } rc = ars_continue(acpi_desc); if (rc == 0) { mutex_unlock(&acpi_desc->init_mutex); goto retry; } } if (rc < 0) { /* * Initial scrub failed, we'll give it one more * try below... */ break; } /* We got some final results, record completed ranges */ ars_status = acpi_desc->ars_status; if (init_scrub_length) { ars_start = init_scrub_address; ars_len = ars_start + init_scrub_length; } else { ars_start = ars_status->address; ars_len = ars_status->length; } spa = nfit_spa->spa; if (!init_ars_done) { init_ars_done = true; dev_dbg(dev, "init scrub %#llx + %#llx complete\n", ars_start, ars_len); } if (ars_start <= spa->address && ars_start + ars_len >= spa->address + spa->length) acpi_nfit_register_region(acpi_desc, nfit_spa); } /* * For all the ranges not covered by an initial scrub we still * want to see if there are errors, but it's ok to discover them * asynchronously. */ list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { /* * Flag all the ranges that still need scrubbing, but * register them now to make data available. 
*/ if (!nfit_spa->nd_region) { nfit_spa->ars_required = 1; acpi_nfit_register_region(acpi_desc, nfit_spa); } } list_for_each_entry(nfit_spa, &acpi_desc->spas, list) acpi_nfit_async_scrub(acpi_desc, nfit_spa); acpi_desc->scrub_count++; if (acpi_desc->scrub_count_state) sysfs_notify_dirent(acpi_desc->scrub_count_state); mutex_unlock(&acpi_desc->init_mutex); } static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) { struct nfit_spa *nfit_spa; int rc; list_for_each_entry(nfit_spa, &acpi_desc->spas, list) if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) { /* BLK regions don't need to wait for ars results */ rc = acpi_nfit_register_region(acpi_desc, nfit_spa); if (rc) return rc; } queue_work(nfit_wq, &acpi_desc->work); return 0; } static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc, struct nfit_table_prev *prev) { struct device *dev = acpi_desc->dev; if (!list_empty(&prev->spas) || !list_empty(&prev->memdevs) || !list_empty(&prev->dcrs) || !list_empty(&prev->bdws) || !list_empty(&prev->idts) || !list_empty(&prev->flushes)) { dev_err(dev, "new nfit deletes entries (unsupported)\n"); return -ENXIO; } return 0; } static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc) { struct device *dev = acpi_desc->dev; struct kernfs_node *nfit; struct device *bus_dev; if (!ars_supported(acpi_desc->nvdimm_bus)) return 0; bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit"); if (!nfit) { dev_err(dev, "sysfs_get_dirent 'nfit' failed\n"); return -ENODEV; } acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub"); sysfs_put(nfit); if (!acpi_desc->scrub_count_state) { dev_err(dev, "sysfs_get_dirent 'scrub' failed\n"); return -ENODEV; } return 0; } static void acpi_nfit_destruct(void *data) { struct acpi_nfit_desc *acpi_desc = data; struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus); /* * Destruct under acpi_desc_lock so that nfit_handle_mce does not * race teardown */ mutex_lock(&acpi_desc_lock); acpi_desc->cancel = 1; /* * Bounce the nvdimm bus lock to make sure any in-flight * acpi_nfit_ars_rescan() submissions have had a chance to * either submit or see ->cancel set. 
*/ device_lock(bus_dev); device_unlock(bus_dev); flush_workqueue(nfit_wq); if (acpi_desc->scrub_count_state) sysfs_put(acpi_desc->scrub_count_state); nvdimm_bus_unregister(acpi_desc->nvdimm_bus); acpi_desc->nvdimm_bus = NULL; list_del(&acpi_desc->list); mutex_unlock(&acpi_desc_lock); } int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz) { struct device *dev = acpi_desc->dev; struct nfit_table_prev prev; const void *end; int rc; if (!acpi_desc->nvdimm_bus) { acpi_nfit_init_dsms(acpi_desc); acpi_desc->nvdimm_bus = nvdimm_bus_register(dev, &acpi_desc->nd_desc); if (!acpi_desc->nvdimm_bus) return -ENOMEM; rc = devm_add_action_or_reset(dev, acpi_nfit_destruct, acpi_desc); if (rc) return rc; rc = acpi_nfit_desc_init_scrub_attr(acpi_desc); if (rc) return rc; /* register this acpi_desc for mce notifications */ mutex_lock(&acpi_desc_lock); list_add_tail(&acpi_desc->list, &acpi_descs); mutex_unlock(&acpi_desc_lock); } mutex_lock(&acpi_desc->init_mutex); INIT_LIST_HEAD(&prev.spas); INIT_LIST_HEAD(&prev.memdevs); INIT_LIST_HEAD(&prev.dcrs); INIT_LIST_HEAD(&prev.bdws); INIT_LIST_HEAD(&prev.idts); INIT_LIST_HEAD(&prev.flushes); list_cut_position(&prev.spas, &acpi_desc->spas, acpi_desc->spas.prev); list_cut_position(&prev.memdevs, &acpi_desc->memdevs, acpi_desc->memdevs.prev); list_cut_position(&prev.dcrs, &acpi_desc->dcrs, acpi_desc->dcrs.prev); list_cut_position(&prev.bdws, &acpi_desc->bdws, acpi_desc->bdws.prev); list_cut_position(&prev.idts, &acpi_desc->idts, acpi_desc->idts.prev); list_cut_position(&prev.flushes, &acpi_desc->flushes, acpi_desc->flushes.prev); end = data + sz; while (!IS_ERR_OR_NULL(data)) data = add_table(acpi_desc, &prev, data, end); if (IS_ERR(data)) { dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__, PTR_ERR(data)); rc = PTR_ERR(data); goto out_unlock; } rc = acpi_nfit_check_deletions(acpi_desc, &prev); if (rc) goto out_unlock; rc = nfit_mem_init(acpi_desc); if (rc) goto out_unlock; rc = acpi_nfit_register_dimms(acpi_desc); if (rc) goto out_unlock; rc = acpi_nfit_register_regions(acpi_desc); out_unlock: mutex_unlock(&acpi_desc->init_mutex); return rc; } EXPORT_SYMBOL_GPL(acpi_nfit_init); struct acpi_nfit_flush_work { struct work_struct work; struct completion cmp; }; static void flush_probe(struct work_struct *work) { struct acpi_nfit_flush_work *flush; flush = container_of(work, typeof(*flush), work); complete(&flush->cmp); } static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc) { struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); struct device *dev = acpi_desc->dev; struct acpi_nfit_flush_work flush; /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */ device_lock(dev); device_unlock(dev); /* * Scrub work could take 10s of seconds, userspace may give up so we * need to be interruptible while waiting. */ INIT_WORK_ONSTACK(&flush.work, flush_probe); COMPLETION_INITIALIZER_ONSTACK(flush.cmp); queue_work(nfit_wq, &flush.work); return wait_for_completion_interruptible(&flush.cmp); } static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned int cmd) { struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc); if (nvdimm) return 0; if (cmd != ND_CMD_ARS_START) return 0; /* * The kernel and userspace may race to initiate a scrub, but * the scrub thread is prepared to lose that initial race. It * just needs guarantees that any ars it initiates are not * interrupted by any intervening start reqeusts from userspace. 
*/ if (work_busy(&acpi_desc->work)) return -EBUSY; return 0; } int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc) { struct device *dev = acpi_desc->dev; struct nfit_spa *nfit_spa; if (work_busy(&acpi_desc->work)) return -EBUSY; if (acpi_desc->cancel) return 0; mutex_lock(&acpi_desc->init_mutex); list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { struct acpi_nfit_system_address *spa = nfit_spa->spa; if (nfit_spa_type(spa) != NFIT_SPA_PM) continue; nfit_spa->ars_required = 1; } queue_work(nfit_wq, &acpi_desc->work); dev_dbg(dev, "%s: ars_scan triggered\n", __func__); mutex_unlock(&acpi_desc->init_mutex); return 0; } void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev) { struct nvdimm_bus_descriptor *nd_desc; dev_set_drvdata(dev, acpi_desc); acpi_desc->dev = dev; acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io; nd_desc = &acpi_desc->nd_desc; nd_desc->provider_name = "ACPI.NFIT"; nd_desc->module = THIS_MODULE; nd_desc->ndctl = acpi_nfit_ctl; nd_desc->flush_probe = acpi_nfit_flush_probe; nd_desc->clear_to_send = acpi_nfit_clear_to_send; nd_desc->attr_groups = acpi_nfit_attribute_groups; INIT_LIST_HEAD(&acpi_desc->spas); INIT_LIST_HEAD(&acpi_desc->dcrs); INIT_LIST_HEAD(&acpi_desc->bdws); INIT_LIST_HEAD(&acpi_desc->idts); INIT_LIST_HEAD(&acpi_desc->flushes); INIT_LIST_HEAD(&acpi_desc->memdevs); INIT_LIST_HEAD(&acpi_desc->dimms); INIT_LIST_HEAD(&acpi_desc->list); mutex_init(&acpi_desc->init_mutex); INIT_WORK(&acpi_desc->work, acpi_nfit_scrub); } EXPORT_SYMBOL_GPL(acpi_nfit_desc_init); static int acpi_nfit_add(struct acpi_device *adev) { struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_nfit_desc *acpi_desc; struct device *dev = &adev->dev; struct acpi_table_header *tbl; acpi_status status = AE_OK; acpi_size sz; int rc = 0; status = acpi_get_table_with_size(ACPI_SIG_NFIT, 0, &tbl, &sz); if (ACPI_FAILURE(status)) { /* This is ok, we could have an nvdimm hotplugged later */ dev_dbg(dev, "failed to find NFIT at startup\n"); return 0; } acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); if (!acpi_desc) return -ENOMEM; acpi_nfit_desc_init(acpi_desc, &adev->dev); /* Save the acpi header for exporting the revision via sysfs */ acpi_desc->acpi_header = *tbl; /* Evaluate _FIT and override with that if present */ status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); if (ACPI_SUCCESS(status) && buf.length > 0) { union acpi_object *obj = buf.pointer; if (obj->type == ACPI_TYPE_BUFFER) rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer, obj->buffer.length); else dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n", __func__, (int) obj->type); kfree(buf.pointer); } else /* skip over the lead-in header table */ rc = acpi_nfit_init(acpi_desc, (void *) tbl + sizeof(struct acpi_table_nfit), sz - sizeof(struct acpi_table_nfit)); return rc; } static int acpi_nfit_remove(struct acpi_device *adev) { /* see acpi_nfit_destruct */ return 0; } static void acpi_nfit_notify(struct acpi_device *adev, u32 event) { struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(&adev->dev); struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; struct device *dev = &adev->dev; union acpi_object *obj; acpi_status status; int ret; dev_dbg(dev, "%s: event: %d\n", __func__, event); device_lock(dev); if (!dev->driver) { /* dev->driver may be null if we're being removed */ dev_dbg(dev, "%s: no driver found for dev\n", __func__); goto out_unlock; } if (!acpi_desc) { acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL); if (!acpi_desc) goto out_unlock; 
acpi_nfit_desc_init(acpi_desc, &adev->dev); } else { /* * Finish previous registration before considering new * regions. */ flush_workqueue(nfit_wq); } /* Evaluate _FIT */ status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf); if (ACPI_FAILURE(status)) { dev_err(dev, "failed to evaluate _FIT\n"); goto out_unlock; } obj = buf.pointer; if (obj->type == ACPI_TYPE_BUFFER) { ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer, obj->buffer.length); if (ret) dev_err(dev, "failed to merge updated NFIT\n"); } else dev_err(dev, "Invalid _FIT\n"); kfree(buf.pointer); out_unlock: device_unlock(dev); } static const struct acpi_device_id acpi_nfit_ids[] = { { "ACPI0012", 0 }, { "", 0 }, }; MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids); static struct acpi_driver acpi_nfit_driver = { .name = KBUILD_MODNAME, .ids = acpi_nfit_ids, .ops = { .add = acpi_nfit_add, .remove = acpi_nfit_remove, .notify = acpi_nfit_notify, }, }; static __init int nfit_init(void) { BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40); BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56); BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48); BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20); BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9); BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80); BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40); acpi_str_to_uuid(UUID_VOLATILE_MEMORY, nfit_uuid[NFIT_SPA_VOLATILE]); acpi_str_to_uuid(UUID_PERSISTENT_MEMORY, nfit_uuid[NFIT_SPA_PM]); acpi_str_to_uuid(UUID_CONTROL_REGION, nfit_uuid[NFIT_SPA_DCR]); acpi_str_to_uuid(UUID_DATA_REGION, nfit_uuid[NFIT_SPA_BDW]); acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_VDISK]); acpi_str_to_uuid(UUID_VOLATILE_VIRTUAL_CD, nfit_uuid[NFIT_SPA_VCD]); acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_DISK, nfit_uuid[NFIT_SPA_PDISK]); acpi_str_to_uuid(UUID_PERSISTENT_VIRTUAL_CD, nfit_uuid[NFIT_SPA_PCD]); acpi_str_to_uuid(UUID_NFIT_BUS, nfit_uuid[NFIT_DEV_BUS]); acpi_str_to_uuid(UUID_NFIT_DIMM, nfit_uuid[NFIT_DEV_DIMM]); acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE1, nfit_uuid[NFIT_DEV_DIMM_N_HPE1]); acpi_str_to_uuid(UUID_NFIT_DIMM_N_HPE2, nfit_uuid[NFIT_DEV_DIMM_N_HPE2]); acpi_str_to_uuid(UUID_NFIT_DIMM_N_MSFT, nfit_uuid[NFIT_DEV_DIMM_N_MSFT]); nfit_wq = create_singlethread_workqueue("nfit"); if (!nfit_wq) return -ENOMEM; nfit_mce_register(); return acpi_bus_register_driver(&acpi_nfit_driver); } static __exit void nfit_exit(void) { nfit_mce_unregister(); acpi_bus_unregister_driver(&acpi_nfit_driver); destroy_workqueue(nfit_wq); WARN_ON(!list_empty(&acpi_descs)); } module_init(nfit_init); module_exit(nfit_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Intel Corporation");
aeroevan/linux
drivers/acpi/nfit/core.c
C
gpl-2.0
75,568
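A short aside on the record above: acpi_nfit_scrub() and acpi_nfit_async_scrub() poll Address Range Scrub status in a loop, sleeping and decrementing a countdown on -EBUSY and issuing a continuation on -ENOSPC overflow. The following is a minimal, self-contained sketch of just that control flow; every helper and return code here (query_status, continue_scrub, SIM_*) is a hypothetical stand-in, not a kernel API.

/*
 * Sketch of the ARS polling pattern used by acpi_nfit_scrub() above:
 * retry while busy (bounded by a countdown), continue on overflow,
 * stop on success or hard failure. All helpers are hypothetical.
 */
#include <stdio.h>

#define SIM_EBUSY  (-1)
#define SIM_ENOSPC (-2)

static int calls;

/* Pretend firmware: busy twice, then one overflow, then done. */
static int query_status(void)
{
	calls++;
	if (calls <= 2)
		return SIM_EBUSY;
	if (calls == 3)
		return SIM_ENOSPC;
	return 0;
}

static int continue_scrub(void)
{
	return 0;	/* ask for the next chunk of results */
}

int main(void)
{
	unsigned int tmo = 5;		 /* like scrub_timeout */
	unsigned int overflow_retry = 2; /* like scrub_overflow_abort */
	int rc;

	for (;;) {
		rc = query_status();
		if (rc == SIM_EBUSY) {
			if (tmo == 0)
				break;	/* give up, as the driver does */
			tmo--;		/* the driver ssleep(1)s here */
			continue;
		}
		if (rc == SIM_ENOSPC && overflow_retry > 0) {
			overflow_retry--;
			if (continue_scrub() == 0)
				continue;
		}
		break;
	}
	printf("scrub done: rc=%d after %d queries\n", rc, calls);
	return 0;
}

Compiled standalone, this prints "scrub done: rc=0 after 4 queries": two busy polls, one overflow continuation, one final success — the same shape as the driver's phase-1 loop.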
/*
 * Copyright (c) 1996, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation. Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package java.rmi;

import java.security.*;

/**
 * {@code RMISecurityManager} implements a policy identical to the policy
 * implemented by {@link SecurityManager}. RMI applications
 * should use the {@code SecurityManager} class or another appropriate
 * {@code SecurityManager} implementation instead of this class. RMI's class
 * loader will download classes from remote locations only if a security
 * manager has been set.
 *
 * @implNote
 * <p>Applets typically run in a container that already has a security
 * manager, so there is generally no need for applets to set a security
 * manager. If you have a standalone application, you might need to set a
 * {@code SecurityManager} in order to enable class downloading. This can be
 * done by adding the following to your code. (It needs to be executed before
 * RMI can download code from remote hosts, so it most likely needs to appear
 * in the {@code main} method of your application.)
 *
 * <pre>{@code
 * if (System.getSecurityManager() == null) {
 *     System.setSecurityManager(new SecurityManager());
 * }
 * }</pre>
 *
 * @author Roger Riggs
 * @author Peter Jones
 * @since JDK1.1
 * @deprecated Use {@link SecurityManager} instead.
 */
@Deprecated
public class RMISecurityManager extends SecurityManager {

    /**
     * Constructs a new {@code RMISecurityManager}.
     * @since JDK1.1
     */
    public RMISecurityManager() {
    }
}
netroby/jdk8u-dev
jdk/src/share/classes/java/rmi/RMISecurityManager.java
Java
gpl-2.0
2,632
/* * Copyright (C) 2007,2008 Freescale semiconductor, Inc. * * Author: Li Yang <LeoLi@freescale.com> * Jerry Huang <Chang-Ming.Huang@freescale.com> * * Initialization based on code from Shlomi Gridish. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/proc_fs.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/timer.h> #include <linux/usb.h> #include <linux/device.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include <linux/workqueue.h> #include <linux/time.h> #include <linux/fsl_devices.h> #include <linux/platform_device.h> #include <linux/uaccess.h> #include <asm/unaligned.h> #include "phy-fsl-usb.h" #define DRIVER_VERSION "Rev. 1.55" #define DRIVER_AUTHOR "Jerry Huang/Li Yang" #define DRIVER_DESC "Freescale USB OTG Transceiver Driver" #define DRIVER_INFO DRIVER_DESC " " DRIVER_VERSION static const char driver_name[] = "fsl-usb2-otg"; const pm_message_t otg_suspend_state = { .event = 1, }; #define HA_DATA_PULSE static struct usb_dr_mmap *usb_dr_regs; static struct fsl_otg *fsl_otg_dev; static int srp_wait_done; /* FSM timers */ struct fsl_otg_timer *a_wait_vrise_tmr, *a_wait_bcon_tmr, *a_aidl_bdis_tmr, *b_ase0_brst_tmr, *b_se0_srp_tmr; /* Driver specific timers */ struct fsl_otg_timer *b_data_pulse_tmr, *b_vbus_pulse_tmr, *b_srp_fail_tmr, *b_srp_wait_tmr, *a_wait_enum_tmr; static struct list_head active_timers; static struct fsl_otg_config fsl_otg_initdata = { .otg_port = 1, }; #ifdef CONFIG_PPC32 static u32 _fsl_readl_be(const unsigned __iomem *p) { return in_be32(p); } static u32 _fsl_readl_le(const unsigned __iomem *p) { return in_le32(p); } static void _fsl_writel_be(u32 v, unsigned __iomem *p) { out_be32(p, v); } static void _fsl_writel_le(u32 v, unsigned __iomem *p) { out_le32(p, v); } static u32 (*_fsl_readl)(const unsigned __iomem *p); static void (*_fsl_writel)(u32 v, unsigned __iomem *p); #define fsl_readl(p) (*_fsl_readl)((p)) #define fsl_writel(v, p) (*_fsl_writel)((v), (p)) #else #define fsl_readl(addr) readl(addr) #define fsl_writel(val, addr) writel(val, addr) #endif /* CONFIG_PPC32 */ int write_ulpi(u8 addr, u8 data) { u32 temp; temp = 0x60000000 | (addr << 16) | data; fsl_writel(temp, &usb_dr_regs->ulpiview); return 0; } /* -------------------------------------------------------------*/ /* Operations that will be called from OTG Finite State Machine */ /* Charge vbus for vbus pulsing in SRP */ void fsl_otg_chrg_vbus(struct otg_fsm *fsm, int on) { u32 tmp; tmp = fsl_readl(&usb_dr_regs->otgsc) & ~OTGSC_INTSTS_MASK; if (on) /* stop discharging, start charging */ tmp = (tmp & ~OTGSC_CTRL_VBUS_DISCHARGE) | OTGSC_CTRL_VBUS_CHARGE; else /* stop charging */ tmp &= ~OTGSC_CTRL_VBUS_CHARGE; fsl_writel(tmp, &usb_dr_regs->otgsc); } /* Discharge vbus through a resistor to ground */ void 
fsl_otg_dischrg_vbus(int on) { u32 tmp; tmp = fsl_readl(&usb_dr_regs->otgsc) & ~OTGSC_INTSTS_MASK; if (on) /* stop charging, start discharging */ tmp = (tmp & ~OTGSC_CTRL_VBUS_CHARGE) | OTGSC_CTRL_VBUS_DISCHARGE; else /* stop discharging */ tmp &= ~OTGSC_CTRL_VBUS_DISCHARGE; fsl_writel(tmp, &usb_dr_regs->otgsc); } /* A-device driver vbus, controlled through PP bit in PORTSC */ void fsl_otg_drv_vbus(struct otg_fsm *fsm, int on) { u32 tmp; if (on) { tmp = fsl_readl(&usb_dr_regs->portsc) & ~PORTSC_W1C_BITS; fsl_writel(tmp | PORTSC_PORT_POWER, &usb_dr_regs->portsc); } else { tmp = fsl_readl(&usb_dr_regs->portsc) & ~PORTSC_W1C_BITS & ~PORTSC_PORT_POWER; fsl_writel(tmp, &usb_dr_regs->portsc); } } /* * Pull-up D+, signalling connect by periperal. Also used in * data-line pulsing in SRP */ void fsl_otg_loc_conn(struct otg_fsm *fsm, int on) { u32 tmp; tmp = fsl_readl(&usb_dr_regs->otgsc) & ~OTGSC_INTSTS_MASK; if (on) tmp |= OTGSC_CTRL_DATA_PULSING; else tmp &= ~OTGSC_CTRL_DATA_PULSING; fsl_writel(tmp, &usb_dr_regs->otgsc); } /* * Generate SOF by host. This is controlled through suspend/resume the * port. In host mode, controller will automatically send SOF. * Suspend will block the data on the port. */ void fsl_otg_loc_sof(struct otg_fsm *fsm, int on) { u32 tmp; tmp = fsl_readl(&fsl_otg_dev->dr_mem_map->portsc) & ~PORTSC_W1C_BITS; if (on) tmp |= PORTSC_PORT_FORCE_RESUME; else tmp |= PORTSC_PORT_SUSPEND; fsl_writel(tmp, &fsl_otg_dev->dr_mem_map->portsc); } /* Start SRP pulsing by data-line pulsing, followed with v-bus pulsing. */ void fsl_otg_start_pulse(struct otg_fsm *fsm) { u32 tmp; srp_wait_done = 0; #ifdef HA_DATA_PULSE tmp = fsl_readl(&usb_dr_regs->otgsc) & ~OTGSC_INTSTS_MASK; tmp |= OTGSC_HA_DATA_PULSE; fsl_writel(tmp, &usb_dr_regs->otgsc); #else fsl_otg_loc_conn(1); #endif fsl_otg_add_timer(fsm, b_data_pulse_tmr); } void b_data_pulse_end(unsigned long foo) { #ifdef HA_DATA_PULSE #else fsl_otg_loc_conn(0); #endif /* Do VBUS pulse after data pulse */ fsl_otg_pulse_vbus(); } void fsl_otg_pulse_vbus(void) { srp_wait_done = 0; fsl_otg_chrg_vbus(&fsl_otg_dev->fsm, 1); /* start the timer to end vbus charge */ fsl_otg_add_timer(&fsl_otg_dev->fsm, b_vbus_pulse_tmr); } void b_vbus_pulse_end(unsigned long foo) { fsl_otg_chrg_vbus(&fsl_otg_dev->fsm, 0); /* * As USB3300 using the same a_sess_vld and b_sess_vld voltage * we need to discharge the bus for a while to distinguish * residual voltage of vbus pulsing and A device pull up */ fsl_otg_dischrg_vbus(1); fsl_otg_add_timer(&fsl_otg_dev->fsm, b_srp_wait_tmr); } void b_srp_end(unsigned long foo) { fsl_otg_dischrg_vbus(0); srp_wait_done = 1; if ((fsl_otg_dev->phy.otg->state == OTG_STATE_B_SRP_INIT) && fsl_otg_dev->fsm.b_sess_vld) fsl_otg_dev->fsm.b_srp_done = 1; } /* * Workaround for a_host suspending too fast. When a_bus_req=0, * a_host will start by SRP. 
It needs to set b_hnp_enable before * actually suspending to start HNP */ void a_wait_enum(unsigned long foo) { VDBG("a_wait_enum timeout\n"); if (!fsl_otg_dev->phy.otg->host->b_hnp_enable) fsl_otg_add_timer(&fsl_otg_dev->fsm, a_wait_enum_tmr); else otg_statemachine(&fsl_otg_dev->fsm); } /* The timeout callback function to set time out bit */ void set_tmout(unsigned long indicator) { *(int *)indicator = 1; } /* Initialize timers */ int fsl_otg_init_timers(struct otg_fsm *fsm) { /* FSM used timers */ a_wait_vrise_tmr = otg_timer_initializer(&set_tmout, TA_WAIT_VRISE, (unsigned long)&fsm->a_wait_vrise_tmout); if (!a_wait_vrise_tmr) return -ENOMEM; a_wait_bcon_tmr = otg_timer_initializer(&set_tmout, TA_WAIT_BCON, (unsigned long)&fsm->a_wait_bcon_tmout); if (!a_wait_bcon_tmr) return -ENOMEM; a_aidl_bdis_tmr = otg_timer_initializer(&set_tmout, TA_AIDL_BDIS, (unsigned long)&fsm->a_aidl_bdis_tmout); if (!a_aidl_bdis_tmr) return -ENOMEM; b_ase0_brst_tmr = otg_timer_initializer(&set_tmout, TB_ASE0_BRST, (unsigned long)&fsm->b_ase0_brst_tmout); if (!b_ase0_brst_tmr) return -ENOMEM; b_se0_srp_tmr = otg_timer_initializer(&set_tmout, TB_SE0_SRP, (unsigned long)&fsm->b_se0_srp); if (!b_se0_srp_tmr) return -ENOMEM; b_srp_fail_tmr = otg_timer_initializer(&set_tmout, TB_SRP_FAIL, (unsigned long)&fsm->b_srp_done); if (!b_srp_fail_tmr) return -ENOMEM; a_wait_enum_tmr = otg_timer_initializer(&a_wait_enum, 10, (unsigned long)&fsm); if (!a_wait_enum_tmr) return -ENOMEM; /* device driver used timers */ b_srp_wait_tmr = otg_timer_initializer(&b_srp_end, TB_SRP_WAIT, 0); if (!b_srp_wait_tmr) return -ENOMEM; b_data_pulse_tmr = otg_timer_initializer(&b_data_pulse_end, TB_DATA_PLS, 0); if (!b_data_pulse_tmr) return -ENOMEM; b_vbus_pulse_tmr = otg_timer_initializer(&b_vbus_pulse_end, TB_VBUS_PLS, 0); if (!b_vbus_pulse_tmr) return -ENOMEM; return 0; } /* Uninitialize timers */ void fsl_otg_uninit_timers(void) { /* FSM used timers */ kfree(a_wait_vrise_tmr); kfree(a_wait_bcon_tmr); kfree(a_aidl_bdis_tmr); kfree(b_ase0_brst_tmr); kfree(b_se0_srp_tmr); kfree(b_srp_fail_tmr); kfree(a_wait_enum_tmr); /* device driver used timers */ kfree(b_srp_wait_tmr); kfree(b_data_pulse_tmr); kfree(b_vbus_pulse_tmr); } static struct fsl_otg_timer *fsl_otg_get_timer(enum otg_fsm_timer t) { struct fsl_otg_timer *timer; /* REVISIT: use array of pointers to timers instead */ switch (t) { case A_WAIT_VRISE: timer = a_wait_vrise_tmr; break; case A_WAIT_BCON: timer = a_wait_vrise_tmr; break; case A_AIDL_BDIS: timer = a_wait_vrise_tmr; break; case B_ASE0_BRST: timer = a_wait_vrise_tmr; break; case B_SE0_SRP: timer = a_wait_vrise_tmr; break; case B_SRP_FAIL: timer = a_wait_vrise_tmr; break; case A_WAIT_ENUM: timer = a_wait_vrise_tmr; break; default: timer = NULL; } return timer; } /* Add timer to timer list */ void fsl_otg_add_timer(struct otg_fsm *fsm, void *gtimer) { struct fsl_otg_timer *timer = gtimer; struct fsl_otg_timer *tmp_timer; /* * Check if the timer is already in the active list, * if so update timer count */ list_for_each_entry(tmp_timer, &active_timers, list) if (tmp_timer == timer) { timer->count = timer->expires; return; } timer->count = timer->expires; list_add_tail(&timer->list, &active_timers); } static void fsl_otg_fsm_add_timer(struct otg_fsm *fsm, enum otg_fsm_timer t) { struct fsl_otg_timer *timer; timer = fsl_otg_get_timer(t); if (!timer) return; fsl_otg_add_timer(fsm, timer); } /* Remove timer from the timer list; clear timeout status */ void fsl_otg_del_timer(struct otg_fsm *fsm, void *gtimer) { struct fsl_otg_timer 
*timer = gtimer; struct fsl_otg_timer *tmp_timer, *del_tmp; list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list) if (tmp_timer == timer) list_del(&timer->list); } static void fsl_otg_fsm_del_timer(struct otg_fsm *fsm, enum otg_fsm_timer t) { struct fsl_otg_timer *timer; timer = fsl_otg_get_timer(t); if (!timer) return; fsl_otg_del_timer(fsm, timer); } /* Reset controller, not reset the bus */ void otg_reset_controller(void) { u32 command; command = fsl_readl(&usb_dr_regs->usbcmd); command |= (1 << 1); fsl_writel(command, &usb_dr_regs->usbcmd); while (fsl_readl(&usb_dr_regs->usbcmd) & (1 << 1)) ; } /* Call suspend/resume routines in host driver */ int fsl_otg_start_host(struct otg_fsm *fsm, int on) { struct usb_otg *otg = fsm->otg; struct device *dev; struct fsl_otg *otg_dev = container_of(otg->usb_phy, struct fsl_otg, phy); u32 retval = 0; if (!otg->host) return -ENODEV; dev = otg->host->controller; /* * Update a_vbus_vld state as a_vbus_vld int is disabled * in device mode */ fsm->a_vbus_vld = !!(fsl_readl(&usb_dr_regs->otgsc) & OTGSC_STS_A_VBUS_VALID); if (on) { /* start fsl usb host controller */ if (otg_dev->host_working) goto end; else { otg_reset_controller(); VDBG("host on......\n"); if (dev->driver->pm && dev->driver->pm->resume) { retval = dev->driver->pm->resume(dev); if (fsm->id) { /* default-b */ fsl_otg_drv_vbus(fsm, 1); /* * Workaround: b_host can't driver * vbus, but PP in PORTSC needs to * be 1 for host to work. * So we set drv_vbus bit in * transceiver to 0 thru ULPI. */ write_ulpi(0x0c, 0x20); } } otg_dev->host_working = 1; } } else { /* stop fsl usb host controller */ if (!otg_dev->host_working) goto end; else { VDBG("host off......\n"); if (dev && dev->driver) { if (dev->driver->pm && dev->driver->pm->suspend) retval = dev->driver->pm->suspend(dev); if (fsm->id) /* default-b */ fsl_otg_drv_vbus(fsm, 0); } otg_dev->host_working = 0; } } end: return retval; } /* * Call suspend and resume function in udc driver * to stop and start udc driver. */ int fsl_otg_start_gadget(struct otg_fsm *fsm, int on) { struct usb_otg *otg = fsm->otg; struct device *dev; if (!otg->gadget || !otg->gadget->dev.parent) return -ENODEV; VDBG("gadget %s\n", on ? "on" : "off"); dev = otg->gadget->dev.parent; if (on) { if (dev->driver->resume) dev->driver->resume(dev); } else { if (dev->driver->suspend) dev->driver->suspend(dev, otg_suspend_state); } return 0; } /* * Called by initialization code of host driver. Register host controller * to the OTG. Suspend host for OTG role detection. */ static int fsl_otg_set_host(struct usb_otg *otg, struct usb_bus *host) { struct fsl_otg *otg_dev; if (!otg) return -ENODEV; otg_dev = container_of(otg->usb_phy, struct fsl_otg, phy); if (otg_dev != fsl_otg_dev) return -ENODEV; otg->host = host; otg_dev->fsm.a_bus_drop = 0; otg_dev->fsm.a_bus_req = 1; if (host) { VDBG("host off......\n"); otg->host->otg_port = fsl_otg_initdata.otg_port; otg->host->is_b_host = otg_dev->fsm.id; /* * must leave time for hub_wq to finish its thing * before yanking the host driver out from under it, * so suspend the host after a short delay. */ otg_dev->host_working = 1; schedule_delayed_work(&otg_dev->otg_event, 100); return 0; } else { /* host driver going away */ if (!(fsl_readl(&otg_dev->dr_mem_map->otgsc) & OTGSC_STS_USB_ID)) { /* Mini-A cable connected */ struct otg_fsm *fsm = &otg_dev->fsm; otg->state = OTG_STATE_UNDEFINED; fsm->protocol = PROTO_UNDEF; } } otg_dev->host_working = 0; otg_statemachine(&otg_dev->fsm); return 0; } /* Called by initialization code of udc. 
Register udc to OTG. */ static int fsl_otg_set_peripheral(struct usb_otg *otg, struct usb_gadget *gadget) { struct fsl_otg *otg_dev; if (!otg) return -ENODEV; otg_dev = container_of(otg->usb_phy, struct fsl_otg, phy); VDBG("otg_dev 0x%x\n", (int)otg_dev); VDBG("fsl_otg_dev 0x%x\n", (int)fsl_otg_dev); if (otg_dev != fsl_otg_dev) return -ENODEV; if (!gadget) { if (!otg->default_a) otg->gadget->ops->vbus_draw(otg->gadget, 0); usb_gadget_vbus_disconnect(otg->gadget); otg->gadget = 0; otg_dev->fsm.b_bus_req = 0; otg_statemachine(&otg_dev->fsm); return 0; } otg->gadget = gadget; otg->gadget->is_a_peripheral = !otg_dev->fsm.id; otg_dev->fsm.b_bus_req = 1; /* start the gadget right away if the ID pin says Mini-B */ pr_debug("ID pin=%d\n", otg_dev->fsm.id); if (otg_dev->fsm.id == 1) { fsl_otg_start_host(&otg_dev->fsm, 0); otg_drv_vbus(&otg_dev->fsm, 0); fsl_otg_start_gadget(&otg_dev->fsm, 1); } return 0; } /* * Delayed pin detect interrupt processing. * * When the Mini-A cable is disconnected from the board, * the pin-detect interrupt happens before the disconnect * interrupts for the connected device(s). In order to * process the disconnect interrupt(s) prior to switching * roles, the pin-detect interrupts are delayed, and handled * by this routine. */ static void fsl_otg_event(struct work_struct *work) { struct fsl_otg *og = container_of(work, struct fsl_otg, otg_event.work); struct otg_fsm *fsm = &og->fsm; if (fsm->id) { /* switch to gadget */ fsl_otg_start_host(fsm, 0); otg_drv_vbus(fsm, 0); fsl_otg_start_gadget(fsm, 1); } } /* B-device start SRP */ static int fsl_otg_start_srp(struct usb_otg *otg) { struct fsl_otg *otg_dev; if (!otg || otg->state != OTG_STATE_B_IDLE) return -ENODEV; otg_dev = container_of(otg->usb_phy, struct fsl_otg, phy); if (otg_dev != fsl_otg_dev) return -ENODEV; otg_dev->fsm.b_bus_req = 1; otg_statemachine(&otg_dev->fsm); return 0; } /* A_host suspend will call this function to start hnp */ static int fsl_otg_start_hnp(struct usb_otg *otg) { struct fsl_otg *otg_dev; if (!otg) return -ENODEV; otg_dev = container_of(otg->usb_phy, struct fsl_otg, phy); if (otg_dev != fsl_otg_dev) return -ENODEV; pr_debug("start_hnp...\n"); /* clear a_bus_req to enter a_suspend state */ otg_dev->fsm.a_bus_req = 0; otg_statemachine(&otg_dev->fsm); return 0; } /* * Interrupt handler. OTG/host/peripheral share the same int line. * OTG driver clears OTGSC interrupts and leaves USB interrupts * intact. It needs to have knowledge of some USB interrupts * such as port change. */ irqreturn_t fsl_otg_isr(int irq, void *dev_id) { struct otg_fsm *fsm = &((struct fsl_otg *)dev_id)->fsm; struct usb_otg *otg = ((struct fsl_otg *)dev_id)->phy.otg; u32 otg_int_src, otg_sc; otg_sc = fsl_readl(&usb_dr_regs->otgsc); otg_int_src = otg_sc & OTGSC_INTSTS_MASK & (otg_sc >> 8); /* Only clear otg interrupts */ fsl_writel(otg_sc, &usb_dr_regs->otgsc); /*FIXME: ID change not generate when init to 0 */ fsm->id = (otg_sc & OTGSC_STS_USB_ID) ? 1 : 0; otg->default_a = (fsm->id == 0); /* process OTG interrupts */ if (otg_int_src) { if (otg_int_src & OTGSC_INTSTS_USB_ID) { fsm->id = (otg_sc & OTGSC_STS_USB_ID) ? 
1 : 0; otg->default_a = (fsm->id == 0); /* clear conn information */ if (fsm->id) fsm->b_conn = 0; else fsm->a_conn = 0; if (otg->host) otg->host->is_b_host = fsm->id; if (otg->gadget) otg->gadget->is_a_peripheral = !fsm->id; VDBG("ID int (ID is %d)\n", fsm->id); if (fsm->id) { /* switch to gadget */ schedule_delayed_work( &((struct fsl_otg *)dev_id)->otg_event, 100); } else { /* switch to host */ cancel_delayed_work(& ((struct fsl_otg *)dev_id)-> otg_event); fsl_otg_start_gadget(fsm, 0); otg_drv_vbus(fsm, 1); fsl_otg_start_host(fsm, 1); } return IRQ_HANDLED; } } return IRQ_NONE; } static struct otg_fsm_ops fsl_otg_ops = { .chrg_vbus = fsl_otg_chrg_vbus, .drv_vbus = fsl_otg_drv_vbus, .loc_conn = fsl_otg_loc_conn, .loc_sof = fsl_otg_loc_sof, .start_pulse = fsl_otg_start_pulse, .add_timer = fsl_otg_fsm_add_timer, .del_timer = fsl_otg_fsm_del_timer, .start_host = fsl_otg_start_host, .start_gadget = fsl_otg_start_gadget, }; /* Initialize the global variable fsl_otg_dev and request IRQ for OTG */ static int fsl_otg_conf(struct platform_device *pdev) { struct fsl_otg *fsl_otg_tc; int status; if (fsl_otg_dev) return 0; /* allocate space to fsl otg device */ fsl_otg_tc = kzalloc(sizeof(struct fsl_otg), GFP_KERNEL); if (!fsl_otg_tc) return -ENOMEM; fsl_otg_tc->phy.otg = kzalloc(sizeof(struct usb_otg), GFP_KERNEL); if (!fsl_otg_tc->phy.otg) { kfree(fsl_otg_tc); return -ENOMEM; } INIT_DELAYED_WORK(&fsl_otg_tc->otg_event, fsl_otg_event); INIT_LIST_HEAD(&active_timers); status = fsl_otg_init_timers(&fsl_otg_tc->fsm); if (status) { pr_info("Couldn't init OTG timers\n"); goto err; } mutex_init(&fsl_otg_tc->fsm.lock); /* Set OTG state machine operations */ fsl_otg_tc->fsm.ops = &fsl_otg_ops; /* initialize the otg structure */ fsl_otg_tc->phy.label = DRIVER_DESC; fsl_otg_tc->phy.dev = &pdev->dev; fsl_otg_tc->phy.otg->usb_phy = &fsl_otg_tc->phy; fsl_otg_tc->phy.otg->set_host = fsl_otg_set_host; fsl_otg_tc->phy.otg->set_peripheral = fsl_otg_set_peripheral; fsl_otg_tc->phy.otg->start_hnp = fsl_otg_start_hnp; fsl_otg_tc->phy.otg->start_srp = fsl_otg_start_srp; fsl_otg_dev = fsl_otg_tc; /* Store the otg transceiver */ status = usb_add_phy(&fsl_otg_tc->phy, USB_PHY_TYPE_USB2); if (status) { pr_warn(FSL_OTG_NAME ": unable to register OTG transceiver.\n"); goto err; } return 0; err: fsl_otg_uninit_timers(); kfree(fsl_otg_tc->phy.otg); kfree(fsl_otg_tc); return status; } /* OTG Initialization */ int usb_otg_start(struct platform_device *pdev) { struct fsl_otg *p_otg; struct usb_phy *otg_trans = usb_get_phy(USB_PHY_TYPE_USB2); struct otg_fsm *fsm; int status; struct resource *res; u32 temp; struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev); p_otg = container_of(otg_trans, struct fsl_otg, phy); fsm = &p_otg->fsm; /* Initialize the state machine structure with default values */ SET_OTG_STATE(otg_trans, OTG_STATE_UNDEFINED); fsm->otg = p_otg->phy.otg; /* We don't require predefined MEM/IRQ resource index */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENXIO; /* We don't request_mem_region here to enable resource sharing * with host/device */ usb_dr_regs = ioremap(res->start, sizeof(struct usb_dr_mmap)); p_otg->dr_mem_map = (struct usb_dr_mmap *)usb_dr_regs; pdata->regs = (void *)usb_dr_regs; if (pdata->init && pdata->init(pdev) != 0) return -EINVAL; if (pdata->big_endian_mmio) { _fsl_readl = _fsl_readl_be; _fsl_writel = _fsl_writel_be; } else { _fsl_readl = _fsl_readl_le; _fsl_writel = _fsl_writel_le; } /* request irq */ p_otg->irq = platform_get_irq(pdev, 0); status = 
request_irq(p_otg->irq, fsl_otg_isr, IRQF_SHARED, driver_name, p_otg); if (status) { dev_dbg(p_otg->phy.dev, "can't get IRQ %d, error %d\n", p_otg->irq, status); iounmap(p_otg->dr_mem_map); kfree(p_otg->phy.otg); kfree(p_otg); return status; } /* stop the controller */ temp = fsl_readl(&p_otg->dr_mem_map->usbcmd); temp &= ~USB_CMD_RUN_STOP; fsl_writel(temp, &p_otg->dr_mem_map->usbcmd); /* reset the controller */ temp = fsl_readl(&p_otg->dr_mem_map->usbcmd); temp |= USB_CMD_CTRL_RESET; fsl_writel(temp, &p_otg->dr_mem_map->usbcmd); /* wait reset completed */ while (fsl_readl(&p_otg->dr_mem_map->usbcmd) & USB_CMD_CTRL_RESET) ; /* configure the VBUSHS as IDLE(both host and device) */ temp = USB_MODE_STREAM_DISABLE | (pdata->es ? USB_MODE_ES : 0); fsl_writel(temp, &p_otg->dr_mem_map->usbmode); /* configure PHY interface */ temp = fsl_readl(&p_otg->dr_mem_map->portsc); temp &= ~(PORTSC_PHY_TYPE_SEL | PORTSC_PTW); switch (pdata->phy_mode) { case FSL_USB2_PHY_ULPI: temp |= PORTSC_PTS_ULPI; break; case FSL_USB2_PHY_UTMI_WIDE: temp |= PORTSC_PTW_16BIT; /* fall through */ case FSL_USB2_PHY_UTMI: temp |= PORTSC_PTS_UTMI; /* fall through */ default: break; } fsl_writel(temp, &p_otg->dr_mem_map->portsc); if (pdata->have_sysif_regs) { /* configure control enable IO output, big endian register */ temp = __raw_readl(&p_otg->dr_mem_map->control); temp |= USB_CTRL_IOENB; __raw_writel(temp, &p_otg->dr_mem_map->control); } /* disable all interrupt and clear all OTGSC status */ temp = fsl_readl(&p_otg->dr_mem_map->otgsc); temp &= ~OTGSC_INTERRUPT_ENABLE_BITS_MASK; temp |= OTGSC_INTERRUPT_STATUS_BITS_MASK | OTGSC_CTRL_VBUS_DISCHARGE; fsl_writel(temp, &p_otg->dr_mem_map->otgsc); /* * The identification (id) input is FALSE when a Mini-A plug is inserted * in the devices Mini-AB receptacle. Otherwise, this input is TRUE. 
* Also: record initial state of ID pin */ if (fsl_readl(&p_otg->dr_mem_map->otgsc) & OTGSC_STS_USB_ID) { p_otg->phy.otg->state = OTG_STATE_UNDEFINED; p_otg->fsm.id = 1; } else { p_otg->phy.otg->state = OTG_STATE_A_IDLE; p_otg->fsm.id = 0; } pr_debug("initial ID pin=%d\n", p_otg->fsm.id); /* enable OTG ID pin interrupt */ temp = fsl_readl(&p_otg->dr_mem_map->otgsc); temp |= OTGSC_INTR_USB_ID_EN; temp &= ~(OTGSC_CTRL_VBUS_DISCHARGE | OTGSC_INTR_1MS_TIMER_EN); fsl_writel(temp, &p_otg->dr_mem_map->otgsc); return 0; } /* * state file in sysfs */ static int show_fsl_usb2_otg_state(struct device *dev, struct device_attribute *attr, char *buf) { struct otg_fsm *fsm = &fsl_otg_dev->fsm; char *next = buf; unsigned size = PAGE_SIZE; int t; mutex_lock(&fsm->lock); /* basic driver infomation */ t = scnprintf(next, size, DRIVER_DESC "\n" "fsl_usb2_otg version: %s\n\n", DRIVER_VERSION); size -= t; next += t; /* Registers */ t = scnprintf(next, size, "OTGSC: 0x%08x\n" "PORTSC: 0x%08x\n" "USBMODE: 0x%08x\n" "USBCMD: 0x%08x\n" "USBSTS: 0x%08x\n" "USBINTR: 0x%08x\n", fsl_readl(&usb_dr_regs->otgsc), fsl_readl(&usb_dr_regs->portsc), fsl_readl(&usb_dr_regs->usbmode), fsl_readl(&usb_dr_regs->usbcmd), fsl_readl(&usb_dr_regs->usbsts), fsl_readl(&usb_dr_regs->usbintr)); size -= t; next += t; /* State */ t = scnprintf(next, size, "OTG state: %s\n\n", usb_otg_state_string(fsl_otg_dev->phy.otg->state)); size -= t; next += t; /* State Machine Variables */ t = scnprintf(next, size, "a_bus_req: %d\n" "b_bus_req: %d\n" "a_bus_resume: %d\n" "a_bus_suspend: %d\n" "a_conn: %d\n" "a_sess_vld: %d\n" "a_srp_det: %d\n" "a_vbus_vld: %d\n" "b_bus_resume: %d\n" "b_bus_suspend: %d\n" "b_conn: %d\n" "b_se0_srp: %d\n" "b_ssend_srp: %d\n" "b_sess_vld: %d\n" "id: %d\n", fsm->a_bus_req, fsm->b_bus_req, fsm->a_bus_resume, fsm->a_bus_suspend, fsm->a_conn, fsm->a_sess_vld, fsm->a_srp_det, fsm->a_vbus_vld, fsm->b_bus_resume, fsm->b_bus_suspend, fsm->b_conn, fsm->b_se0_srp, fsm->b_ssend_srp, fsm->b_sess_vld, fsm->id); size -= t; next += t; mutex_unlock(&fsm->lock); return PAGE_SIZE - size; } static DEVICE_ATTR(fsl_usb2_otg_state, S_IRUGO, show_fsl_usb2_otg_state, NULL); /* Char driver interface to control some OTG input */ /* * Handle some ioctl command, such as get otg * status and set host suspend */ static long fsl_otg_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { u32 retval = 0; switch (cmd) { case GET_OTG_STATUS: retval = fsl_otg_dev->host_working; break; case SET_A_SUSPEND_REQ: fsl_otg_dev->fsm.a_suspend_req_inf = arg; break; case SET_A_BUS_DROP: fsl_otg_dev->fsm.a_bus_drop = arg; break; case SET_A_BUS_REQ: fsl_otg_dev->fsm.a_bus_req = arg; break; case SET_B_BUS_REQ: fsl_otg_dev->fsm.b_bus_req = arg; break; default: break; } otg_statemachine(&fsl_otg_dev->fsm); return retval; } static int fsl_otg_open(struct inode *inode, struct file *file) { return 0; } static int fsl_otg_release(struct inode *inode, struct file *file) { return 0; } static const struct file_operations otg_fops = { .owner = THIS_MODULE, .llseek = NULL, .read = NULL, .write = NULL, .unlocked_ioctl = fsl_otg_ioctl, .open = fsl_otg_open, .release = fsl_otg_release, }; static int fsl_otg_probe(struct platform_device *pdev) { int ret; if (!dev_get_platdata(&pdev->dev)) return -ENODEV; /* configure the OTG */ ret = fsl_otg_conf(pdev); if (ret) { dev_err(&pdev->dev, "Couldn't configure OTG module\n"); return ret; } /* start OTG */ ret = usb_otg_start(pdev); if (ret) { dev_err(&pdev->dev, "Can't init FSL OTG device\n"); return ret; } ret = 
register_chrdev(FSL_OTG_MAJOR, FSL_OTG_NAME, &otg_fops); if (ret) { dev_err(&pdev->dev, "unable to register FSL OTG device\n"); return ret; } ret = device_create_file(&pdev->dev, &dev_attr_fsl_usb2_otg_state); if (ret) dev_warn(&pdev->dev, "Can't register sysfs attribute\n"); return ret; } static int fsl_otg_remove(struct platform_device *pdev) { struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev); usb_remove_phy(&fsl_otg_dev->phy); free_irq(fsl_otg_dev->irq, fsl_otg_dev); iounmap((void *)usb_dr_regs); fsl_otg_uninit_timers(); kfree(fsl_otg_dev->phy.otg); kfree(fsl_otg_dev); device_remove_file(&pdev->dev, &dev_attr_fsl_usb2_otg_state); unregister_chrdev(FSL_OTG_MAJOR, FSL_OTG_NAME); if (pdata->exit) pdata->exit(pdev); return 0; } struct platform_driver fsl_otg_driver = { .probe = fsl_otg_probe, .remove = fsl_otg_remove, .driver = { .name = driver_name, .owner = THIS_MODULE, }, }; module_platform_driver(fsl_otg_driver); MODULE_DESCRIPTION(DRIVER_INFO); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_LICENSE("GPL");
RoadRunnr/net-next
drivers/usb/phy/phy-fsl-usb.c
C
gpl-2.0
28,119
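One detail worth pulling out of the record above: fsl_otg_add_timer() keeps software timers on a list, and re-adding an active timer only reloads its count from expires; presumably a periodic tick elsewhere in the driver (not shown in this excerpt) decrements each active count and fires the callback at zero. Below is a minimal sketch of that countdown idea using a plain array instead of the kernel's list_head; the struct and helpers are illustrative, not the driver's code.

/*
 * Minimal countdown-timer list in the spirit of fsl_otg_add_timer():
 * adding an already-active timer just reloads its count; each tick
 * decrements active timers and runs the callback when one hits zero.
 * The (unsigned long) data field mirrors the legacy kernel timer
 * signature and assumes a pointer fits in unsigned long.
 */
#include <stdio.h>

struct sketch_timer {
	unsigned long expires;	/* reload value, in ticks */
	unsigned long count;	/* remaining ticks */
	int active;
	void (*function)(unsigned long);
	unsigned long data;
};

static void add_timer(struct sketch_timer *t)
{
	t->count = t->expires;	/* re-adding only reloads the count */
	t->active = 1;
}

static void tick(struct sketch_timer *timers, int n)
{
	for (int i = 0; i < n; i++) {
		if (timers[i].active && --timers[i].count == 0) {
			timers[i].active = 0;
			timers[i].function(timers[i].data);
		}
	}
}

static void set_tmout_sketch(unsigned long data)
{
	*(int *)data = 1;	/* same idea as the driver's set_tmout() */
}

int main(void)
{
	int timed_out = 0;
	struct sketch_timer t = {
		.expires = 3, .function = set_tmout_sketch,
		.data = (unsigned long)&timed_out,
	};

	add_timer(&t);
	for (int i = 0; i < 5 && !timed_out; i++)
		tick(&t, 1);
	printf("timed_out=%d\n", timed_out);	/* fires after 3 ticks */
	return 0;
}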
<?php

/**
 * W3 Cache class
 */

/**
 * W3 Cache engine types
 */
define('W3TC_CACHE_MEMCACHED', 'memcached');
define('W3TC_CACHE_APC', 'apc');
define('W3TC_CACHE_EACCELERATOR', 'eaccelerator');
define('W3TC_CACHE_XCACHE', 'xcache');
define('W3TC_CACHE_WINCACHE', 'wincache');
define('W3TC_CACHE_FILE', 'file');
define('W3TC_CACHE_FILE_GENERIC', 'file_generic');

/**
 * Class W3_Cache
 */
class W3_Cache {
    /**
     * Returns cache engine instance
     *
     * @param string $engine
     * @param array $config
     * @return W3_Cache_Base
     */
    function &instance($engine, $config = array()) {
        static $instances = array();

        $instance_key = sprintf('%s_%s', $engine, md5(serialize($config)));

        if (!isset($instances[$instance_key])) {
            switch ($engine) {
                case W3TC_CACHE_MEMCACHED:
                    require_once W3TC_LIB_W3_DIR . '/Cache/Memcached.php';
                    @$instances[$instance_key] = & new W3_Cache_Memcached($config);
                    break;

                case W3TC_CACHE_APC:
                    require_once W3TC_LIB_W3_DIR . '/Cache/Apc.php';
                    @$instances[$instance_key] = & new W3_Cache_Apc();
                    break;

                case W3TC_CACHE_EACCELERATOR:
                    require_once W3TC_LIB_W3_DIR . '/Cache/Eaccelerator.php';
                    @$instances[$instance_key] = & new W3_Cache_Eaccelerator();
                    break;

                case W3TC_CACHE_XCACHE:
                    require_once W3TC_LIB_W3_DIR . '/Cache/Xcache.php';
                    @$instances[$instance_key] = & new W3_Cache_Xcache();
                    break;

                case W3TC_CACHE_WINCACHE:
                    require_once W3TC_LIB_W3_DIR . '/Cache/Wincache.php';
                    @$instances[$instance_key] = & new W3_Cache_Wincache();
                    break;

                case W3TC_CACHE_FILE:
                    require_once W3TC_LIB_W3_DIR . '/Cache/File.php';
                    @$instances[$instance_key] = & new W3_Cache_File($config);
                    break;

                case W3TC_CACHE_FILE_GENERIC:
                    require_once W3TC_LIB_W3_DIR . '/Cache/File/Generic.php';
                    @$instances[$instance_key] = & new W3_Cache_File_Generic($config);
                    break;

                default:
                    trigger_error('Incorrect cache engine', E_USER_WARNING);
                    require_once W3TC_LIB_W3_DIR . '/Cache/Base.php';
                    @$instances[$instance_key] = & new W3_Cache_Base();
                    break;
            }
        }

        return $instances[$instance_key];
    }
}
kus/WordPress-Boilerplate
wp-content/plugins/w3-total-cache/lib/W3/Cache.php
PHP
gpl-2.0
2,703
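The class above is a classic lazily-populated instance cache: instance() keys singletons by engine (plus an md5 of the serialized config) and constructs a backend only on first request. A sketch of the same pattern in C, keyed by engine alone for brevity; the enum, struct, and names here are illustrative only.

/*
 * Sketch of the per-engine instance cache that W3_Cache::instance()
 * implements above: look the engine up in a static table and create
 * the backend only on first request; later calls return the same
 * object. Simplified to key by engine only (no config hash).
 */
#include <stdio.h>
#include <stdlib.h>

enum engine { ENGINE_FILE, ENGINE_MEMCACHED, ENGINE_MAX };

struct cache_backend {
	enum engine type;
	/* ... engine-specific state would live here ... */
};

static struct cache_backend *instance(enum engine e)
{
	static struct cache_backend *cached[ENGINE_MAX];

	if (e >= ENGINE_MAX)
		return NULL;		/* "incorrect cache engine" */
	if (!cached[e]) {
		cached[e] = calloc(1, sizeof(*cached[e]));
		if (cached[e])
			cached[e]->type = e;
	}
	return cached[e];		/* same object on every later call */
}

int main(void)
{
	struct cache_backend *a = instance(ENGINE_FILE);
	struct cache_backend *b = instance(ENGINE_FILE);

	printf("same instance: %s\n", a == b ? "yes" : "no");
	return 0;
}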
from matplotlib.colors import LinearSegmentedColormap viridis_data = [[ 0.26700401, 0.00487433, 0.32941519], [ 0.26851048, 0.00960483, 0.33542652], [ 0.26994384, 0.01462494, 0.34137895], [ 0.27130489, 0.01994186, 0.34726862], [ 0.27259384, 0.02556309, 0.35309303], [ 0.27380934, 0.03149748, 0.35885256], [ 0.27495242, 0.03775181, 0.36454323], [ 0.27602238, 0.04416723, 0.37016418], [ 0.2770184 , 0.05034437, 0.37571452], [ 0.27794143, 0.05632444, 0.38119074], [ 0.27879067, 0.06214536, 0.38659204], [ 0.2795655 , 0.06783587, 0.39191723], [ 0.28026658, 0.07341724, 0.39716349], [ 0.28089358, 0.07890703, 0.40232944], [ 0.28144581, 0.0843197 , 0.40741404], [ 0.28192358, 0.08966622, 0.41241521], [ 0.28232739, 0.09495545, 0.41733086], [ 0.28265633, 0.10019576, 0.42216032], [ 0.28291049, 0.10539345, 0.42690202], [ 0.28309095, 0.11055307, 0.43155375], [ 0.28319704, 0.11567966, 0.43611482], [ 0.28322882, 0.12077701, 0.44058404], [ 0.28318684, 0.12584799, 0.44496 ], [ 0.283072 , 0.13089477, 0.44924127], [ 0.28288389, 0.13592005, 0.45342734], [ 0.28262297, 0.14092556, 0.45751726], [ 0.28229037, 0.14591233, 0.46150995], [ 0.28188676, 0.15088147, 0.46540474], [ 0.28141228, 0.15583425, 0.46920128], [ 0.28086773, 0.16077132, 0.47289909], [ 0.28025468, 0.16569272, 0.47649762], [ 0.27957399, 0.17059884, 0.47999675], [ 0.27882618, 0.1754902 , 0.48339654], [ 0.27801236, 0.18036684, 0.48669702], [ 0.27713437, 0.18522836, 0.48989831], [ 0.27619376, 0.19007447, 0.49300074], [ 0.27519116, 0.1949054 , 0.49600488], [ 0.27412802, 0.19972086, 0.49891131], [ 0.27300596, 0.20452049, 0.50172076], [ 0.27182812, 0.20930306, 0.50443413], [ 0.27059473, 0.21406899, 0.50705243], [ 0.26930756, 0.21881782, 0.50957678], [ 0.26796846, 0.22354911, 0.5120084 ], [ 0.26657984, 0.2282621 , 0.5143487 ], [ 0.2651445 , 0.23295593, 0.5165993 ], [ 0.2636632 , 0.23763078, 0.51876163], [ 0.26213801, 0.24228619, 0.52083736], [ 0.26057103, 0.2469217 , 0.52282822], [ 0.25896451, 0.25153685, 0.52473609], [ 0.25732244, 0.2561304 , 0.52656332], [ 0.25564519, 0.26070284, 0.52831152], [ 0.25393498, 0.26525384, 0.52998273], [ 0.25219404, 0.26978306, 0.53157905], [ 0.25042462, 0.27429024, 0.53310261], [ 0.24862899, 0.27877509, 0.53455561], [ 0.2468114 , 0.28323662, 0.53594093], [ 0.24497208, 0.28767547, 0.53726018], [ 0.24311324, 0.29209154, 0.53851561], [ 0.24123708, 0.29648471, 0.53970946], [ 0.23934575, 0.30085494, 0.54084398], [ 0.23744138, 0.30520222, 0.5419214 ], [ 0.23552606, 0.30952657, 0.54294396], [ 0.23360277, 0.31382773, 0.54391424], [ 0.2316735 , 0.3181058 , 0.54483444], [ 0.22973926, 0.32236127, 0.54570633], [ 0.22780192, 0.32659432, 0.546532 ], [ 0.2258633 , 0.33080515, 0.54731353], [ 0.22392515, 0.334994 , 0.54805291], [ 0.22198915, 0.33916114, 0.54875211], [ 0.22005691, 0.34330688, 0.54941304], [ 0.21812995, 0.34743154, 0.55003755], [ 0.21620971, 0.35153548, 0.55062743], [ 0.21429757, 0.35561907, 0.5511844 ], [ 0.21239477, 0.35968273, 0.55171011], [ 0.2105031 , 0.36372671, 0.55220646], [ 0.20862342, 0.36775151, 0.55267486], [ 0.20675628, 0.37175775, 0.55311653], [ 0.20490257, 0.37574589, 0.55353282], [ 0.20306309, 0.37971644, 0.55392505], [ 0.20123854, 0.38366989, 0.55429441], [ 0.1994295 , 0.38760678, 0.55464205], [ 0.1976365 , 0.39152762, 0.55496905], [ 0.19585993, 0.39543297, 0.55527637], [ 0.19410009, 0.39932336, 0.55556494], [ 0.19235719, 0.40319934, 0.55583559], [ 0.19063135, 0.40706148, 0.55608907], [ 0.18892259, 0.41091033, 0.55632606], [ 0.18723083, 0.41474645, 0.55654717], [ 0.18555593, 0.4185704 , 0.55675292], [ 0.18389763, 
0.42238275, 0.55694377], [ 0.18225561, 0.42618405, 0.5571201 ], [ 0.18062949, 0.42997486, 0.55728221], [ 0.17901879, 0.43375572, 0.55743035], [ 0.17742298, 0.4375272 , 0.55756466], [ 0.17584148, 0.44128981, 0.55768526], [ 0.17427363, 0.4450441 , 0.55779216], [ 0.17271876, 0.4487906 , 0.55788532], [ 0.17117615, 0.4525298 , 0.55796464], [ 0.16964573, 0.45626209, 0.55803034], [ 0.16812641, 0.45998802, 0.55808199], [ 0.1666171 , 0.46370813, 0.55811913], [ 0.16511703, 0.4674229 , 0.55814141], [ 0.16362543, 0.47113278, 0.55814842], [ 0.16214155, 0.47483821, 0.55813967], [ 0.16066467, 0.47853961, 0.55811466], [ 0.15919413, 0.4822374 , 0.5580728 ], [ 0.15772933, 0.48593197, 0.55801347], [ 0.15626973, 0.4896237 , 0.557936 ], [ 0.15481488, 0.49331293, 0.55783967], [ 0.15336445, 0.49700003, 0.55772371], [ 0.1519182 , 0.50068529, 0.55758733], [ 0.15047605, 0.50436904, 0.55742968], [ 0.14903918, 0.50805136, 0.5572505 ], [ 0.14760731, 0.51173263, 0.55704861], [ 0.14618026, 0.51541316, 0.55682271], [ 0.14475863, 0.51909319, 0.55657181], [ 0.14334327, 0.52277292, 0.55629491], [ 0.14193527, 0.52645254, 0.55599097], [ 0.14053599, 0.53013219, 0.55565893], [ 0.13914708, 0.53381201, 0.55529773], [ 0.13777048, 0.53749213, 0.55490625], [ 0.1364085 , 0.54117264, 0.55448339], [ 0.13506561, 0.54485335, 0.55402906], [ 0.13374299, 0.54853458, 0.55354108], [ 0.13244401, 0.55221637, 0.55301828], [ 0.13117249, 0.55589872, 0.55245948], [ 0.1299327 , 0.55958162, 0.55186354], [ 0.12872938, 0.56326503, 0.55122927], [ 0.12756771, 0.56694891, 0.55055551], [ 0.12645338, 0.57063316, 0.5498411 ], [ 0.12539383, 0.57431754, 0.54908564], [ 0.12439474, 0.57800205, 0.5482874 ], [ 0.12346281, 0.58168661, 0.54744498], [ 0.12260562, 0.58537105, 0.54655722], [ 0.12183122, 0.58905521, 0.54562298], [ 0.12114807, 0.59273889, 0.54464114], [ 0.12056501, 0.59642187, 0.54361058], [ 0.12009154, 0.60010387, 0.54253043], [ 0.11973756, 0.60378459, 0.54139999], [ 0.11951163, 0.60746388, 0.54021751], [ 0.11942341, 0.61114146, 0.53898192], [ 0.11948255, 0.61481702, 0.53769219], [ 0.11969858, 0.61849025, 0.53634733], [ 0.12008079, 0.62216081, 0.53494633], [ 0.12063824, 0.62582833, 0.53348834], [ 0.12137972, 0.62949242, 0.53197275], [ 0.12231244, 0.63315277, 0.53039808], [ 0.12344358, 0.63680899, 0.52876343], [ 0.12477953, 0.64046069, 0.52706792], [ 0.12632581, 0.64410744, 0.52531069], [ 0.12808703, 0.64774881, 0.52349092], [ 0.13006688, 0.65138436, 0.52160791], [ 0.13226797, 0.65501363, 0.51966086], [ 0.13469183, 0.65863619, 0.5176488 ], [ 0.13733921, 0.66225157, 0.51557101], [ 0.14020991, 0.66585927, 0.5134268 ], [ 0.14330291, 0.66945881, 0.51121549], [ 0.1466164 , 0.67304968, 0.50893644], [ 0.15014782, 0.67663139, 0.5065889 ], [ 0.15389405, 0.68020343, 0.50417217], [ 0.15785146, 0.68376525, 0.50168574], [ 0.16201598, 0.68731632, 0.49912906], [ 0.1663832 , 0.69085611, 0.49650163], [ 0.1709484 , 0.69438405, 0.49380294], [ 0.17570671, 0.6978996 , 0.49103252], [ 0.18065314, 0.70140222, 0.48818938], [ 0.18578266, 0.70489133, 0.48527326], [ 0.19109018, 0.70836635, 0.48228395], [ 0.19657063, 0.71182668, 0.47922108], [ 0.20221902, 0.71527175, 0.47608431], [ 0.20803045, 0.71870095, 0.4728733 ], [ 0.21400015, 0.72211371, 0.46958774], [ 0.22012381, 0.72550945, 0.46622638], [ 0.2263969 , 0.72888753, 0.46278934], [ 0.23281498, 0.73224735, 0.45927675], [ 0.2393739 , 0.73558828, 0.45568838], [ 0.24606968, 0.73890972, 0.45202405], [ 0.25289851, 0.74221104, 0.44828355], [ 0.25985676, 0.74549162, 0.44446673], [ 0.26694127, 0.74875084, 0.44057284], [ 0.27414922, 
0.75198807, 0.4366009 ], [ 0.28147681, 0.75520266, 0.43255207], [ 0.28892102, 0.75839399, 0.42842626], [ 0.29647899, 0.76156142, 0.42422341], [ 0.30414796, 0.76470433, 0.41994346], [ 0.31192534, 0.76782207, 0.41558638], [ 0.3198086 , 0.77091403, 0.41115215], [ 0.3277958 , 0.77397953, 0.40664011], [ 0.33588539, 0.7770179 , 0.40204917], [ 0.34407411, 0.78002855, 0.39738103], [ 0.35235985, 0.78301086, 0.39263579], [ 0.36074053, 0.78596419, 0.38781353], [ 0.3692142 , 0.78888793, 0.38291438], [ 0.37777892, 0.79178146, 0.3779385 ], [ 0.38643282, 0.79464415, 0.37288606], [ 0.39517408, 0.79747541, 0.36775726], [ 0.40400101, 0.80027461, 0.36255223], [ 0.4129135 , 0.80304099, 0.35726893], [ 0.42190813, 0.80577412, 0.35191009], [ 0.43098317, 0.80847343, 0.34647607], [ 0.44013691, 0.81113836, 0.3409673 ], [ 0.44936763, 0.81376835, 0.33538426], [ 0.45867362, 0.81636288, 0.32972749], [ 0.46805314, 0.81892143, 0.32399761], [ 0.47750446, 0.82144351, 0.31819529], [ 0.4870258 , 0.82392862, 0.31232133], [ 0.49661536, 0.82637633, 0.30637661], [ 0.5062713 , 0.82878621, 0.30036211], [ 0.51599182, 0.83115784, 0.29427888], [ 0.52577622, 0.83349064, 0.2881265 ], [ 0.5356211 , 0.83578452, 0.28190832], [ 0.5455244 , 0.83803918, 0.27562602], [ 0.55548397, 0.84025437, 0.26928147], [ 0.5654976 , 0.8424299 , 0.26287683], [ 0.57556297, 0.84456561, 0.25641457], [ 0.58567772, 0.84666139, 0.24989748], [ 0.59583934, 0.84871722, 0.24332878], [ 0.60604528, 0.8507331 , 0.23671214], [ 0.61629283, 0.85270912, 0.23005179], [ 0.62657923, 0.85464543, 0.22335258], [ 0.63690157, 0.85654226, 0.21662012], [ 0.64725685, 0.85839991, 0.20986086], [ 0.65764197, 0.86021878, 0.20308229], [ 0.66805369, 0.86199932, 0.19629307], [ 0.67848868, 0.86374211, 0.18950326], [ 0.68894351, 0.86544779, 0.18272455], [ 0.69941463, 0.86711711, 0.17597055], [ 0.70989842, 0.86875092, 0.16925712], [ 0.72039115, 0.87035015, 0.16260273], [ 0.73088902, 0.87191584, 0.15602894], [ 0.74138803, 0.87344918, 0.14956101], [ 0.75188414, 0.87495143, 0.14322828], [ 0.76237342, 0.87642392, 0.13706449], [ 0.77285183, 0.87786808, 0.13110864], [ 0.78331535, 0.87928545, 0.12540538], [ 0.79375994, 0.88067763, 0.12000532], [ 0.80418159, 0.88204632, 0.11496505], [ 0.81457634, 0.88339329, 0.11034678], [ 0.82494028, 0.88472036, 0.10621724], [ 0.83526959, 0.88602943, 0.1026459 ], [ 0.84556056, 0.88732243, 0.09970219], [ 0.8558096 , 0.88860134, 0.09745186], [ 0.86601325, 0.88986815, 0.09595277], [ 0.87616824, 0.89112487, 0.09525046], [ 0.88627146, 0.89237353, 0.09537439], [ 0.89632002, 0.89361614, 0.09633538], [ 0.90631121, 0.89485467, 0.09812496], [ 0.91624212, 0.89609127, 0.1007168 ], [ 0.92610579, 0.89732977, 0.10407067], [ 0.93590444, 0.8985704 , 0.10813094], [ 0.94563626, 0.899815 , 0.11283773], [ 0.95529972, 0.90106534, 0.11812832], [ 0.96489353, 0.90232311, 0.12394051], [ 0.97441665, 0.90358991, 0.13021494], [ 0.98386829, 0.90486726, 0.13689671], [ 0.99324789, 0.90615657, 0.1439362 ]] viridis = LinearSegmentedColormap.from_list('viridis', viridis_data)
tobias47n9e/innstereo
innstereo/viridis.py
Python
gpl-2.0
12,423
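The viridis.py record above ends by registering its RGB table as a matplotlib colormap via LinearSegmentedColormap.from_list. A minimal usage sketch follows (assuming matplotlib and numpy are available; the two-entry color list is a placeholder standing in for the full table in the record, so the gradient is illustrative only):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap

# Placeholder endpoints standing in for the full viridis table above.
viridis_data = [[0.267004, 0.004874, 0.329415],   # low end (dark purple)
                [0.993248, 0.906157, 0.143936]]   # high end (yellow)
viridis = LinearSegmentedColormap.from_list('viridis', viridis_data)

# A colormap built this way is used like any built-in cmap.
plt.imshow(np.random.rand(32, 32), cmap=viridis)
plt.colorbar()
plt.savefig('viridis_demo.png')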
/* * Microblaze kernel loader * * Copyright (c) 2012 Peter Crosthwaite <peter.crosthwaite@petalogix.com> * Copyright (c) 2012 PetaLogix * Copyright (c) 2009 Edgar E. Iglesias. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "qemu/option.h" #include "qemu/config-file.h" #include "qemu/error-report.h" #include "qemu-common.h" #include "sysemu/device_tree.h" #include "sysemu/sysemu.h" #include "hw/loader.h" #include "elf.h" #include "boot.h" static struct { void (*machine_cpu_reset)(MicroBlazeCPU *); uint32_t bootstrap_pc; uint32_t cmdline; uint32_t initrd_start; uint32_t initrd_end; uint32_t fdt; } boot_info; static void main_cpu_reset(void *opaque) { MicroBlazeCPU *cpu = opaque; CPUMBState *env = &cpu->env; cpu_reset(CPU(cpu)); env->regs[5] = boot_info.cmdline; env->regs[6] = boot_info.initrd_start; env->regs[7] = boot_info.fdt; env->sregs[SR_PC] = boot_info.bootstrap_pc; if (boot_info.machine_cpu_reset) { boot_info.machine_cpu_reset(cpu); } } static int microblaze_load_dtb(hwaddr addr, uint32_t ramsize, uint32_t initrd_start, uint32_t initrd_end, const char *kernel_cmdline, const char *dtb_filename) { int fdt_size; void *fdt = NULL; int r; if (dtb_filename) { fdt = load_device_tree(dtb_filename, &fdt_size); } if (!fdt) { return 0; } if (kernel_cmdline) { r = qemu_devtree_setprop_string(fdt, "/chosen", "bootargs", kernel_cmdline); if (r < 0) { fprintf(stderr, "couldn't set /chosen/bootargs\n"); } } if (initrd_start) { qemu_devtree_setprop_cell(fdt, "/chosen", "linux,initrd-start", initrd_start); qemu_devtree_setprop_cell(fdt, "/chosen", "linux,initrd-end", initrd_end); } cpu_physical_memory_write(addr, fdt, fdt_size); return fdt_size; } static uint64_t translate_kernel_address(void *opaque, uint64_t addr) { return addr - 0x30000000LL; } void microblaze_load_kernel(MicroBlazeCPU *cpu, hwaddr ddr_base, uint32_t ramsize, const char *initrd_filename, const char *dtb_filename, void (*machine_cpu_reset)(MicroBlazeCPU *)) { QemuOpts *machine_opts; const char *kernel_filename; const char *kernel_cmdline; const char *dtb_arg; machine_opts = qemu_get_machine_opts(); kernel_filename = qemu_opt_get(machine_opts, "kernel"); kernel_cmdline = qemu_opt_get(machine_opts, "append"); dtb_arg = qemu_opt_get(machine_opts, "dtb"); if (dtb_arg) { /* Preference a -dtb argument */ dtb_filename = dtb_arg; } else { /* default to pcbios dtb as passed by machine_init */ dtb_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, dtb_filename); } boot_info.machine_cpu_reset = machine_cpu_reset; 
qemu_register_reset(main_cpu_reset, cpu); if (kernel_filename) { int kernel_size; uint64_t entry, low, high; uint32_t base32; int big_endian = 0; #ifdef TARGET_WORDS_BIGENDIAN big_endian = 1; #endif /* Boots a kernel elf binary. */ kernel_size = load_elf(kernel_filename, NULL, NULL, &entry, &low, &high, big_endian, ELF_MACHINE, 0); base32 = entry; if (base32 == 0xc0000000) { kernel_size = load_elf(kernel_filename, translate_kernel_address, NULL, &entry, NULL, NULL, big_endian, ELF_MACHINE, 0); } /* Always boot into physical ram. */ boot_info.bootstrap_pc = ddr_base + (entry & 0x0fffffff); /* If it wasn't an ELF image, try an u-boot image. */ if (kernel_size < 0) { hwaddr uentry, loadaddr; kernel_size = load_uimage(kernel_filename, &uentry, &loadaddr, 0); boot_info.bootstrap_pc = uentry; high = (loadaddr + kernel_size + 3) & ~3; } /* Not an ELF image nor an u-boot image, try a RAW image. */ if (kernel_size < 0) { kernel_size = load_image_targphys(kernel_filename, ddr_base, ram_size); boot_info.bootstrap_pc = ddr_base; high = (ddr_base + kernel_size + 3) & ~3; } if (initrd_filename) { int initrd_size; uint32_t initrd_offset; high = ROUND_UP(high + kernel_size, 4); boot_info.initrd_start = high; initrd_offset = boot_info.initrd_start - ddr_base; initrd_size = load_image_targphys(initrd_filename, boot_info.initrd_start, ram_size - initrd_offset); if (initrd_size < 0) { error_report("qemu: could not load initrd '%s'\n", initrd_filename); exit(EXIT_FAILURE); } boot_info.initrd_end = boot_info.initrd_start + initrd_size; high = ROUND_UP(high + initrd_size, 4); } boot_info.cmdline = high + 4096; if (kernel_cmdline && strlen(kernel_cmdline)) { pstrcpy_targphys("cmdline", boot_info.cmdline, 256, kernel_cmdline); } /* Provide a device-tree. */ boot_info.fdt = boot_info.cmdline + 4096; microblaze_load_dtb(boot_info.fdt, ram_size, boot_info.initrd_start, boot_info.initrd_end, kernel_cmdline, dtb_filename); } }
cedric-vincent/qemu
hw/microblaze/boot.c
C
gpl-2.0
7,044
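In the boot.c record above, microblaze_load_kernel always boots from physical RAM: it masks the ELF entry point down to a 28-bit offset into DDR, and a kernel linked at the virtual base 0xC0000000 is reloaded through translate_kernel_address, which subtracts 0x30000000. A small Python sketch of that address arithmetic (the ddr_base value is a hypothetical board configuration, not taken from the record):

def translate_kernel_address(addr):
    # Mirrors the C helper: shift a 0xC0000000-linked kernel down to
    # its physical load address.
    return addr - 0x30000000

def bootstrap_pc(ddr_base, elf_entry):
    # "Always boot into physical ram": keep only the low 28 bits.
    return ddr_base + (elf_entry & 0x0FFFFFFF)

ddr_base = 0x90000000               # hypothetical DDR base address
entry = 0xC0000000                  # common virtual entry of a Linux ELF
assert translate_kernel_address(entry) == 0x90000000
assert bootstrap_pc(ddr_base, entry) == 0x90000000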
/* * Copyright (C) 2004 the ffmpeg project * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * FFmpeg is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with FFmpeg; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /** * @file * Standard C DSP-oriented functions cribbed from the original VP3 * source code. */ #include "libavutil/attributes.h" #include "libavutil/common.h" #include "avcodec.h" #include "dsputil.h" #include "rnd_avg.h" #include "vp3dsp.h" #define IdctAdjustBeforeShift 8 #define xC1S7 64277 #define xC2S6 60547 #define xC3S5 54491 #define xC4S4 46341 #define xC5S3 36410 #define xC6S2 25080 #define xC7S1 12785 #define M(a,b) (((a) * (b))>>16) static av_always_inline void idct(uint8_t *dst, int stride, int16_t *input, int type) { int16_t *ip = input; int A, B, C, D, Ad, Bd, Cd, Dd, E, F, G, H; int Ed, Gd, Add, Bdd, Fd, Hd; int i; /* Inverse DCT on the rows now */ for (i = 0; i < 8; i++) { /* Check for non-zero values */ if ( ip[0 * 8] | ip[1 * 8] | ip[2 * 8] | ip[3 * 8] | ip[4 * 8] | ip[5 * 8] | ip[6 * 8] | ip[7 * 8] ) { A = M(xC1S7, ip[1 * 8]) + M(xC7S1, ip[7 * 8]); B = M(xC7S1, ip[1 * 8]) - M(xC1S7, ip[7 * 8]); C = M(xC3S5, ip[3 * 8]) + M(xC5S3, ip[5 * 8]); D = M(xC3S5, ip[5 * 8]) - M(xC5S3, ip[3 * 8]); Ad = M(xC4S4, (A - C)); Bd = M(xC4S4, (B - D)); Cd = A + C; Dd = B + D; E = M(xC4S4, (ip[0 * 8] + ip[4 * 8])); F = M(xC4S4, (ip[0 * 8] - ip[4 * 8])); G = M(xC2S6, ip[2 * 8]) + M(xC6S2, ip[6 * 8]); H = M(xC6S2, ip[2 * 8]) - M(xC2S6, ip[6 * 8]); Ed = E - G; Gd = E + G; Add = F + Ad; Bdd = Bd - H; Fd = F - Ad; Hd = Bd + H; /* Final sequence of operations over-write original inputs. */ ip[0 * 8] = Gd + Cd ; ip[7 * 8] = Gd - Cd ; ip[1 * 8] = Add + Hd; ip[2 * 8] = Add - Hd; ip[3 * 8] = Ed + Dd ; ip[4 * 8] = Ed - Dd ; ip[5 * 8] = Fd + Bdd; ip[6 * 8] = Fd - Bdd; } ip += 1; /* next row */ } ip = input; for ( i = 0; i < 8; i++) { /* Check for non-zero values (bitwise or faster than ||) */ if ( ip[1] | ip[2] | ip[3] | ip[4] | ip[5] | ip[6] | ip[7] ) { A = M(xC1S7, ip[1]) + M(xC7S1, ip[7]); B = M(xC7S1, ip[1]) - M(xC1S7, ip[7]); C = M(xC3S5, ip[3]) + M(xC5S3, ip[5]); D = M(xC3S5, ip[5]) - M(xC5S3, ip[3]); Ad = M(xC4S4, (A - C)); Bd = M(xC4S4, (B - D)); Cd = A + C; Dd = B + D; E = M(xC4S4, (ip[0] + ip[4])) + 8; F = M(xC4S4, (ip[0] - ip[4])) + 8; if(type==1){ //HACK E += 16*128; F += 16*128; } G = M(xC2S6, ip[2]) + M(xC6S2, ip[6]); H = M(xC6S2, ip[2]) - M(xC2S6, ip[6]); Ed = E - G; Gd = E + G; Add = F + Ad; Bdd = Bd - H; Fd = F - Ad; Hd = Bd + H; /* Final sequence of operations over-write original inputs. 
*/ if (type == 1) { dst[0*stride] = av_clip_uint8((Gd + Cd ) >> 4); dst[7*stride] = av_clip_uint8((Gd - Cd ) >> 4); dst[1*stride] = av_clip_uint8((Add + Hd ) >> 4); dst[2*stride] = av_clip_uint8((Add - Hd ) >> 4); dst[3*stride] = av_clip_uint8((Ed + Dd ) >> 4); dst[4*stride] = av_clip_uint8((Ed - Dd ) >> 4); dst[5*stride] = av_clip_uint8((Fd + Bdd ) >> 4); dst[6*stride] = av_clip_uint8((Fd - Bdd ) >> 4); }else{ dst[0*stride] = av_clip_uint8(dst[0*stride] + ((Gd + Cd ) >> 4)); dst[7*stride] = av_clip_uint8(dst[7*stride] + ((Gd - Cd ) >> 4)); dst[1*stride] = av_clip_uint8(dst[1*stride] + ((Add + Hd ) >> 4)); dst[2*stride] = av_clip_uint8(dst[2*stride] + ((Add - Hd ) >> 4)); dst[3*stride] = av_clip_uint8(dst[3*stride] + ((Ed + Dd ) >> 4)); dst[4*stride] = av_clip_uint8(dst[4*stride] + ((Ed - Dd ) >> 4)); dst[5*stride] = av_clip_uint8(dst[5*stride] + ((Fd + Bdd ) >> 4)); dst[6*stride] = av_clip_uint8(dst[6*stride] + ((Fd - Bdd ) >> 4)); } } else { if (type == 1) { dst[0*stride]= dst[1*stride]= dst[2*stride]= dst[3*stride]= dst[4*stride]= dst[5*stride]= dst[6*stride]= dst[7*stride]= av_clip_uint8(128 + ((xC4S4 * ip[0] + (IdctAdjustBeforeShift<<16))>>20)); }else{ if(ip[0]){ int v= ((xC4S4 * ip[0] + (IdctAdjustBeforeShift<<16))>>20); dst[0*stride] = av_clip_uint8(dst[0*stride] + v); dst[1*stride] = av_clip_uint8(dst[1*stride] + v); dst[2*stride] = av_clip_uint8(dst[2*stride] + v); dst[3*stride] = av_clip_uint8(dst[3*stride] + v); dst[4*stride] = av_clip_uint8(dst[4*stride] + v); dst[5*stride] = av_clip_uint8(dst[5*stride] + v); dst[6*stride] = av_clip_uint8(dst[6*stride] + v); dst[7*stride] = av_clip_uint8(dst[7*stride] + v); } } } ip += 8; /* next column */ dst++; } } static void vp3_idct_put_c(uint8_t *dest/*align 8*/, int line_size, int16_t *block/*align 16*/) { idct(dest, line_size, block, 1); memset(block, 0, sizeof(*block) * 64); } static void vp3_idct_add_c(uint8_t *dest/*align 8*/, int line_size, int16_t *block/*align 16*/) { idct(dest, line_size, block, 2); memset(block, 0, sizeof(*block) * 64); } static void vp3_idct_dc_add_c(uint8_t *dest/*align 8*/, int line_size, int16_t *block/*align 16*/) { int i, dc = (block[0] + 15) >> 5; for(i = 0; i < 8; i++){ dest[0] = av_clip_uint8(dest[0] + dc); dest[1] = av_clip_uint8(dest[1] + dc); dest[2] = av_clip_uint8(dest[2] + dc); dest[3] = av_clip_uint8(dest[3] + dc); dest[4] = av_clip_uint8(dest[4] + dc); dest[5] = av_clip_uint8(dest[5] + dc); dest[6] = av_clip_uint8(dest[6] + dc); dest[7] = av_clip_uint8(dest[7] + dc); dest += line_size; } block[0] = 0; } static void vp3_v_loop_filter_c(uint8_t *first_pixel, int stride, int *bounding_values) { unsigned char *end; int filter_value; const int nstride= -stride; for (end= first_pixel + 8; first_pixel < end; first_pixel++) { filter_value = (first_pixel[2 * nstride] - first_pixel[ stride]) +3*(first_pixel[0 ] - first_pixel[nstride]); filter_value = bounding_values[(filter_value + 4) >> 3]; first_pixel[nstride] = av_clip_uint8(first_pixel[nstride] + filter_value); first_pixel[0] = av_clip_uint8(first_pixel[0] - filter_value); } } static void vp3_h_loop_filter_c(uint8_t *first_pixel, int stride, int *bounding_values) { unsigned char *end; int filter_value; for (end= first_pixel + 8*stride; first_pixel != end; first_pixel += stride) { filter_value = (first_pixel[-2] - first_pixel[ 1]) +3*(first_pixel[ 0] - first_pixel[-1]); filter_value = bounding_values[(filter_value + 4) >> 3]; first_pixel[-1] = av_clip_uint8(first_pixel[-1] + filter_value); first_pixel[ 0] = av_clip_uint8(first_pixel[ 0] - 
filter_value); } } static void put_no_rnd_pixels_l2(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, ptrdiff_t stride, int h) { int i; for (i = 0; i < h; i++) { uint32_t a, b; a = AV_RN32(&src1[i * stride]); b = AV_RN32(&src2[i * stride]); AV_WN32A(&dst[i * stride], no_rnd_avg32(a, b)); a = AV_RN32(&src1[i * stride + 4]); b = AV_RN32(&src2[i * stride + 4]); AV_WN32A(&dst[i * stride + 4], no_rnd_avg32(a, b)); } } av_cold void ff_vp3dsp_init(VP3DSPContext *c, int flags) { c->put_no_rnd_pixels_l2 = put_no_rnd_pixels_l2; c->idct_put = vp3_idct_put_c; c->idct_add = vp3_idct_add_c; c->idct_dc_add = vp3_idct_dc_add_c; c->v_loop_filter = vp3_v_loop_filter_c; c->h_loop_filter = vp3_h_loop_filter_c; if (ARCH_ARM) ff_vp3dsp_init_arm(c, flags); if (ARCH_BFIN) ff_vp3dsp_init_bfin(c, flags); if (ARCH_PPC) ff_vp3dsp_init_ppc(c, flags); if (ARCH_X86) ff_vp3dsp_init_x86(c, flags); }
helloiloveit/VkxPhoneProject
submodules/externals/ffmpeg/libavcodec/vp3dsp.c
C
gpl-2.0
9,668
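The vp3dsp.c record above includes a DC-only fast path, vp3_idct_dc_add_c: it rounds the block's DC coefficient with (block[0] + 15) >> 5 and adds the result to all 64 destination pixels with unsigned-8-bit saturation. A Python sketch of that path (pure illustration, not the FFmpeg API):

def clip_uint8(v):
    return 0 if v < 0 else 255 if v > 255 else v

def idct_dc_add(dest, stride, dc_coeff):
    # Round-to-nearest with a shift by 5, as in the C code.
    dc = (dc_coeff + 15) >> 5
    for row in range(8):
        for col in range(8):
            i = row * stride + col
            dest[i] = clip_uint8(dest[i] + dc)

block = [128] * 64         # flat mid-grey 8x8 block, stride 8
idct_dc_add(block, 8, 64)  # dc = (64 + 15) >> 5 = 2
assert block[0] == 130 and block[63] == 130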
<?php

namespace Drupal\Tests\views\Kernel\Handler;

use Drupal\Tests\views\Kernel\ViewsKernelTestBase;
use Drupal\views\Views;

/**
 * Tests the core Drupal\views\Plugin\views\filter\InOperator handler.
 *
 * @group views
 */
class FilterInOperatorTest extends ViewsKernelTestBase {

  public static $modules = array('system');

  /**
   * Views used by this test.
   *
   * @var array
   */
  public static $testViews = array('test_view');

  /**
   * Map column names.
   *
   * @var array
   */
  protected $columnMap = array(
    'views_test_data_name' => 'name',
    'views_test_data_age' => 'age',
  );

  function viewsData() {
    $data = parent::viewsData();
    $data['views_test_data']['age']['filter']['id'] = 'in_operator';
    return $data;
  }

  public function testFilterInOperatorSimple() {
    $view = Views::getView('test_view');
    $view->setDisplay();

    // Add an in_operator filter.
    $view->displayHandlers->get('default')->overrideOption('filters', array(
      'age' => array(
        'id' => 'age',
        'field' => 'age',
        'table' => 'views_test_data',
        'value' => array(26, 30),
        'operator' => 'in',
      ),
    ));

    $this->executeView($view);

    $expected_result = array(
      array(
        'name' => 'Paul',
        'age' => 26,
      ),
      array(
        'name' => 'Meredith',
        'age' => 30,
      ),
    );

    $this->assertEqual(2, count($view->result));
    $this->assertIdenticalResultset($view, $expected_result, $this->columnMap);

    $view->destroy();
    $view->setDisplay();

    // Add an in_operator filter, negated with 'not in'.
    $view->displayHandlers->get('default')->overrideOption('filters', array(
      'age' => array(
        'id' => 'age',
        'field' => 'age',
        'table' => 'views_test_data',
        'value' => array(26, 30),
        'operator' => 'not in',
      ),
    ));

    $this->executeView($view);

    $expected_result = array(
      array(
        'name' => 'John',
        'age' => 25,
      ),
      array(
        'name' => 'George',
        'age' => 27,
      ),
      array(
        'name' => 'Ringo',
        'age' => 28,
      ),
    );

    $this->assertEqual(3, count($view->result));
    $this->assertIdenticalResultset($view, $expected_result, $this->columnMap);
  }

  public function testFilterInOperatorGroupedExposedSimple() {
    $filters = $this->getGroupedExposedFilters();
    $view = Views::getView('test_view');

    // Filter: Age, Operator: in, Value: 26, 30
    $filters['age']['group_info']['default_group'] = 1;
    $view->setDisplay();
    $view->displayHandlers->get('default')->overrideOption('filters', $filters);

    $this->executeView($view);

    $expected_result = array(
      array(
        'name' => 'Paul',
        'age' => 26,
      ),
      array(
        'name' => 'Meredith',
        'age' => 30,
      ),
    );

    $this->assertEqual(2, count($view->result));
    $this->assertIdenticalResultset($view, $expected_result, $this->columnMap);
  }

  public function testFilterNotInOperatorGroupedExposedSimple() {
    $filters = $this->getGroupedExposedFilters();
    $view = Views::getView('test_view');

    // Filter: Age, Operator: not in, Value: 26, 30
    $filters['age']['group_info']['default_group'] = 2;
    $view->setDisplay();
    $view->displayHandlers->get('default')->overrideOption('filters', $filters);

    $this->executeView($view);

    $expected_result = array(
      array(
        'name' => 'John',
        'age' => 25,
      ),
      array(
        'name' => 'George',
        'age' => 27,
      ),
      array(
        'name' => 'Ringo',
        'age' => 28,
      ),
    );

    $this->assertEqual(3, count($view->result));
    $this->assertIdenticalResultset($view, $expected_result, $this->columnMap);
  }

  protected function getGroupedExposedFilters() {
    $filters = array(
      'age' => array(
        'id' => 'age',
        'table' => 'views_test_data',
        'field' => 'age',
        'relationship' => 'none',
        'exposed' => TRUE,
        'expose' => array(
          'operator' => 'age_op',
          'label' => 'age',
          'identifier' => 'age',
        ),
        'is_grouped' => TRUE,
        'group_info' => array(
          'label' => 'age',
          'identifier' => 'age',
          'default_group' => 'All',
          'group_items' => array(
            1 => array(
              'title' => 'Age is one of 26, 30',
              'operator' => 'in',
              'value' => array(26, 30),
            ),
            2 => array(
              'title' => 'Age is not one of 26, 30',
              'operator' => 'not in',
              'value' => array(26, 30),
            ),
          ),
        ),
      ),
    );
    return $filters;
  }

}
Fluxusio/reserviet-restaurants
core/modules/views/tests/src/Kernel/Handler/FilterInOperatorTest.php
PHP
gpl-2.0
4,749
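The Drupal test record above exercises the Views in_operator filter with 'in' and 'not in' over a five-row views_test_data fixture. The set semantics it asserts can be stated compactly; the sketch below is an illustration in Python, not the Drupal API, with the rows reconstructed from the test's expected results:

rows = [
    {'name': 'John',     'age': 25},
    {'name': 'Paul',     'age': 26},
    {'name': 'George',   'age': 27},
    {'name': 'Ringo',    'age': 28},
    {'name': 'Meredith', 'age': 30},
]

def filter_in(rows, field, values, negate=False):
    # 'in' keeps rows whose field value is in the set; 'not in' inverts it.
    return [r for r in rows if (r[field] in values) != negate]

assert [r['name'] for r in filter_in(rows, 'age', {26, 30})] == ['Paul', 'Meredith']
assert ([r['name'] for r in filter_in(rows, 'age', {26, 30}, negate=True)]
        == ['John', 'George', 'Ringo'])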
/*- * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting * Copyright (c) 2004-2005 Atheros Communications, Inc. * Copyright (c) 2006 Devicescape Software, Inc. * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com> * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu> * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. 
* */ #include <linux/module.h> #include <linux/delay.h> #include <linux/hardirq.h> #include <linux/if.h> #include <linux/io.h> #include <linux/netdevice.h> #include <linux/cache.h> #include <linux/pci.h> #include <linux/ethtool.h> #include <linux/uaccess.h> #include <net/ieee80211_radiotap.h> #include <asm/unaligned.h> #include "base.h" #include "reg.h" #include "debug.h" static u8 ath5k_calinterval = 10; /* Calibrate PHY every 10 secs (TODO: Fixme) */ static int modparam_nohwcrypt; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); static int modparam_all_channels; module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO); MODULE_PARM_DESC(all_channels, "Expose all channels the device can use."); /******************\ * Internal defines * \******************/ /* Module info */ MODULE_AUTHOR("Jiri Slaby"); MODULE_AUTHOR("Nick Kossifidis"); MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards."); MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION("0.6.0 (EXPERIMENTAL)"); /* Known PCI ids */ static const struct pci_device_id ath5k_pci_id_table[] = { { PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */ { PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */ { PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus !*/ { PCI_VDEVICE(ATHEROS, 0x0012) }, /* 5211 */ { PCI_VDEVICE(ATHEROS, 0x0013) }, /* 5212 */ { PCI_VDEVICE(3COM_2, 0x0013) }, /* 3com 5212 */ { PCI_VDEVICE(3COM, 0x0013) }, /* 3com 3CRDAG675 5212 */ { PCI_VDEVICE(ATHEROS, 0x1014) }, /* IBM minipci 5212 */ { PCI_VDEVICE(ATHEROS, 0x0014) }, /* 5212 combatible */ { PCI_VDEVICE(ATHEROS, 0x0015) }, /* 5212 combatible */ { PCI_VDEVICE(ATHEROS, 0x0016) }, /* 5212 combatible */ { PCI_VDEVICE(ATHEROS, 0x0017) }, /* 5212 combatible */ { PCI_VDEVICE(ATHEROS, 0x0018) }, /* 5212 combatible */ { PCI_VDEVICE(ATHEROS, 0x0019) }, /* 5212 combatible */ { PCI_VDEVICE(ATHEROS, 0x001a) }, /* 2413 Griffin-lite */ { PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */ { PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */ { PCI_VDEVICE(ATHEROS, 0x001d) }, /* 2417 Nala */ { 0 } }; MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table); /* Known SREVs */ static const struct ath5k_srev_name srev_names[] = { { "5210", AR5K_VERSION_MAC, AR5K_SREV_AR5210 }, { "5311", AR5K_VERSION_MAC, AR5K_SREV_AR5311 }, { "5311A", AR5K_VERSION_MAC, AR5K_SREV_AR5311A }, { "5311B", AR5K_VERSION_MAC, AR5K_SREV_AR5311B }, { "5211", AR5K_VERSION_MAC, AR5K_SREV_AR5211 }, { "5212", AR5K_VERSION_MAC, AR5K_SREV_AR5212 }, { "5213", AR5K_VERSION_MAC, AR5K_SREV_AR5213 }, { "5213A", AR5K_VERSION_MAC, AR5K_SREV_AR5213A }, { "2413", AR5K_VERSION_MAC, AR5K_SREV_AR2413 }, { "2414", AR5K_VERSION_MAC, AR5K_SREV_AR2414 }, { "5424", AR5K_VERSION_MAC, AR5K_SREV_AR5424 }, { "5413", AR5K_VERSION_MAC, AR5K_SREV_AR5413 }, { "5414", AR5K_VERSION_MAC, AR5K_SREV_AR5414 }, { "2415", AR5K_VERSION_MAC, AR5K_SREV_AR2415 }, { "5416", AR5K_VERSION_MAC, AR5K_SREV_AR5416 }, { "5418", AR5K_VERSION_MAC, AR5K_SREV_AR5418 }, { "2425", AR5K_VERSION_MAC, AR5K_SREV_AR2425 }, { "2417", AR5K_VERSION_MAC, AR5K_SREV_AR2417 }, { "xxxxx", AR5K_VERSION_MAC, AR5K_SREV_UNKNOWN }, { "5110", AR5K_VERSION_RAD, AR5K_SREV_RAD_5110 }, { "5111", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111 }, { "5111A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111A }, { "2111", AR5K_VERSION_RAD, AR5K_SREV_RAD_2111 }, { "5112", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112 }, { "5112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112A }, { "5112B", 
AR5K_VERSION_RAD, AR5K_SREV_RAD_5112B }, { "2112", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112 }, { "2112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112A }, { "2112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112B }, { "2413", AR5K_VERSION_RAD, AR5K_SREV_RAD_2413 }, { "5413", AR5K_VERSION_RAD, AR5K_SREV_RAD_5413 }, { "2316", AR5K_VERSION_RAD, AR5K_SREV_RAD_2316 }, { "2317", AR5K_VERSION_RAD, AR5K_SREV_RAD_2317 }, { "5424", AR5K_VERSION_RAD, AR5K_SREV_RAD_5424 }, { "5133", AR5K_VERSION_RAD, AR5K_SREV_RAD_5133 }, { "xxxxx", AR5K_VERSION_RAD, AR5K_SREV_UNKNOWN }, }; static const struct ieee80211_rate ath5k_rates[] = { { .bitrate = 10, .hw_value = ATH5K_RATE_CODE_1M, }, { .bitrate = 20, .hw_value = ATH5K_RATE_CODE_2M, .hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 55, .hw_value = ATH5K_RATE_CODE_5_5M, .hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 110, .hw_value = ATH5K_RATE_CODE_11M, .hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 60, .hw_value = ATH5K_RATE_CODE_6M, .flags = 0 }, { .bitrate = 90, .hw_value = ATH5K_RATE_CODE_9M, .flags = 0 }, { .bitrate = 120, .hw_value = ATH5K_RATE_CODE_12M, .flags = 0 }, { .bitrate = 180, .hw_value = ATH5K_RATE_CODE_18M, .flags = 0 }, { .bitrate = 240, .hw_value = ATH5K_RATE_CODE_24M, .flags = 0 }, { .bitrate = 360, .hw_value = ATH5K_RATE_CODE_36M, .flags = 0 }, { .bitrate = 480, .hw_value = ATH5K_RATE_CODE_48M, .flags = 0 }, { .bitrate = 540, .hw_value = ATH5K_RATE_CODE_54M, .flags = 0 }, /* XR missing */ }; /* * Prototypes - PCI stack related functions */ static int __devinit ath5k_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); static void __devexit ath5k_pci_remove(struct pci_dev *pdev); #ifdef CONFIG_PM static int ath5k_pci_suspend(struct pci_dev *pdev, pm_message_t state); static int ath5k_pci_resume(struct pci_dev *pdev); #else #define ath5k_pci_suspend NULL #define ath5k_pci_resume NULL #endif /* CONFIG_PM */ static struct pci_driver ath5k_pci_driver = { .name = KBUILD_MODNAME, .id_table = ath5k_pci_id_table, .probe = ath5k_pci_probe, .remove = __devexit_p(ath5k_pci_remove), .suspend = ath5k_pci_suspend, .resume = ath5k_pci_resume, }; /* * Prototypes - MAC 802.11 stack related functions */ static int ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb); static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, struct ath5k_txq *txq); static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan); static int ath5k_reset_wake(struct ath5k_softc *sc); static int ath5k_start(struct ieee80211_hw *hw); static void ath5k_stop(struct ieee80211_hw *hw); static int ath5k_add_interface(struct ieee80211_hw *hw, struct ieee80211_if_init_conf *conf); static void ath5k_remove_interface(struct ieee80211_hw *hw, struct ieee80211_if_init_conf *conf); static int ath5k_config(struct ieee80211_hw *hw, u32 changed); static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw, int mc_count, struct dev_addr_list *mc_list); static void ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *new_flags, u64 multicast); static int ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key); static int ath5k_get_stats(struct ieee80211_hw *hw, struct ieee80211_low_level_stats *stats); static int ath5k_get_tx_stats(struct 
ieee80211_hw *hw, struct ieee80211_tx_queue_stats *stats); static u64 ath5k_get_tsf(struct ieee80211_hw *hw); static void ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf); static void ath5k_reset_tsf(struct ieee80211_hw *hw); static int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif); static void ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, u32 changes); static void ath5k_sw_scan_start(struct ieee80211_hw *hw); static void ath5k_sw_scan_complete(struct ieee80211_hw *hw); static const struct ieee80211_ops ath5k_hw_ops = { .tx = ath5k_tx, .start = ath5k_start, .stop = ath5k_stop, .add_interface = ath5k_add_interface, .remove_interface = ath5k_remove_interface, .config = ath5k_config, .prepare_multicast = ath5k_prepare_multicast, .configure_filter = ath5k_configure_filter, .set_key = ath5k_set_key, .get_stats = ath5k_get_stats, .conf_tx = NULL, .get_tx_stats = ath5k_get_tx_stats, .get_tsf = ath5k_get_tsf, .set_tsf = ath5k_set_tsf, .reset_tsf = ath5k_reset_tsf, .bss_info_changed = ath5k_bss_info_changed, .sw_scan_start = ath5k_sw_scan_start, .sw_scan_complete = ath5k_sw_scan_complete, }; /* * Prototypes - Internal functions */ /* Attach detach */ static int ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw); static void ath5k_detach(struct pci_dev *pdev, struct ieee80211_hw *hw); /* Channel/mode setup */ static inline short ath5k_ieee2mhz(short chan); static unsigned int ath5k_copy_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels, unsigned int mode, unsigned int max); static int ath5k_setup_bands(struct ieee80211_hw *hw); static int ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan); static void ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode); static void ath5k_mode_setup(struct ath5k_softc *sc); /* Descriptor setup */ static int ath5k_desc_alloc(struct ath5k_softc *sc, struct pci_dev *pdev); static void ath5k_desc_free(struct ath5k_softc *sc, struct pci_dev *pdev); /* Buffers setup */ static int ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf); static int ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf, struct ath5k_txq *txq); static inline void ath5k_txbuf_free(struct ath5k_softc *sc, struct ath5k_buf *bf) { BUG_ON(!bf); if (!bf->skb) return; pci_unmap_single(sc->pdev, bf->skbaddr, bf->skb->len, PCI_DMA_TODEVICE); dev_kfree_skb_any(bf->skb); bf->skb = NULL; } static inline void ath5k_rxbuf_free(struct ath5k_softc *sc, struct ath5k_buf *bf) { BUG_ON(!bf); if (!bf->skb) return; pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize, PCI_DMA_FROMDEVICE); dev_kfree_skb_any(bf->skb); bf->skb = NULL; } /* Queues setup */ static struct ath5k_txq *ath5k_txq_setup(struct ath5k_softc *sc, int qtype, int subtype); static int ath5k_beaconq_setup(struct ath5k_hw *ah); static int ath5k_beaconq_config(struct ath5k_softc *sc); static void ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq); static void ath5k_txq_cleanup(struct ath5k_softc *sc); static void ath5k_txq_release(struct ath5k_softc *sc); /* Rx handling */ static int ath5k_rx_start(struct ath5k_softc *sc); static void ath5k_rx_stop(struct ath5k_softc *sc); static unsigned int ath5k_rx_decrypted(struct ath5k_softc *sc, struct ath5k_desc *ds, struct sk_buff *skb, struct ath5k_rx_status *rs); static void ath5k_tasklet_rx(unsigned long data); /* Tx handling */ static void ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq); static void 
ath5k_tasklet_tx(unsigned long data); /* Beacon handling */ static int ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf); static void ath5k_beacon_send(struct ath5k_softc *sc); static void ath5k_beacon_config(struct ath5k_softc *sc); static void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf); static void ath5k_tasklet_beacon(unsigned long data); static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp) { u64 tsf = ath5k_hw_get_tsf64(ah); if ((tsf & 0x7fff) < rstamp) tsf -= 0x8000; return (tsf & ~0x7fff) | rstamp; } /* Interrupt handling */ static int ath5k_init(struct ath5k_softc *sc); static int ath5k_stop_locked(struct ath5k_softc *sc); static int ath5k_stop_hw(struct ath5k_softc *sc); static irqreturn_t ath5k_intr(int irq, void *dev_id); static void ath5k_tasklet_reset(unsigned long data); static void ath5k_tasklet_calibrate(unsigned long data); /* * Module init/exit functions */ static int __init init_ath5k_pci(void) { int ret; ath5k_debug_init(); ret = pci_register_driver(&ath5k_pci_driver); if (ret) { printk(KERN_ERR "ath5k_pci: can't register pci driver\n"); return ret; } return 0; } static void __exit exit_ath5k_pci(void) { pci_unregister_driver(&ath5k_pci_driver); ath5k_debug_finish(); } module_init(init_ath5k_pci); module_exit(exit_ath5k_pci); /********************\ * PCI Initialization * \********************/ static const char * ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val) { const char *name = "xxxxx"; unsigned int i; for (i = 0; i < ARRAY_SIZE(srev_names); i++) { if (srev_names[i].sr_type != type) continue; if ((val & 0xf0) == srev_names[i].sr_val) name = srev_names[i].sr_name; if ((val & 0xff) == srev_names[i].sr_val) { name = srev_names[i].sr_name; break; } } return name; } static int __devinit ath5k_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { void __iomem *mem; struct ath5k_softc *sc; struct ieee80211_hw *hw; int ret; u8 csz; ret = pci_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "can't enable device\n"); goto err; } /* XXX 32-bit addressing only */ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (ret) { dev_err(&pdev->dev, "32-bit DMA not available\n"); goto err_dis; } /* * Cache line size is used to size and align various * structures used to communicate with the hardware. */ pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz); if (csz == 0) { /* * Linux 2.4.18 (at least) writes the cache line size * register as a 16-bit wide register which is wrong. * We must have this setup properly for rx buffer * DMA to work so force a reasonable value here if it * comes up zero. */ csz = L1_CACHE_BYTES >> 2; pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz); } /* * The default setting of latency timer yields poor results, * set it to the value used by other systems. It may be worth * tweaking this setting more. */ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8); /* Enable bus mastering */ pci_set_master(pdev); /* * Disable the RETRY_TIMEOUT register (0x41) to keep * PCI Tx retries from interfering with C3 CPU state. 
*/ pci_write_config_byte(pdev, 0x41, 0); ret = pci_request_region(pdev, 0, "ath5k"); if (ret) { dev_err(&pdev->dev, "cannot reserve PCI memory region\n"); goto err_dis; } mem = pci_iomap(pdev, 0, 0); if (!mem) { dev_err(&pdev->dev, "cannot remap PCI memory region\n") ; ret = -EIO; goto err_reg; } /* * Allocate hw (mac80211 main struct) * and hw->priv (driver private data) */ hw = ieee80211_alloc_hw(sizeof(*sc), &ath5k_hw_ops); if (hw == NULL) { dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n"); ret = -ENOMEM; goto err_map; } dev_info(&pdev->dev, "registered as '%s'\n", wiphy_name(hw->wiphy)); /* Initialize driver private data */ SET_IEEE80211_DEV(hw, &pdev->dev); hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM; hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_MESH_POINT); hw->extra_tx_headroom = 2; hw->channel_change_time = 5000; sc = hw->priv; sc->hw = hw; sc->pdev = pdev; ath5k_debug_init_device(sc); /* * Mark the device as detached to avoid processing * interrupts until setup is complete. */ __set_bit(ATH_STAT_INVALID, sc->status); sc->iobase = mem; /* So we can unmap it on detach */ sc->common.cachelsz = csz << 2; /* convert to bytes */ sc->opmode = NL80211_IFTYPE_STATION; sc->bintval = 1000; mutex_init(&sc->lock); spin_lock_init(&sc->rxbuflock); spin_lock_init(&sc->txbuflock); spin_lock_init(&sc->block); /* Set private data */ pci_set_drvdata(pdev, hw); /* Setup interrupt handler */ ret = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc); if (ret) { ATH5K_ERR(sc, "request_irq failed\n"); goto err_free; } /* Initialize device */ sc->ah = ath5k_hw_attach(sc); if (IS_ERR(sc->ah)) { ret = PTR_ERR(sc->ah); goto err_irq; } /* set up multi-rate retry capabilities */ if (sc->ah->ah_version == AR5K_AR5212) { hw->max_rates = 4; hw->max_rate_tries = 11; } /* Finish private driver data initialization */ ret = ath5k_attach(pdev, hw); if (ret) goto err_ah; ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n", ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev), sc->ah->ah_mac_srev, sc->ah->ah_phy_revision); if (!sc->ah->ah_single_chip) { /* Single chip radio (!RF5111) */ if (sc->ah->ah_radio_5ghz_revision && !sc->ah->ah_radio_2ghz_revision) { /* No 5GHz support -> report 2GHz radio */ if (!test_bit(AR5K_MODE_11A, sc->ah->ah_capabilities.cap_mode)) { ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n", ath5k_chip_name(AR5K_VERSION_RAD, sc->ah->ah_radio_5ghz_revision), sc->ah->ah_radio_5ghz_revision); /* No 2GHz support (5110 and some * 5Ghz only cards) -> report 5Ghz radio */ } else if (!test_bit(AR5K_MODE_11B, sc->ah->ah_capabilities.cap_mode)) { ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n", ath5k_chip_name(AR5K_VERSION_RAD, sc->ah->ah_radio_5ghz_revision), sc->ah->ah_radio_5ghz_revision); /* Multiband radio */ } else { ATH5K_INFO(sc, "RF%s multiband radio found" " (0x%x)\n", ath5k_chip_name(AR5K_VERSION_RAD, sc->ah->ah_radio_5ghz_revision), sc->ah->ah_radio_5ghz_revision); } } /* Multi chip radio (RF5111 - RF2111) -> * report both 2GHz/5GHz radios */ else if (sc->ah->ah_radio_5ghz_revision && sc->ah->ah_radio_2ghz_revision){ ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n", ath5k_chip_name(AR5K_VERSION_RAD, sc->ah->ah_radio_5ghz_revision), sc->ah->ah_radio_5ghz_revision); ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n", ath5k_chip_name(AR5K_VERSION_RAD, sc->ah->ah_radio_2ghz_revision), 
sc->ah->ah_radio_2ghz_revision); } } /* ready to process interrupts */ __clear_bit(ATH_STAT_INVALID, sc->status); return 0; err_ah: ath5k_hw_detach(sc->ah); err_irq: free_irq(pdev->irq, sc); err_free: ieee80211_free_hw(hw); err_map: pci_iounmap(pdev, mem); err_reg: pci_release_region(pdev, 0); err_dis: pci_disable_device(pdev); err: return ret; } static void __devexit ath5k_pci_remove(struct pci_dev *pdev) { struct ieee80211_hw *hw = pci_get_drvdata(pdev); struct ath5k_softc *sc = hw->priv; ath5k_debug_finish_device(sc); ath5k_detach(pdev, hw); ath5k_hw_detach(sc->ah); free_irq(pdev->irq, sc); pci_iounmap(pdev, sc->iobase); pci_release_region(pdev, 0); pci_disable_device(pdev); ieee80211_free_hw(hw); } #ifdef CONFIG_PM static int ath5k_pci_suspend(struct pci_dev *pdev, pm_message_t state) { struct ieee80211_hw *hw = pci_get_drvdata(pdev); struct ath5k_softc *sc = hw->priv; ath5k_led_off(sc); pci_save_state(pdev); pci_disable_device(pdev); pci_set_power_state(pdev, PCI_D3hot); return 0; } static int ath5k_pci_resume(struct pci_dev *pdev) { struct ieee80211_hw *hw = pci_get_drvdata(pdev); struct ath5k_softc *sc = hw->priv; int err; pci_restore_state(pdev); err = pci_enable_device(pdev); if (err) return err; /* * Suspend/Resume resets the PCI configuration space, so we have to * re-disable the RETRY_TIMEOUT register (0x41) to keep * PCI Tx retries from interfering with C3 CPU state */ pci_write_config_byte(pdev, 0x41, 0); ath5k_led_enable(sc); return 0; } #endif /* CONFIG_PM */ /***********************\ * Driver Initialization * \***********************/ static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct ath5k_softc *sc = hw->priv; struct ath_regulatory *regulatory = &sc->common.regulatory; return ath_reg_notifier_apply(wiphy, request, regulatory); } static int ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw) { struct ath5k_softc *sc = hw->priv; struct ath5k_hw *ah = sc->ah; struct ath_regulatory *regulatory = &sc->common.regulatory; u8 mac[ETH_ALEN] = {}; int ret; ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "devid 0x%x\n", pdev->device); /* * Check if the MAC has multi-rate retry support. * We do this by trying to setup a fake extended * descriptor. MAC's that don't have support will * return false w/o doing anything. MAC's that do * support it will return true w/o doing anything. */ ret = ah->ah_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0); if (ret < 0) goto err; if (ret > 0) __set_bit(ATH_STAT_MRRETRY, sc->status); /* * Collect the channel list. The 802.11 layer * is resposible for filtering this list based * on settings like the phy mode and regulatory * domain restrictions. */ ret = ath5k_setup_bands(hw); if (ret) { ATH5K_ERR(sc, "can't get channels\n"); goto err; } /* NB: setup here so ath5k_rate_update is happy */ if (test_bit(AR5K_MODE_11A, ah->ah_modes)) ath5k_setcurmode(sc, AR5K_MODE_11A); else ath5k_setcurmode(sc, AR5K_MODE_11B); /* * Allocate tx+rx descriptors and populate the lists. */ ret = ath5k_desc_alloc(sc, pdev); if (ret) { ATH5K_ERR(sc, "can't allocate descriptors\n"); goto err; } /* * Allocate hardware transmit queues: one queue for * beacon frames and one data queue for each QoS * priority. Note that hw functions handle reseting * these queues at the needed time. 
*/ ret = ath5k_beaconq_setup(ah); if (ret < 0) { ATH5K_ERR(sc, "can't setup a beacon xmit queue\n"); goto err_desc; } sc->bhalq = ret; sc->cabq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_CAB, 0); if (IS_ERR(sc->cabq)) { ATH5K_ERR(sc, "can't setup cab queue\n"); ret = PTR_ERR(sc->cabq); goto err_bhal; } sc->txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK); if (IS_ERR(sc->txq)) { ATH5K_ERR(sc, "can't setup xmit queue\n"); ret = PTR_ERR(sc->txq); goto err_queues; } tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc); tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc); tasklet_init(&sc->restq, ath5k_tasklet_reset, (unsigned long)sc); tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc); tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc); ret = ath5k_eeprom_read_mac(ah, mac); if (ret) { ATH5K_ERR(sc, "unable to read address from EEPROM: 0x%04x\n", sc->pdev->device); goto err_queues; } SET_IEEE80211_PERM_ADDR(hw, mac); /* All MAC address bits matter for ACKs */ memset(sc->bssidmask, 0xff, ETH_ALEN); ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask); regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain; ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier); if (ret) { ATH5K_ERR(sc, "can't initialize regulatory system\n"); goto err_queues; } ret = ieee80211_register_hw(hw); if (ret) { ATH5K_ERR(sc, "can't register ieee80211 hw\n"); goto err_queues; } if (!ath_is_world_regd(regulatory)) regulatory_hint(hw->wiphy, regulatory->alpha2); ath5k_init_leds(sc); return 0; err_queues: ath5k_txq_release(sc); err_bhal: ath5k_hw_release_tx_queue(ah, sc->bhalq); err_desc: ath5k_desc_free(sc, pdev); err: return ret; } static void ath5k_detach(struct pci_dev *pdev, struct ieee80211_hw *hw) { struct ath5k_softc *sc = hw->priv; /* * NB: the order of these is important: * o call the 802.11 layer before detaching ath5k_hw to * insure callbacks into the driver to delete global * key cache entries can be handled * o reclaim the tx queue data structures after calling * the 802.11 layer as we'll get called back to reclaim * node state and potentially want to use them * o to cleanup the tx queues the hal is called, so detach * it last * XXX: ??? detach ath5k_hw ??? * Other than that, it's straightforward... */ ieee80211_unregister_hw(hw); ath5k_desc_free(sc, pdev); ath5k_txq_release(sc); ath5k_hw_release_tx_queue(sc->ah, sc->bhalq); ath5k_unregister_leds(sc); /* * NB: can't reclaim these until after ieee80211_ifdetach * returns because we'll get called back to reclaim node * state and potentially want to use them. */ } /********************\ * Channel/mode setup * \********************/ /* * Convert IEEE channel number to MHz frequency. */ static inline short ath5k_ieee2mhz(short chan) { if (chan <= 14 || chan >= 27) return ieee80211chan2mhz(chan); else return 2212 + chan * 20; } /* * Returns true for the channel numbers used without all_channels modparam. 
*/ static bool ath5k_is_standard_channel(short chan) { return ((chan <= 14) || /* UNII 1,2 */ ((chan & 3) == 0 && chan >= 36 && chan <= 64) || /* midband */ ((chan & 3) == 0 && chan >= 100 && chan <= 140) || /* UNII-3 */ ((chan & 3) == 1 && chan >= 149 && chan <= 165)); } static unsigned int ath5k_copy_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels, unsigned int mode, unsigned int max) { unsigned int i, count, size, chfreq, freq, ch; if (!test_bit(mode, ah->ah_modes)) return 0; switch (mode) { case AR5K_MODE_11A: case AR5K_MODE_11A_TURBO: /* 1..220, but 2GHz frequencies are filtered by check_channel */ size = 220 ; chfreq = CHANNEL_5GHZ; break; case AR5K_MODE_11B: case AR5K_MODE_11G: case AR5K_MODE_11G_TURBO: size = 26; chfreq = CHANNEL_2GHZ; break; default: ATH5K_WARN(ah->ah_sc, "bad mode, not copying channels\n"); return 0; } for (i = 0, count = 0; i < size && max > 0; i++) { ch = i + 1 ; freq = ath5k_ieee2mhz(ch); /* Check if channel is supported by the chipset */ if (!ath5k_channel_ok(ah, freq, chfreq)) continue; if (!modparam_all_channels && !ath5k_is_standard_channel(ch)) continue; /* Write channel info and increment counter */ channels[count].center_freq = freq; channels[count].band = (chfreq == CHANNEL_2GHZ) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; switch (mode) { case AR5K_MODE_11A: case AR5K_MODE_11G: channels[count].hw_value = chfreq | CHANNEL_OFDM; break; case AR5K_MODE_11A_TURBO: case AR5K_MODE_11G_TURBO: channels[count].hw_value = chfreq | CHANNEL_OFDM | CHANNEL_TURBO; break; case AR5K_MODE_11B: channels[count].hw_value = CHANNEL_B; } count++; max--; } return count; } static void ath5k_setup_rate_idx(struct ath5k_softc *sc, struct ieee80211_supported_band *b) { u8 i; for (i = 0; i < AR5K_MAX_RATES; i++) sc->rate_idx[b->band][i] = -1; for (i = 0; i < b->n_bitrates; i++) { sc->rate_idx[b->band][b->bitrates[i].hw_value] = i; if (b->bitrates[i].hw_value_short) sc->rate_idx[b->band][b->bitrates[i].hw_value_short] = i; } } static int ath5k_setup_bands(struct ieee80211_hw *hw) { struct ath5k_softc *sc = hw->priv; struct ath5k_hw *ah = sc->ah; struct ieee80211_supported_band *sband; int max_c, count_c = 0; int i; BUILD_BUG_ON(ARRAY_SIZE(sc->sbands) < IEEE80211_NUM_BANDS); max_c = ARRAY_SIZE(sc->channels); /* 2GHz band */ sband = &sc->sbands[IEEE80211_BAND_2GHZ]; sband->band = IEEE80211_BAND_2GHZ; sband->bitrates = &sc->rates[IEEE80211_BAND_2GHZ][0]; if (test_bit(AR5K_MODE_11G, sc->ah->ah_capabilities.cap_mode)) { /* G mode */ memcpy(sband->bitrates, &ath5k_rates[0], sizeof(struct ieee80211_rate) * 12); sband->n_bitrates = 12; sband->channels = sc->channels; sband->n_channels = ath5k_copy_channels(ah, sband->channels, AR5K_MODE_11G, max_c); hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; count_c = sband->n_channels; max_c -= count_c; } else if (test_bit(AR5K_MODE_11B, sc->ah->ah_capabilities.cap_mode)) { /* B mode */ memcpy(sband->bitrates, &ath5k_rates[0], sizeof(struct ieee80211_rate) * 4); sband->n_bitrates = 4; /* 5211 only supports B rates and uses 4bit rate codes * (e.g normally we have 0x1B for 1M, but on 5211 we have 0x0B) * fix them up here: */ if (ah->ah_version == AR5K_AR5211) { for (i = 0; i < 4; i++) { sband->bitrates[i].hw_value = sband->bitrates[i].hw_value & 0xF; sband->bitrates[i].hw_value_short = sband->bitrates[i].hw_value_short & 0xF; } } sband->channels = sc->channels; sband->n_channels = ath5k_copy_channels(ah, sband->channels, AR5K_MODE_11B, max_c); hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband; count_c = sband->n_channels; max_c -= count_c; 
} ath5k_setup_rate_idx(sc, sband); /* 5GHz band, A mode */ if (test_bit(AR5K_MODE_11A, sc->ah->ah_capabilities.cap_mode)) { sband = &sc->sbands[IEEE80211_BAND_5GHZ]; sband->band = IEEE80211_BAND_5GHZ; sband->bitrates = &sc->rates[IEEE80211_BAND_5GHZ][0]; memcpy(sband->bitrates, &ath5k_rates[4], sizeof(struct ieee80211_rate) * 8); sband->n_bitrates = 8; sband->channels = &sc->channels[count_c]; sband->n_channels = ath5k_copy_channels(ah, sband->channels, AR5K_MODE_11A, max_c); hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband; } ath5k_setup_rate_idx(sc, sband); ath5k_debug_dump_bands(sc); return 0; } /* * Set/change channels. We always reset the chip. * To accomplish this we must first cleanup any pending DMA, * then restart stuff after a la ath5k_init. * * Called with sc->lock. */ static int ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan) { ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "(%u MHz) -> (%u MHz)\n", sc->curchan->center_freq, chan->center_freq); /* * To switch channels clear any pending DMA operations; * wait long enough for the RX fifo to drain, reset the * hardware at the new frequency, and then re-enable * the relevant bits of the h/w. */ return ath5k_reset(sc, chan); } static void ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode) { sc->curmode = mode; if (mode == AR5K_MODE_11A) { sc->curband = &sc->sbands[IEEE80211_BAND_5GHZ]; } else { sc->curband = &sc->sbands[IEEE80211_BAND_2GHZ]; } } static void ath5k_mode_setup(struct ath5k_softc *sc) { struct ath5k_hw *ah = sc->ah; u32 rfilt; ah->ah_op_mode = sc->opmode; /* configure rx filter */ rfilt = sc->filter_flags; ath5k_hw_set_rx_filter(ah, rfilt); if (ath5k_hw_hasbssidmask(ah)) ath5k_hw_set_bssid_mask(ah, sc->bssidmask); /* configure operational mode */ ath5k_hw_set_opmode(ah); ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt); } static inline int ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix) { int rix; /* return base rate on errors */ if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES, "hw_rix out of bounds: %x\n", hw_rix)) return 0; rix = sc->rate_idx[sc->curband->band][hw_rix]; if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix)) rix = 0; return rix; } /***************\ * Buffers setup * \***************/ static struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr) { struct sk_buff *skb; /* * Allocate buffer with headroom_needed space for the * fake physical layer header at the start. */ skb = ath_rxbuf_alloc(&sc->common, sc->rxbufsize + sc->common.cachelsz - 1, GFP_ATOMIC); if (!skb) { ATH5K_ERR(sc, "can't alloc skbuff of size %u\n", sc->rxbufsize + sc->common.cachelsz - 1); return NULL; } *skb_addr = pci_map_single(sc->pdev, skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE); if (unlikely(pci_dma_mapping_error(sc->pdev, *skb_addr))) { ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__); dev_kfree_skb(skb); return NULL; } return skb; } static int ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) { struct ath5k_hw *ah = sc->ah; struct sk_buff *skb = bf->skb; struct ath5k_desc *ds; if (!skb) { skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr); if (!skb) return -ENOMEM; bf->skb = skb; } /* * Setup descriptors. For receive we always terminate * the descriptor list with a self-linked entry so we'll * not get overrun under high load (as can happen with a * 5212 when ANI processing enables PHY error frames). * * To insure the last descriptor is self-linked we create * each descriptor as self-linked and add it to the end. 
As * each additional descriptor is added the previous self-linked * entry is ``fixed'' naturally. This should be safe even * if DMA is happening. When processing RX interrupts we * never remove/process the last, self-linked, entry on the * descriptor list. This insures the hardware always has * someplace to write a new frame. */ ds = bf->desc; ds->ds_link = bf->daddr; /* link to self */ ds->ds_data = bf->skbaddr; ah->ah_setup_rx_desc(ah, ds, skb_tailroom(skb), /* buffer size */ 0); if (sc->rxlink != NULL) *sc->rxlink = bf->daddr; sc->rxlink = &ds->ds_link; return 0; } static int ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf, struct ath5k_txq *txq) { struct ath5k_hw *ah = sc->ah; struct ath5k_desc *ds = bf->desc; struct sk_buff *skb = bf->skb; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID; struct ieee80211_rate *rate; unsigned int mrr_rate[3], mrr_tries[3]; int i, ret; u16 hw_rate; u16 cts_rate = 0; u16 duration = 0; u8 rc_flags; flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK; /* XXX endianness */ bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); rate = ieee80211_get_tx_rate(sc->hw, info); if (info->flags & IEEE80211_TX_CTL_NO_ACK) flags |= AR5K_TXDESC_NOACK; rc_flags = info->control.rates[0].flags; hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ? rate->hw_value_short : rate->hw_value; pktlen = skb->len; /* FIXME: If we are in g mode and rate is a CCK rate * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta * from tx power (value is in dB units already) */ if (info->control.hw_key) { keyidx = info->control.hw_key->hw_key_idx; pktlen += info->control.hw_key->icv_len; } if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) { flags |= AR5K_TXDESC_RTSENA; cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value; duration = le16_to_cpu(ieee80211_rts_duration(sc->hw, sc->vif, pktlen, info)); } if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { flags |= AR5K_TXDESC_CTSENA; cts_rate = ieee80211_get_rts_cts_rate(sc->hw, info)->hw_value; duration = le16_to_cpu(ieee80211_ctstoself_duration(sc->hw, sc->vif, pktlen, info)); } ret = ah->ah_setup_tx_desc(ah, ds, pktlen, ieee80211_get_hdrlen_from_skb(skb), AR5K_PKT_TYPE_NORMAL, (sc->power_level * 2), hw_rate, info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags, cts_rate, duration); if (ret) goto err_unmap; memset(mrr_rate, 0, sizeof(mrr_rate)); memset(mrr_tries, 0, sizeof(mrr_tries)); for (i = 0; i < 3; i++) { rate = ieee80211_get_alt_retry_rate(sc->hw, info, i); if (!rate) break; mrr_rate[i] = rate->hw_value; mrr_tries[i] = info->control.rates[i + 1].count; } ah->ah_setup_mrr_tx_desc(ah, ds, mrr_rate[0], mrr_tries[0], mrr_rate[1], mrr_tries[1], mrr_rate[2], mrr_tries[2]); ds->ds_link = 0; ds->ds_data = bf->skbaddr; spin_lock_bh(&txq->lock); list_add_tail(&bf->list, &txq->q); sc->tx_stats[txq->qnum].len++; if (txq->link == NULL) /* is this first packet? 
*/ ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr); else /* no, so only link it */ *txq->link = bf->daddr; txq->link = &ds->ds_link; ath5k_hw_start_tx_dma(ah, txq->qnum); mmiowb(); spin_unlock_bh(&txq->lock); return 0; err_unmap: pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, PCI_DMA_TODEVICE); return ret; } /*******************\ * Descriptors setup * \*******************/ static int ath5k_desc_alloc(struct ath5k_softc *sc, struct pci_dev *pdev) { struct ath5k_desc *ds; struct ath5k_buf *bf; dma_addr_t da; unsigned int i; int ret; /* allocate descriptors */ sc->desc_len = sizeof(struct ath5k_desc) * (ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1); sc->desc = pci_alloc_consistent(pdev, sc->desc_len, &sc->desc_daddr); if (sc->desc == NULL) { ATH5K_ERR(sc, "can't allocate descriptors\n"); ret = -ENOMEM; goto err; } ds = sc->desc; da = sc->desc_daddr; ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n", ds, sc->desc_len, (unsigned long long)sc->desc_daddr); bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF, sizeof(struct ath5k_buf), GFP_KERNEL); if (bf == NULL) { ATH5K_ERR(sc, "can't allocate bufptr\n"); ret = -ENOMEM; goto err_free; } sc->bufptr = bf; INIT_LIST_HEAD(&sc->rxbuf); for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) { bf->desc = ds; bf->daddr = da; list_add_tail(&bf->list, &sc->rxbuf); } INIT_LIST_HEAD(&sc->txbuf); sc->txbuf_len = ATH_TXBUF; for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, da += sizeof(*ds)) { bf->desc = ds; bf->daddr = da; list_add_tail(&bf->list, &sc->txbuf); } /* beacon buffer */ bf->desc = ds; bf->daddr = da; sc->bbuf = bf; return 0; err_free: pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr); err: sc->desc = NULL; return ret; } static void ath5k_desc_free(struct ath5k_softc *sc, struct pci_dev *pdev) { struct ath5k_buf *bf; ath5k_txbuf_free(sc, sc->bbuf); list_for_each_entry(bf, &sc->txbuf, list) ath5k_txbuf_free(sc, bf); list_for_each_entry(bf, &sc->rxbuf, list) ath5k_rxbuf_free(sc, bf); /* Free memory associated with all descriptors */ pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr); kfree(sc->bufptr); sc->bufptr = NULL; } /**************\ * Queues setup * \**************/ static struct ath5k_txq * ath5k_txq_setup(struct ath5k_softc *sc, int qtype, int subtype) { struct ath5k_hw *ah = sc->ah; struct ath5k_txq *txq; struct ath5k_txq_info qi = { .tqi_subtype = subtype, .tqi_aifs = AR5K_TXQ_USEDEFAULT, .tqi_cw_min = AR5K_TXQ_USEDEFAULT, .tqi_cw_max = AR5K_TXQ_USEDEFAULT }; int qnum; /* * Enable interrupts only for EOL and DESC conditions. * We mark tx descriptors to receive a DESC interrupt * when a tx queue gets deep; otherwise waiting for the * EOL to reap descriptors. Note that this is done to * reduce interrupt load and this only defers reaping * descriptors, never transmitting frames. Aside from * reducing interrupts this also permits more concurrency. * The only potential downside is if the tx queue backs * up in which case the top half of the kernel may backup * due to a lack of tx descriptors. 
*/ qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE | AR5K_TXQ_FLAG_TXDESCINT_ENABLE; qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi); if (qnum < 0) { /* * NB: don't print a message, this happens * normally on parts with too few tx queues */ return ERR_PTR(qnum); } if (qnum >= ARRAY_SIZE(sc->txqs)) { ATH5K_ERR(sc, "hw qnum %u out of range, max %tu!\n", qnum, ARRAY_SIZE(sc->txqs)); ath5k_hw_release_tx_queue(ah, qnum); return ERR_PTR(-EINVAL); } txq = &sc->txqs[qnum]; if (!txq->setup) { txq->qnum = qnum; txq->link = NULL; INIT_LIST_HEAD(&txq->q); spin_lock_init(&txq->lock); txq->setup = true; } return &sc->txqs[qnum]; } static int ath5k_beaconq_setup(struct ath5k_hw *ah) { struct ath5k_txq_info qi = { .tqi_aifs = AR5K_TXQ_USEDEFAULT, .tqi_cw_min = AR5K_TXQ_USEDEFAULT, .tqi_cw_max = AR5K_TXQ_USEDEFAULT, /* NB: for dynamic turbo, don't enable any other interrupts */ .tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE }; return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi); } static int ath5k_beaconq_config(struct ath5k_softc *sc) { struct ath5k_hw *ah = sc->ah; struct ath5k_txq_info qi; int ret; ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi); if (ret) return ret; if (sc->opmode == NL80211_IFTYPE_AP || sc->opmode == NL80211_IFTYPE_MESH_POINT) { /* * Always burst out beacon and CAB traffic * (aifs = cwmin = cwmax = 0) */ qi.tqi_aifs = 0; qi.tqi_cw_min = 0; qi.tqi_cw_max = 0; } else if (sc->opmode == NL80211_IFTYPE_ADHOC) { /* * Adhoc mode; backoff between 0 and (2 * cw_min). */ qi.tqi_aifs = 0; qi.tqi_cw_min = 0; qi.tqi_cw_max = 2 * ah->ah_cw_min; } ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n", qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max); ret = ath5k_hw_set_tx_queueprops(ah, sc->bhalq, &qi); if (ret) { ATH5K_ERR(sc, "%s: unable to update parameters for beacon " "hardware queue!\n", __func__); return ret; } return ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */; } static void ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq) { struct ath5k_buf *bf, *bf0; /* * NB: this assumes output has been stopped and * we do not need to block ath5k_tx_tasklet */ spin_lock_bh(&txq->lock); list_for_each_entry_safe(bf, bf0, &txq->q, list) { ath5k_debug_printtxbuf(sc, bf); ath5k_txbuf_free(sc, bf); spin_lock_bh(&sc->txbuflock); sc->tx_stats[txq->qnum].len--; list_move_tail(&bf->list, &sc->txbuf); sc->txbuf_len++; spin_unlock_bh(&sc->txbuflock); } txq->link = NULL; spin_unlock_bh(&txq->lock); } /* * Drain the transmit queues and reclaim resources. 
*/ static void ath5k_txq_cleanup(struct ath5k_softc *sc) { struct ath5k_hw *ah = sc->ah; unsigned int i; /* XXX return value */ if (likely(!test_bit(ATH_STAT_INVALID, sc->status))) { /* don't touch the hardware if marked invalid */ ath5k_hw_stop_tx_dma(ah, sc->bhalq); ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "beacon queue %x\n", ath5k_hw_get_txdp(ah, sc->bhalq)); for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) if (sc->txqs[i].setup) { ath5k_hw_stop_tx_dma(ah, sc->txqs[i].qnum); ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "txq [%u] %x, " "link %p\n", sc->txqs[i].qnum, ath5k_hw_get_txdp(ah, sc->txqs[i].qnum), sc->txqs[i].link); } } ieee80211_wake_queues(sc->hw); /* XXX move to callers */ for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) if (sc->txqs[i].setup) ath5k_txq_drainq(sc, &sc->txqs[i]); } static void ath5k_txq_release(struct ath5k_softc *sc) { struct ath5k_txq *txq = sc->txqs; unsigned int i; for (i = 0; i < ARRAY_SIZE(sc->txqs); i++, txq++) if (txq->setup) { ath5k_hw_release_tx_queue(sc->ah, txq->qnum); txq->setup = false; } } /*************\ * RX Handling * \*************/ /* * Enable the receive h/w following a reset. */ static int ath5k_rx_start(struct ath5k_softc *sc) { struct ath5k_hw *ah = sc->ah; struct ath5k_buf *bf; int ret; sc->rxbufsize = roundup(IEEE80211_MAX_LEN, sc->common.cachelsz); ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rxbufsize %u\n", sc->common.cachelsz, sc->rxbufsize); spin_lock_bh(&sc->rxbuflock); sc->rxlink = NULL; list_for_each_entry(bf, &sc->rxbuf, list) { ret = ath5k_rxbuf_setup(sc, bf); if (ret != 0) { spin_unlock_bh(&sc->rxbuflock); goto err; } } bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list); ath5k_hw_set_rxdp(ah, bf->daddr); spin_unlock_bh(&sc->rxbuflock); ath5k_hw_start_rx_dma(ah); /* enable recv descriptors */ ath5k_mode_setup(sc); /* set filters, etc. */ ath5k_hw_start_rx_pcu(ah); /* re-enable PCU/DMA engine */ return 0; err: return ret; } /* * Disable the receive h/w in preparation for a reset. */ static void ath5k_rx_stop(struct ath5k_softc *sc) { struct ath5k_hw *ah = sc->ah; ath5k_hw_stop_rx_pcu(ah); /* disable PCU */ ath5k_hw_set_rx_filter(ah, 0); /* clear recv filter */ ath5k_hw_stop_rx_dma(ah); /* disable DMA engine */ ath5k_debug_printrxbuffs(sc, ah); sc->rxlink = NULL; /* just in case */ } static unsigned int ath5k_rx_decrypted(struct ath5k_softc *sc, struct ath5k_desc *ds, struct sk_buff *skb, struct ath5k_rx_status *rs) { struct ieee80211_hdr *hdr = (void *)skb->data; unsigned int keyix, hlen; if (!(rs->rs_status & AR5K_RXERR_DECRYPT) && rs->rs_keyix != AR5K_RXKEYIX_INVALID) return RX_FLAG_DECRYPTED; /* Apparently when a default key is used to decrypt the packet the hw does not set the index used to decrypt. In such cases get the index from the packet. */ hlen = ieee80211_hdrlen(hdr->frame_control); if (ieee80211_has_protected(hdr->frame_control) && !(rs->rs_status & AR5K_RXERR_DECRYPT) && skb->len >= hlen + 4) { keyix = skb->data[hlen + 3] >> 6; if (test_bit(keyix, sc->keymap)) return RX_FLAG_DECRYPTED; } return 0; } static void ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb, struct ieee80211_rx_status *rxs) { u64 tsf, bc_tstamp; u32 hw_tu; struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; if (ieee80211_is_beacon(mgmt->frame_control) && le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS && memcmp(mgmt->bssid, sc->ah->ah_bssid, ETH_ALEN) == 0) { /* * Received an IBSS beacon with the same BSSID. Hardware *must* * have updated the local TSF. We have to work around various * hardware bugs, though... 
*/ tsf = ath5k_hw_get_tsf64(sc->ah); bc_tstamp = le64_to_cpu(mgmt->u.beacon.timestamp); hw_tu = TSF_TO_TU(tsf); ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "beacon %llx mactime %llx (diff %lld) tsf now %llx\n", (unsigned long long)bc_tstamp, (unsigned long long)rxs->mactime, (unsigned long long)(rxs->mactime - bc_tstamp), (unsigned long long)tsf); /* * Sometimes the HW will give us a wrong tstamp in the rx * status, causing the timestamp extension to go wrong. * (This seems to happen especially with beacon frames bigger * than 78 byte (incl. FCS)) * But we know that the receive timestamp must be later than the * timestamp of the beacon since HW must have synced to that. * * NOTE: here we assume mactime to be after the frame was * received, not like mac80211 which defines it at the start. */ if (bc_tstamp > rxs->mactime) { ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "fixing mactime from %llx to %llx\n", (unsigned long long)rxs->mactime, (unsigned long long)tsf); rxs->mactime = tsf; } /* * Local TSF might have moved higher than our beacon timers, * in that case we have to update them to continue sending * beacons. This also takes care of synchronizing beacon sending * times with other stations. */ if (hw_tu >= sc->nexttbtt) ath5k_beacon_update_timers(sc, bc_tstamp); } } static void ath5k_tasklet_rx(unsigned long data) { struct ieee80211_rx_status *rxs; struct ath5k_rx_status rs = {}; struct sk_buff *skb, *next_skb; dma_addr_t next_skb_addr; struct ath5k_softc *sc = (void *)data; struct ath5k_buf *bf; struct ath5k_desc *ds; int ret; int hdrlen; int padsize; int rx_flag; spin_lock(&sc->rxbuflock); if (list_empty(&sc->rxbuf)) { ATH5K_WARN(sc, "empty rx buf pool\n"); goto unlock; } do { rx_flag = 0; bf = list_first_entry(&sc->rxbuf, struct ath5k_buf, list); BUG_ON(bf->skb == NULL); skb = bf->skb; ds = bf->desc; /* bail if HW is still using self-linked descriptor */ if (ath5k_hw_get_rxdp(sc->ah) == bf->daddr) break; ret = sc->ah->ah_proc_rx_desc(sc->ah, ds, &rs); if (unlikely(ret == -EINPROGRESS)) break; else if (unlikely(ret)) { ATH5K_ERR(sc, "error in processing rx descriptor\n"); spin_unlock(&sc->rxbuflock); return; } if (unlikely(rs.rs_more)) { ATH5K_WARN(sc, "unsupported jumbo\n"); goto next; } if (unlikely(rs.rs_status)) { if (rs.rs_status & AR5K_RXERR_PHY) goto next; if (rs.rs_status & AR5K_RXERR_DECRYPT) { /* * Decrypt error. If the error occurred * because there was no hardware key, then * let the frame through so the upper layers * can process it. This is necessary for 5210 * parts which have no way to setup a ``clear'' * key cache entry. * * XXX do key cache faulting */ if (rs.rs_keyix == AR5K_RXKEYIX_INVALID && !(rs.rs_status & AR5K_RXERR_CRC)) goto accept; } if (rs.rs_status & AR5K_RXERR_MIC) { rx_flag |= RX_FLAG_MMIC_ERROR; goto accept; } /* let crypto-error packets fall through in MNTR */ if ((rs.rs_status & ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) || sc->opmode != NL80211_IFTYPE_MONITOR) goto next; } accept: next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr); /* * If we can't replace bf->skb with a new skb under memory * pressure, just skip this packet */ if (!next_skb) goto next; pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize, PCI_DMA_FROMDEVICE); skb_put(skb, rs.rs_datalen); /* The MAC header is padded to have 32-bit boundary if the * packet payload is non-zero. 
The general calculation for * padsize would take into account odd header lengths: * padsize = (4 - hdrlen % 4) % 4; However, since only * even-length headers are used, padding can only be 0 or 2 * bytes and we can optimize this a bit. In addition, we must * not try to remove padding from short control frames that do * not have payload. */ hdrlen = ieee80211_get_hdrlen_from_skb(skb); padsize = ath5k_pad_size(hdrlen); if (padsize) { memmove(skb->data + padsize, skb->data, hdrlen); skb_pull(skb, padsize); } rxs = IEEE80211_SKB_RXCB(skb); /* * always extend the mac timestamp, since this information is * also needed for proper IBSS merging. * * XXX: it might be too late to do it here, since rs_tstamp is * 15bit only. that means TSF extension has to be done within * 32768usec (about 32ms). it might be necessary to move this to * the interrupt handler, like it is done in madwifi. * * Unfortunately we don't know when the hardware takes the rx * timestamp (beginning of phy frame, data frame, end of rx?). * The only thing we know is that it is hardware specific... * On AR5213 it seems the rx timestamp is at the end of the * frame, but i'm not sure. * * NOTE: mac80211 defines mactime at the beginning of the first * data symbol. Since we don't have any time references it's * impossible to comply to that. This affects IBSS merge only * right now, so it's not too bad... */ rxs->mactime = ath5k_extend_tsf(sc->ah, rs.rs_tstamp); rxs->flag = rx_flag | RX_FLAG_TSFT; rxs->freq = sc->curchan->center_freq; rxs->band = sc->curband->band; rxs->noise = sc->ah->ah_noise_floor; rxs->signal = rxs->noise + rs.rs_rssi; /* An rssi of 35 indicates you should be able use * 54 Mbps reliably. A more elaborate scheme can be used * here but it requires a map of SNR/throughput for each * possible mode used */ rxs->qual = rs.rs_rssi * 100 / 35; /* rssi can be more than 35 though, anything above that * should be considered at 100% */ if (rxs->qual > 100) rxs->qual = 100; rxs->antenna = rs.rs_antenna; rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate); rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs); if (rxs->rate_idx >= 0 && rs.rs_rate == sc->curband->bitrates[rxs->rate_idx].hw_value_short) rxs->flag |= RX_FLAG_SHORTPRE; ath5k_debug_dump_skb(sc, skb, "RX ", 0); /* check beacons in IBSS mode */ if (sc->opmode == NL80211_IFTYPE_ADHOC) ath5k_check_ibss_tsf(sc, skb, rxs); ieee80211_rx(sc->hw, skb); bf->skb = next_skb; bf->skbaddr = next_skb_addr; next: list_move_tail(&bf->list, &sc->rxbuf); } while (ath5k_rxbuf_setup(sc, bf) == 0); unlock: spin_unlock(&sc->rxbuflock); } /*************\ * TX Handling * \*************/ static void ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq) { struct ath5k_tx_status ts = {}; struct ath5k_buf *bf, *bf0; struct ath5k_desc *ds; struct sk_buff *skb; struct ieee80211_tx_info *info; int i, ret; spin_lock(&txq->lock); list_for_each_entry_safe(bf, bf0, &txq->q, list) { ds = bf->desc; ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts); if (unlikely(ret == -EINPROGRESS)) break; else if (unlikely(ret)) { ATH5K_ERR(sc, "error %d while processing queue %u\n", ret, txq->qnum); break; } skb = bf->skb; info = IEEE80211_SKB_CB(skb); bf->skb = NULL; pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, PCI_DMA_TODEVICE); ieee80211_tx_info_clear_status(info); for (i = 0; i < 4; i++) { struct ieee80211_tx_rate *r = &info->status.rates[i]; if (ts.ts_rate[i]) { r->idx = ath5k_hw_to_driver_rix(sc, ts.ts_rate[i]); r->count = ts.ts_retry[i]; } else { r->idx = -1; r->count = 0; } } /* count the successful 
attempt as well */ info->status.rates[ts.ts_final_idx].count++; if (unlikely(ts.ts_status)) { sc->ll_stats.dot11ACKFailureCount++; if (ts.ts_status & AR5K_TXERR_FILT) info->flags |= IEEE80211_TX_STAT_TX_FILTERED; } else { info->flags |= IEEE80211_TX_STAT_ACK; info->status.ack_signal = ts.ts_rssi; } ieee80211_tx_status(sc->hw, skb); sc->tx_stats[txq->qnum].count++; spin_lock(&sc->txbuflock); sc->tx_stats[txq->qnum].len--; list_move_tail(&bf->list, &sc->txbuf); sc->txbuf_len++; spin_unlock(&sc->txbuflock); } if (likely(list_empty(&txq->q))) txq->link = NULL; spin_unlock(&txq->lock); if (sc->txbuf_len > ATH_TXBUF / 5) ieee80211_wake_queues(sc->hw); } static void ath5k_tasklet_tx(unsigned long data) { int i; struct ath5k_softc *sc = (void *)data; for (i = 0; i < AR5K_NUM_TX_QUEUES; i++) if (sc->txqs[i].setup && (sc->ah->ah_txq_isr & BIT(i))) ath5k_tx_processq(sc, &sc->txqs[i]); } /*****************\ * Beacon handling * \*****************/ /* * Setup the beacon frame for transmit. */ static int ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) { struct sk_buff *skb = bf->skb; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ath5k_hw *ah = sc->ah; struct ath5k_desc *ds; int ret = 0; u8 antenna; u32 flags; bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] " "skbaddr %llx\n", skb, skb->data, skb->len, (unsigned long long)bf->skbaddr); if (pci_dma_mapping_error(sc->pdev, bf->skbaddr)) { ATH5K_ERR(sc, "beacon DMA mapping failed\n"); return -EIO; } ds = bf->desc; antenna = ah->ah_tx_ant; flags = AR5K_TXDESC_NOACK; if (sc->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) { ds->ds_link = bf->daddr; /* self-linked */ flags |= AR5K_TXDESC_VEOL; } else ds->ds_link = 0; /* * If we use multiple antennas on AP and use * the Sectored AP scenario, switch antenna every * 4 beacons to make sure everybody hears our AP. * When a client tries to associate, hw will keep * track of the tx antenna to be used for this client * automatically, based on ACKed packets. * * Note: AP still listens and transmits RTS on the * default antenna which is supposed to be an omni. * * Note2: On sectored scenarios it's possible to have * multiple antennas (1omni -the default- and 14 sectors) * so if we choose to actually support this mode we need * to allow user to set how many antennas we have and tweak * the code below to send beacons on all of them. */ if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP) antenna = sc->bsent & 4 ? 2 : 1; /* FIXME: If we are in g mode and rate is a CCK rate * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta * from tx power (value is in dB units already) */ ds->ds_data = bf->skbaddr; ret = ah->ah_setup_tx_desc(ah, ds, skb->len, ieee80211_get_hdrlen_from_skb(skb), AR5K_PKT_TYPE_BEACON, (sc->power_level * 2), ieee80211_get_tx_rate(sc->hw, info)->hw_value, 1, AR5K_TXKEYIX_INVALID, antenna, flags, 0, 0); if (ret) goto err_unmap; return 0; err_unmap: pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, PCI_DMA_TODEVICE); return ret; }
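/*
 * Editor's note -- illustrative sketch, not part of the original driver:
 * the sector-AP branch in ath5k_beacon_setup() above alternates the beacon
 * antenna between 1 and 2 once every four beacons by testing bit 2 of the
 * sent-beacon counter. A hypothetical helper spelling that rule out (the
 * function name is made up for this example):
 */
static inline u8 ath5k_example_sector_antenna(unsigned int bsent)
{
        /* bit 2 toggles once every 4 beacons:
         * beacons 0-3 -> antenna 1, beacons 4-7 -> antenna 2, ... */
        return (bsent & 4) ? 2 : 1;
}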
/* * Transmit a beacon frame at SWBA. Dynamic updates to the * frame contents are done as needed and the slot time is * also adjusted based on current state. * * This is called from software irq context (beacontq or restq * tasklets) or user context from ath5k_beacon_config. */ static void ath5k_beacon_send(struct ath5k_softc *sc) { struct ath5k_buf *bf = sc->bbuf; struct ath5k_hw *ah = sc->ah; struct sk_buff *skb; ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "in beacon_send\n"); if (unlikely(bf->skb == NULL || sc->opmode == NL80211_IFTYPE_STATION || sc->opmode == NL80211_IFTYPE_MONITOR)) { ATH5K_WARN(sc, "bf=%p bf_skb=%p\n", bf, bf ? bf->skb : NULL); return; } /* * Check if the previous beacon has gone out. If * not, don't try to post another, skip this * period and wait for the next. Missed beacons * indicate a problem and should not occur. If we * miss too many consecutive beacons reset the device. */ if (unlikely(ath5k_hw_num_tx_pending(ah, sc->bhalq) != 0)) { sc->bmisscount++; ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "missed %u consecutive beacons\n", sc->bmisscount); if (sc->bmisscount > 10) { /* NB: 10 is a guess */ ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "stuck beacon time (%u missed)\n", sc->bmisscount); tasklet_schedule(&sc->restq); } return; } if (unlikely(sc->bmisscount != 0)) { ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "resume beacon xmit after %u misses\n", sc->bmisscount); sc->bmisscount = 0; } /* * Stop any current dma and put the new frame on the queue. * This should never fail since we check above that no frames * are still pending on the queue. */ if (unlikely(ath5k_hw_stop_tx_dma(ah, sc->bhalq))) { ATH5K_WARN(sc, "beacon queue %u didn't start/stop ?\n", sc->bhalq); /* NB: hw still stops DMA, so proceed */ } /* refresh the beacon for AP mode */ if (sc->opmode == NL80211_IFTYPE_AP) ath5k_beacon_update(sc->hw, sc->vif); ath5k_hw_set_txdp(ah, sc->bhalq, bf->daddr); ath5k_hw_start_tx_dma(ah, sc->bhalq); ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n", sc->bhalq, (unsigned long long)bf->daddr, bf->desc); skb = ieee80211_get_buffered_bc(sc->hw, sc->vif); while (skb) { ath5k_tx_queue(sc->hw, skb, sc->cabq); skb = ieee80211_get_buffered_bc(sc->hw, sc->vif); } sc->bsent++; } /** * ath5k_beacon_update_timers - update beacon timers * * @sc: struct ath5k_softc pointer we are operating on * @bc_tsf: the timestamp of the beacon. 0 to reset the TSF. -1 to perform a * beacon timer update based on the current HW TSF. * * Calculate the next target beacon transmit time (TBTT) based on the timestamp * of a received beacon or the current local hardware TSF and write it to the * beacon timer registers. * * This is called in a variety of situations, e.g. when a beacon is received, * when a TSF update has been detected, but also when a new IBSS is created or * when we otherwise know we have to update the timers, but we keep it in this * function to have it all together in one place. */ static void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf) { struct ath5k_hw *ah = sc->ah; u32 nexttbtt, intval, hw_tu, bc_tu; u64 hw_tsf; intval = sc->bintval & AR5K_BEACON_PERIOD; if (WARN_ON(!intval)) return; /* beacon TSF converted to TU */ bc_tu = TSF_TO_TU(bc_tsf); /* current TSF converted to TU */ hw_tsf = ath5k_hw_get_tsf64(ah); hw_tu = TSF_TO_TU(hw_tsf); #define FUDGE 3 /* we use FUDGE to make sure the next TBTT is ahead of the current TU */ if (bc_tsf == -1) { /* * no beacons received, called internally. * just need to refresh timers based on HW TSF. */ nexttbtt = roundup(hw_tu + FUDGE, intval); } else if (bc_tsf == 0) { /* * no beacon received, probably called by ath5k_reset_tsf(). * reset TSF to start with 0. */ nexttbtt = intval; intval |= AR5K_BEACON_RESET_TSF; } else if (bc_tsf > hw_tsf) { /* * beacon received, SW merge happened but HW TSF not yet updated. * not possible to reconfigure timers yet, but next time we * receive a beacon with the same BSSID, the hardware will * automatically update the TSF and then we need to reconfigure * the timers. */ ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "need to wait for HW TSF sync\n"); return; } else { /* * most important case for beacon synchronization between STAs. * * beacon received and HW TSF has been already updated by HW. * update next TBTT based on the TSF of the beacon, but make * sure it is ahead of our local TSF timer. */ nexttbtt = bc_tu + roundup(hw_tu + FUDGE - bc_tu, intval); } #undef FUDGE sc->nexttbtt = nexttbtt; intval |= AR5K_BEACON_ENA; ath5k_hw_init_beacon(ah, nexttbtt, intval); /* * debugging output last in order to preserve the time critical aspect * of this function */ if (bc_tsf == -1) ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "reconfigured timers based on HW TSF\n"); else if (bc_tsf == 0) ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "reset HW TSF and timers\n"); else ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "updated timers based on beacon TSF\n"); ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "bc_tsf %llx hw_tsf %llx bc_tu %u hw_tu %u nexttbtt %u\n", (unsigned long long) bc_tsf, (unsigned long long) hw_tsf, bc_tu, hw_tu, nexttbtt); ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, "intval %u %s %s\n", intval & AR5K_BEACON_PERIOD, intval & AR5K_BEACON_ENA ? "AR5K_BEACON_ENA" : "", intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : ""); } /** * ath5k_beacon_config - Configure the beacon queues and interrupts * * @sc: struct ath5k_softc pointer we are operating on * * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA * interrupts to detect TSF updates only. */ static void ath5k_beacon_config(struct ath5k_softc *sc) { struct ath5k_hw *ah = sc->ah; unsigned long flags; spin_lock_irqsave(&sc->block, flags); sc->bmisscount = 0; sc->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA); if (sc->enable_beacon) { /* * In IBSS mode we use a self-linked tx descriptor and let the * hardware send the beacons automatically. We have to load it * only once here. * We use the SWBA interrupt only to keep track of the beacon * timers in order to detect automatic TSF updates. */ ath5k_beaconq_config(sc); sc->imask |= AR5K_INT_SWBA; if (sc->opmode == NL80211_IFTYPE_ADHOC) { if (ath5k_hw_hasveol(ah)) ath5k_beacon_send(sc); } else ath5k_beacon_update_timers(sc, -1); } else { ath5k_hw_stop_tx_dma(sc->ah, sc->bhalq); } ath5k_hw_set_imr(ah, sc->imask); mmiowb(); spin_unlock_irqrestore(&sc->block, flags); } static void ath5k_tasklet_beacon(unsigned long data) { struct ath5k_softc *sc = (struct ath5k_softc *) data; /* * Software beacon alert--time to send a beacon. * * In IBSS mode we use this interrupt just to * keep track of the next TBTT (target beacon * transmission time) in order to detect whether * automatic TSF updates happened. */ if (sc->opmode == NL80211_IFTYPE_ADHOC) { /* XXX: only if VEOL supported */ u64 tsf = ath5k_hw_get_tsf64(sc->ah); sc->nexttbtt += sc->bintval; ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "SWBA nexttbtt: %x hw_tu: %x " "TSF: %llx\n", sc->nexttbtt, TSF_TO_TU(tsf), (unsigned long long) tsf); } else { spin_lock(&sc->block); ath5k_beacon_send(sc); spin_unlock(&sc->block); } }
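/*
 * Editor's note -- illustrative sketch, not part of the original driver:
 * the beacon timer logic above works in TU (1 TU = 1024 us), so TSF_TO_TU
 * is assumed here to be a right shift by 10. A simplified, hypothetical
 * version of the "next TBTT strictly ahead of now" calculation that
 * ath5k_beacon_update_timers() does for the bc_tsf == -1 case:
 */
static inline u32 ath5k_example_next_tbtt(u64 hw_tsf, u32 intval)
{
        u32 hw_tu = (u32)(hw_tsf >> 10);        /* microseconds -> TU */

        /* round up to the next beacon-interval boundary, with the same
         * small fudge the driver uses so the result stays ahead of now */
        return roundup(hw_tu + 3, intval);
}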
/********************\ * Interrupt handling * \********************/ static int ath5k_init(struct ath5k_softc *sc) { struct ath5k_hw *ah = sc->ah; int ret, i; mutex_lock(&sc->lock); ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode); /* * Stop anything previously setup. This is safe * no matter whether this is the first time through or not. */ ath5k_stop_locked(sc); /* Set PHY calibration interval */ ah->ah_cal_intval = ath5k_calinterval; /* * The basic interface to setting the hardware in a good * state is ``reset''. On return the hardware is known to * be powered up and with interrupts disabled. This must * be followed by initialization of the appropriate bits * and then setup of the interrupt mask. */ sc->curchan = sc->hw->conf.channel; sc->curband = &sc->sbands[sc->curchan->band]; sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL | AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL | AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_SWI; ret = ath5k_reset(sc, NULL); if (ret) goto done; ath5k_rfkill_hw_start(ah); /* * Reset the key cache since some parts do not reset the * contents on initial power up or resume from suspend. */ for (i = 0; i < AR5K_KEYTABLE_SIZE; i++) ath5k_hw_reset_key(ah, i); /* Set ack to be sent at low bit-rates */ ath5k_hw_set_ack_bitrate_high(ah, false); ret = 0; done: mmiowb(); mutex_unlock(&sc->lock); return ret; } static int ath5k_stop_locked(struct ath5k_softc *sc) { struct ath5k_hw *ah = sc->ah; ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "invalid %u\n", test_bit(ATH_STAT_INVALID, sc->status)); /* * Shutdown the hardware and driver: * stop output from above * disable interrupts * turn off timers * turn off the radio * clear transmit machinery * clear receive machinery * drain and release tx queues * reclaim beacon resources * power down hardware * * Note that some of this work is not possible if the * hardware is gone (invalid). */ ieee80211_stop_queues(sc->hw); if (!test_bit(ATH_STAT_INVALID, sc->status)) { ath5k_led_off(sc); ath5k_hw_set_imr(ah, 0); synchronize_irq(sc->pdev->irq); } ath5k_txq_cleanup(sc); if (!test_bit(ATH_STAT_INVALID, sc->status)) { ath5k_rx_stop(sc); ath5k_hw_phy_disable(ah); } else sc->rxlink = NULL; return 0; } /* * Stop the device, grabbing the top-level lock to protect * against concurrent entry through ath5k_init (which can happen * if another thread does a system call and the thread doing the * stop is preempted). */ static int ath5k_stop_hw(struct ath5k_softc *sc) { int ret; mutex_lock(&sc->lock); ret = ath5k_stop_locked(sc); if (ret == 0 && !test_bit(ATH_STAT_INVALID, sc->status)) { /* * Don't set the card in full sleep mode! * * a) When the device is in this state it must be carefully * woken up or references to registers in the PCI clock * domain may freeze the bus (and system). This varies * by chip and is mostly an issue with newer parts * (madwifi sources mentioned srev >= 0x78) that go to * sleep more quickly. * * b) On older chips full sleep results in a weird behaviour * during wakeup. I tested various cards with srev < 0x78 * and they don't wake up after module reload, a second * module reload is needed to bring the card up again. * * Until we figure out what's going on don't enable * full chip reset on any chip (this is what Legacy HAL * and Sam's HAL do anyway). Instead perform a full reset * on the device (same as initial state after attach) and * leave it idle (keep MAC/BB on warm reset) */ ret = ath5k_hw_on_hold(sc->ah); ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "putting device to sleep\n"); } ath5k_txbuf_free(sc, sc->bbuf); mmiowb(); mutex_unlock(&sc->lock); tasklet_kill(&sc->rxtq); tasklet_kill(&sc->txtq); tasklet_kill(&sc->restq); tasklet_kill(&sc->calib); tasklet_kill(&sc->beacontq); ath5k_rfkill_hw_stop(sc->ah); return ret; } static irqreturn_t ath5k_intr(int irq, void *dev_id) { struct ath5k_softc *sc = dev_id; struct ath5k_hw *ah = sc->ah; enum ath5k_int status; unsigned int counter = 1000; if (unlikely(test_bit(ATH_STAT_INVALID, sc->status) || !ath5k_hw_is_intr_pending(ah))) return IRQ_NONE; do { ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */ ATH5K_DBG(sc, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n", status, sc->imask); if (unlikely(status & AR5K_INT_FATAL)) { /* * Fatal errors are unrecoverable. * Typically these are caused by DMA errors. */ tasklet_schedule(&sc->restq); } else if (unlikely(status & AR5K_INT_RXORN)) { tasklet_schedule(&sc->restq); } else { if (status & AR5K_INT_SWBA) { tasklet_hi_schedule(&sc->beacontq); } if (status & AR5K_INT_RXEOL) { /* * NB: the hardware should re-read the link when * RXE bit is written, but it doesn't work at * least on older hardware revs. */ sc->rxlink = NULL; } if (status & AR5K_INT_TXURN) { /* bump tx trigger level */ ath5k_hw_update_tx_triglevel(ah, true); } if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR)) tasklet_schedule(&sc->rxtq); if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC | AR5K_INT_TXERR | AR5K_INT_TXEOL)) tasklet_schedule(&sc->txtq); if (status & AR5K_INT_BMISS) { /* TODO */ } if (status & AR5K_INT_SWI) { tasklet_schedule(&sc->calib); } if (status & AR5K_INT_MIB) { /* * These stats are also used for ANI I think, * so how about updating them more often? */ ath5k_hw_update_mib_counters(ah, &sc->ll_stats); } if (status & AR5K_INT_GPIO) tasklet_schedule(&sc->rf_kill.toggleq); } } while (ath5k_hw_is_intr_pending(ah) && --counter > 0); if (unlikely(!counter)) ATH5K_WARN(sc, "too many interrupts, giving up for now\n"); ath5k_hw_calibration_poll(ah); return IRQ_HANDLED; } static void ath5k_tasklet_reset(unsigned long data) { struct ath5k_softc *sc = (void *)data; ath5k_reset_wake(sc); } /* * Periodically recalibrate the PHY to account * for temperature/environment changes. */ static void ath5k_tasklet_calibrate(unsigned long data) { struct ath5k_softc *sc = (void *)data; struct ath5k_hw *ah = sc->ah; /* Only full calibration for now */ if (ah->ah_swi_mask != AR5K_SWI_FULL_CALIBRATION) return; /* Stop queues so that calibration * doesn't interfere with tx */ ieee80211_stop_queues(sc->hw); ATH5K_DBG(sc, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n", ieee80211_frequency_to_channel(sc->curchan->center_freq), sc->curchan->hw_value); if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) { /* * Rfgain is out of bounds, reset the chip * to load new gain values.
*/ ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n"); ath5k_reset_wake(sc); } if (ath5k_hw_phy_calibrate(ah, sc->curchan)) ATH5K_ERR(sc, "calibration of channel %u failed\n", ieee80211_frequency_to_channel( sc->curchan->center_freq)); ah->ah_swi_mask = 0; /* Wake queues */ ieee80211_wake_queues(sc->hw); } /********************\ * Mac80211 functions * \********************/ static int ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb) { struct ath5k_softc *sc = hw->priv; return ath5k_tx_queue(hw, skb, sc->txq); } static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, struct ath5k_txq *txq) { struct ath5k_softc *sc = hw->priv; struct ath5k_buf *bf; unsigned long flags; int hdrlen; int padsize; ath5k_debug_dump_skb(sc, skb, "TX ", 1); if (sc->opmode == NL80211_IFTYPE_MONITOR) ATH5K_DBG(sc, ATH5K_DEBUG_XMIT, "tx in monitor (scan?)\n"); /* * the hardware expects the header padded to 4 byte boundaries * if this is not the case we add the padding after the header */ hdrlen = ieee80211_get_hdrlen_from_skb(skb); padsize = ath5k_pad_size(hdrlen); if (padsize) { if (skb_headroom(skb) < padsize) { ATH5K_ERR(sc, "tx hdrlen not %%4: %d not enough" " headroom to pad %d\n", hdrlen, padsize); goto drop_packet; } skb_push(skb, padsize); memmove(skb->data, skb->data+padsize, hdrlen); } spin_lock_irqsave(&sc->txbuflock, flags); if (list_empty(&sc->txbuf)) { ATH5K_ERR(sc, "no further txbuf available, dropping packet\n"); spin_unlock_irqrestore(&sc->txbuflock, flags); ieee80211_stop_queue(hw, skb_get_queue_mapping(skb)); goto drop_packet; } bf = list_first_entry(&sc->txbuf, struct ath5k_buf, list); list_del(&bf->list); sc->txbuf_len--; if (list_empty(&sc->txbuf)) ieee80211_stop_queues(hw); spin_unlock_irqrestore(&sc->txbuflock, flags); bf->skb = skb; if (ath5k_txbuf_setup(sc, bf, txq)) { bf->skb = NULL; spin_lock_irqsave(&sc->txbuflock, flags); list_add_tail(&bf->list, &sc->txbuf); sc->txbuf_len++; spin_unlock_irqrestore(&sc->txbuflock, flags); goto drop_packet; } return NETDEV_TX_OK; drop_packet: dev_kfree_skb_any(skb); return NETDEV_TX_OK; } /* * Reset the hardware. If chan is not NULL, then also pause rx/tx * and change to the given channel. */ static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan) { struct ath5k_hw *ah = sc->ah; int ret; ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n"); if (chan) { ath5k_hw_set_imr(ah, 0); ath5k_txq_cleanup(sc); ath5k_rx_stop(sc); sc->curchan = chan; sc->curband = &sc->sbands[chan->band]; } ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL); if (ret) { ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret); goto err; } ret = ath5k_rx_start(sc); if (ret) { ATH5K_ERR(sc, "can't start recv logic\n"); goto err; } /* * Change channels and update the h/w rate map if we're switching; * e.g. 11a to 11b/g. * * We may be doing a reset in response to an ioctl that changes the * channel so update any state that might change as a result. * * XXX needed? 
*/ /* ath5k_chan_change(sc, c); */ ath5k_beacon_config(sc); /* intrs are enabled by ath5k_beacon_config */ return 0; err: return ret; } static int ath5k_reset_wake(struct ath5k_softc *sc) { int ret; ret = ath5k_reset(sc, sc->curchan); if (!ret) ieee80211_wake_queues(sc->hw); return ret; } static int ath5k_start(struct ieee80211_hw *hw) { return ath5k_init(hw->priv); } static void ath5k_stop(struct ieee80211_hw *hw) { ath5k_stop_hw(hw->priv); } static int ath5k_add_interface(struct ieee80211_hw *hw, struct ieee80211_if_init_conf *conf) { struct ath5k_softc *sc = hw->priv; int ret; mutex_lock(&sc->lock); if (sc->vif) { ret = 0; goto end; } sc->vif = conf->vif; switch (conf->type) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_MONITOR: sc->opmode = conf->type; break; default: ret = -EOPNOTSUPP; goto end; } ath5k_hw_set_lladdr(sc->ah, conf->mac_addr); ath5k_mode_setup(sc); ret = 0; end: mutex_unlock(&sc->lock); return ret; } static void ath5k_remove_interface(struct ieee80211_hw *hw, struct ieee80211_if_init_conf *conf) { struct ath5k_softc *sc = hw->priv; u8 mac[ETH_ALEN] = {}; mutex_lock(&sc->lock); if (sc->vif != conf->vif) goto end; ath5k_hw_set_lladdr(sc->ah, mac); sc->vif = NULL; end: mutex_unlock(&sc->lock); } /* * TODO: Phy disable/diversity etc */ static int ath5k_config(struct ieee80211_hw *hw, u32 changed) { struct ath5k_softc *sc = hw->priv; struct ath5k_hw *ah = sc->ah; struct ieee80211_conf *conf = &hw->conf; int ret = 0; mutex_lock(&sc->lock); if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { ret = ath5k_chan_set(sc, conf->channel); if (ret < 0) goto unlock; } if ((changed & IEEE80211_CONF_CHANGE_POWER) && (sc->power_level != conf->power_level)) { sc->power_level = conf->power_level; /* Half dB steps */ ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2)); } /* TODO: * 1) Move this on config_interface and handle each case * separately eg. when we have only one STA vif, use * AR5K_ANTMODE_SINGLE_AP * * 2) Allow the user to change antenna mode eg. when only * one antenna is present * * 3) Allow the user to set default/tx antenna when possible * * 4) Default mode should handle 90% of the cases, together * with fixed a/b and single AP modes we should be able to * handle 99%. Sectored modes are extreme cases and i still * haven't found a usage for them. 
If we decide to support them, * then we must allow the user to set how many tx antennas we * have available */ ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT); unlock: mutex_unlock(&sc->lock); return ret; } static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw, int mc_count, struct dev_addr_list *mclist) { u32 mfilt[2], val; int i; u8 pos; mfilt[0] = 0; mfilt[1] = 0; /* NB: was "mfilt[1] = 1", which left bit 32 of the hash filter always set */ for (i = 0; i < mc_count; i++) { if (!mclist) break; /* calculate XOR of eight 6-bit values */ val = get_unaligned_le32(mclist->dmi_addr + 0); pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; val = get_unaligned_le32(mclist->dmi_addr + 3); pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; pos &= 0x3f; mfilt[pos / 32] |= (1 << (pos % 32)); /* XXX: we might be able to just do this instead, * but not sure, needs testing, if we do use this we'd * need to inform below to not reset the mcast */ /* ath5k_hw_set_mcast_filterindex(ah, * mclist->dmi_addr[5]); */ mclist = mclist->next; } return ((u64)(mfilt[1]) << 32) | mfilt[0]; }
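/*
 * Editor's note -- illustrative sketch, not part of the original driver:
 * ath5k_prepare_multicast() above folds each 48-bit multicast address into
 * a 6-bit bucket by XOR-ing 6-bit slices of two overlapping 32-bit reads.
 * A hypothetical standalone version over a plain 6-byte address (it avoids
 * the original's read past byte 5, so the top slice can differ slightly;
 * shown only to document the bucket -> mfilt bit mapping):
 */
static inline u8 ath5k_example_mcast_bucket(const u8 *addr)
{
        u32 lo = get_unaligned_le32(addr);              /* bytes 0..3 */
        u32 hi = addr[3] | (addr[4] << 8) | (addr[5] << 16);
        u8 pos;

        pos = (lo >> 18) ^ (lo >> 12) ^ (lo >> 6) ^ lo;
        pos ^= (hi >> 18) ^ (hi >> 12) ^ (hi >> 6) ^ hi;
        return pos & 0x3f;      /* sets bit (pos % 32) of mfilt[pos / 32] */
}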
#define SUPPORTED_FIF_FLAGS \ FIF_PROMISC_IN_BSS | FIF_ALLMULTI | FIF_FCSFAIL | \ FIF_PLCPFAIL | FIF_CONTROL | FIF_OTHER_BSS | \ FIF_BCN_PRBRESP_PROMISC /* * o always accept unicast, broadcast, and multicast traffic * o multicast traffic for all BSSIDs will be enabled if mac80211 * says it should be * o maintain current state of phy ofdm or phy cck error reception. * If the hardware detects any of these type of errors then * ath5k_hw_get_rx_filter() will pass to us the respective * hardware filters to be able to receive these type of frames. * o probe request frames are accepted only when operating in * hostap, adhoc, or monitor modes * o enable promiscuous mode according to the interface state * o accept beacons: * - when operating in adhoc mode so the 802.11 layer creates * node table entries for peers, * - when operating in station mode for collecting rssi data when * the station is otherwise quiet, or * - when scanning */ static void ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *new_flags, u64 multicast) { struct ath5k_softc *sc = hw->priv; struct ath5k_hw *ah = sc->ah; u32 mfilt[2], rfilt; mutex_lock(&sc->lock); mfilt[0] = multicast; mfilt[1] = multicast >> 32; /* Only deal with supported flags */ changed_flags &= SUPPORTED_FIF_FLAGS; *new_flags &= SUPPORTED_FIF_FLAGS; /* If HW detects any phy or radar errors, leave those filters on. * Also, always enable Unicast, Broadcasts and Multicast * XXX: move unicast, bssid broadcasts and multicast to mac80211 */ rfilt = (ath5k_hw_get_rx_filter(ah) & (AR5K_RX_FILTER_PHYERR)) | (AR5K_RX_FILTER_UCAST | AR5K_RX_FILTER_BCAST | AR5K_RX_FILTER_MCAST); if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) { if (*new_flags & FIF_PROMISC_IN_BSS) { rfilt |= AR5K_RX_FILTER_PROM; __set_bit(ATH_STAT_PROMISC, sc->status); } else { __clear_bit(ATH_STAT_PROMISC, sc->status); } } /* Note, AR5K_RX_FILTER_MCAST is already enabled */ if (*new_flags & FIF_ALLMULTI) { mfilt[0] = ~0; mfilt[1] = ~0; } /* This is the best we can do */ if (*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)) rfilt |= AR5K_RX_FILTER_PHYERR; /* FIF_BCN_PRBRESP_PROMISC really means to enable beacons * and probes for any BSSID, this needs testing */ if (*new_flags & FIF_BCN_PRBRESP_PROMISC) rfilt |= AR5K_RX_FILTER_BEACON | AR5K_RX_FILTER_PROBEREQ; /* FIF_CONTROL doc says that if FIF_PROMISC_IN_BSS is not * set we should only pass on control frames for this * station. This needs testing. I believe right now this * enables *all* control frames, which is OK, but we should * see if we can improve on granularity */ if (*new_flags & FIF_CONTROL) rfilt |= AR5K_RX_FILTER_CONTROL; /* Additional settings per mode -- this is per ath5k */ /* XXX move these to mac80211, and add a beacon IFF flag to mac80211 */ switch (sc->opmode) { case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_MONITOR: rfilt |= AR5K_RX_FILTER_CONTROL | AR5K_RX_FILTER_BEACON | AR5K_RX_FILTER_PROBEREQ | AR5K_RX_FILTER_PROM; break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: rfilt |= AR5K_RX_FILTER_PROBEREQ | AR5K_RX_FILTER_BEACON; break; case NL80211_IFTYPE_STATION: if (sc->assoc) rfilt |= AR5K_RX_FILTER_BEACON; default: break; } /* Set filters */ ath5k_hw_set_rx_filter(ah, rfilt); /* Set multicast bits */ ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]); /* Set the cached hw filter flags, these will later actually * be set in HW */ sc->filter_flags = rfilt; mutex_unlock(&sc->lock); } static int ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) { struct ath5k_softc *sc = hw->priv; int ret = 0; if (modparam_nohwcrypt) return -EOPNOTSUPP; if (sc->opmode == NL80211_IFTYPE_AP) return -EOPNOTSUPP; switch (key->alg) { case ALG_WEP: case ALG_TKIP: break; case ALG_CCMP: if (sc->ah->ah_aes_support) break; return -EOPNOTSUPP; default: WARN_ON(1); return -EINVAL; } mutex_lock(&sc->lock); switch (cmd) { case SET_KEY: ret = ath5k_hw_set_key(sc->ah, key->keyidx, key, sta ? sta->addr : NULL); if (ret) { ATH5K_ERR(sc, "can't set the key\n"); goto unlock; } __set_bit(key->keyidx, sc->keymap); key->hw_key_idx = key->keyidx; key->flags |= (IEEE80211_KEY_FLAG_GENERATE_IV | IEEE80211_KEY_FLAG_GENERATE_MMIC); break; case DISABLE_KEY: ath5k_hw_reset_key(sc->ah, key->keyidx); __clear_bit(key->keyidx, sc->keymap); break; default: ret = -EINVAL; goto unlock; } unlock: mmiowb(); mutex_unlock(&sc->lock); return ret; } static int ath5k_get_stats(struct ieee80211_hw *hw, struct ieee80211_low_level_stats *stats) { struct ath5k_softc *sc = hw->priv; struct ath5k_hw *ah = sc->ah; /* Force update */ ath5k_hw_update_mib_counters(ah, &sc->ll_stats); memcpy(stats, &sc->ll_stats, sizeof(sc->ll_stats)); return 0; } static int ath5k_get_tx_stats(struct ieee80211_hw *hw, struct ieee80211_tx_queue_stats *stats) { struct ath5k_softc *sc = hw->priv; memcpy(stats, &sc->tx_stats, sizeof(sc->tx_stats)); return 0; } static u64 ath5k_get_tsf(struct ieee80211_hw *hw) { struct ath5k_softc *sc = hw->priv; return ath5k_hw_get_tsf64(sc->ah); } static void ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf) { struct ath5k_softc *sc = hw->priv; ath5k_hw_set_tsf64(sc->ah, tsf); } static void ath5k_reset_tsf(struct ieee80211_hw *hw) { struct ath5k_softc *sc = hw->priv; /* * in IBSS mode we need to update the beacon timers too. * this will also reset the TSF if we call it with 0 */ if (sc->opmode == NL80211_IFTYPE_ADHOC) ath5k_beacon_update_timers(sc, 0); else ath5k_hw_reset_tsf(sc->ah); } /* * Updates the beacon that is sent by ath5k_beacon_send. For adhoc, * this is called only once at config_bss time, for AP we do it every * SWBA interrupt so that the TIM will reflect buffered frames. * * Called with the beacon lock.
*/ static int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { int ret; struct ath5k_softc *sc = hw->priv; struct sk_buff *skb; if (WARN_ON(!vif)) { ret = -EINVAL; goto out; } skb = ieee80211_beacon_get(hw, vif); if (!skb) { ret = -ENOMEM; goto out; } ath5k_debug_dump_skb(sc, skb, "BC ", 1); ath5k_txbuf_free(sc, sc->bbuf); sc->bbuf->skb = skb; ret = ath5k_beacon_setup(sc, sc->bbuf); if (ret) sc->bbuf->skb = NULL; out: return ret; } static void set_beacon_filter(struct ieee80211_hw *hw, bool enable) { struct ath5k_softc *sc = hw->priv; struct ath5k_hw *ah = sc->ah; u32 rfilt; rfilt = ath5k_hw_get_rx_filter(ah); if (enable) rfilt |= AR5K_RX_FILTER_BEACON; else rfilt &= ~AR5K_RX_FILTER_BEACON; ath5k_hw_set_rx_filter(ah, rfilt); sc->filter_flags = rfilt; } static void ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, u32 changes) { struct ath5k_softc *sc = hw->priv; struct ath5k_hw *ah = sc->ah; unsigned long flags; mutex_lock(&sc->lock); if (WARN_ON(sc->vif != vif)) goto unlock; if (changes & BSS_CHANGED_BSSID) { /* Cache for later use during resets */ memcpy(ah->ah_bssid, bss_conf->bssid, ETH_ALEN); /* XXX: assoc id is set to 0 for now, mac80211 doesn't have * a clean way of letting us retrieve this yet. */ ath5k_hw_set_associd(ah, ah->ah_bssid, 0); mmiowb(); } if (changes & BSS_CHANGED_BEACON_INT) sc->bintval = bss_conf->beacon_int; if (changes & BSS_CHANGED_ASSOC) { sc->assoc = bss_conf->assoc; if (sc->opmode == NL80211_IFTYPE_STATION) set_beacon_filter(hw, sc->assoc); ath5k_hw_set_ledstate(sc->ah, sc->assoc ? AR5K_LED_ASSOC : AR5K_LED_INIT); } if (changes & BSS_CHANGED_BEACON) { spin_lock_irqsave(&sc->block, flags); ath5k_beacon_update(hw, vif); spin_unlock_irqrestore(&sc->block, flags); } if (changes & BSS_CHANGED_BEACON_ENABLED) sc->enable_beacon = bss_conf->enable_beacon; if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON_INT)) ath5k_beacon_config(sc); unlock: mutex_unlock(&sc->lock); } static void ath5k_sw_scan_start(struct ieee80211_hw *hw) { struct ath5k_softc *sc = hw->priv; if (!sc->assoc) ath5k_hw_set_ledstate(sc->ah, AR5K_LED_SCAN); } static void ath5k_sw_scan_complete(struct ieee80211_hw *hw) { struct ath5k_softc *sc = hw->priv; ath5k_hw_set_ledstate(sc->ah, sc->assoc ? AR5K_LED_ASSOC : AR5K_LED_INIT); }
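/*
 * Editor's note -- illustrative sketch appended for exposition, not part
 * of the original driver: the RX tasklet above notes that the hardware
 * timestamp (rs_tstamp) is only 15 bits wide and must be extended against
 * the 64-bit TSF within one 32768 us wrap. Assuming the stamp is the low
 * 15 bits of the TSF near receive time, a hypothetical extension helper:
 */
static inline u64 ath5k_example_extend_tsf(u64 tsf, u32 rstamp)
{
        u64 ext = (tsf & ~0x7fffULL) | (rstamp & 0x7fff);

        /* if the TSF's low bits already wrapped past the stamp, the
         * stamped instant belongs to the previous 32768 us period */
        if (ext > tsf)
                ext -= 0x8000;
        return ext;
}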
martyj/LGP999_V10c_Kernel
drivers/net/wireless/ath/ath5k/base.c
C
gpl-2.0
87,490
/* * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework * for Non-CPU Devices. * * Copyright (C) 2011 Samsung Electronics * MyungJoo Ham <myungjoo.ham@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/stat.h> #include <linux/pm_opp.h> #include <linux/devfreq.h> #include <linux/workqueue.h> #include <linux/platform_device.h> #include <linux/list.h> #include <linux/printk.h> #include <linux/hrtimer.h> #include "governor.h" static struct class *devfreq_class; /* * devfreq core provides delayed work based load monitoring helper * functions. Governors can use these or can implement their own * monitoring mechanism. */ static struct workqueue_struct *devfreq_wq; /* The list of all device-devfreq governors */ static LIST_HEAD(devfreq_governor_list); /* The list of all device-devfreq */ static LIST_HEAD(devfreq_list); static DEFINE_MUTEX(devfreq_list_lock); /** * find_device_devfreq() - find devfreq struct using device pointer * @dev: device pointer used to lookup device devfreq. * * Search the list of device devfreqs and return the matched device's * devfreq info. devfreq_list_lock should be held by the caller. */ static struct devfreq *find_device_devfreq(struct device *dev) { struct devfreq *tmp_devfreq; if (unlikely(IS_ERR_OR_NULL(dev))) { pr_err("DEVFREQ: %s: Invalid parameters\n", __func__); return ERR_PTR(-EINVAL); } WARN(!mutex_is_locked(&devfreq_list_lock), "devfreq_list_lock must be locked."); list_for_each_entry(tmp_devfreq, &devfreq_list, node) { if (tmp_devfreq->dev.parent == dev) return tmp_devfreq; } return ERR_PTR(-ENODEV); } /** * devfreq_get_freq_level() - Lookup freq_table for the frequency * @devfreq: the devfreq instance * @freq: the target frequency */ static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq) { int lev; for (lev = 0; lev < devfreq->profile->max_state; lev++) if (freq == devfreq->profile->freq_table[lev]) return lev; return -EINVAL; } /** * devfreq_update_status() - Update statistics of devfreq behavior * @devfreq: the devfreq instance * @freq: the update target frequency */ static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq) { int lev, prev_lev; unsigned long cur_time; lev = devfreq_get_freq_level(devfreq, freq); if (lev < 0) return lev; cur_time = jiffies; devfreq->time_in_state[lev] += cur_time - devfreq->last_stat_updated; if (freq != devfreq->previous_freq) { prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq); devfreq->trans_table[(prev_lev * devfreq->profile->max_state) + lev]++; devfreq->total_trans++; } devfreq->last_stat_updated = cur_time; return 0; } /** * find_devfreq_governor() - find devfreq governor from name * @name: name of the governor * * Search the list of devfreq governors and return the matched * governor's pointer. devfreq_list_lock should be held by the caller. 
*/ static struct devfreq_governor *find_devfreq_governor(const char *name) { struct devfreq_governor *tmp_governor; if (unlikely(IS_ERR_OR_NULL(name))) { pr_err("DEVFREQ: %s: Invalid parameters\n", __func__); return ERR_PTR(-EINVAL); } WARN(!mutex_is_locked(&devfreq_list_lock), "devfreq_list_lock must be locked."); list_for_each_entry(tmp_governor, &devfreq_governor_list, node) { if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN)) return tmp_governor; } return ERR_PTR(-ENODEV); } /* Load monitoring helper functions for governors use */ /** * update_devfreq() - Reevaluate the device and configure frequency. * @devfreq: the devfreq instance. * * Note: Lock devfreq->lock before calling update_devfreq * This function is exported for governors. */ int update_devfreq(struct devfreq *devfreq) { unsigned long freq; int err = 0; u32 flags = 0; if (!mutex_is_locked(&devfreq->lock)) { WARN(true, "devfreq->lock must be locked by the caller.\n"); return -EINVAL; } if (!devfreq->governor) return -EINVAL; /* Reevaluate the proper frequency */ err = devfreq->governor->get_target_freq(devfreq, &freq); if (err) return err; /* * Adjust the frequency with user freq and QoS. * * List from the highest priority * max_freq (probably called by thermal when it's too hot) * min_freq */ if (devfreq->min_freq && freq < devfreq->min_freq) { freq = devfreq->min_freq; flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */ } if (devfreq->max_freq && freq > devfreq->max_freq) { freq = devfreq->max_freq; flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */ } err = devfreq->profile->target(devfreq->dev.parent, &freq, flags); if (err) return err; if (devfreq->profile->freq_table) if (devfreq_update_status(devfreq, freq)) dev_err(&devfreq->dev, "Couldn't update frequency transition information.\n"); devfreq->previous_freq = freq; return err; } EXPORT_SYMBOL(update_devfreq); /** * devfreq_monitor() - Periodically poll devfreq objects. * @work: the work struct used to run devfreq_monitor periodically. * */ static void devfreq_monitor(struct work_struct *work) { int err; struct devfreq *devfreq = container_of(work, struct devfreq, work.work); mutex_lock(&devfreq->lock); err = update_devfreq(devfreq); if (err) dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err); queue_delayed_work(devfreq_wq, &devfreq->work, msecs_to_jiffies(devfreq->profile->polling_ms)); mutex_unlock(&devfreq->lock); } /** * devfreq_monitor_start() - Start load monitoring of devfreq instance * @devfreq: the devfreq instance. * * Helper function for starting devfreq device load monitoring. By * default delayed work based monitoring is supported. Function * to be called from governor in response to DEVFREQ_GOV_START * event when device is added to devfreq framework. */ void devfreq_monitor_start(struct devfreq *devfreq) { INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor); if (devfreq->profile->polling_ms) queue_delayed_work(devfreq_wq, &devfreq->work, msecs_to_jiffies(devfreq->profile->polling_ms)); } EXPORT_SYMBOL(devfreq_monitor_start);
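/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * the monitor helpers here are meant to be driven from a governor's event
 * handler, as the kerneldoc above describes. A minimal hypothetical
 * handler wiring DEVFREQ_GOV_START/STOP to them (other events and error
 * handling omitted; assumes the helper declarations and event constants
 * from "governor.h", as in mainline):
 */
static int example_governor_event_handler(struct devfreq *devfreq,
                                          unsigned int event, void *data)
{
        switch (event) {
        case DEVFREQ_GOV_START:
                devfreq_monitor_start(devfreq); /* begin periodic polling */
                break;
        case DEVFREQ_GOV_STOP:
                devfreq_monitor_stop(devfreq);  /* cancel the delayed work */
                break;
        default:
                break;
        }
        return 0;
}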
/** * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance * @devfreq: the devfreq instance. * * Helper function to stop devfreq device load monitoring. Function * to be called from governor in response to DEVFREQ_GOV_STOP * event when device is removed from devfreq framework. */ void devfreq_monitor_stop(struct devfreq *devfreq) { cancel_delayed_work_sync(&devfreq->work); } EXPORT_SYMBOL(devfreq_monitor_stop); /** * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance * @devfreq: the devfreq instance. * * Helper function to suspend devfreq device load monitoring. Function * to be called from governor in response to DEVFREQ_GOV_SUSPEND * event or when polling interval is set to zero. * * Note: Though this function is same as devfreq_monitor_stop(), * intentionally kept separate to provide hooks for collecting * transition statistics. */ void devfreq_monitor_suspend(struct devfreq *devfreq) { mutex_lock(&devfreq->lock); if (devfreq->stop_polling) { mutex_unlock(&devfreq->lock); return; } devfreq_update_status(devfreq, devfreq->previous_freq); devfreq->stop_polling = true; mutex_unlock(&devfreq->lock); cancel_delayed_work_sync(&devfreq->work); } EXPORT_SYMBOL(devfreq_monitor_suspend); /** * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance * @devfreq: the devfreq instance. * * Helper function to resume devfreq device load monitoring. Function * to be called from governor in response to DEVFREQ_GOV_RESUME * event or when polling interval is set to non-zero. */ void devfreq_monitor_resume(struct devfreq *devfreq) { unsigned long freq; mutex_lock(&devfreq->lock); if (!devfreq->stop_polling) goto out; if (!delayed_work_pending(&devfreq->work) && devfreq->profile->polling_ms) queue_delayed_work(devfreq_wq, &devfreq->work, msecs_to_jiffies(devfreq->profile->polling_ms)); devfreq->last_stat_updated = jiffies; devfreq->stop_polling = false; if (devfreq->profile->get_cur_freq && !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq)) devfreq->previous_freq = freq; out: mutex_unlock(&devfreq->lock); } EXPORT_SYMBOL(devfreq_monitor_resume); /** * devfreq_interval_update() - Update device devfreq monitoring interval * @devfreq: the devfreq instance. * @delay: new polling interval to be set. * * Helper function to set new load monitoring polling interval. Function * to be called from governor in response to DEVFREQ_GOV_INTERVAL event. */ void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay) { unsigned int cur_delay = devfreq->profile->polling_ms; unsigned int new_delay = *delay; mutex_lock(&devfreq->lock); devfreq->profile->polling_ms = new_delay; if (devfreq->stop_polling) goto out; /* if new delay is zero, stop polling */ if (!new_delay) { mutex_unlock(&devfreq->lock); cancel_delayed_work_sync(&devfreq->work); return; } /* if current delay is zero, start polling with new delay */ if (!cur_delay) { queue_delayed_work(devfreq_wq, &devfreq->work, msecs_to_jiffies(devfreq->profile->polling_ms)); goto out; } /* if current delay is greater than new delay, restart polling */ if (cur_delay > new_delay) { mutex_unlock(&devfreq->lock); cancel_delayed_work_sync(&devfreq->work); mutex_lock(&devfreq->lock); if (!devfreq->stop_polling) queue_delayed_work(devfreq_wq, &devfreq->work, msecs_to_jiffies(devfreq->profile->polling_ms)); } out: mutex_unlock(&devfreq->lock); } EXPORT_SYMBOL(devfreq_interval_update); /** * devfreq_notifier_call() - Notify that the device frequency requirements * have been changed out of devfreq framework. * @nb: the notifier_block (supposed to be devfreq->nb) * @type: not used * @devp: not used * * Called by a notifier that uses devfreq->nb.
*/ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type, void *devp) { struct devfreq *devfreq = container_of(nb, struct devfreq, nb); int ret; mutex_lock(&devfreq->lock); ret = update_devfreq(devfreq); mutex_unlock(&devfreq->lock); return ret; } /** * _remove_devfreq() - Remove devfreq from the list and release its resources. * @devfreq: the devfreq struct * @skip: skip calling device_unregister(). */ static void _remove_devfreq(struct devfreq *devfreq, bool skip) { mutex_lock(&devfreq_list_lock); if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) { mutex_unlock(&devfreq_list_lock); dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n"); return; } list_del(&devfreq->node); mutex_unlock(&devfreq_list_lock); if (devfreq->governor) devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_STOP, NULL); if (devfreq->profile->exit) devfreq->profile->exit(devfreq->dev.parent); if (!skip && get_device(&devfreq->dev)) { device_unregister(&devfreq->dev); put_device(&devfreq->dev); } mutex_destroy(&devfreq->lock); kfree(devfreq); } /** * devfreq_dev_release() - Callback for struct device to release the device. * @dev: the devfreq device * * This calls _remove_devfreq() if _remove_devfreq() is not called. * Note that devfreq_dev_release() could be called by _remove_devfreq() as * well as by others unregistering the device. */ static void devfreq_dev_release(struct device *dev) { struct devfreq *devfreq = to_devfreq(dev); _remove_devfreq(devfreq, true); } /** * devfreq_add_device() - Add devfreq feature to the device * @dev: the device to add devfreq feature. * @profile: device-specific profile to run devfreq. * @governor_name: name of the policy to choose frequency. * @data: private data for the governor. The devfreq framework does not * touch this value. */ struct devfreq *devfreq_add_device(struct device *dev, struct devfreq_dev_profile *profile, const char *governor_name, void *data) { struct devfreq *devfreq; struct devfreq_governor *governor; int err = 0; if (!dev || !profile || !governor_name) { dev_err(dev, "%s: Invalid parameters.\n", __func__); return ERR_PTR(-EINVAL); } mutex_lock(&devfreq_list_lock); devfreq = find_device_devfreq(dev); mutex_unlock(&devfreq_list_lock); if (!IS_ERR(devfreq)) { dev_err(dev, "%s: Unable to create devfreq for the device. 
It already has one.\n", __func__); err = -EINVAL; goto err_out; } devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL); if (!devfreq) { dev_err(dev, "%s: Unable to create devfreq for the device\n", __func__); err = -ENOMEM; goto err_out; } mutex_init(&devfreq->lock); mutex_lock(&devfreq->lock); devfreq->dev.parent = dev; devfreq->dev.class = devfreq_class; devfreq->dev.release = devfreq_dev_release; devfreq->profile = profile; strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN); devfreq->previous_freq = profile->initial_freq; devfreq->data = data; devfreq->nb.notifier_call = devfreq_notifier_call; devfreq->trans_table = devm_kzalloc(dev, sizeof(unsigned int) * devfreq->profile->max_state * devfreq->profile->max_state, GFP_KERNEL); devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned int) * devfreq->profile->max_state, GFP_KERNEL); devfreq->last_stat_updated = jiffies; dev_set_name(&devfreq->dev, "%s", dev_name(dev)); err = device_register(&devfreq->dev); if (err) { put_device(&devfreq->dev); mutex_unlock(&devfreq->lock); goto err_dev; } mutex_unlock(&devfreq->lock); mutex_lock(&devfreq_list_lock); list_add(&devfreq->node, &devfreq_list); governor = find_devfreq_governor(devfreq->governor_name); if (!IS_ERR(governor)) devfreq->governor = governor; if (devfreq->governor) err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START, NULL); mutex_unlock(&devfreq_list_lock); if (err) { dev_err(dev, "%s: Unable to start governor for the device\n", __func__); goto err_init; } return devfreq; err_init: list_del(&devfreq->node); device_unregister(&devfreq->dev); err_dev: kfree(devfreq); err_out: return ERR_PTR(err); } EXPORT_SYMBOL(devfreq_add_device); /** * devfreq_remove_device() - Remove devfreq feature from a device. * @devfreq: the devfreq instance to be removed * * The opposite of devfreq_add_device(). */ int devfreq_remove_device(struct devfreq *devfreq) { if (!devfreq) return -EINVAL; _remove_devfreq(devfreq, false); return 0; } EXPORT_SYMBOL(devfreq_remove_device); /** * devfreq_suspend_device() - Suspend devfreq of a device. * @devfreq: the devfreq instance to be suspended * * This function is intended to be called by the pm callbacks * (e.g., runtime_suspend, suspend) of the device driver that * holds the devfreq. */ int devfreq_suspend_device(struct devfreq *devfreq) { if (!devfreq) return -EINVAL; if (!devfreq->governor) return 0; return devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_SUSPEND, NULL); } EXPORT_SYMBOL(devfreq_suspend_device); /** * devfreq_resume_device() - Resume devfreq of a device. * @devfreq: the devfreq instance to be resumed * * This function is intended to be called by the pm callbacks * (e.g., runtime_resume, resume) of the device driver that * holds the devfreq. 
*/ int devfreq_resume_device(struct devfreq *devfreq) { if (!devfreq) return -EINVAL; if (!devfreq->governor) return 0; return devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_RESUME, NULL); } EXPORT_SYMBOL(devfreq_resume_device); /** * devfreq_add_governor() - Add devfreq governor * @governor: the devfreq governor to be added */ int devfreq_add_governor(struct devfreq_governor *governor) { struct devfreq_governor *g; struct devfreq *devfreq; int err = 0; if (!governor) { pr_err("%s: Invalid parameters.\n", __func__); return -EINVAL; } mutex_lock(&devfreq_list_lock); g = find_devfreq_governor(governor->name); if (!IS_ERR(g)) { pr_err("%s: governor %s already registered\n", __func__, g->name); err = -EINVAL; goto err_out; } list_add(&governor->node, &devfreq_governor_list); list_for_each_entry(devfreq, &devfreq_list, node) { int ret = 0; struct device *dev = devfreq->dev.parent; if (!strncmp(devfreq->governor_name, governor->name, DEVFREQ_NAME_LEN)) { /* The following should never occur */ if (devfreq->governor) { dev_warn(dev, "%s: Governor %s already present\n", __func__, devfreq->governor->name); ret = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_STOP, NULL); if (ret) { dev_warn(dev, "%s: Governor %s stop = %d\n", __func__, devfreq->governor->name, ret); } /* Fall through */ } devfreq->governor = governor; ret = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START, NULL); if (ret) { dev_warn(dev, "%s: Governor %s start=%d\n", __func__, devfreq->governor->name, ret); } } } err_out: mutex_unlock(&devfreq_list_lock); return err; } EXPORT_SYMBOL(devfreq_add_governor); /** * devfreq_remove_device() - Remove devfreq feature from a device. * @governor: the devfreq governor to be removed */ int devfreq_remove_governor(struct devfreq_governor *governor) { struct devfreq_governor *g; struct devfreq *devfreq; int err = 0; if (!governor) { pr_err("%s: Invalid parameters.\n", __func__); return -EINVAL; } mutex_lock(&devfreq_list_lock); g = find_devfreq_governor(governor->name); if (IS_ERR(g)) { pr_err("%s: governor %s not registered\n", __func__, governor->name); err = PTR_ERR(g); goto err_out; } list_for_each_entry(devfreq, &devfreq_list, node) { int ret; struct device *dev = devfreq->dev.parent; if (!strncmp(devfreq->governor_name, governor->name, DEVFREQ_NAME_LEN)) { /* we should have a devfreq governor! 
*/ if (!devfreq->governor) { dev_warn(dev, "%s: Governor %s NOT present\n", __func__, governor->name); continue; /* Fall through */ } ret = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_STOP, NULL); if (ret) { dev_warn(dev, "%s: Governor %s stop=%d\n", __func__, devfreq->governor->name, ret); } devfreq->governor = NULL; } } list_del(&governor->node); err_out: mutex_unlock(&devfreq_list_lock); return err; } EXPORT_SYMBOL(devfreq_remove_governor); static ssize_t governor_show(struct device *dev, struct device_attribute *attr, char *buf) { if (!to_devfreq(dev)->governor) return -EINVAL; return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name); } static ssize_t governor_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct devfreq *df = to_devfreq(dev); int ret; char str_governor[DEVFREQ_NAME_LEN + 1]; struct devfreq_governor *governor; ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor); if (ret != 1) return -EINVAL; mutex_lock(&devfreq_list_lock); governor = find_devfreq_governor(str_governor); if (IS_ERR(governor)) { ret = PTR_ERR(governor); goto out; } if (df->governor == governor) goto out; if (df->governor) { ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL); if (ret) { dev_warn(dev, "%s: Governor %s not stopped(%d)\n", __func__, df->governor->name, ret); goto out; } } df->governor = governor; strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN); ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL); if (ret) dev_warn(dev, "%s: Governor %s not started(%d)\n", __func__, df->governor->name, ret); out: mutex_unlock(&devfreq_list_lock); if (!ret) ret = count; return ret; } static DEVICE_ATTR_RW(governor); static ssize_t available_governors_show(struct device *d, struct device_attribute *attr, char *buf) { struct devfreq_governor *tmp_governor; ssize_t count = 0; mutex_lock(&devfreq_list_lock); list_for_each_entry(tmp_governor, &devfreq_governor_list, node) count += scnprintf(&buf[count], (PAGE_SIZE - count - 2), "%s ", tmp_governor->name); mutex_unlock(&devfreq_list_lock); /* Truncate the trailing space */ if (count) count--; count += sprintf(&buf[count], "\n"); return count; } static DEVICE_ATTR_RO(available_governors); static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr, char *buf) { unsigned long freq; struct devfreq *devfreq = to_devfreq(dev); if (devfreq->profile->get_cur_freq && !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq)) return sprintf(buf, "%lu\n", freq); return sprintf(buf, "%lu\n", devfreq->previous_freq); } static DEVICE_ATTR_RO(cur_freq); static ssize_t target_freq_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq); } static DEVICE_ATTR_RO(target_freq); static ssize_t polling_interval_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms); } static ssize_t polling_interval_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct devfreq *df = to_devfreq(dev); unsigned int value; int ret; if (!df->governor) return -EINVAL; ret = sscanf(buf, "%u", &value); if (ret != 1) return -EINVAL; df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value); ret = count; return ret; } static DEVICE_ATTR_RW(polling_interval); static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct devfreq 
*df = to_devfreq(dev); unsigned long value; int ret; unsigned long max; ret = sscanf(buf, "%lu", &value); if (ret != 1) return -EINVAL; mutex_lock(&df->lock); max = df->max_freq; if (value && max && value > max) { ret = -EINVAL; goto unlock; } df->min_freq = value; update_devfreq(df); ret = count; unlock: mutex_unlock(&df->lock); return ret; } static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%lu\n", to_devfreq(dev)->min_freq); } static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct devfreq *df = to_devfreq(dev); unsigned long value; int ret; unsigned long min; ret = sscanf(buf, "%lu", &value); if (ret != 1) return -EINVAL; mutex_lock(&df->lock); min = df->min_freq; if (value && min && value < min) { ret = -EINVAL; goto unlock; } df->max_freq = value; update_devfreq(df); ret = count; unlock: mutex_unlock(&df->lock); return ret; } static DEVICE_ATTR_RW(min_freq); static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq); } static DEVICE_ATTR_RW(max_freq); static ssize_t available_frequencies_show(struct device *d, struct device_attribute *attr, char *buf) { struct devfreq *df = to_devfreq(d); struct device *dev = df->dev.parent; struct dev_pm_opp *opp; ssize_t count = 0; unsigned long freq = 0; rcu_read_lock(); do { opp = dev_pm_opp_find_freq_ceil(dev, &freq); if (IS_ERR(opp)) break; count += scnprintf(&buf[count], (PAGE_SIZE - count - 2), "%lu ", freq); freq++; } while (1); rcu_read_unlock(); /* Truncate the trailing space */ if (count) count--; count += sprintf(&buf[count], "\n"); return count; } static DEVICE_ATTR_RO(available_frequencies); static ssize_t trans_stat_show(struct device *dev, struct device_attribute *attr, char *buf) { struct devfreq *devfreq = to_devfreq(dev); ssize_t len; int i, j; unsigned int max_state = devfreq->profile->max_state; if (!devfreq->stop_polling && devfreq_update_status(devfreq, devfreq->previous_freq)) return 0; len = sprintf(buf, " From : To\n"); len += sprintf(buf + len, " :"); for (i = 0; i < max_state; i++) len += sprintf(buf + len, "%8u", devfreq->profile->freq_table[i]); len += sprintf(buf + len, " time(ms)\n"); for (i = 0; i < max_state; i++) { if (devfreq->profile->freq_table[i] == devfreq->previous_freq) { len += sprintf(buf + len, "*"); } else { len += sprintf(buf + len, " "); } len += sprintf(buf + len, "%8u:", devfreq->profile->freq_table[i]); for (j = 0; j < max_state; j++) len += sprintf(buf + len, "%8u", devfreq->trans_table[(i * max_state) + j]); len += sprintf(buf + len, "%10u\n", jiffies_to_msecs(devfreq->time_in_state[i])); } len += sprintf(buf + len, "Total transition : %u\n", devfreq->total_trans); return len; } static DEVICE_ATTR_RO(trans_stat); static struct attribute *devfreq_attrs[] = { &dev_attr_governor.attr, &dev_attr_available_governors.attr, &dev_attr_cur_freq.attr, &dev_attr_available_frequencies.attr, &dev_attr_target_freq.attr, &dev_attr_polling_interval.attr, &dev_attr_min_freq.attr, &dev_attr_max_freq.attr, &dev_attr_trans_stat.attr, NULL, }; ATTRIBUTE_GROUPS(devfreq); static int __init devfreq_init(void) { devfreq_class = class_create(THIS_MODULE, "devfreq"); if (IS_ERR(devfreq_class)) { pr_err("%s: couldn't create class\n", __FILE__); return PTR_ERR(devfreq_class); } devfreq_wq = create_freezable_workqueue("devfreq_wq"); if (!devfreq_wq) { class_destroy(devfreq_class); pr_err("%s: couldn't create 
workqueue\n", __FILE__); return -ENOMEM; } devfreq_class->dev_groups = devfreq_groups; return 0; } subsys_initcall(devfreq_init); static void __exit devfreq_exit(void) { class_destroy(devfreq_class); destroy_workqueue(devfreq_wq); } module_exit(devfreq_exit); /* * The followings are helper functions for devfreq user device drivers with * OPP framework. */ /** * devfreq_recommended_opp() - Helper function to get proper OPP for the * freq value given to target callback. * @dev: The devfreq user device. (parent of devfreq) * @freq: The frequency given to target function * @flags: Flags handed from devfreq framework. * * Locking: This function must be called under rcu_read_lock(). opp is a rcu * protected pointer. The reason for the same is that the opp pointer which is * returned will remain valid for use with opp_get_{voltage, freq} only while * under the locked area. The pointer returned must be used prior to unlocking * with rcu_read_unlock() to maintain the integrity of the pointer. */ struct dev_pm_opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq, u32 flags) { struct dev_pm_opp *opp; if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) { /* The freq is an upper bound. opp should be lower */ opp = dev_pm_opp_find_freq_floor(dev, freq); /* If not available, use the closest opp */ if (opp == ERR_PTR(-ERANGE)) opp = dev_pm_opp_find_freq_ceil(dev, freq); } else { /* The freq is an lower bound. opp should be higher */ opp = dev_pm_opp_find_freq_ceil(dev, freq); /* If not available, use the closest opp */ if (opp == ERR_PTR(-ERANGE)) opp = dev_pm_opp_find_freq_floor(dev, freq); } return opp; } /** * devfreq_register_opp_notifier() - Helper function to get devfreq notified * for any changes in the OPP availability * changes * @dev: The devfreq user device. (parent of devfreq) * @devfreq: The devfreq object. */ int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq) { struct srcu_notifier_head *nh; int ret = 0; rcu_read_lock(); nh = dev_pm_opp_get_notifier(dev); if (IS_ERR(nh)) ret = PTR_ERR(nh); rcu_read_unlock(); if (!ret) ret = srcu_notifier_chain_register(nh, &devfreq->nb); return ret; } /** * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq * notified for any changes in the OPP * availability changes anymore. * @dev: The devfreq user device. (parent of devfreq) * @devfreq: The devfreq object. * * At exit() callback of devfreq_dev_profile, this must be included if * devfreq_recommended_opp is used. */ int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq) { struct srcu_notifier_head *nh; int ret = 0; rcu_read_lock(); nh = dev_pm_opp_get_notifier(dev); if (IS_ERR(nh)) ret = PTR_ERR(nh); rcu_read_unlock(); if (!ret) ret = srcu_notifier_chain_unregister(nh, &devfreq->nb); return ret; } MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); MODULE_DESCRIPTION("devfreq class support"); MODULE_LICENSE("GPL");
rlnelson-git/linux-nvme
drivers/devfreq/devfreq.c
C
gpl-2.0
29,004
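The devfreq core above is driven entirely through devfreq_add_device()/devfreq_remove_device(). A minimal sketch of the driver side follows; it is illustrative only: the demo_* names are invented, the profile field names are assumed to match this era of the API, and "simple_ondemand" is the governor name the core would look up via find_devfreq_governor():

#include <linux/devfreq.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical operating points for the sketch, in Hz. */
static unsigned int demo_freq_table[] = { 100000000, 200000000, 400000000 };

static int demo_target(struct device *dev, unsigned long *freq, u32 flags)
{
	/* A real driver would pick an OPP (see devfreq_recommended_opp()
	 * above) and program its clock here. */
	dev_dbg(dev, "requested %lu Hz\n", *freq);
	return 0;
}

static int demo_get_dev_status(struct device *dev,
			       struct devfreq_dev_status *stat)
{
	/* Feed load data to the governor; zeros keep the sketch simple. */
	stat->busy_time = 0;
	stat->total_time = 1;
	stat->current_frequency = 200000000;
	return 0;
}

static struct devfreq_dev_profile demo_profile = {
	.initial_freq	= 200000000,
	.polling_ms	= 100,
	.target		= demo_target,
	.get_dev_status	= demo_get_dev_status,
	.freq_table	= demo_freq_table,
	.max_state	= ARRAY_SIZE(demo_freq_table),
};

static int demo_probe(struct platform_device *pdev)
{
	struct devfreq *df;

	df = devfreq_add_device(&pdev->dev, &demo_profile,
				"simple_ondemand", NULL);
	if (IS_ERR(df))
		return PTR_ERR(df);
	platform_set_drvdata(pdev, df);
	return 0;
}

static int demo_remove(struct platform_device *pdev)
{
	return devfreq_remove_device(platform_get_drvdata(pdev));
}

static struct platform_driver demo_bus_driver = {
	.probe	= demo_probe,
	.remove	= demo_remove,
	.driver	= {
		.name = "demo-bus",
	},
};
module_platform_driver(demo_bus_driver);
MODULE_LICENSE("GPL");

Because devfreq_add_device() records only governor_name, the governor module may load after the device registers; devfreq_add_governor() then walks devfreq_list and starts the governor on every matching device, exactly as in the loop above.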
<?php namespace Drupal\Core\Database\Driver\mysql\Install; use Drupal\Core\Database\Install\Tasks as InstallTasks; use Drupal\Core\Database\Database; use Drupal\Core\Database\Driver\mysql\Connection; use Drupal\Core\Database\DatabaseNotFoundException; /** * Specifies installation tasks for MySQL and equivalent databases. */ class Tasks extends InstallTasks { /** * Minimum required MySQLnd version. */ const MYSQLND_MINIMUM_VERSION = '5.0.9'; /** * Minimum required libmysqlclient version. */ const LIBMYSQLCLIENT_MINIMUM_VERSION = '5.5.3'; /** * The PDO driver name for MySQL and equivalent databases. * * @var string */ protected $pdoDriver = 'mysql'; /** * Constructs a \Drupal\Core\Database\Driver\mysql\Install\Tasks object. */ public function __construct() { $this->tasks[] = array( 'arguments' => array(), 'function' => 'ensureInnoDbAvailable', ); } /** * {@inheritdoc} */ public function name() { return t('MySQL, MariaDB, Percona Server, or equivalent'); } /** * {@inheritdoc} */ public function minimumVersion() { return '5.5.3'; } /** * {@inheritdoc} */ protected function connect() { try { // This doesn't actually test the connection. db_set_active(); // Now actually do a check. try { Database::getConnection(); } catch (\Exception $e) { // Detect utf8mb4 incompability. if ($e->getCode() == Connection::UNSUPPORTED_CHARSET || ($e->getCode() == Connection::SQLSTATE_SYNTAX_ERROR && $e->errorInfo[1] == Connection::UNKNOWN_CHARSET)) { $this->fail(t('Your MySQL server and PHP MySQL driver must support utf8mb4 character encoding. Make sure to use a database system that supports this (such as MySQL/MariaDB/Percona 5.5.3 and up), and that the utf8mb4 character set is compiled in. See the <a href=":documentation" target="_blank">MySQL documentation</a> for more information.', array(':documentation' => 'https://dev.mysql.com/doc/refman/5.0/en/cannot-initialize-character-set.html'))); $info = Database::getConnectionInfo(); $info_copy = $info; // Set a flag to fall back to utf8. Note: this flag should only be // used here and is for internal use only. $info_copy['default']['_dsn_utf8_fallback'] = TRUE; // In order to change the Database::$databaseInfo array, we need to // remove the active connection, then re-add it with the new info. Database::removeConnection('default'); Database::addConnectionInfo('default', 'default', $info_copy['default']); // Connect with the new database info, using the utf8 character set so // that we can run the checkEngineVersion test. Database::getConnection(); // Revert to the old settings. Database::removeConnection('default'); Database::addConnectionInfo('default', 'default', $info['default']); } else { // Rethrow the exception. throw $e; } } $this->pass('Drupal can CONNECT to the database ok.'); } catch (\Exception $e) { // Attempt to create the database if it is not found. if ($e->getCode() == Connection::DATABASE_NOT_FOUND) { // Remove the database string from connection info. $connection_info = Database::getConnectionInfo(); $database = $connection_info['default']['database']; unset($connection_info['default']['database']); // In order to change the Database::$databaseInfo array, need to remove // the active connection, then re-add it with the new info. Database::removeConnection('default'); Database::addConnectionInfo('default', 'default', $connection_info['default']); try { // Now, attempt the connection again; if it's successful, attempt to // create the database. 
Database::getConnection()->createDatabase($database); } catch (DatabaseNotFoundException $e) { // Still no dice; probably a permission issue. Raise the error to the // installer. $this->fail(t('Database %database not found. The server reports the following message when attempting to create the database: %error.', array('%database' => $database, '%error' => $e->getMessage()))); } } else { // Database connection failed for some other reason than the database // not existing. $this->fail(t('Failed to connect to your database server. The server reports the following message: %error.<ul><li>Is the database server running?</li><li>Does the database exist or does the database user have sufficient privileges to create the database?</li><li>Have you entered the correct database name?</li><li>Have you entered the correct username and password?</li><li>Have you entered the correct database hostname?</li></ul>', array('%error' => $e->getMessage()))); return FALSE; } } return TRUE; } /** * {@inheritdoc} */ public function getFormOptions(array $database) { $form = parent::getFormOptions($database); if (empty($form['advanced_options']['port']['#default_value'])) { $form['advanced_options']['port']['#default_value'] = '3306'; } return $form; } /** * Ensure that InnoDB is available. */ function ensureInnoDbAvailable() { $engines = Database::getConnection()->query('SHOW ENGINES')->fetchAllKeyed(); if (isset($engines['MyISAM']) && $engines['MyISAM'] == 'DEFAULT' && !isset($engines['InnoDB'])) { $this->fail(t('The MyISAM storage engine is not supported.')); } } /** * {@inheritdoc} */ protected function checkEngineVersion() { parent::checkEngineVersion(); // Ensure that the MySQL driver supports utf8mb4 encoding. $version = Database::getConnection()->clientVersion(); if (FALSE !== strpos($version, 'mysqlnd')) { // The mysqlnd driver supports utf8mb4 starting at version 5.0.9. $version = preg_replace('/^\D+([\d.]+).*/', '$1', $version); if (version_compare($version, self::MYSQLND_MINIMUM_VERSION, '<')) { $this->fail(t("The MySQLnd driver version %version is less than the minimum required version. Upgrade to MySQLnd version %mysqlnd_minimum_version or up, or alternatively switch mysql drivers to libmysqlclient version %libmysqlclient_minimum_version or up.", array('%version' => $version, '%mysqlnd_minimum_version' => self::MYSQLND_MINIMUM_VERSION, '%libmysqlclient_minimum_version' => self::LIBMYSQLCLIENT_MINIMUM_VERSION))); } } else { // The libmysqlclient driver supports utf8mb4 starting at version 5.5.3. if (version_compare($version, self::LIBMYSQLCLIENT_MINIMUM_VERSION, '<')) { $this->fail(t("The libmysqlclient driver version %version is less than the minimum required version. Upgrade to libmysqlclient version %libmysqlclient_minimum_version or up, or alternatively switch mysql drivers to MySQLnd version %mysqlnd_minimum_version or up.", array('%version' => $version, '%libmysqlclient_minimum_version' => self::LIBMYSQLCLIENT_MINIMUM_VERSION, '%mysqlnd_minimum_version' => self::MYSQLND_MINIMUM_VERSION))); } } } }
scaule/acmecinema
core/lib/Drupal/Core/Database/Driver/mysql/Install/Tasks.php
PHP
gpl-2.0
7,303
/* * Copyright (C) 2010 Broadcom * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/init.h> #include <linux/irqchip.h> #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/clk/bcm2835.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> static void __init bcm2835_init(void) { int ret; bcm2835_init_clocks(); ret = of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); if (ret) { pr_err("of_platform_populate failed: %d\n", ret); BUG(); } } static const char * const bcm2835_compat[] = { "brcm,bcm2835", NULL }; DT_MACHINE_START(BCM2835, "BCM2835") .init_machine = bcm2835_init, .dt_compat = bcm2835_compat MACHINE_END
danielschwierzeck/linux
arch/arm/mach-bcm/board_bcm2835.c
C
gpl-2.0
1,161
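The board file above does little more than call of_platform_populate(), which turns every node in the device tree into a platform device. A hedged sketch of the other half, a driver that would bind to one of those populated nodes; the "acme,demo" compatible string and the driver name are made up for illustration:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/* "acme,demo" is a fictional compatible string for this sketch. */
static const struct of_device_id demo_of_match[] = {
	{ .compatible = "acme,demo" },
	{ }
};
MODULE_DEVICE_TABLE(of, demo_of_match);

static int demo_probe(struct platform_device *pdev)
{
	/* Called once of_platform_populate() has created the device
	 * and the OF core matches it against demo_of_match. */
	dev_info(&pdev->dev, "bound to node %s\n", pdev->dev.of_node->name);
	return 0;
}

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.driver	= {
		.name = "acme-demo",
		.of_match_table = demo_of_match,
	},
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL");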
/* * PQ2 ADS-style PCI interrupt controller * * Copyright 2007 Freescale Semiconductor, Inc. * Author: Scott Wood <scottwood@freescale.com> * * Loosely based on mpc82xx ADS support by Vitaly Bordug <vbordug@ru.mvista.com> * Copyright (c) 2006 MontaVista Software, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include <linux/init.h> #include <linux/spinlock.h> #include <linux/irq.h> #include <linux/types.h> #include <linux/bootmem.h> #include <linux/slab.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/cpm2.h> #include "pq2.h" static DEFINE_RAW_SPINLOCK(pci_pic_lock); struct pq2ads_pci_pic { struct device_node *node; struct irq_host *host; struct { u32 stat; u32 mask; } __iomem *regs; }; #define NUM_IRQS 32 static void pq2ads_pci_mask_irq(unsigned int virq) { struct pq2ads_pci_pic *priv = get_irq_chip_data(virq); int irq = NUM_IRQS - virq_to_hw(virq) - 1; if (irq != -1) { unsigned long flags; raw_spin_lock_irqsave(&pci_pic_lock, flags); setbits32(&priv->regs->mask, 1 << irq); mb(); raw_spin_unlock_irqrestore(&pci_pic_lock, flags); } } static void pq2ads_pci_unmask_irq(unsigned int virq) { struct pq2ads_pci_pic *priv = get_irq_chip_data(virq); int irq = NUM_IRQS - virq_to_hw(virq) - 1; if (irq != -1) { unsigned long flags; raw_spin_lock_irqsave(&pci_pic_lock, flags); clrbits32(&priv->regs->mask, 1 << irq); raw_spin_unlock_irqrestore(&pci_pic_lock, flags); } } static struct irq_chip pq2ads_pci_ic = { .name = "PQ2 ADS PCI", .end = pq2ads_pci_unmask_irq, .mask = pq2ads_pci_mask_irq, .mask_ack = pq2ads_pci_mask_irq, .ack = pq2ads_pci_mask_irq, .unmask = pq2ads_pci_unmask_irq, .enable = pq2ads_pci_unmask_irq, .disable = pq2ads_pci_mask_irq }; static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc) { struct pq2ads_pci_pic *priv = desc->handler_data; u32 stat, mask, pend; int bit; for (;;) { stat = in_be32(&priv->regs->stat); mask = in_be32(&priv->regs->mask); pend = stat & ~mask; if (!pend) break; for (bit = 0; pend != 0; ++bit, pend <<= 1) { if (pend & 0x80000000) { int virq = irq_linear_revmap(priv->host, bit); generic_handle_irq(virq); } } } } static int pci_pic_host_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw) { irq_to_desc(virq)->status |= IRQ_LEVEL; set_irq_chip_data(virq, h->host_data); set_irq_chip_and_handler(virq, &pq2ads_pci_ic, handle_level_irq); return 0; } static void pci_host_unmap(struct irq_host *h, unsigned int virq) { /* remove chip and handler */ set_irq_chip_data(virq, NULL); set_irq_chip(virq, NULL); } static struct irq_host_ops pci_pic_host_ops = { .map = pci_pic_host_map, .unmap = pci_host_unmap, }; int __init pq2ads_pci_init_irq(void) { struct pq2ads_pci_pic *priv; struct irq_host *host; struct device_node *np; int ret = -ENODEV; int irq; np = of_find_compatible_node(NULL, NULL, "fsl,pq2ads-pci-pic"); if (!np) { printk(KERN_ERR "No pci pic node in device tree.\n"); of_node_put(np); goto out; } irq = irq_of_parse_and_map(np, 0); if (irq == NO_IRQ) { printk(KERN_ERR "No interrupt in pci pic node.\n"); of_node_put(np); goto out; } priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { of_node_put(np); ret = -ENOMEM; goto out_unmap_irq; } /* PCI interrupt controller registers: status and mask */ priv->regs = of_iomap(np, 0); if (!priv->regs) { printk(KERN_ERR "Cannot map PCI PIC registers.\n"); goto out_free_bootmem; } /* mask all PCI interrupts */ out_be32(&priv->regs->mask, ~0); 
mb(); host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, NUM_IRQS, &pci_pic_host_ops, NUM_IRQS); if (!host) { ret = -ENOMEM; goto out_unmap_regs; } priv->host = host; host->host_data = priv; set_irq_data(irq, priv); set_irq_chained_handler(irq, pq2ads_pci_irq_demux); of_node_put(np); return 0; out_unmap_regs: iounmap(priv->regs); out_free_bootmem: kfree(priv); /* priv came from kzalloc(), not bootmem */ of_node_put(np); out_unmap_irq: irq_dispose_mapping(irq); out: return ret; }
ics2/kernelics
arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
C
gpl-2.0
4,300
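The demux loop in pq2ads_pci_irq_demux() scans the pending word MSB-first: loop index (hwirq) 0 corresponds to status bit 31, which is also why the mask/unmask helpers compute NUM_IRQS - virq_to_hw(virq) - 1. The same scan, extracted into a small standalone C program for clarity; demux() and the sample values are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define NUM_IRQS 32

/* Walk a 32-bit pending word exactly like pq2ads_pci_irq_demux():
 * loop index 0 corresponds to the most significant status bit. */
static void demux(uint32_t stat, uint32_t mask)
{
	uint32_t pend = stat & ~mask;
	int bit;

	for (bit = 0; pend != 0; ++bit, pend <<= 1)
		if (pend & 0x80000000u)
			printf("dispatch hwirq %d (status bit %d)\n",
			       bit, NUM_IRQS - bit - 1);
}

int main(void)
{
	demux(0x80000001u, 0);	/* hwirq 0 and hwirq 31 pending */
	return 0;
}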
/* * Platform dependent support for SGI SN * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (c) 2000-2008 Silicon Graphics, Inc. All Rights Reserved. */ #include <linux/irq.h> #include <linux/spinlock.h> #include <linux/init.h> #include <linux/rculist.h> #include <linux/slab.h> #include <asm/sn/addrs.h> #include <asm/sn/arch.h> #include <asm/sn/intr.h> #include <asm/sn/pcibr_provider.h> #include <asm/sn/pcibus_provider_defs.h> #include <asm/sn/pcidev.h> #include <asm/sn/shub_mmr.h> #include <asm/sn/sn_sal.h> #include <asm/sn/sn_feature_sets.h> static void force_interrupt(int irq); static void register_intr_pda(struct sn_irq_info *sn_irq_info); static void unregister_intr_pda(struct sn_irq_info *sn_irq_info); int sn_force_interrupt_flag = 1; extern int sn_ioif_inited; struct list_head **sn_irq_lh; static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */ u64 sn_intr_alloc(nasid_t local_nasid, int local_widget, struct sn_irq_info *sn_irq_info, int req_irq, nasid_t req_nasid, int req_slice) { struct ia64_sal_retval ret_stuff; ret_stuff.status = 0; ret_stuff.v0 = 0; SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT, (u64) SAL_INTR_ALLOC, (u64) local_nasid, (u64) local_widget, __pa(sn_irq_info), (u64) req_irq, (u64) req_nasid, (u64) req_slice); return ret_stuff.status; } void sn_intr_free(nasid_t local_nasid, int local_widget, struct sn_irq_info *sn_irq_info) { struct ia64_sal_retval ret_stuff; ret_stuff.status = 0; ret_stuff.v0 = 0; SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT, (u64) SAL_INTR_FREE, (u64) local_nasid, (u64) local_widget, (u64) sn_irq_info->irq_irq, (u64) sn_irq_info->irq_cookie, 0, 0); } u64 sn_intr_redirect(nasid_t local_nasid, int local_widget, struct sn_irq_info *sn_irq_info, nasid_t req_nasid, int req_slice) { struct ia64_sal_retval ret_stuff; ret_stuff.status = 0; ret_stuff.v0 = 0; SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT, (u64) SAL_INTR_REDIRECT, (u64) local_nasid, (u64) local_widget, __pa(sn_irq_info), (u64) req_nasid, (u64) req_slice, 0); return ret_stuff.status; } static unsigned int sn_startup_irq(unsigned int irq) { return 0; } static void sn_shutdown_irq(unsigned int irq) { } extern void ia64_mca_register_cpev(int); static void sn_disable_irq(unsigned int irq) { if (irq == local_vector_to_irq(IA64_CPE_VECTOR)) ia64_mca_register_cpev(0); } static void sn_enable_irq(unsigned int irq) { if (irq == local_vector_to_irq(IA64_CPE_VECTOR)) ia64_mca_register_cpev(irq); } static void sn_ack_irq(unsigned int irq) { u64 event_occurred, mask; irq = irq & 0xff; event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)); mask = event_occurred & SH_ALL_INT_MASK; HUB_S((u64*)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), mask); __set_bit(irq, (volatile void *)pda->sn_in_service_ivecs); move_native_irq(irq); } static void sn_end_irq(unsigned int irq) { int ivec; u64 event_occurred; ivec = irq & 0xff; if (ivec == SGI_UART_VECTOR) { event_occurred = HUB_L((u64*)LOCAL_MMR_ADDR (SH_EVENT_OCCURRED)); /* If the UART bit is set here, we may have received an * interrupt from the UART that the driver missed. To * make sure, we IPI ourselves to force us to look again. 
*/ if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) { platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR, IA64_IPI_DM_INT, 0); } } __clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs); if (sn_force_interrupt_flag) force_interrupt(irq); } static void sn_irq_info_free(struct rcu_head *head); struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info, nasid_t nasid, int slice) { int vector; int cpuid; #ifdef CONFIG_SMP int cpuphys; #endif int64_t bridge; int local_widget, status; nasid_t local_nasid; struct sn_irq_info *new_irq_info; struct sn_pcibus_provider *pci_provider; bridge = (u64) sn_irq_info->irq_bridge; if (!bridge) { return NULL; /* irq is not a device interrupt */ } local_nasid = NASID_GET(bridge); if (local_nasid & 1) local_widget = TIO_SWIN_WIDGETNUM(bridge); else local_widget = SWIN_WIDGETNUM(bridge); vector = sn_irq_info->irq_irq; /* Make use of SAL_INTR_REDIRECT if PROM supports it */ status = sn_intr_redirect(local_nasid, local_widget, sn_irq_info, nasid, slice); if (!status) { new_irq_info = sn_irq_info; goto finish_up; } /* * PROM does not support SAL_INTR_REDIRECT, or it failed. * Revert to old method. */ new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC); if (new_irq_info == NULL) return NULL; memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info)); /* Free the old PROM new_irq_info structure */ sn_intr_free(local_nasid, local_widget, new_irq_info); unregister_intr_pda(new_irq_info); /* allocate a new PROM new_irq_info struct */ status = sn_intr_alloc(local_nasid, local_widget, new_irq_info, vector, nasid, slice); /* SAL call failed */ if (status) { kfree(new_irq_info); return NULL; } register_intr_pda(new_irq_info); spin_lock(&sn_irq_info_lock); list_replace_rcu(&sn_irq_info->list, &new_irq_info->list); spin_unlock(&sn_irq_info_lock); call_rcu(&sn_irq_info->rcu, sn_irq_info_free); finish_up: /* Update kernels new_irq_info with new target info */ cpuid = nasid_slice_to_cpuid(new_irq_info->irq_nasid, new_irq_info->irq_slice); new_irq_info->irq_cpuid = cpuid; pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type]; /* * If this represents a line interrupt, target it. If it's * an msi (irq_int_bit < 0), it's already targeted. */ if (new_irq_info->irq_int_bit >= 0 && pci_provider && pci_provider->target_interrupt) (pci_provider->target_interrupt)(new_irq_info); #ifdef CONFIG_SMP cpuphys = cpu_physical_id(cpuid); set_irq_affinity_info((vector & 0xff), cpuphys, 0); #endif return new_irq_info; } static int sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask) { struct sn_irq_info *sn_irq_info, *sn_irq_info_safe; nasid_t nasid; int slice; nasid = cpuid_to_nasid(cpumask_first(mask)); slice = cpuid_to_slice(cpumask_first(mask)); list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe, sn_irq_lh[irq], list) (void)sn_retarget_vector(sn_irq_info, nasid, slice); return 0; } #ifdef CONFIG_SMP void sn_set_err_irq_affinity(unsigned int irq) { /* * On systems which support CPU disabling (SHub2), all error interrupts * are targetted at the boot CPU. 
*/ if (is_shub2() && sn_prom_feature_available(PRF_CPU_DISABLE_SUPPORT)) set_irq_affinity_info(irq, cpu_physical_id(0), 0); } #else void sn_set_err_irq_affinity(unsigned int irq) { } #endif static void sn_mask_irq(unsigned int irq) { } static void sn_unmask_irq(unsigned int irq) { } struct irq_chip irq_type_sn = { .name = "SN hub", .startup = sn_startup_irq, .shutdown = sn_shutdown_irq, .enable = sn_enable_irq, .disable = sn_disable_irq, .ack = sn_ack_irq, .end = sn_end_irq, .mask = sn_mask_irq, .unmask = sn_unmask_irq, .set_affinity = sn_set_affinity_irq }; ia64_vector sn_irq_to_vector(int irq) { if (irq >= IA64_NUM_VECTORS) return 0; return (ia64_vector)irq; } unsigned int sn_local_vector_to_irq(u8 vector) { return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector)); } void sn_irq_init(void) { int i; struct irq_desc *base_desc = irq_desc; ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR; ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR; for (i = 0; i < NR_IRQS; i++) { if (base_desc[i].chip == &no_irq_chip) { base_desc[i].chip = &irq_type_sn; } } } static void register_intr_pda(struct sn_irq_info *sn_irq_info) { int irq = sn_irq_info->irq_irq; int cpu = sn_irq_info->irq_cpuid; if (pdacpu(cpu)->sn_last_irq < irq) { pdacpu(cpu)->sn_last_irq = irq; } if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq) pdacpu(cpu)->sn_first_irq = irq; } static void unregister_intr_pda(struct sn_irq_info *sn_irq_info) { int irq = sn_irq_info->irq_irq; int cpu = sn_irq_info->irq_cpuid; struct sn_irq_info *tmp_irq_info; int i, foundmatch; rcu_read_lock(); if (pdacpu(cpu)->sn_last_irq == irq) { foundmatch = 0; for (i = pdacpu(cpu)->sn_last_irq - 1; i && !foundmatch; i--) { list_for_each_entry_rcu(tmp_irq_info, sn_irq_lh[i], list) { if (tmp_irq_info->irq_cpuid == cpu) { foundmatch = 1; break; } } } pdacpu(cpu)->sn_last_irq = i; } if (pdacpu(cpu)->sn_first_irq == irq) { foundmatch = 0; for (i = pdacpu(cpu)->sn_first_irq + 1; i < NR_IRQS && !foundmatch; i++) { list_for_each_entry_rcu(tmp_irq_info, sn_irq_lh[i], list) { if (tmp_irq_info->irq_cpuid == cpu) { foundmatch = 1; break; } } } pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i); } rcu_read_unlock(); } static void sn_irq_info_free(struct rcu_head *head) { struct sn_irq_info *sn_irq_info; sn_irq_info = container_of(head, struct sn_irq_info, rcu); kfree(sn_irq_info); } void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info) { nasid_t nasid = sn_irq_info->irq_nasid; int slice = sn_irq_info->irq_slice; int cpu = nasid_slice_to_cpuid(nasid, slice); #ifdef CONFIG_SMP int cpuphys; struct irq_desc *desc; #endif pci_dev_get(pci_dev); sn_irq_info->irq_cpuid = cpu; sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev); /* link it into the sn_irq[irq] list */ spin_lock(&sn_irq_info_lock); list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]); reserve_irq_vector(sn_irq_info->irq_irq); spin_unlock(&sn_irq_info_lock); register_intr_pda(sn_irq_info); #ifdef CONFIG_SMP cpuphys = cpu_physical_id(cpu); set_irq_affinity_info(sn_irq_info->irq_irq, cpuphys, 0); desc = irq_to_desc(sn_irq_info->irq_irq); /* * Affinity was set by the PROM, prevent it from * being reset by the request_irq() path. 
*/ desc->status |= IRQ_AFFINITY_SET; #endif } void sn_irq_unfixup(struct pci_dev *pci_dev) { struct sn_irq_info *sn_irq_info; /* Only cleanup IRQ stuff if this device has a host bus context */ if (!SN_PCIDEV_BUSSOFT(pci_dev)) return; sn_irq_info = SN_PCIDEV_INFO(pci_dev)->pdi_sn_irq_info; if (!sn_irq_info) return; if (!sn_irq_info->irq_irq) { kfree(sn_irq_info); return; } unregister_intr_pda(sn_irq_info); spin_lock(&sn_irq_info_lock); list_del_rcu(&sn_irq_info->list); spin_unlock(&sn_irq_info_lock); if (list_empty(sn_irq_lh[sn_irq_info->irq_irq])) free_irq_vector(sn_irq_info->irq_irq); call_rcu(&sn_irq_info->rcu, sn_irq_info_free); pci_dev_put(pci_dev); } static inline void sn_call_force_intr_provider(struct sn_irq_info *sn_irq_info) { struct sn_pcibus_provider *pci_provider; pci_provider = sn_pci_provider[sn_irq_info->irq_bridge_type]; /* Don't force an interrupt if the irq has been disabled */ if (!(irq_desc[sn_irq_info->irq_irq].status & IRQ_DISABLED) && pci_provider && pci_provider->force_interrupt) (*pci_provider->force_interrupt)(sn_irq_info); } static void force_interrupt(int irq) { struct sn_irq_info *sn_irq_info; if (!sn_ioif_inited) return; rcu_read_lock(); list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[irq], list) sn_call_force_intr_provider(sn_irq_info); rcu_read_unlock(); } /* * Check for lost interrupts. If the PIC int_status reg. says that * an interrupt has been sent, but not handled, and the interrupt * is not pending in either the cpu irr regs or in the soft irr regs, * and the interrupt is not in service, then the interrupt may have * been lost. Force an interrupt on that pin. It is possible that * the interrupt is in flight, so we may generate a spurious interrupt, * but we should never miss a real lost interrupt. */ static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info) { u64 regval; struct pcidev_info *pcidev_info; struct pcibus_info *pcibus_info; /* * Bridge types attached to TIO (anything but PIC) do not need this WAR * since they do not target Shub II interrupt registers. If that * ever changes, this check needs to accomodate. */ if (sn_irq_info->irq_bridge_type != PCIIO_ASIC_TYPE_PIC) return; pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo; if (!pcidev_info) return; pcibus_info = (struct pcibus_info *)pcidev_info->pdi_host_pcidev_info-> pdi_pcibus_info; regval = pcireg_intr_status_get(pcibus_info); if (!ia64_get_irr(irq_to_vector(irq))) { if (!test_bit(irq, pda->sn_in_service_ivecs)) { regval &= 0xff; if (sn_irq_info->irq_int_bit & regval & sn_irq_info->irq_last_intr) { regval &= ~(sn_irq_info->irq_int_bit & regval); sn_call_force_intr_provider(sn_irq_info); } } } sn_irq_info->irq_last_intr = regval; } void sn_lb_int_war_check(void) { struct sn_irq_info *sn_irq_info; int i; if (!sn_ioif_inited || pda->sn_first_irq == 0) return; rcu_read_lock(); for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) { list_for_each_entry_rcu(sn_irq_info, sn_irq_lh[i], list) { sn_check_intr(i, sn_irq_info); } } rcu_read_unlock(); } void __init sn_irq_lh_init(void) { int i; sn_irq_lh = kmalloc(sizeof(struct list_head *) * NR_IRQS, GFP_KERNEL); if (!sn_irq_lh) panic("SN PCI INIT: Failed to allocate memory for PCI init\n"); for (i = 0; i < NR_IRQS; i++) { sn_irq_lh[i] = kmalloc(sizeof(struct list_head), GFP_KERNEL); if (!sn_irq_lh[i]) panic("SN PCI INIT: Failed IRQ memory allocation\n"); INIT_LIST_HEAD(sn_irq_lh[i]); } }
ProjectOpenCannibal/GingerKernel-VM701-2.6.35
arch/ia64/sn/kernel/irq.c
C
gpl-2.0
13,753
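sn_retarget_vector() above uses the classic RCU replacement pattern: readers traverse sn_irq_lh[irq] under rcu_read_lock(), so the writer swaps entries with list_replace_rcu() and defers the free to call_rcu(). A minimal sketch of that pattern, with a hypothetical struct item standing in for struct sn_irq_info:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {			/* stands in for struct sn_irq_info */
	int payload;
	struct list_head list;
	struct rcu_head rcu;
};

static LIST_HEAD(items);
static DEFINE_SPINLOCK(items_lock);

static void item_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct item, rcu));
}

/* Replace @old with a copy carrying @new_payload; concurrent readers
 * always see either the old or the new entry, never a torn list. */
static int item_replace(struct item *old, int new_payload)
{
	struct item *new = kmalloc(sizeof(*new), GFP_KERNEL);

	if (!new)
		return -ENOMEM;
	*new = *old;
	new->payload = new_payload;

	spin_lock(&items_lock);
	list_replace_rcu(&old->list, &new->list);
	spin_unlock(&items_lock);

	call_rcu(&old->rcu, item_free_rcu);	/* free after grace period */
	return 0;
}

/* Reader side: no lock, just an RCU read-side critical section. */
static int item_read_first_payload(void)
{
	struct item *it;
	int val = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(it, &items, list) {
		val = it->payload;
		break;
	}
	rcu_read_unlock();
	return val;
}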
/* Include margin and padding in the width calculation of input and textarea. */ input, textarea { -webkit-box-sizing: border-box; -moz-box-sizing: border-box; box-sizing: border-box; } input[type="text"], input[type="password"], input[type="checkbox"], input[type="color"], input[type="date"], input[type="datetime"], input[type="datetime-local"], input[type="email"], input[type="month"], input[type="number"], input[type="password"], input[type="search"], input[type="radio"], input[type="tel"], input[type="text"], input[type="time"], input[type="url"], input[type="week"], select, textarea { border: 1px solid #ddd; -webkit-box-shadow: inset 0 1px 2px rgba( 0, 0, 0, 0.07 ); box-shadow: inset 0 1px 2px rgba( 0, 0, 0, 0.07 ); background-color: #fff; color: #333; outline: none; -webkit-transition: 0.05s border-color ease-in-out; transition: 0.05s border-color ease-in-out; } input[type="text"]:focus, input[type="password"]:focus, input[type="color"]:focus, input[type="date"]:focus, input[type="datetime"]:focus, input[type="datetime-local"]:focus, input[type="email"]:focus, input[type="month"]:focus, input[type="number"]:focus, input[type="password"]:focus, input[type="search"]:focus, input[type="tel"]:focus, input[type="text"]:focus, input[type="time"]:focus, input[type="url"]:focus, input[type="week"]:focus, input[type="checkbox"]:focus, input[type="radio"]:focus, select:focus, textarea:focus { border-color: #5b9dd9; -webkit-box-shadow: 0 0 2px rgba( 30, 140, 190, 0.8 ); box-shadow: 0 0 2px rgba( 30, 140, 190, 0.8 ); } /* @noflip */ input[type="email"], input[type="url"] { direction: ltr; } input[type="checkbox"], input[type="radio"] { border: 1px solid #bbb; background: #fff; color: #555; clear: none; cursor: pointer; display: inline-block; line-height: 0; height: 16px; margin: -4px 4px 0 0; outline: 0; padding: 0 !important; text-align: center; vertical-align: middle; width: 16px; min-width: 16px; -webkit-appearance: none; -webkit-box-shadow: inset 0 1px 2px rgba( 0, 0, 0, 0.1 ); box-shadow: inset 0 1px 2px rgba( 0, 0, 0, 0.1 ); -webkit-transition: .05s border-color ease-in-out; transition: .05s border-color ease-in-out; } input[type="radio"]:checked + label:before { color: #888; } .wp-core-ui input[type="reset"]:hover, .wp-core-ui input[type="reset"]:active { color: #2ea2cc; } td > input[type="checkbox"], .wp-admin p input[type="checkbox"], .wp-admin p input[type="radio"] { margin-top: 0; } .wp-admin p label input[type="checkbox"] { margin-top: -4px; } .wp-admin p label input[type="radio"] { margin-top: -2px; } input[type="radio"] { -webkit-border-radius: 50%; border-radius: 50%; margin-right: 4px; line-height: 10px; } input[type="checkbox"]:checked:before, input[type="radio"]:checked:before { float: left; display: inline-block; vertical-align: middle; width: 16px; font: normal 21px/1 'dashicons'; speak: none; -webkit-font-smoothing: antialiased; -moz-osx-font-smoothing: grayscale; } input[type="checkbox"]:checked:before { content: '\f147'; margin: -3px 0 0 -4px; color: #1e8cbe; } input[type="radio"]:checked:before { content: '\2022'; text-indent: -9999px; -webkit-border-radius: 50px; border-radius: 50px; font-size: 24px; width: 6px; height: 6px; margin: 4px; line-height: 16px; background-color: #1e8cbe; } @-moz-document url-prefix() { input[type="checkbox"], input[type="radio"], .form-table input.tog { margin-bottom: -1px; } } /* Search */ input[type="search"] { -webkit-appearance: textfield; } input[type="search"]::-webkit-search-decoration { display: none; } .ie8 input[type="password"] { 
font-family: sans-serif; } textarea, input, select, button { font-family: inherit; font-size: inherit; font-weight: inherit; } textarea, input, select { font-size: 14px; padding: 3px 5px; -webkit-border-radius: 0; border-radius: 0; /* Reset mobile webkit's default element styling */ } textarea { overflow: auto; padding: 2px 6px; line-height: 1.4; } .wp-admin input[type="file"] { padding: 3px 0; } label { cursor: pointer; } input, select { margin: 1px; padding: 3px 5px; } input.code { padding-top: 6px; } textarea.code { line-height: 1.4; padding: 4px 6px 1px 6px; } input.readonly, input[readonly], textarea.readonly, textarea[readonly] { background-color: #eee; } :-moz-placeholder, .wp-core-ui :-moz-placeholder { color: #a9a9a9; } .form-invalid { background-color: #ffebe8 !important; } .form-invalid input, .form-invalid select { border-color: #c00 !important; } .form-input-tip { color: #666; } input:disabled, input.disabled, select:disabled, select.disabled, textarea:disabled, textarea.disabled { background: rgba( 255, 255, 255, 0.5 ); border-color: rgba( 222, 222, 222, 0.75 ); -webkit-box-shadow: inset 0 1px 2px rgba( 0, 0, 0, 0.04 ); box-shadow: inset 0 1px 2px rgba( 0, 0, 0, 0.04 ); color: rgba( 51, 51, 51, 0.5 ); } input[type="file"]:disabled, input[type="file"].disabled, input[type="range"]:disabled, input[type="range"].disabled { background: none; -webkit-box-shadow: none; box-shadow: none; } input[type="checkbox"]:disabled, input[type="checkbox"].disabled, input[type="radio"]:disabled, input[type="radio"].disabled, input[type="checkbox"]:disabled:checked:before, input[type="checkbox"].disabled:checked:before, input[type="radio"]:disabled:checked:before, input[type="radio"].disabled:checked:before { opacity: 0.7; } /*------------------------------------------------------------------------------ 2.0 - Forms ------------------------------------------------------------------------------*/ .wp-admin select { padding: 2px; line-height: 28px; height: 28px; vertical-align: middle; } .wp-admin .button-cancel { padding: 0 5px; line-height: 2; } .meta-box-sortables select { max-width: 100%; } .wp-admin select[multiple] { height: auto; } .submit { padding: 1.5em 0; margin: 5px 0; -webkit-border-bottom-left-radius: 3px; border-bottom-left-radius: 3px; -webkit-border-bottom-right-radius: 3px; border-bottom-right-radius: 3px; border: none; } form p.submit a.cancel:hover { text-decoration: none; } p.submit { text-align: left; max-width: 100%; margin-top: 20px; padding-top: 10px; } .textright p.submit { border: none; text-align: right; } table.form-table + p.submit, table.form-table + input + p.submit, table.form-table + input + input + p.submit { border-top: none; padding-top: 0; } #minor-publishing-actions input, #major-publishing-actions input, #minor-publishing-actions .preview { text-align: center; } textarea.all-options, input.all-options { width: 250px; } input.large-text, textarea.large-text { width: 99%; } input.regular-text, #adduser .form-field input { width: 25em; } input.small-text { width: 50px; padding: 1px 6px; } input[type="number"].small-text { width: 65px; } #doaction, #doaction2, #post-query-submit { margin: 1px 8px 0 0; } .tablenav #changeit, .tablenav #delete_all, .tablenav #clear-recent-list, .wp-filter #delete_all { margin-top: 1px; } .tablenav .actions select { float: left; margin-right: 6px; max-width: 200px; } .ie8 .tablenav .actions select { width: 155px; } .ie8 .tablenav .actions select#cat { width: 200px; } #timezone_string option { margin-left: 1em; } #upload-form label 
{ color: #777; } label, #your-profile label + a { vertical-align: middle; } fieldset label, #your-profile label + a { vertical-align: middle; } .options-media-php label[for*="_size_"], #misc-publishing-actions label { vertical-align: baseline; } #misc-publishing-actions label[for="post_status"]:before { content: '\f173'; display: inline-block; font: normal 20px/1 'dashicons'; speak: none; left: -1px; padding: 0 5px 0 0; position: relative; top: 0; text-decoration: none !important; vertical-align: top; -webkit-font-smoothing: antialiased; -moz-osx-font-smoothing: grayscale; } #pass-strength-result { background-color: #eee; border: 1px solid #ddd; float: left; margin: 13px 5px 5px 1px; padding: 3px 5px; text-align: center; width: 200px; display: none; } #pass-strength-result.short { background-color: #ffa0a0; border-color: #f04040; } #pass-strength-result.bad { background-color: #ffb78c; border-color: #ff853c; } #pass-strength-result.good { background-color: #ffec8b; border-color: #fc0; } #pass-strength-result.strong { background-color: #c3ff88; border-color: #8dff1c; } .indicator-hint { padding-top: 8px; } p.search-box { float: right; margin: 0; } .network-admin.themes-php p.search-box { clear: left; } .search-box input[name="s"], .tablenav .search-plugins input[name="s"], .tagsdiv .newtag { float: left; height: 28px; margin: 0 4px 0 0; } input[type="text"].ui-autocomplete-loading { background: transparent url(../images/loading.gif) no-repeat right center; visibility: visible; } ul#add-to-blog-users { margin: 0 0 0 14px; } .ui-autocomplete-input.open { -webkit-border-bottom-right-radius: 0; border-bottom-right-radius: 0; -webkit-border-bottom-left-radius: 0; border-bottom-left-radius: 0; } .ui-autocomplete { padding: 0; margin: 0; list-style: none; position: absolute; z-index: 10000; -webkit-border-bottom-right-radius: 3px; border-bottom-right-radius: 3px; -webkit-border-bottom-left-radius: 3px; border-bottom-left-radius: 3px; border: 1px solid #aaa; background-color: #efefef; } .ui-autocomplete li { margin-bottom: 0; white-space: nowrap; text-align: left; } .ui-autocomplete li a { display: block; height: 100%; padding: 4px 10px; color: #444; } .ui-autocomplete li a.ui-state-focus { background-color: #ddd; cursor: pointer; } /*------------------------------------------------------------------------------ 15.0 - Comments Screen ------------------------------------------------------------------------------*/ .form-table { border-collapse: collapse; margin-top: 0.5em; width: 100%; clear: both; } .form-table, .form-table td, .form-table th, .form-table td p, .form-wrap label { font-size: 14px; } .form-table td { margin-bottom: 9px; padding: 15px 10px; line-height: 1.3; vertical-align: middle; } .form-table th, .form-wrap label { color: #222; font-weight: normal; text-shadow: none; vertical-align: baseline; } .form-table th { vertical-align: top; text-align: left; padding: 20px 10px 20px 0; width: 200px; line-height: 1.3; font-weight: 600; } .form-table th.th-full { width: auto; font-weight: 400; } .form-table td p { margin-top: 4px; margin-bottom: 0; } .form-table td fieldset label { margin: 0.25em 0 0.5em !important; display: inline-block; } .form-table td fieldset label, .form-table td fieldset p, .form-table td fieldset li { line-height: 1.4em; } .form-table input.tog, .form-table input[type="radio"] { margin-top: -4px; margin-right: 4px; float: none; } .form-table .pre { padding: 8px; margin: 0; } table.form-table td .updated { font-size: 13px; } 
/*------------------------------------------------------------------------------ 18.0 - Users ------------------------------------------------------------------------------*/ #profile-page .form-table textarea { width: 500px; margin-bottom: 6px; } #profile-page .form-table #rich_editing { margin-right: 5px } #your-profile legend { font-size: 22px; } #display_name { width: 15em; } #createuser .form-field input { width: 25em; } .color-option { display: inline-block; width: 24%; padding: 5px 15px 15px; -webkit-box-sizing: border-box; -moz-box-sizing: border-box; box-sizing: border-box; margin-bottom: 3px; } .color-option:hover, .color-option.selected { background: #ddd; } .color-palette { width: 100%; border-spacing: 0; border-collapse: collapse; } .color-palette td { height: 20px; padding: 0; border: none; } .color-option { cursor: pointer; } /*------------------------------------------------------------------------------ 19.0 - Tools ------------------------------------------------------------------------------*/ .tool-box .title { margin: 8px 0; font-size: 18px; font-weight: normal; line-height: 24px; } .pressthis { margin: 20px 0; } .pressthis a, .pressthis a:hover, .pressthis a:focus, .pressthis a:active { display: inline-block; position: relative; cursor: move; color: #333; background: #e6e6e6; -webkit-border-radius: 5px; border-radius: 5px; border: 1px solid #b4b4b4; font-style: normal; line-height: 16px; font-size: 14px; text-decoration: none; } .pressthis a:active { outline: none; } .pressthis a:hover:after { -webkit-transform: skew(20deg) rotate(9deg); -ms-transform: skew(20deg) rotate(9deg); transform: skew(20deg) rotate(9deg); -webkit-box-shadow: 0 10px 8px rgba(0, 0, 0, 0.7); box-shadow: 0 10px 8px rgba(0, 0, 0, 0.7); } .pressthis a span { display: inline-block; margin: 0px 0 0; padding: 0px 12px 8px 9px; } .pressthis a span:before { color: #777; font: normal 20px/1 'dashicons'; content:'\f157'; position: relative; display: inline-block; top: 4px; margin-right: 4px; } .pressthis a:after { content: ''; width: 70%; height: 55%; z-index: -1; position: absolute; right: 10px; bottom: 9px; background: transparent; -webkit-transform: skew(20deg) rotate(6deg); -ms-transform: skew(20deg) rotate(6deg); transform: skew(20deg) rotate(6deg); -webkit-box-shadow: 0 10px 8px rgba(0, 0, 0, 0.6); box-shadow: 0 10px 8px rgba(0, 0, 0, 0.6); } /*------------------------------------------------------------------------------ 20.0 - Settings ------------------------------------------------------------------------------*/ #utc-time, #local-time { padding-left: 25px; font-style: italic; } .defaultavatarpicker .avatar { margin: 2px 0; vertical-align: middle; } .options-general-php .spinner { float: none; margin: -3px 3px; } /* =Media Queries -------------------------------------------------------------- */ @media screen and ( max-width: 782px ) { /* Input Elements */ textarea { -webkit-appearance: none; } input[type=text], input[type="search"], input[type=password], input[type="number"] { -webkit-appearance: none; padding: 6px 10px; } input.code { padding-bottom: 5px; padding-top: 10px; } input[type="checkbox"], .widefat th input[type="checkbox"] { -webkit-appearance: none; padding: 10px; } .widefat th input[type="checkbox"] { margin-bottom: 8px; } input[type="checkbox"]:checked:before, .widefat th input[type="checkbox"]:before { font: normal 30px/1 'Dashicons'; margin: -3px -5px; } input[type="radio"], input[type="checkbox"] { height: 25px; width: 25px; } .wp-admin p input[type="checkbox"], .wp-admin p 
input[type="radio"] { margin-top: -3px; } input[type="radio"]:checked:before { vertical-align: middle; width: 9px; height: 9px; margin: 7px; line-height: 16px; } .wp-upload-form input[type="submit"] { margin-top: 10px; } #wpbody select { height: 36px; font-size: 16px; } .wp-admin .button-cancel { padding: 0; font-size: 14px; } #createuser .form-field input { width: 100%; } .form-table { -webkit-box-sizing: border-box; -moz-box-sizing: border-box; box-sizing: border-box; } .form-table th, .form-table td { display: block; width: auto; vertical-align: middle; } .form-table .color-palette td { display: table-cell; width: 15px; } .form-table table.color-palette { margin-right: 10px; } textarea, input { font-size: 16px; } .form-table td input[type="text"], .form-table td input[type="password"], .form-table td select, .form-table td textarea, .form-table span.description, #profile-page .form-table textarea { width: 100%; font-size: 16px; line-height: 1.5; padding: 7px 10px; display: block; max-width: none; -webkit-box-sizing: border-box; -moz-box-sizing: border-box; box-sizing: border-box; } #wpbody .form-table td select { height: 40px; } input[type="text"].small-text, input[type="search"].small-text, input[type="password"].small-text, input[type="number"].small-text, input[type="number"].small-text, .form-table input[type="text"].small-text { width: auto; max-width: 55px; display: inline; padding: 3px 6px; margin: 0 3px; } #pass-strength-result { width: 100%; -webkit-box-sizing: border-box; -moz-box-sizing: border-box; box-sizing: border-box; padding: 8px; } p.search-box { float: none; position: absolute; bottom: 0; width: 98%; height: 90px; margin-bottom: 20px; } p.search-box input[name="s"] { height: auto; float: none; width: 100%; margin-bottom: 10px; vertical-align: middle; -webkit-appearance: none; } p.search-box input[type="submit"] { margin-bottom: 10px; } .form-table span.description { padding: 4px 0 0; line-height: 1.4em; } .form-table th { padding-top: 10px; padding-bottom: 0; border-bottom: 0; } .form-table td { padding-top: 8px; padding-left: 0; } .form-table input.regular-text { width: 100%; } .form-table label { font-size: 14px; } .form-table fieldset label { display: block; } #utc-time { margin-top: 10px; } #utc-time, #local-time { display: block; float: none; padding: 0; line-height: 2; } } @media only screen and (max-width: 768px) { .form-field input, .form-field textarea { width: 99%; } .form-wrap .form-field { padding:0; } /* users */ #profile-page .form-table textarea { max-width: 400px; width: auto; } } /* Smartphone */ @media screen and (max-width: 600px) { /* Color Picker Options */ .color-option { width: 49%; } }
phdduarte/isabellaoliveira
wp-admin/css/forms.css
CSS
gpl-2.0
17,595
/* * Yama Linux Security Module * * Author: Kees Cook <keescook@chromium.org> * * Copyright (C) 2010 Canonical, Ltd. * Copyright (C) 2011 The Chromium OS Authors. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, as * published by the Free Software Foundation. * */ #include <linux/security.h> #include <linux/sysctl.h> #include <linux/ptrace.h> #include <linux/prctl.h> #include <linux/ratelimit.h> static int ptrace_scope = 1; /* describe a ptrace relationship for potential exception */ struct ptrace_relation { struct task_struct *tracer; struct task_struct *tracee; struct list_head node; }; static LIST_HEAD(ptracer_relations); static DEFINE_SPINLOCK(ptracer_relations_lock); /** * yama_ptracer_add - add/replace an exception for this tracer/tracee pair * @tracer: the task_struct of the process doing the ptrace * @tracee: the task_struct of the process to be ptraced * * Each tracee can have, at most, one tracer registered. Each time this * is called, the prior registered tracer will be replaced for the tracee. * * Returns 0 if relationship was added, -ve on error. */ static int yama_ptracer_add(struct task_struct *tracer, struct task_struct *tracee) { int rc = 0; struct ptrace_relation *added; struct ptrace_relation *entry, *relation = NULL; added = kmalloc(sizeof(*added), GFP_KERNEL); if (!added) return -ENOMEM; spin_lock_bh(&ptracer_relations_lock); list_for_each_entry(entry, &ptracer_relations, node) if (entry->tracee == tracee) { relation = entry; break; } if (!relation) { relation = added; relation->tracee = tracee; list_add(&relation->node, &ptracer_relations); } relation->tracer = tracer; spin_unlock_bh(&ptracer_relations_lock); if (added != relation) kfree(added); return rc; } /** * yama_ptracer_del - remove exceptions related to the given tasks * @tracer: remove any relation where tracer task matches * @tracee: remove any relation where tracee task matches */ static void yama_ptracer_del(struct task_struct *tracer, struct task_struct *tracee) { struct ptrace_relation *relation, *safe; spin_lock_bh(&ptracer_relations_lock); list_for_each_entry_safe(relation, safe, &ptracer_relations, node) if (relation->tracee == tracee || (tracer && relation->tracer == tracer)) { list_del(&relation->node); kfree(relation); } spin_unlock_bh(&ptracer_relations_lock); } /** * yama_task_free - check for task_pid to remove from exception list * @task: task being removed */ static void yama_task_free(struct task_struct *task) { yama_ptracer_del(task, task); } /** * yama_task_prctl - check for Yama-specific prctl operations * @option: operation * @arg2: argument * @arg3: argument * @arg4: argument * @arg5: argument * * Return 0 on success, -ve on error. -ENOSYS is returned when Yama * does not handle the given option. */ static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5) { int rc; struct task_struct *myself = current; rc = cap_task_prctl(option, arg2, arg3, arg4, arg5); if (rc != -ENOSYS) return rc; switch (option) { case PR_SET_PTRACER: /* Since a thread can call prctl(), find the group leader * before calling _add() or _del() on it, since we want * process-level granularity of control. The tracer group * leader checking is handled later when walking the ancestry * at the time of PTRACE_ATTACH check. 
*/ rcu_read_lock(); if (!thread_group_leader(myself)) myself = rcu_dereference(myself->group_leader); get_task_struct(myself); rcu_read_unlock(); if (arg2 == 0) { yama_ptracer_del(NULL, myself); rc = 0; } else if (arg2 == PR_SET_PTRACER_ANY || (int)arg2 == -1) { rc = yama_ptracer_add(NULL, myself); } else { struct task_struct *tracer; rcu_read_lock(); tracer = find_task_by_vpid(arg2); if (tracer) get_task_struct(tracer); else rc = -EINVAL; rcu_read_unlock(); if (tracer) { rc = yama_ptracer_add(tracer, myself); put_task_struct(tracer); } } put_task_struct(myself); break; } return rc; } /** * task_is_descendant - walk up a process family tree looking for a match * @parent: the process to compare against while walking up from child * @child: the process to start from while looking upwards for parent * * Returns 1 if child is a descendant of parent, 0 if not. */ static int task_is_descendant(struct task_struct *parent, struct task_struct *child) { int rc = 0; struct task_struct *walker = child; if (!parent || !child) return 0; rcu_read_lock(); if (!thread_group_leader(parent)) parent = rcu_dereference(parent->group_leader); while (walker->pid > 0) { if (!thread_group_leader(walker)) walker = rcu_dereference(walker->group_leader); if (walker == parent) { rc = 1; break; } walker = rcu_dereference(walker->real_parent); } rcu_read_unlock(); return rc; } /** * ptracer_exception_found - tracer registered as exception for this tracee * @tracer: the task_struct of the process attempting ptrace * @tracee: the task_struct of the process to be ptraced * * Returns 1 if tracer has is ptracer exception ancestor for tracee. */ static int ptracer_exception_found(struct task_struct *tracer, struct task_struct *tracee) { int rc = 0; struct ptrace_relation *relation; struct task_struct *parent = NULL; bool found = false; spin_lock_bh(&ptracer_relations_lock); rcu_read_lock(); if (!thread_group_leader(tracee)) tracee = rcu_dereference(tracee->group_leader); list_for_each_entry(relation, &ptracer_relations, node) if (relation->tracee == tracee) { parent = relation->tracer; found = true; break; } if (found && (parent == NULL || task_is_descendant(parent, tracer))) rc = 1; rcu_read_unlock(); spin_unlock_bh(&ptracer_relations_lock); return rc; } /** * yama_ptrace_access_check - validate PTRACE_ATTACH calls * @child: task that current task is attempting to ptrace * @mode: ptrace attach mode * * Returns 0 if following the ptrace is allowed, -ve on error. */ static int yama_ptrace_access_check(struct task_struct *child, unsigned int mode) { int rc; /* If standard caps disallows it, so does Yama. We should * only tighten restrictions further. 
*/ rc = cap_ptrace_access_check(child, mode); if (rc) return rc; /* require ptrace target be a child of ptracer on attach */ if (mode == PTRACE_MODE_ATTACH && ptrace_scope && !task_is_descendant(current, child) && !ptracer_exception_found(current, child) && !capable(CAP_SYS_PTRACE)) rc = -EPERM; if (rc) { char name[sizeof(current->comm)]; printk_ratelimited(KERN_NOTICE "ptrace of non-child" " pid %d was attempted by: %s (pid %d)\n", child->pid, get_task_comm(name, current), current->pid); } return rc; } static struct security_operations yama_ops = { .name = "yama", .ptrace_access_check = yama_ptrace_access_check, .task_prctl = yama_task_prctl, .task_free = yama_task_free, }; #ifdef CONFIG_SYSCTL static int zero; static int one = 1; struct ctl_path yama_sysctl_path[] = { { .procname = "kernel", }, { .procname = "yama", }, { } }; static struct ctl_table yama_sysctl_table[] = { { .procname = "ptrace_scope", .data = &ptrace_scope, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, { } }; #endif /* CONFIG_SYSCTL */ static __init int yama_init(void) { if (!security_module_enable(&yama_ops)) return 0; printk(KERN_INFO "Yama: becoming mindful.\n"); if (register_security(&yama_ops)) panic("Yama: kernel registration failed.\n"); #ifdef CONFIG_SYSCTL if (!register_sysctl_paths(yama_sysctl_path, yama_sysctl_table)) panic("Yama: sysctl registration failed.\n"); #endif return 0; } security_initcall(yama_init);
Tesla-M-Devices/android_kernel_motorola_msm8226
security/yama/yama_lsm.c
C
gpl-2.0
8,044
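A minimal userspace sketch of the prctl() interface the Yama module above implements: a debuggee whitelists one specific (non-ancestor) tracer before it attaches. PR_SET_PTRACER and PR_SET_PTRACER_ANY are the real constants from <linux/prctl.h>; the helper name and the pid 1234 are illustrative only.

#include <stdio.h>
#include <sys/types.h>
#include <sys/prctl.h>

#ifndef PR_SET_PTRACER
#define PR_SET_PTRACER 0x59616d61		/* "Yama" */
#endif
#ifndef PR_SET_PTRACER_ANY
#define PR_SET_PTRACER_ANY ((unsigned long)-1)
#endif

/* Register 'tracer' as our ptrace exception under kernel.yama.ptrace_scope=1.
 * Passing 0 clears the exception; PR_SET_PTRACER_ANY allows any process. */
static int allow_tracer(pid_t tracer)
{
	return prctl(PR_SET_PTRACER, (unsigned long)tracer, 0, 0, 0);
}

int main(void)
{
	if (allow_tracer(1234) != 0)	/* 1234: hypothetical debugger pid */
		perror("PR_SET_PTRACER");
	return 0;
}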
/* * Sun3 SCSI stuff by Erik Verbruggen (erik@bigmama.xtdnet.nl) * * Sun3 DMA routines added by Sam Creasey (sammy@sammy.net) * * VME support added by Sam Creasey * * Adapted from sun3_scsi.c -- see there for other headers * * TODO: modify this driver to support multiple Sun3 SCSI VME boards * */ #define AUTOSENSE #include <linux/types.h> #include <linux/stddef.h> #include <linux/ctype.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/signal.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/blkdev.h> #include <asm/io.h> #include <asm/system.h> #include <asm/sun3ints.h> #include <asm/dvma.h> #include <asm/idprom.h> #include <asm/machines.h> #define SUN3_SCSI_VME #undef SUN3_SCSI_DEBUG /* dma on! */ #define REAL_DMA #include "scsi.h" #include "initio.h" #include <scsi/scsi_host.h> #include "sun3_scsi.h" extern int sun3_map_test(unsigned long, char *); #define USE_WRAPPER /*#define RESET_BOOT */ #define DRIVER_SETUP #define NDEBUG 0 /* * BUG can be used to trigger a strange code-size related hang on 2.1 kernels */ #ifdef BUG #undef RESET_BOOT #undef DRIVER_SETUP #endif /* #define SUPPORT_TAGS */ //#define ENABLE_IRQ() enable_irq( SUN3_VEC_VMESCSI0 ); #define ENABLE_IRQ() static irqreturn_t scsi_sun3_intr(int irq, void *dummy); static inline unsigned char sun3scsi_read(int reg); static inline void sun3scsi_write(int reg, int value); static int setup_can_queue = -1; module_param(setup_can_queue, int, 0); static int setup_cmd_per_lun = -1; module_param(setup_cmd_per_lun, int, 0); static int setup_sg_tablesize = -1; module_param(setup_sg_tablesize, int, 0); #ifdef SUPPORT_TAGS static int setup_use_tagged_queuing = -1; module_param(setup_use_tagged_queuing, int, 0); #endif static int setup_hostid = -1; module_param(setup_hostid, int, 0); static struct scsi_cmnd *sun3_dma_setup_done = NULL; #define AFTER_RESET_DELAY (HZ/2) /* ms to wait after hitting dma regs */ #define SUN3_DMA_DELAY 10 /* dvma buffer to allocate -- 32k should hopefully be more than sufficient */ #define SUN3_DVMA_BUFSIZE 0xe000 /* minimum number of bytes to do dma on */ #define SUN3_DMA_MINSIZE 128 static volatile unsigned char *sun3_scsi_regp; static volatile struct sun3_dma_regs *dregs; #ifdef OLDDMA static unsigned char *dmabuf = NULL; /* dma memory buffer */ #endif static unsigned char *sun3_dma_orig_addr = NULL; static unsigned long sun3_dma_orig_count = 0; static int sun3_dma_active = 0; static unsigned long last_residual = 0; /* * NCR 5380 register access functions */ static inline unsigned char sun3scsi_read(int reg) { return( sun3_scsi_regp[reg] ); } static inline void sun3scsi_write(int reg, int value) { sun3_scsi_regp[reg] = value; } /* * XXX: status debug */ static struct Scsi_Host *default_instance; /* * Function : int sun3scsi_detect(struct scsi_host_template * tpnt) * * Purpose : initializes mac NCR5380 driver based on the * command line / compile time port and irq definitions. * * Inputs : tpnt - template for this SCSI adapter. * * Returns : 1 if a host adapter was found, 0 if not. 
* */ static int sun3scsi_detect(struct scsi_host_template * tpnt) { unsigned long ioaddr, irq = 0; static int called = 0; struct Scsi_Host *instance; int i; unsigned long addrs[3] = { IOBASE_SUN3_VMESCSI, IOBASE_SUN3_VMESCSI + 0x4000, 0 }; unsigned long vecs[3] = { SUN3_VEC_VMESCSI0, SUN3_VEC_VMESCSI1, 0 }; /* check that this machine has an onboard 5380 */ switch(idprom->id_machtype) { case SM_SUN3|SM_3_160: case SM_SUN3|SM_3_260: break; default: return 0; } if(called) return 0; tpnt->proc_name = "Sun3 5380 VME SCSI"; /* setup variables */ tpnt->can_queue = (setup_can_queue > 0) ? setup_can_queue : CAN_QUEUE; tpnt->cmd_per_lun = (setup_cmd_per_lun > 0) ? setup_cmd_per_lun : CMD_PER_LUN; tpnt->sg_tablesize = (setup_sg_tablesize >= 0) ? setup_sg_tablesize : SG_TABLESIZE; if (setup_hostid >= 0) tpnt->this_id = setup_hostid; else { /* use 7 as default */ tpnt->this_id = 7; } ioaddr = 0; for(i = 0; addrs[i] != 0; i++) { unsigned char x; ioaddr = (unsigned long)sun3_ioremap(addrs[i], PAGE_SIZE, SUN3_PAGE_TYPE_VME16); irq = vecs[i]; sun3_scsi_regp = (unsigned char *)ioaddr; dregs = (struct sun3_dma_regs *)(((unsigned char *)ioaddr) + 8); if(sun3_map_test((unsigned long)dregs, &x)) { unsigned short oldcsr; oldcsr = dregs->csr; dregs->csr = 0; udelay(SUN3_DMA_DELAY); if(dregs->csr == 0x1400) break; dregs->csr = oldcsr; } iounmap((void *)ioaddr); ioaddr = 0; } if(!ioaddr) return 0; #ifdef SUPPORT_TAGS if (setup_use_tagged_queuing < 0) setup_use_tagged_queuing = USE_TAGGED_QUEUING; #endif instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata)); if(instance == NULL) return 0; default_instance = instance; instance->io_port = (unsigned long) ioaddr; instance->irq = irq; NCR5380_init(instance, 0); instance->n_io_port = 32; ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0; if (request_irq(instance->irq, scsi_sun3_intr, 0, "Sun3SCSI-5380VME", instance)) { #ifndef REAL_DMA printk("scsi%d: IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); instance->irq = SCSI_IRQ_NONE; #else printk("scsi%d: IRQ%d not free, bailing out\n", instance->host_no, instance->irq); return 0; #endif } printk("scsi%d: Sun3 5380 VME at port %lX irq", instance->host_no, instance->io_port); if (instance->irq == SCSI_IRQ_NONE) printk ("s disabled"); else printk (" %d", instance->irq); printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d", instance->can_queue, instance->cmd_per_lun, SUN3SCSI_PUBLIC_RELEASE); printk("\nscsi%d:", instance->host_no); NCR5380_print_options(instance); printk("\n"); dregs->csr = 0; udelay(SUN3_DMA_DELAY); dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR; udelay(SUN3_DMA_DELAY); dregs->fifo_count = 0; dregs->fifo_count_hi = 0; dregs->dma_addr_hi = 0; dregs->dma_addr_lo = 0; dregs->dma_count_hi = 0; dregs->dma_count_lo = 0; dregs->ivect = VME_DATA24 | (instance->irq & 0xff); called = 1; #ifdef RESET_BOOT sun3_scsi_reset_boot(instance); #endif return 1; } int sun3scsi_release (struct Scsi_Host *shpnt) { if (shpnt->irq != SCSI_IRQ_NONE) free_irq(shpnt->irq, shpnt); iounmap((void *)sun3_scsi_regp); return 0; } #ifdef RESET_BOOT /* * Our 'bus reset on boot' function */ static void sun3_scsi_reset_boot(struct Scsi_Host *instance) { unsigned long end; NCR5380_local_declare(); NCR5380_setup(instance); /* * Do a SCSI reset to clean up the bus during initialization. No * messing with the queues, interrupts, or locks necessary here. */ printk( "Sun3 SCSI: resetting the SCSI bus..." 
); /* switch off SCSI IRQ - catch an interrupt without IRQ bit set else */ // sun3_disable_irq( IRQ_SUN3_SCSI ); /* get in phase */ NCR5380_write( TARGET_COMMAND_REG, PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) )); /* assert RST */ NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST ); /* The min. reset hold time is 25us, so 40us should be enough */ udelay( 50 ); /* reset RST and interrupt */ NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE ); NCR5380_read( RESET_PARITY_INTERRUPT_REG ); for( end = jiffies + AFTER_RESET_DELAY; time_before(jiffies, end); ) barrier(); /* switch on SCSI IRQ again */ // sun3_enable_irq( IRQ_SUN3_SCSI ); printk( " done\n" ); } #endif static const char * sun3scsi_info (struct Scsi_Host *spnt) { return ""; } // safe bits for the CSR #define CSR_GOOD 0x060f static irqreturn_t scsi_sun3_intr(int irq, void *dummy) { unsigned short csr = dregs->csr; int handled = 0; dregs->csr &= ~CSR_DMA_ENABLE; #ifdef SUN3_SCSI_DEBUG printk("scsi_intr csr %x\n", csr); #endif if(csr & ~CSR_GOOD) { if(csr & CSR_DMA_BUSERR) { printk("scsi%d: bus error in dma\n", default_instance->host_no); #ifdef SUN3_SCSI_DEBUG printk("scsi: residual %x count %x addr %p dmaaddr %x\n", dregs->fifo_count, dregs->dma_count_lo | (dregs->dma_count_hi << 16), sun3_dma_orig_addr, dregs->dma_addr_lo | (dregs->dma_addr_hi << 16)); #endif } if(csr & CSR_DMA_CONFLICT) { printk("scsi%d: dma conflict\n", default_instance->host_no); } handled = 1; } if(csr & (CSR_SDB_INT | CSR_DMA_INT)) { NCR5380_intr(irq, dummy); handled = 1; } return IRQ_RETVAL(handled); } /* * Debug stuff - to be called on NMI, or sysrq key. Use at your own risk; * reentering NCR5380_print_status seems to have ugly side effects */ /* this doesn't seem to get used at all -- sam */ #if 0 void sun3_sun3_debug (void) { unsigned long flags; NCR5380_local_declare(); if (default_instance) { local_irq_save(flags); NCR5380_print_status(default_instance); local_irq_restore(flags); } } #endif /* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */ static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int write_flag) { void *addr; if(sun3_dma_orig_addr != NULL) dvma_unmap(sun3_dma_orig_addr); // addr = sun3_dvma_page((unsigned long)data, (unsigned long)dmabuf); addr = (void *)dvma_map_vme((unsigned long) data, count); sun3_dma_orig_addr = addr; sun3_dma_orig_count = count; #ifdef SUN3_SCSI_DEBUG printk("scsi: dma_setup addr %p count %x\n", addr, count); #endif // dregs->fifo_count = 0; #if 0 /* reset fifo */ dregs->csr &= ~CSR_FIFO; dregs->csr |= CSR_FIFO; #endif /* set direction */ if(write_flag) dregs->csr |= CSR_SEND; else dregs->csr &= ~CSR_SEND; /* reset fifo */ // dregs->csr &= ~CSR_FIFO; // dregs->csr |= CSR_FIFO; dregs->csr |= CSR_PACK_ENABLE; dregs->dma_addr_hi = ((unsigned long)addr >> 16); dregs->dma_addr_lo = ((unsigned long)addr & 0xffff); dregs->dma_count_hi = 0; dregs->dma_count_lo = 0; dregs->fifo_count_hi = 0; dregs->fifo_count = 0; #ifdef SUN3_SCSI_DEBUG printk("scsi: dma_setup done csr %x\n", dregs->csr); #endif return count; } static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance) { return last_residual; } static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted, struct scsi_cmnd *cmd, int write_flag) { if(blk_fs_request(cmd->request)) return wanted; else return 0; } static int sun3scsi_dma_start(unsigned long count, char *data) { unsigned short csr; csr = dregs->csr; #ifdef SUN3_SCSI_DEBUG printk("scsi: dma_start data %p count %x csr %x fifo %x\n", 
data, count, csr, dregs->fifo_count); #endif dregs->dma_count_hi = (sun3_dma_orig_count >> 16); dregs->dma_count_lo = (sun3_dma_orig_count & 0xffff); dregs->fifo_count_hi = (sun3_dma_orig_count >> 16); dregs->fifo_count = (sun3_dma_orig_count & 0xffff); // if(!(csr & CSR_DMA_ENABLE)) // dregs->csr |= CSR_DMA_ENABLE; return 0; } /* clean up after our dma is done */ static int sun3scsi_dma_finish(int write_flag) { unsigned short fifo; int ret = 0; sun3_dma_active = 0; dregs->csr &= ~CSR_DMA_ENABLE; fifo = dregs->fifo_count; if(write_flag) { if((fifo > 0) && (fifo < sun3_dma_orig_count)) fifo++; } last_residual = fifo; #ifdef SUN3_SCSI_DEBUG printk("scsi: residual %x total %x\n", fifo, sun3_dma_orig_count); #endif /* empty bytes from the fifo which didn't make it */ if((!write_flag) && (dregs->csr & CSR_LEFT)) { unsigned char *vaddr; #ifdef SUN3_SCSI_DEBUG printk("scsi: got left over bytes\n"); #endif vaddr = (unsigned char *)dvma_vmetov(sun3_dma_orig_addr); vaddr += (sun3_dma_orig_count - fifo); vaddr--; switch(dregs->csr & CSR_LEFT) { case CSR_LEFT_3: *vaddr = (dregs->bpack_lo & 0xff00) >> 8; vaddr--; case CSR_LEFT_2: *vaddr = (dregs->bpack_hi & 0x00ff); vaddr--; case CSR_LEFT_1: *vaddr = (dregs->bpack_hi & 0xff00) >> 8; break; } } dvma_unmap(sun3_dma_orig_addr); sun3_dma_orig_addr = NULL; dregs->dma_addr_hi = 0; dregs->dma_addr_lo = 0; dregs->dma_count_hi = 0; dregs->dma_count_lo = 0; dregs->fifo_count = 0; dregs->fifo_count_hi = 0; dregs->csr &= ~CSR_SEND; // dregs->csr |= CSR_DMA_ENABLE; #if 0 /* reset fifo */ dregs->csr &= ~CSR_FIFO; dregs->csr |= CSR_FIFO; #endif sun3_dma_setup_done = NULL; return ret; } #include "sun3_NCR5380.c" static struct scsi_host_template driver_template = { .name = SUN3_SCSI_NAME, .detect = sun3scsi_detect, .release = sun3scsi_release, .info = sun3scsi_info, .queuecommand = sun3scsi_queue_command, .eh_abort_handler = sun3scsi_abort, .eh_bus_reset_handler = sun3scsi_bus_reset, .can_queue = CAN_QUEUE, .this_id = 7, .sg_tablesize = SG_TABLESIZE, .cmd_per_lun = CMD_PER_LUN, .use_clustering = DISABLE_CLUSTERING }; #include "scsi_module.c" MODULE_LICENSE("GPL");
droidzone/Supernova-Kernel
drivers/scsi/sun3_scsi_vme.c
C
gpl-2.0
12,926
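The detect routine in the Sun3 VME SCSI driver above probes a zero-terminated table of candidate base addresses and keeps the first one whose DMA CSR answers the expected pattern, unmapping the rest. A stripped-down sketch of that probe-and-fall-back pattern; map_region(), probe_csr() and the base addresses are hypothetical stand-ins for sun3_ioremap(), the CSR test and the real VME addresses.

#include <stddef.h>
#include <stdio.h>

struct region { unsigned long base; };

static struct region *map_region(unsigned long base)	/* fake sun3_ioremap() */
{
	static struct region r;
	r.base = base;
	return &r;
}

static int probe_csr(struct region *r)
{
	return r->base == 0xff4000;	/* pretend only this base answers */
}

static void unmap_region(struct region *r) { (void)r; }	/* fake iounmap() */

/* Try each candidate base in turn; return the first that probes OK. */
static struct region *find_controller(const unsigned long *bases)
{
	size_t i;

	for (i = 0; bases[i] != 0; i++) {	/* table is zero-terminated */
		struct region *r = map_region(bases[i]);

		if (probe_csr(r))
			return r;		/* live controller found */
		unmap_region(r);		/* wrong address: clean up */
	}
	return NULL;				/* nothing answered */
}

int main(void)
{
	unsigned long bases[] = { 0xff0000, 0xff4000, 0 };
	struct region *r = find_controller(bases);

	if (r)
		printf("controller found at 0x%lx\n", r->base);
	return 0;
}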
/* * External interrupt handling for AT32AP CPUs * * Copyright (C) 2006 Atmel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/errno.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/platform_device.h> #include <linux/random.h> #include <linux/slab.h> #include <asm/io.h> /* EIC register offsets */ #define EIC_IER 0x0000 #define EIC_IDR 0x0004 #define EIC_IMR 0x0008 #define EIC_ISR 0x000c #define EIC_ICR 0x0010 #define EIC_MODE 0x0014 #define EIC_EDGE 0x0018 #define EIC_LEVEL 0x001c #define EIC_NMIC 0x0024 /* Bitfields in NMIC */ #define EIC_NMIC_ENABLE (1 << 0) /* Bit manipulation macros */ #define EIC_BIT(name) \ (1 << EIC_##name##_OFFSET) #define EIC_BF(name,value) \ (((value) & ((1 << EIC_##name##_SIZE) - 1)) \ << EIC_##name##_OFFSET) #define EIC_BFEXT(name,value) \ (((value) >> EIC_##name##_OFFSET) \ & ((1 << EIC_##name##_SIZE) - 1)) #define EIC_BFINS(name,value,old) \ (((old) & ~(((1 << EIC_##name##_SIZE) - 1) \ << EIC_##name##_OFFSET)) \ | EIC_BF(name,value)) /* Register access macros */ #define eic_readl(port,reg) \ __raw_readl((port)->regs + EIC_##reg) #define eic_writel(port,reg,value) \ __raw_writel((value), (port)->regs + EIC_##reg) struct eic { void __iomem *regs; struct irq_chip *chip; unsigned int first_irq; }; static struct eic *nmi_eic; static bool nmi_enabled; static void eic_ack_irq(struct irq_data *d) { struct eic *eic = irq_data_get_irq_chip_data(d); eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq)); } static void eic_mask_irq(struct irq_data *d) { struct eic *eic = irq_data_get_irq_chip_data(d); eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq)); } static void eic_mask_ack_irq(struct irq_data *d) { struct eic *eic = irq_data_get_irq_chip_data(d); eic_writel(eic, ICR, 1 << (d->irq - eic->first_irq)); eic_writel(eic, IDR, 1 << (d->irq - eic->first_irq)); } static void eic_unmask_irq(struct irq_data *d) { struct eic *eic = irq_data_get_irq_chip_data(d); eic_writel(eic, IER, 1 << (d->irq - eic->first_irq)); } static int eic_set_irq_type(struct irq_data *d, unsigned int flow_type) { struct eic *eic = irq_data_get_irq_chip_data(d); unsigned int irq = d->irq; unsigned int i = irq - eic->first_irq; u32 mode, edge, level; flow_type &= IRQ_TYPE_SENSE_MASK; if (flow_type == IRQ_TYPE_NONE) flow_type = IRQ_TYPE_LEVEL_LOW; mode = eic_readl(eic, MODE); edge = eic_readl(eic, EDGE); level = eic_readl(eic, LEVEL); switch (flow_type) { case IRQ_TYPE_LEVEL_LOW: mode |= 1 << i; level &= ~(1 << i); break; case IRQ_TYPE_LEVEL_HIGH: mode |= 1 << i; level |= 1 << i; break; case IRQ_TYPE_EDGE_RISING: mode &= ~(1 << i); edge |= 1 << i; break; case IRQ_TYPE_EDGE_FALLING: mode &= ~(1 << i); edge &= ~(1 << i); break; default: return -EINVAL; } eic_writel(eic, MODE, mode); eic_writel(eic, EDGE, edge); eic_writel(eic, LEVEL, level); irqd_set_trigger_type(d, flow_type); if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) __irq_set_handler_locked(irq, handle_level_irq); else __irq_set_handler_locked(irq, handle_edge_irq); return IRQ_SET_MASK_OK_NOCOPY; } static struct irq_chip eic_chip = { .name = "eic", .irq_ack = eic_ack_irq, .irq_mask = eic_mask_irq, .irq_mask_ack = eic_mask_ack_irq, .irq_unmask = eic_unmask_irq, .irq_set_type = eic_set_irq_type, }; static void demux_eic_irq(unsigned int irq, struct irq_desc *desc) { struct eic *eic = irq_desc_get_handler_data(desc); unsigned 
long status, pending; unsigned int i; status = eic_readl(eic, ISR); pending = status & eic_readl(eic, IMR); while (pending) { i = fls(pending) - 1; pending &= ~(1 << i); generic_handle_irq(i + eic->first_irq); } } int nmi_enable(void) { nmi_enabled = true; if (nmi_eic) eic_writel(nmi_eic, NMIC, EIC_NMIC_ENABLE); return 0; } void nmi_disable(void) { if (nmi_eic) eic_writel(nmi_eic, NMIC, 0); nmi_enabled = false; } static int __init eic_probe(struct platform_device *pdev) { struct eic *eic; struct resource *regs; unsigned int i; unsigned int nr_of_irqs; unsigned int int_irq; int ret; u32 pattern; regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); int_irq = platform_get_irq(pdev, 0); if (!regs || (int)int_irq <= 0) { dev_dbg(&pdev->dev, "missing regs and/or irq resource\n"); return -ENXIO; } ret = -ENOMEM; eic = kzalloc(sizeof(struct eic), GFP_KERNEL); if (!eic) { dev_dbg(&pdev->dev, "no memory for eic structure\n"); goto err_kzalloc; } eic->first_irq = EIM_IRQ_BASE + 32 * pdev->id; eic->regs = ioremap(regs->start, regs->end - regs->start + 1); if (!eic->regs) { dev_dbg(&pdev->dev, "failed to map regs\n"); goto err_ioremap; } /* * Find out how many interrupt lines that are actually * implemented in hardware. */ eic_writel(eic, IDR, ~0UL); eic_writel(eic, MODE, ~0UL); pattern = eic_readl(eic, MODE); nr_of_irqs = fls(pattern); /* Trigger on low level unless overridden by driver */ eic_writel(eic, EDGE, 0UL); eic_writel(eic, LEVEL, 0UL); eic->chip = &eic_chip; for (i = 0; i < nr_of_irqs; i++) { irq_set_chip_and_handler(eic->first_irq + i, &eic_chip, handle_level_irq); irq_set_chip_data(eic->first_irq + i, eic); } irq_set_chained_handler(int_irq, demux_eic_irq); irq_set_handler_data(int_irq, eic); if (pdev->id == 0) { nmi_eic = eic; if (nmi_enabled) /* * Someone tried to enable NMI before we were * ready. Do it now. */ nmi_enable(); } dev_info(&pdev->dev, "External Interrupt Controller at 0x%p, IRQ %u\n", eic->regs, int_irq); dev_info(&pdev->dev, "Handling %u external IRQs, starting with IRQ %u\n", nr_of_irqs, eic->first_irq); return 0; err_ioremap: kfree(eic); err_kzalloc: return ret; } static struct platform_driver eic_driver = { .driver = { .name = "at32_eic", }, }; static int __init eic_init(void) { return platform_driver_probe(&eic_driver, eic_probe); } arch_initcall(eic_init);
johnnyslt/OLD_android_kernel_shooter
arch/avr32/mach-at32ap/extint.c
C
gpl-2.0
6,200
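eic_set_irq_type() in the EIC driver above encodes the four IRQ trigger types into three per-line register bits: MODE selects level- vs. edge-sensitivity, and LEVEL/EDGE select the polarity. The same encoding as a standalone helper, operating on shadow words passed in by the caller instead of the memory-mapped registers:

#include <stdint.h>
#include <stdio.h>

enum trigger { LEVEL_LOW, LEVEL_HIGH, EDGE_RISING, EDGE_FALLING };

/* Update the mode/edge/level shadow words for interrupt line i. */
static int encode_trigger(enum trigger t, unsigned int i,
			  uint32_t *mode, uint32_t *edge, uint32_t *level)
{
	switch (t) {
	case LEVEL_LOW:
		*mode |= 1u << i;	/* level-sensitive */
		*level &= ~(1u << i);	/* active low */
		break;
	case LEVEL_HIGH:
		*mode |= 1u << i;
		*level |= 1u << i;	/* active high */
		break;
	case EDGE_RISING:
		*mode &= ~(1u << i);	/* edge-sensitive */
		*edge |= 1u << i;	/* rising edge */
		break;
	case EDGE_FALLING:
		*mode &= ~(1u << i);
		*edge &= ~(1u << i);	/* falling edge */
		break;
	default:
		return -1;		/* mirrors the driver's -EINVAL */
	}
	return 0;
}

int main(void)
{
	uint32_t mode = 0, edge = 0, level = 0;

	encode_trigger(EDGE_RISING, 3, &mode, &edge, &level);
	printf("mode=%#x edge=%#x level=%#x\n", mode, edge, level); /* 0 0x8 0 */
	return 0;
}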
/* * Copyright 2007, Michael Ellerman, IBM Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/msi.h> #include <linux/of_platform.h> #include <linux/debugfs.h> #include <linux/slab.h> #include <asm/dcr.h> #include <asm/machdep.h> #include <asm/prom.h> /* * MSIC registers, specified as offsets from dcr_base */ #define MSIC_CTRL_REG 0x0 /* Base Address registers specify FIFO location in BE memory */ #define MSIC_BASE_ADDR_HI_REG 0x3 #define MSIC_BASE_ADDR_LO_REG 0x4 /* Hold the read/write offsets into the FIFO */ #define MSIC_READ_OFFSET_REG 0x5 #define MSIC_WRITE_OFFSET_REG 0x6 /* MSIC control register flags */ #define MSIC_CTRL_ENABLE 0x0001 #define MSIC_CTRL_FIFO_FULL_ENABLE 0x0002 #define MSIC_CTRL_IRQ_ENABLE 0x0008 #define MSIC_CTRL_FULL_STOP_ENABLE 0x0010 /* * The MSIC can be configured to use a FIFO of 32KB, 64KB, 128KB or 256KB. * Currently we're using a 64KB FIFO size. */ #define MSIC_FIFO_SIZE_SHIFT 16 #define MSIC_FIFO_SIZE_BYTES (1 << MSIC_FIFO_SIZE_SHIFT) /* * To configure the FIFO size as (1 << n) bytes, we write (n - 15) into bits * 8-9 of the MSIC control reg. */ #define MSIC_CTRL_FIFO_SIZE (((MSIC_FIFO_SIZE_SHIFT - 15) << 8) & 0x300) /* * We need to mask the read/write offsets to make sure they stay within * the bounds of the FIFO. Also they should always be 16-byte aligned. */ #define MSIC_FIFO_SIZE_MASK ((MSIC_FIFO_SIZE_BYTES - 1) & ~0xFu) /* Each entry in the FIFO is 16 bytes, the first 4 bytes hold the irq # */ #define MSIC_FIFO_ENTRY_SIZE 0x10 struct axon_msic { struct irq_host *irq_host; __le32 *fifo_virt; dma_addr_t fifo_phys; dcr_host_t dcr_host; u32 read_offset; #ifdef DEBUG u32 __iomem *trigger; #endif }; #ifdef DEBUG void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic); #else static inline void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic) { } #endif static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val) { pr_devel("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n); dcr_write(msic->dcr_host, dcr_n, val); } static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct axon_msic *msic = irq_get_handler_data(irq); u32 write_offset, msi; int idx; int retry = 0; write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG); pr_devel("axon_msi: original write_offset 0x%x\n", write_offset); /* write_offset doesn't wrap properly, so we have to mask it */ write_offset &= MSIC_FIFO_SIZE_MASK; while (msic->read_offset != write_offset && retry < 100) { idx = msic->read_offset / sizeof(__le32); msi = le32_to_cpu(msic->fifo_virt[idx]); msi &= 0xFFFF; pr_devel("axon_msi: woff %x roff %x msi %x\n", write_offset, msic->read_offset, msi); if (msi < NR_IRQS && irq_get_chip_data(msi) == msic) { generic_handle_irq(msi); msic->fifo_virt[idx] = cpu_to_le32(0xffffffff); } else { /* * Reading the MSIC_WRITE_OFFSET_REG does not * reliably flush the outstanding DMA to the * FIFO buffer. Here we were reading stale * data, so we need to retry. 
*/ udelay(1); retry++; pr_devel("axon_msi: invalid irq 0x%x!\n", msi); continue; } if (retry) { pr_devel("axon_msi: late irq 0x%x, retry %d\n", msi, retry); retry = 0; } msic->read_offset += MSIC_FIFO_ENTRY_SIZE; msic->read_offset &= MSIC_FIFO_SIZE_MASK; } if (retry) { printk(KERN_WARNING "axon_msi: irq timed out\n"); msic->read_offset += MSIC_FIFO_ENTRY_SIZE; msic->read_offset &= MSIC_FIFO_SIZE_MASK; } chip->irq_eoi(&desc->irq_data); } static struct axon_msic *find_msi_translator(struct pci_dev *dev) { struct irq_host *irq_host; struct device_node *dn, *tmp; const phandle *ph; struct axon_msic *msic = NULL; dn = of_node_get(pci_device_to_OF_node(dev)); if (!dn) { dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n"); return NULL; } for (; dn; dn = of_get_next_parent(dn)) { ph = of_get_property(dn, "msi-translator", NULL); if (ph) break; } if (!ph) { dev_dbg(&dev->dev, "axon_msi: no msi-translator property found\n"); goto out_error; } tmp = dn; dn = of_find_node_by_phandle(*ph); of_node_put(tmp); if (!dn) { dev_dbg(&dev->dev, "axon_msi: msi-translator doesn't point to a node\n"); goto out_error; } irq_host = irq_find_host(dn); if (!irq_host) { dev_dbg(&dev->dev, "axon_msi: no irq_host found for node %s\n", dn->full_name); goto out_error; } msic = irq_host->host_data; out_error: of_node_put(dn); return msic; } static int axon_msi_check_device(struct pci_dev *dev, int nvec, int type) { if (!find_msi_translator(dev)) return -ENODEV; return 0; } static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg) { struct device_node *dn; struct msi_desc *entry; int len; const u32 *prop; dn = of_node_get(pci_device_to_OF_node(dev)); if (!dn) { dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n"); return -ENODEV; } entry = list_first_entry(&dev->msi_list, struct msi_desc, list); for (; dn; dn = of_get_next_parent(dn)) { if (entry->msi_attrib.is_64) { prop = of_get_property(dn, "msi-address-64", &len); if (prop) break; } prop = of_get_property(dn, "msi-address-32", &len); if (prop) break; } if (!prop) { dev_dbg(&dev->dev, "axon_msi: no msi-address-(32|64) properties found\n"); return -ENOENT; } switch (len) { case 8: msg->address_hi = prop[0]; msg->address_lo = prop[1]; break; case 4: msg->address_hi = 0; msg->address_lo = prop[0]; break; default: dev_dbg(&dev->dev, "axon_msi: malformed msi-address-(32|64) property\n"); of_node_put(dn); return -EINVAL; } of_node_put(dn); return 0; } static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { unsigned int virq, rc; struct msi_desc *entry; struct msi_msg msg; struct axon_msic *msic; msic = find_msi_translator(dev); if (!msic) return -ENODEV; rc = setup_msi_msg_address(dev, &msg); if (rc) return rc; /* We rely on being able to stash a virq in a u16 */ BUILD_BUG_ON(NR_IRQS > 65536); list_for_each_entry(entry, &dev->msi_list, list) { virq = irq_create_direct_mapping(msic->irq_host); if (virq == NO_IRQ) { dev_warn(&dev->dev, "axon_msi: virq allocation failed!\n"); return -1; } dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq); irq_set_msi_desc(virq, entry); msg.data = virq; write_msi_msg(virq, &msg); } return 0; } static void axon_msi_teardown_msi_irqs(struct pci_dev *dev) { struct msi_desc *entry; dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n"); list_for_each_entry(entry, &dev->msi_list, list) { if (entry->irq == NO_IRQ) continue; irq_set_msi_desc(entry->irq, NULL); irq_dispose_mapping(entry->irq); } } static struct irq_chip msic_irq_chip = { .irq_mask = mask_msi_irq, .irq_unmask = unmask_msi_irq, .irq_shutdown = 
mask_msi_irq, .name = "AXON-MSI", }; static int msic_host_map(struct irq_host *h, unsigned int virq, irq_hw_number_t hw) { irq_set_chip_data(virq, h->host_data); irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq); return 0; } static struct irq_host_ops msic_host_ops = { .map = msic_host_map, }; static void axon_msi_shutdown(struct platform_device *device) { struct axon_msic *msic = dev_get_drvdata(&device->dev); u32 tmp; pr_devel("axon_msi: disabling %s\n", msic->irq_host->of_node->full_name); tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG); tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE; msic_dcr_write(msic, MSIC_CTRL_REG, tmp); } static int axon_msi_probe(struct platform_device *device) { struct device_node *dn = device->dev.of_node; struct axon_msic *msic; unsigned int virq; int dcr_base, dcr_len; pr_devel("axon_msi: setting up dn %s\n", dn->full_name); msic = kzalloc(sizeof(struct axon_msic), GFP_KERNEL); if (!msic) { printk(KERN_ERR "axon_msi: couldn't allocate msic for %s\n", dn->full_name); goto out; } dcr_base = dcr_resource_start(dn, 0); dcr_len = dcr_resource_len(dn, 0); if (dcr_base == 0 || dcr_len == 0) { printk(KERN_ERR "axon_msi: couldn't parse dcr properties on %s\n", dn->full_name); goto out_free_msic; } msic->dcr_host = dcr_map(dn, dcr_base, dcr_len); if (!DCR_MAP_OK(msic->dcr_host)) { printk(KERN_ERR "axon_msi: dcr_map failed for %s\n", dn->full_name); goto out_free_msic; } msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, &msic->fifo_phys, GFP_KERNEL); if (!msic->fifo_virt) { printk(KERN_ERR "axon_msi: couldn't allocate fifo for %s\n", dn->full_name); goto out_free_msic; } virq = irq_of_parse_and_map(dn, 0); if (virq == NO_IRQ) { printk(KERN_ERR "axon_msi: irq parse and map failed for %s\n", dn->full_name); goto out_free_fifo; } memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES); msic->irq_host = irq_alloc_host(dn, IRQ_HOST_MAP_NOMAP, NR_IRQS, &msic_host_ops, 0); if (!msic->irq_host) { printk(KERN_ERR "axon_msi: couldn't allocate irq_host for %s\n", dn->full_name); goto out_free_fifo; } msic->irq_host->host_data = msic; irq_set_handler_data(virq, msic); irq_set_chained_handler(virq, axon_msi_cascade); pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq); /* Enable the MSIC hardware */ msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32); msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG, msic->fifo_phys & 0xFFFFFFFF); msic_dcr_write(msic, MSIC_CTRL_REG, MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE | MSIC_CTRL_FIFO_SIZE); msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG) & MSIC_FIFO_SIZE_MASK; dev_set_drvdata(&device->dev, msic); ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs; ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs; ppc_md.msi_check_device = axon_msi_check_device; axon_msi_debug_setup(dn, msic); printk(KERN_DEBUG "axon_msi: setup MSIC on %s\n", dn->full_name); return 0; out_free_fifo: dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt, msic->fifo_phys); out_free_msic: kfree(msic); out: return -1; } static const struct of_device_id axon_msi_device_id[] = { { .compatible = "ibm,axon-msic" }, {} }; static struct platform_driver axon_msi_driver = { .probe = axon_msi_probe, .shutdown = axon_msi_shutdown, .driver = { .name = "axon-msi", .owner = THIS_MODULE, .of_match_table = axon_msi_device_id, }, }; static int __init axon_msi_init(void) { return platform_driver_register(&axon_msi_driver); } subsys_initcall(axon_msi_init); #ifdef DEBUG static int msic_set(void *data, u64 val) { 
struct axon_msic *msic = data; out_le32(msic->trigger, val); return 0; } static int msic_get(void *data, u64 *val) { *val = 0; return 0; } DEFINE_SIMPLE_ATTRIBUTE(fops_msic, msic_get, msic_set, "%llu\n"); void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic) { char name[8]; u64 addr; addr = of_translate_address(dn, of_get_property(dn, "reg", NULL)); if (addr == OF_BAD_ADDR) { pr_devel("axon_msi: couldn't translate reg property\n"); return; } msic->trigger = ioremap(addr, 0x4); if (!msic->trigger) { pr_devel("axon_msi: ioremap failed\n"); return; } snprintf(name, sizeof(name), "msic_%d", of_node_to_nid(dn)); if (!debugfs_create_file(name, 0600, powerpc_debugfs_root, msic, &fops_msic)) { pr_devel("axon_msi: debugfs_create_file failed!\n"); return; } } #endif /* DEBUG */
thesawolf/android_kernel_rockchip_rk3188
arch/powerpc/platforms/cell/axon_msi.c
C
gpl-2.0
11,915
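The MSIC FIFO in axon_msi.c above is a power-of-two ring whose read/write offsets are kept in bounds, and 16-byte aligned, purely by masking (MSIC_FIFO_SIZE_MASK and the read_offset updates in axon_msi_cascade()). The arithmetic in isolation, using the same 64KB FIFO and 16-byte entry size as the driver:

#include <stdint.h>
#include <assert.h>

#define FIFO_SIZE_BYTES (1u << 16)			/* 64KB, as in the driver */
#define FIFO_ENTRY_SIZE 0x10u				/* 16 bytes per entry */
#define FIFO_MASK ((FIFO_SIZE_BYTES - 1) & ~0xFu)	/* in-range + aligned */

/* Advance a FIFO offset by one entry, wrapping at the FIFO size. */
static uint32_t fifo_advance(uint32_t off)
{
	return (off + FIFO_ENTRY_SIZE) & FIFO_MASK;
}

int main(void)
{
	/* The offset after the last entry wraps back to 0. */
	assert(fifo_advance(FIFO_SIZE_BYTES - FIFO_ENTRY_SIZE) == 0);
	/* Stray low bits from hardware are forced back to 16-byte alignment. */
	assert((0x234bu & FIFO_MASK) == 0x2340u);
	return 0;
}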
/* * cbe_regs.c * * Accessor routines for the various MMIO register blocks of the CBE * * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp. */ #include <linux/percpu.h> #include <linux/types.h> #include <linux/module.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <asm/io.h> #include <asm/pgtable.h> #include <asm/prom.h> #include <asm/ptrace.h> #include <asm/cell-regs.h> /* * Current implementation uses "cpu" nodes. We build our own mapping * array of cpu numbers to cpu nodes locally for now to allow interrupt * time code to have a fast path rather than call of_get_cpu_node(). If * we implement cpu hotplug, we'll have to install an appropriate notifier * in order to release references to the cpu going away */ static struct cbe_regs_map { struct device_node *cpu_node; struct device_node *be_node; struct cbe_pmd_regs __iomem *pmd_regs; struct cbe_iic_regs __iomem *iic_regs; struct cbe_mic_tm_regs __iomem *mic_tm_regs; struct cbe_pmd_shadow_regs pmd_shadow_regs; } cbe_regs_maps[MAX_CBE]; static int cbe_regs_map_count; static struct cbe_thread_map { struct device_node *cpu_node; struct device_node *be_node; struct cbe_regs_map *regs; unsigned int thread_id; unsigned int cbe_id; } cbe_thread_map[NR_CPUS]; static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = {CPU_BITS_NONE} }; static cpumask_t cbe_first_online_cpu = { CPU_BITS_NONE }; static struct cbe_regs_map *cbe_find_map(struct device_node *np) { int i; struct device_node *tmp_np; if (strcasecmp(np->type, "spe")) { for (i = 0; i < cbe_regs_map_count; i++) if (cbe_regs_maps[i].cpu_node == np || cbe_regs_maps[i].be_node == np) return &cbe_regs_maps[i]; return NULL; } if (np->data) return np->data; /* walk up path until a cpu or be node is found */ tmp_np = np; do { tmp_np = tmp_np->parent; /* on a correct devicetree we won't get up to root */ BUG_ON(!tmp_np); } while (strcasecmp(tmp_np->type, "cpu") && strcasecmp(tmp_np->type, "be")); np->data = cbe_find_map(tmp_np); return np->data; } struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np) { struct cbe_regs_map *map = cbe_find_map(np); if (map == NULL) return NULL; return map->pmd_regs; } EXPORT_SYMBOL_GPL(cbe_get_pmd_regs); struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu) { struct cbe_regs_map *map = cbe_thread_map[cpu].regs; if (map == NULL) return NULL; return map->pmd_regs; } EXPORT_SYMBOL_GPL(cbe_get_cpu_pmd_regs); struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np) { struct cbe_regs_map *map = cbe_find_map(np); if (map == NULL) return NULL; return &map->pmd_shadow_regs; } struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu) { struct cbe_regs_map *map = cbe_thread_map[cpu].regs; if (map == NULL) return NULL; return &map->pmd_shadow_regs; } struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np) { struct cbe_regs_map *map = cbe_find_map(np); if (map == NULL) return NULL; return map->iic_regs; } struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu) { struct cbe_regs_map *map = cbe_thread_map[cpu].regs; if (map == NULL) return NULL; return map->iic_regs; } struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np) { struct cbe_regs_map *map = cbe_find_map(np); if (map == NULL) return NULL; return map->mic_tm_regs; } struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu) { struct cbe_regs_map *map = cbe_thread_map[cpu].regs; if (map == NULL) return NULL; return map->mic_tm_regs; } EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs); u32 
cbe_get_hw_thread_id(int cpu) { return cbe_thread_map[cpu].thread_id; } EXPORT_SYMBOL_GPL(cbe_get_hw_thread_id); u32 cbe_cpu_to_node(int cpu) { return cbe_thread_map[cpu].cbe_id; } EXPORT_SYMBOL_GPL(cbe_cpu_to_node); u32 cbe_node_to_cpu(int node) { return cpumask_first(&cbe_local_mask[node]); } EXPORT_SYMBOL_GPL(cbe_node_to_cpu); static struct device_node *cbe_get_be_node(int cpu_id) { struct device_node *np; for_each_node_by_type (np, "be") { int len,i; const phandle *cpu_handle; cpu_handle = of_get_property(np, "cpus", &len); /* * the CAB SLOF tree is non compliant, so we just assume * there is only one node */ if (WARN_ON_ONCE(!cpu_handle)) return np; for (i=0; i<len; i++) if (of_find_node_by_phandle(cpu_handle[i]) == of_get_cpu_node(cpu_id, NULL)) return np; } return NULL; } void __init cbe_fill_regs_map(struct cbe_regs_map *map) { if(map->be_node) { struct device_node *be, *np; be = map->be_node; for_each_node_by_type(np, "pervasive") if (of_get_parent(np) == be) map->pmd_regs = of_iomap(np, 0); for_each_node_by_type(np, "CBEA-Internal-Interrupt-Controller") if (of_get_parent(np) == be) map->iic_regs = of_iomap(np, 2); for_each_node_by_type(np, "mic-tm") if (of_get_parent(np) == be) map->mic_tm_regs = of_iomap(np, 0); } else { struct device_node *cpu; /* That hack must die die die ! */ const struct address_prop { unsigned long address; unsigned int len; } __attribute__((packed)) *prop; cpu = map->cpu_node; prop = of_get_property(cpu, "pervasive", NULL); if (prop != NULL) map->pmd_regs = ioremap(prop->address, prop->len); prop = of_get_property(cpu, "iic", NULL); if (prop != NULL) map->iic_regs = ioremap(prop->address, prop->len); prop = of_get_property(cpu, "mic-tm", NULL); if (prop != NULL) map->mic_tm_regs = ioremap(prop->address, prop->len); } } void __init cbe_regs_init(void) { int i; unsigned int thread_id; struct device_node *cpu; /* Build local fast map of CPUs */ for_each_possible_cpu(i) { cbe_thread_map[i].cpu_node = of_get_cpu_node(i, &thread_id); cbe_thread_map[i].be_node = cbe_get_be_node(i); cbe_thread_map[i].thread_id = thread_id; } /* Find maps for each device tree CPU */ for_each_node_by_type(cpu, "cpu") { struct cbe_regs_map *map; unsigned int cbe_id; cbe_id = cbe_regs_map_count++; map = &cbe_regs_maps[cbe_id]; if (cbe_regs_map_count > MAX_CBE) { printk(KERN_ERR "cbe_regs: More BE chips than supported" "!\n"); cbe_regs_map_count--; of_node_put(cpu); return; } map->cpu_node = cpu; for_each_possible_cpu(i) { struct cbe_thread_map *thread = &cbe_thread_map[i]; if (thread->cpu_node == cpu) { thread->regs = map; thread->cbe_id = cbe_id; map->be_node = thread->be_node; cpumask_set_cpu(i, &cbe_local_mask[cbe_id]); if(thread->thread_id == 0) cpumask_set_cpu(i, &cbe_first_online_cpu); } } cbe_fill_regs_map(map); } }
bcnice20/android-kernel-common
arch/powerpc/platforms/cell/cbe_regs.c
C
gpl-2.0
6,678
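cbe_regs_init() above precomputes per-chip CPU masks so that interrupt-time lookups such as cbe_node_to_cpu() reduce to a find-first-set over a mask rather than a device-tree walk. The same idea sketched with plain 64-bit masks; MAX_NODES and the example cpu/node layout are made up for illustration, and __builtin_ctzll assumes a GCC/Clang-compatible compiler.

#include <stdint.h>
#include <stdio.h>

#define MAX_NODES 2
static uint64_t local_mask[MAX_NODES];	/* bit c set => cpu c lives on node */

static void map_cpu_to_node(int cpu, int node)
{
	local_mask[node] |= 1ull << cpu;
}

/* First cpu of a node, or -1 if the node has none (cf. cpumask_first()). */
static int node_to_cpu(int node)
{
	uint64_t m = local_mask[node];

	return m ? __builtin_ctzll(m) : -1;	/* count trailing zero bits */
}

int main(void)
{
	/* Two threads per chip, as on a Cell BE. */
	map_cpu_to_node(0, 0);
	map_cpu_to_node(1, 0);
	map_cpu_to_node(2, 1);
	map_cpu_to_node(3, 1);
	printf("node 1 -> cpu %d\n", node_to_cpu(1));	/* prints 2 */
	return 0;
}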
/* * NAND Flash Controller Device Driver * Copyright (c) 2009, Intel Corporation and its suppliers. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * */ #include <linux/fs.h> #include <linux/slab.h> #include "flash.h" #include "ffsdefs.h" #include "lld.h" #include "lld_nand.h" #if CMD_DMA #include "lld_cdma.h" #endif #define BLK_FROM_ADDR(addr) ((u32)(addr >> DeviceInfo.nBitsInBlockDataSize)) #define PAGE_FROM_ADDR(addr, Block) ((u16)((addr - (u64)Block * \ DeviceInfo.wBlockDataSize) >> DeviceInfo.nBitsInPageDataSize)) #define IS_SPARE_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\ BAD_BLOCK) && SPARE_BLOCK == (pbt[blk] & SPARE_BLOCK)) #define IS_DATA_BLOCK(blk) (0 == (pbt[blk] & BAD_BLOCK)) #define IS_DISCARDED_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\ BAD_BLOCK) && DISCARD_BLOCK == (pbt[blk] & DISCARD_BLOCK)) #define IS_BAD_BLOCK(blk) (BAD_BLOCK == (pbt[blk] & BAD_BLOCK)) #if DEBUG_BNDRY void debug_boundary_lineno_error(int chnl, int limit, int no, int lineno, char *filename) { if (chnl >= limit) printk(KERN_ERR "Boundary Check Fail value %d >= limit %d, " "at %s:%d. Other info:%d. Aborting...\n", chnl, limit, filename, lineno, no); } /* static int globalmemsize; */ #endif static u16 FTL_Cache_If_Hit(u64 dwPageAddr); static int FTL_Cache_Read(u64 dwPageAddr); static void FTL_Cache_Read_Page(u8 *pData, u64 dwPageAddr, u16 cache_blk); static void FTL_Cache_Write_Page(u8 *pData, u64 dwPageAddr, u8 cache_blk, u16 flag); static int FTL_Cache_Write(void); static void FTL_Calculate_LRU(void); static u32 FTL_Get_Block_Index(u32 wBlockNum); static int FTL_Search_Block_Table_IN_Block(u32 BT_Block, u8 BT_Tag, u16 *Page); static int FTL_Read_Block_Table(void); static int FTL_Write_Block_Table(int wForce); static int FTL_Write_Block_Table_Data(void); static int FTL_Check_Block_Table(int wOldTable); static int FTL_Static_Wear_Leveling(void); static u32 FTL_Replace_Block_Table(void); static int FTL_Write_IN_Progress_Block_Table_Page(void); static u32 FTL_Get_Page_Num(u64 length); static u64 FTL_Get_Physical_Block_Addr(u64 blk_addr); static u32 FTL_Replace_OneBlock(u32 wBlockNum, u32 wReplaceNum); static u32 FTL_Replace_LWBlock(u32 wBlockNum, int *pGarbageCollect); static u32 FTL_Replace_MWBlock(void); static int FTL_Replace_Block(u64 blk_addr); static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX); struct device_info_tag DeviceInfo; struct flash_cache_tag Cache; static struct spectra_l2_cache_info cache_l2; static u8 *cache_l2_page_buf; static u8 *cache_l2_blk_buf; u8 *g_pBlockTable; u8 *g_pWearCounter; u16 *g_pReadCounter; u32 *g_pBTBlocks; static u16 g_wBlockTableOffset; static u32 g_wBlockTableIndex; static u8 g_cBlockTableStatus; static u8 *g_pTempBuf; static u8 *flag_check_blk_table; static u8 *tmp_buf_search_bt_in_block; static u8 *spare_buf_search_bt_in_block; static u8 *spare_buf_bt_search_bt_in_block; static u8 *tmp_buf1_read_blk_table; static u8 *tmp_buf2_read_blk_table; static u8 
*flags_static_wear_leveling; static u8 *tmp_buf_write_blk_table_data; static u8 *tmp_buf_read_disturbance; u8 *buf_read_page_main_spare; u8 *buf_write_page_main_spare; u8 *buf_read_page_spare; u8 *buf_get_bad_block; #if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA) struct flash_cache_delta_list_tag int_cache[MAX_CHANS + MAX_DESCS]; struct flash_cache_tag cache_start_copy; #endif int g_wNumFreeBlocks; u8 g_SBDCmdIndex; static u8 *g_pIPF; static u8 bt_flag = FIRST_BT_ID; static u8 bt_block_changed; static u16 cache_block_to_write; static u8 last_erased = FIRST_BT_ID; static u8 GC_Called; static u8 BT_GC_Called; #if CMD_DMA #define COPY_BACK_BUF_NUM 10 static u8 ftl_cmd_cnt; /* Init value is 0 */ u8 *g_pBTDelta; u8 *g_pBTDelta_Free; u8 *g_pBTStartingCopy; u8 *g_pWearCounterCopy; u16 *g_pReadCounterCopy; u8 *g_pBlockTableCopies; u8 *g_pNextBlockTable; static u8 *cp_back_buf_copies[COPY_BACK_BUF_NUM]; static int cp_back_buf_idx; static u8 *g_temp_buf; #pragma pack(push, 1) #pragma pack(1) struct BTableChangesDelta { u8 ftl_cmd_cnt; u8 ValidFields; u16 g_wBlockTableOffset; u32 g_wBlockTableIndex; u32 BT_Index; u32 BT_Entry_Value; u32 WC_Index; u8 WC_Entry_Value; u32 RC_Index; u16 RC_Entry_Value; }; #pragma pack(pop) struct BTableChangesDelta *p_BTableChangesDelta; #endif #define MARK_BLOCK_AS_BAD(blocknode) (blocknode |= BAD_BLOCK) #define MARK_BLK_AS_DISCARD(blk) (blk = (blk & ~SPARE_BLOCK) | DISCARD_BLOCK) #define FTL_Get_LBAPBA_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\ sizeof(u32)) #define FTL_Get_WearCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\ sizeof(u8)) #define FTL_Get_ReadCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\ sizeof(u16)) #if SUPPORT_LARGE_BLOCKNUM #define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\ sizeof(u8) * 3) #else #define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\ sizeof(u16)) #endif #define FTL_Get_WearCounter_Table_Flash_Size_Bytes \ FTL_Get_WearCounter_Table_Mem_Size_Bytes #define FTL_Get_ReadCounter_Table_Flash_Size_Bytes \ FTL_Get_ReadCounter_Table_Mem_Size_Bytes static u32 FTL_Get_Block_Table_Flash_Size_Bytes(void) { u32 byte_num; if (DeviceInfo.MLCDevice) { byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() + DeviceInfo.wDataBlockNum * sizeof(u8) + DeviceInfo.wDataBlockNum * sizeof(u16); } else { byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() + DeviceInfo.wDataBlockNum * sizeof(u8); } byte_num += 4 * sizeof(u8); return byte_num; } static u16 FTL_Get_Block_Table_Flash_Size_Pages(void) { return (u16)FTL_Get_Page_Num(FTL_Get_Block_Table_Flash_Size_Bytes()); } static int FTL_Copy_Block_Table_To_Flash(u8 *flashBuf, u32 sizeToTx, u32 sizeTxed) { u32 wBytesCopied, blk_tbl_size, wBytes; u32 *pbt = (u32 *)g_pBlockTable; blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes(); for (wBytes = 0; (wBytes < sizeToTx) && ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) { #if SUPPORT_LARGE_BLOCKNUM flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 3] >> (((wBytes + sizeTxed) % 3) ? ((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16)) & 0xFF; #else flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 2] >> (((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF; #endif } sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0; blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes(); wBytesCopied = wBytes; wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ? 
(sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed); memcpy(flashBuf + wBytesCopied, g_pWearCounter + sizeTxed, wBytes); sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0; if (DeviceInfo.MLCDevice) { blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes(); wBytesCopied += wBytes; for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) && ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) flashBuf[wBytes + wBytesCopied] = (g_pReadCounter[(wBytes + sizeTxed) / 2] >> (((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF; } return wBytesCopied + wBytes; } static int FTL_Copy_Block_Table_From_Flash(u8 *flashBuf, u32 sizeToTx, u32 sizeTxed) { u32 wBytesCopied, blk_tbl_size, wBytes; u32 *pbt = (u32 *)g_pBlockTable; blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes(); for (wBytes = 0; (wBytes < sizeToTx) && ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) { #if SUPPORT_LARGE_BLOCKNUM if (!((wBytes + sizeTxed) % 3)) pbt[(wBytes + sizeTxed) / 3] = 0; pbt[(wBytes + sizeTxed) / 3] |= (flashBuf[wBytes] << (((wBytes + sizeTxed) % 3) ? ((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16)); #else if (!((wBytes + sizeTxed) % 2)) pbt[(wBytes + sizeTxed) / 2] = 0; pbt[(wBytes + sizeTxed) / 2] |= (flashBuf[wBytes] << (((wBytes + sizeTxed) % 2) ? 0 : 8)); #endif } sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0; blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes(); wBytesCopied = wBytes; wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ? (sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed); memcpy(g_pWearCounter + sizeTxed, flashBuf + wBytesCopied, wBytes); sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0; if (DeviceInfo.MLCDevice) { wBytesCopied += wBytes; blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes(); for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) && ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) { if (((wBytes + sizeTxed) % 2)) g_pReadCounter[(wBytes + sizeTxed) / 2] = 0; g_pReadCounter[(wBytes + sizeTxed) / 2] |= (flashBuf[wBytes] << (((wBytes + sizeTxed) % 2) ? 0 : 8)); } } return wBytesCopied+wBytes; } static int FTL_Insert_Block_Table_Signature(u8 *buf, u8 tag) { int i; for (i = 0; i < BTSIG_BYTES; i++) buf[BTSIG_OFFSET + i] = ((tag + (i * BTSIG_DELTA) - FIRST_BT_ID) % (1 + LAST_BT_ID-FIRST_BT_ID)) + FIRST_BT_ID; return PASS; } static int FTL_Extract_Block_Table_Tag(u8 *buf, u8 **tagarray) { static u8 tag[BTSIG_BYTES >> 1]; int i, j, k, tagi, tagtemp, status; *tagarray = (u8 *)tag; tagi = 0; for (i = 0; i < (BTSIG_BYTES - 1); i++) { for (j = i + 1; (j < BTSIG_BYTES) && (tagi < (BTSIG_BYTES >> 1)); j++) { tagtemp = buf[BTSIG_OFFSET + j] - buf[BTSIG_OFFSET + i]; if (tagtemp && !(tagtemp % BTSIG_DELTA)) { tagtemp = (buf[BTSIG_OFFSET + i] + (1 + LAST_BT_ID - FIRST_BT_ID) - (i * BTSIG_DELTA)) % (1 + LAST_BT_ID - FIRST_BT_ID); status = FAIL; for (k = 0; k < tagi; k++) { if (tagtemp == tag[k]) status = PASS; } if (status == FAIL) { tag[tagi++] = tagtemp; i = (j == (i + 1)) ? i + 1 : i; j = (j == (i + 1)) ? 
i + 1 : i; } } } } return tagi; } static int FTL_Execute_SPL_Recovery(void) { u32 j, block, blks; u32 *pbt = (u32 *)g_pBlockTable; int ret; nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); blks = DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock; for (j = 0; j <= blks; j++) { block = (pbt[j]); if (((block & BAD_BLOCK) != BAD_BLOCK) && ((block & SPARE_BLOCK) == SPARE_BLOCK)) { ret = GLOB_LLD_Erase_Block(block & ~BAD_BLOCK); if (FAIL == ret) { nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in %s, Line %d, " "Function: %s, new Bad Block %d " "generated!\n", __FILE__, __LINE__, __func__, (int)(block & ~BAD_BLOCK)); MARK_BLOCK_AS_BAD(pbt[j]); } } } return PASS; } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: GLOB_FTL_IdentifyDevice * Inputs: pointer to identify data structure * Outputs: PASS / FAIL * Description: the identify data structure is filled in with * information for the block driver. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ int GLOB_FTL_IdentifyDevice(struct spectra_indentfy_dev_tag *dev_data) { nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); dev_data->NumBlocks = DeviceInfo.wTotalBlocks; dev_data->PagesPerBlock = DeviceInfo.wPagesPerBlock; dev_data->PageDataSize = DeviceInfo.wPageDataSize; dev_data->wECCBytesPerSector = DeviceInfo.wECCBytesPerSector; dev_data->wDataBlockNum = DeviceInfo.wDataBlockNum; return PASS; } /* ..... */ static int allocate_memory(void) { u32 block_table_size, page_size, block_size, mem_size; u32 total_bytes = 0; int i; #if CMD_DMA int j; #endif nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); page_size = DeviceInfo.wPageSize; block_size = DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize; block_table_size = DeviceInfo.wDataBlockNum * (sizeof(u32) + sizeof(u8) + sizeof(u16)); block_table_size += (DeviceInfo.wPageDataSize - (block_table_size % DeviceInfo.wPageDataSize)) % DeviceInfo.wPageDataSize; /* Malloc memory for block tables */ g_pBlockTable = kzalloc(block_table_size, GFP_ATOMIC); if (!g_pBlockTable) goto block_table_fail; total_bytes += block_table_size; g_pWearCounter = (u8 *)(g_pBlockTable + DeviceInfo.wDataBlockNum * sizeof(u32)); if (DeviceInfo.MLCDevice) g_pReadCounter = (u16 *)(g_pBlockTable + DeviceInfo.wDataBlockNum * (sizeof(u32) + sizeof(u8))); /* Malloc memory and init for cache items */ for (i = 0; i < CACHE_ITEM_NUM; i++) { Cache.array[i].address = NAND_CACHE_INIT_ADDR; Cache.array[i].use_cnt = 0; Cache.array[i].changed = CLEAR; Cache.array[i].buf = kzalloc(Cache.cache_item_size, GFP_ATOMIC); if (!Cache.array[i].buf) goto cache_item_fail; total_bytes += Cache.cache_item_size; } /* Malloc memory for IPF */ g_pIPF = kzalloc(page_size, GFP_ATOMIC); if (!g_pIPF) goto ipf_fail; total_bytes += page_size; /* Malloc memory for data merging during Level2 Cache flush */ cache_l2_page_buf = kmalloc(page_size, GFP_ATOMIC); if (!cache_l2_page_buf) goto cache_l2_page_buf_fail; memset(cache_l2_page_buf, 0xff, page_size); total_bytes += page_size; cache_l2_blk_buf = kmalloc(block_size, GFP_ATOMIC); if (!cache_l2_blk_buf) goto cache_l2_blk_buf_fail; memset(cache_l2_blk_buf, 0xff, block_size); total_bytes += block_size; /* Malloc memory for temp buffer */ g_pTempBuf = kzalloc(Cache.cache_item_size, GFP_ATOMIC); if (!g_pTempBuf) goto Temp_buf_fail; total_bytes += Cache.cache_item_size; /* Malloc memory for block table blocks */ mem_size = (1 + LAST_BT_ID - 
FIRST_BT_ID) * sizeof(u32); g_pBTBlocks = kmalloc(mem_size, GFP_ATOMIC); if (!g_pBTBlocks) goto bt_blocks_fail; memset(g_pBTBlocks, 0xff, mem_size); total_bytes += mem_size; /* Malloc memory for function FTL_Check_Block_Table */ flag_check_blk_table = kmalloc(DeviceInfo.wDataBlockNum, GFP_ATOMIC); if (!flag_check_blk_table) goto flag_check_blk_table_fail; total_bytes += DeviceInfo.wDataBlockNum; /* Malloc memory for function FTL_Search_Block_Table_IN_Block */ tmp_buf_search_bt_in_block = kmalloc(page_size, GFP_ATOMIC); if (!tmp_buf_search_bt_in_block) goto tmp_buf_search_bt_in_block_fail; memset(tmp_buf_search_bt_in_block, 0xff, page_size); total_bytes += page_size; mem_size = DeviceInfo.wPageSize - DeviceInfo.wPageDataSize; spare_buf_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC); if (!spare_buf_search_bt_in_block) goto spare_buf_search_bt_in_block_fail; memset(spare_buf_search_bt_in_block, 0xff, mem_size); total_bytes += mem_size; spare_buf_bt_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC); if (!spare_buf_bt_search_bt_in_block) goto spare_buf_bt_search_bt_in_block_fail; memset(spare_buf_bt_search_bt_in_block, 0xff, mem_size); total_bytes += mem_size; /* Malloc memory for function FTL_Read_Block_Table */ tmp_buf1_read_blk_table = kmalloc(page_size, GFP_ATOMIC); if (!tmp_buf1_read_blk_table) goto tmp_buf1_read_blk_table_fail; memset(tmp_buf1_read_blk_table, 0xff, page_size); total_bytes += page_size; tmp_buf2_read_blk_table = kmalloc(page_size, GFP_ATOMIC); if (!tmp_buf2_read_blk_table) goto tmp_buf2_read_blk_table_fail; memset(tmp_buf2_read_blk_table, 0xff, page_size); total_bytes += page_size; /* Malloc memory for function FTL_Static_Wear_Leveling */ flags_static_wear_leveling = kmalloc(DeviceInfo.wDataBlockNum, GFP_ATOMIC); if (!flags_static_wear_leveling) goto flags_static_wear_leveling_fail; total_bytes += DeviceInfo.wDataBlockNum; /* Malloc memory for function FTL_Write_Block_Table_Data */ if (FTL_Get_Block_Table_Flash_Size_Pages() > 3) mem_size = FTL_Get_Block_Table_Flash_Size_Bytes() - 2 * DeviceInfo.wPageSize; else mem_size = DeviceInfo.wPageSize; tmp_buf_write_blk_table_data = kmalloc(mem_size, GFP_ATOMIC); if (!tmp_buf_write_blk_table_data) goto tmp_buf_write_blk_table_data_fail; memset(tmp_buf_write_blk_table_data, 0xff, mem_size); total_bytes += mem_size; /* Malloc memory for function FTL_Read_Disturbance */ tmp_buf_read_disturbance = kmalloc(block_size, GFP_ATOMIC); if (!tmp_buf_read_disturbance) goto tmp_buf_read_disturbance_fail; memset(tmp_buf_read_disturbance, 0xff, block_size); total_bytes += block_size; /* Alloc mem for function NAND_Read_Page_Main_Spare of lld_nand.c */ buf_read_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC); if (!buf_read_page_main_spare) goto buf_read_page_main_spare_fail; total_bytes += DeviceInfo.wPageSize; /* Alloc mem for function NAND_Write_Page_Main_Spare of lld_nand.c */ buf_write_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC); if (!buf_write_page_main_spare) goto buf_write_page_main_spare_fail; total_bytes += DeviceInfo.wPageSize; /* Alloc mem for function NAND_Read_Page_Spare of lld_nand.c */ buf_read_page_spare = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC); if (!buf_read_page_spare) goto buf_read_page_spare_fail; memset(buf_read_page_spare, 0xff, DeviceInfo.wPageSpareSize); total_bytes += DeviceInfo.wPageSpareSize; /* Alloc mem for function NAND_Get_Bad_Block of lld_nand.c */ buf_get_bad_block = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC); if (!buf_get_bad_block) goto buf_get_bad_block_fail; 
memset(buf_get_bad_block, 0xff, DeviceInfo.wPageSpareSize); total_bytes += DeviceInfo.wPageSpareSize; #if CMD_DMA g_temp_buf = kmalloc(block_size, GFP_ATOMIC); if (!g_temp_buf) goto temp_buf_fail; memset(g_temp_buf, 0xff, block_size); total_bytes += block_size; /* Malloc memory for copy of block table used in CDMA mode */ g_pBTStartingCopy = kzalloc(block_table_size, GFP_ATOMIC); if (!g_pBTStartingCopy) goto bt_starting_copy; total_bytes += block_table_size; g_pWearCounterCopy = (u8 *)(g_pBTStartingCopy + DeviceInfo.wDataBlockNum * sizeof(u32)); if (DeviceInfo.MLCDevice) g_pReadCounterCopy = (u16 *)(g_pBTStartingCopy + DeviceInfo.wDataBlockNum * (sizeof(u32) + sizeof(u8))); /* Malloc memory for block table copies */ mem_size = 5 * DeviceInfo.wDataBlockNum * sizeof(u32) + 5 * DeviceInfo.wDataBlockNum * sizeof(u8); if (DeviceInfo.MLCDevice) mem_size += 5 * DeviceInfo.wDataBlockNum * sizeof(u16); g_pBlockTableCopies = kzalloc(mem_size, GFP_ATOMIC); if (!g_pBlockTableCopies) goto blk_table_copies_fail; total_bytes += mem_size; g_pNextBlockTable = g_pBlockTableCopies; /* Malloc memory for Block Table Delta */ mem_size = MAX_DESCS * sizeof(struct BTableChangesDelta); g_pBTDelta = kzalloc(mem_size, GFP_ATOMIC); if (!g_pBTDelta) goto bt_delta_fail; total_bytes += mem_size; g_pBTDelta_Free = g_pBTDelta; /* Malloc memory for Copy Back Buffers */ for (j = 0; j < COPY_BACK_BUF_NUM; j++) { cp_back_buf_copies[j] = kzalloc(block_size, GFP_ATOMIC); if (!cp_back_buf_copies[j]) goto cp_back_buf_copies_fail; total_bytes += block_size; } cp_back_buf_idx = 0; /* Malloc memory for pending commands list */ mem_size = sizeof(struct pending_cmd) * MAX_DESCS; info.pcmds = kzalloc(mem_size, GFP_KERNEL); if (!info.pcmds) goto pending_cmds_buf_fail; total_bytes += mem_size; /* Malloc memory for CDMA descripter table */ mem_size = sizeof(struct cdma_descriptor) * MAX_DESCS; info.cdma_desc_buf = kzalloc(mem_size, GFP_KERNEL); if (!info.cdma_desc_buf) goto cdma_desc_buf_fail; total_bytes += mem_size; /* Malloc memory for Memcpy descripter table */ mem_size = sizeof(struct memcpy_descriptor) * MAX_DESCS; info.memcp_desc_buf = kzalloc(mem_size, GFP_KERNEL); if (!info.memcp_desc_buf) goto memcp_desc_buf_fail; total_bytes += mem_size; #endif nand_dbg_print(NAND_DBG_WARN, "Total memory allocated in FTL layer: %d\n", total_bytes); return PASS; #if CMD_DMA memcp_desc_buf_fail: kfree(info.cdma_desc_buf); cdma_desc_buf_fail: kfree(info.pcmds); pending_cmds_buf_fail: cp_back_buf_copies_fail: j--; for (; j >= 0; j--) kfree(cp_back_buf_copies[j]); kfree(g_pBTDelta); bt_delta_fail: kfree(g_pBlockTableCopies); blk_table_copies_fail: kfree(g_pBTStartingCopy); bt_starting_copy: kfree(g_temp_buf); temp_buf_fail: kfree(buf_get_bad_block); #endif buf_get_bad_block_fail: kfree(buf_read_page_spare); buf_read_page_spare_fail: kfree(buf_write_page_main_spare); buf_write_page_main_spare_fail: kfree(buf_read_page_main_spare); buf_read_page_main_spare_fail: kfree(tmp_buf_read_disturbance); tmp_buf_read_disturbance_fail: kfree(tmp_buf_write_blk_table_data); tmp_buf_write_blk_table_data_fail: kfree(flags_static_wear_leveling); flags_static_wear_leveling_fail: kfree(tmp_buf2_read_blk_table); tmp_buf2_read_blk_table_fail: kfree(tmp_buf1_read_blk_table); tmp_buf1_read_blk_table_fail: kfree(spare_buf_bt_search_bt_in_block); spare_buf_bt_search_bt_in_block_fail: kfree(spare_buf_search_bt_in_block); spare_buf_search_bt_in_block_fail: kfree(tmp_buf_search_bt_in_block); tmp_buf_search_bt_in_block_fail: kfree(flag_check_blk_table); 
flag_check_blk_table_fail: kfree(g_pBTBlocks); bt_blocks_fail: kfree(g_pTempBuf); Temp_buf_fail: kfree(cache_l2_blk_buf); cache_l2_blk_buf_fail: kfree(cache_l2_page_buf); cache_l2_page_buf_fail: kfree(g_pIPF); ipf_fail: cache_item_fail: i--; for (; i >= 0; i--) kfree(Cache.array[i].buf); kfree(g_pBlockTable); block_table_fail: printk(KERN_ERR "Failed to kmalloc memory in %s Line %d.\n", __FILE__, __LINE__); return -ENOMEM; } /* .... */ static int free_memory(void) { int i; #if CMD_DMA kfree(info.memcp_desc_buf); kfree(info.cdma_desc_buf); kfree(info.pcmds); for (i = COPY_BACK_BUF_NUM - 1; i >= 0; i--) kfree(cp_back_buf_copies[i]); kfree(g_pBTDelta); kfree(g_pBlockTableCopies); kfree(g_pBTStartingCopy); kfree(g_temp_buf); kfree(buf_get_bad_block); #endif kfree(buf_read_page_spare); kfree(buf_write_page_main_spare); kfree(buf_read_page_main_spare); kfree(tmp_buf_read_disturbance); kfree(tmp_buf_write_blk_table_data); kfree(flags_static_wear_leveling); kfree(tmp_buf2_read_blk_table); kfree(tmp_buf1_read_blk_table); kfree(spare_buf_bt_search_bt_in_block); kfree(spare_buf_search_bt_in_block); kfree(tmp_buf_search_bt_in_block); kfree(flag_check_blk_table); kfree(g_pBTBlocks); kfree(g_pTempBuf); kfree(g_pIPF); for (i = CACHE_ITEM_NUM - 1; i >= 0; i--) kfree(Cache.array[i].buf); kfree(g_pBlockTable); return 0; } static void dump_cache_l2_table(void) { struct list_head *p; struct spectra_l2_cache_list *pnd; int n; n = 0; list_for_each(p, &cache_l2.table.list) { pnd = list_entry(p, struct spectra_l2_cache_list, list); nand_dbg_print(NAND_DBG_WARN, "dump_cache_l2_table node: %d, logical_blk_num: %d\n", n, pnd->logical_blk_num); /* for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) { if (pnd->pages_array[i] != MAX_U32_VALUE) nand_dbg_print(NAND_DBG_WARN, " pages_array[%d]: 0x%x\n", i, pnd->pages_array[i]); } */ n++; } } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: GLOB_FTL_Init * Inputs: none * Outputs: PASS=0 / FAIL=1 * Description: allocates the memory for cache array, * important data structures * clears the cache array * reads the block table from flash into array *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ int GLOB_FTL_Init(void) { int i; nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); Cache.pages_per_item = 1; Cache.cache_item_size = 1 * DeviceInfo.wPageDataSize; if (allocate_memory() != PASS) return FAIL; #if CMD_DMA #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE memcpy((void *)&cache_start_copy, (void *)&Cache, sizeof(struct flash_cache_tag)); memset((void *)&int_cache, -1, sizeof(struct flash_cache_delta_list_tag) * (MAX_CHANS + MAX_DESCS)); #endif ftl_cmd_cnt = 0; #endif if (FTL_Read_Block_Table() != PASS) return FAIL; /* Init the Level2 Cache data structure */ for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++) cache_l2.blk_array[i] = MAX_U32_VALUE; cache_l2.cur_blk_idx = 0; cache_l2.cur_page_num = 0; INIT_LIST_HEAD(&cache_l2.table.list); cache_l2.table.logical_blk_num = MAX_U32_VALUE; dump_cache_l2_table(); return 0; } #if CMD_DMA #if 0 static void save_blk_table_changes(u16 idx) { u8 ftl_cmd; u32 *pbt = (u32 *)g_pBTStartingCopy; #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE u16 id; u8 cache_blks; id = idx - MAX_CHANS; if (int_cache[id].item != -1) { cache_blks = int_cache[id].item; cache_start_copy.array[cache_blks].address = int_cache[id].cache.address; cache_start_copy.array[cache_blks].changed = int_cache[id].cache.changed; } #endif ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt; while (ftl_cmd <= 
PendingCMD[idx].Tag) { if (p_BTableChangesDelta->ValidFields == 0x01) { g_wBlockTableOffset = p_BTableChangesDelta->g_wBlockTableOffset; } else if (p_BTableChangesDelta->ValidFields == 0x0C) { pbt[p_BTableChangesDelta->BT_Index] = p_BTableChangesDelta->BT_Entry_Value; debug_boundary_error((( p_BTableChangesDelta->BT_Index)), DeviceInfo.wDataBlockNum, 0); } else if (p_BTableChangesDelta->ValidFields == 0x03) { g_wBlockTableOffset = p_BTableChangesDelta->g_wBlockTableOffset; g_wBlockTableIndex = p_BTableChangesDelta->g_wBlockTableIndex; } else if (p_BTableChangesDelta->ValidFields == 0x30) { g_pWearCounterCopy[p_BTableChangesDelta->WC_Index] = p_BTableChangesDelta->WC_Entry_Value; } else if ((DeviceInfo.MLCDevice) && (p_BTableChangesDelta->ValidFields == 0xC0)) { g_pReadCounterCopy[p_BTableChangesDelta->RC_Index] = p_BTableChangesDelta->RC_Entry_Value; nand_dbg_print(NAND_DBG_DEBUG, "In event status setting read counter " "GLOB_ftl_cmd_cnt %u Count %u Index %u\n", ftl_cmd, p_BTableChangesDelta->RC_Entry_Value, (unsigned int)p_BTableChangesDelta->RC_Index); } else { nand_dbg_print(NAND_DBG_DEBUG, "This should never occur \n"); } p_BTableChangesDelta += 1; ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt; } } static void discard_cmds(u16 n) { u32 *pbt = (u32 *)g_pBTStartingCopy; u8 ftl_cmd; unsigned long k; #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE u8 cache_blks; u16 id; #endif if ((PendingCMD[n].CMD == WRITE_MAIN_CMD) || (PendingCMD[n].CMD == WRITE_MAIN_SPARE_CMD)) { for (k = 0; k < DeviceInfo.wDataBlockNum; k++) { if (PendingCMD[n].Block == (pbt[k] & (~BAD_BLOCK))) MARK_BLK_AS_DISCARD(pbt[k]); } } ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt; while (ftl_cmd <= PendingCMD[n].Tag) { p_BTableChangesDelta += 1; ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt; } #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE id = n - MAX_CHANS; if (int_cache[id].item != -1) { cache_blks = int_cache[id].item; if (PendingCMD[n].CMD == MEMCOPY_CMD) { if ((cache_start_copy.array[cache_blks].buf <= PendingCMD[n].DataDestAddr) && ((cache_start_copy.array[cache_blks].buf + Cache.cache_item_size) > PendingCMD[n].DataDestAddr)) { cache_start_copy.array[cache_blks].address = NAND_CACHE_INIT_ADDR; cache_start_copy.array[cache_blks].use_cnt = 0; cache_start_copy.array[cache_blks].changed = CLEAR; } } else { cache_start_copy.array[cache_blks].address = int_cache[id].cache.address; cache_start_copy.array[cache_blks].changed = int_cache[id].cache.changed; } } #endif } static void process_cmd_pass(int *first_failed_cmd, u16 idx) { if (0 == *first_failed_cmd) save_blk_table_changes(idx); else discard_cmds(idx); } static void process_cmd_fail_abort(int *first_failed_cmd, u16 idx, int event) { u32 *pbt = (u32 *)g_pBTStartingCopy; u8 ftl_cmd; unsigned long i; int erase_fail, program_fail; #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE u8 cache_blks; u16 id; #endif if (0 == *first_failed_cmd) *first_failed_cmd = PendingCMD[idx].SBDCmdIndex; nand_dbg_print(NAND_DBG_DEBUG, "Uncorrectable error has occurred " "while executing %u Command %u accesing Block %u\n", (unsigned int)p_BTableChangesDelta->ftl_cmd_cnt, PendingCMD[idx].CMD, (unsigned int)PendingCMD[idx].Block); ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt; while (ftl_cmd <= PendingCMD[idx].Tag) { p_BTableChangesDelta += 1; ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt; } #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE id = idx - MAX_CHANS; if (int_cache[id].item != -1) { cache_blks = int_cache[id].item; if ((PendingCMD[idx].CMD == WRITE_MAIN_CMD)) { cache_start_copy.array[cache_blks].address = 
int_cache[id].cache.address; cache_start_copy.array[cache_blks].changed = SET; } else if ((PendingCMD[idx].CMD == READ_MAIN_CMD)) { cache_start_copy.array[cache_blks].address = NAND_CACHE_INIT_ADDR; cache_start_copy.array[cache_blks].use_cnt = 0; cache_start_copy.array[cache_blks].changed = CLEAR; } else if (PendingCMD[idx].CMD == ERASE_CMD) { /* ? */ } else if (PendingCMD[idx].CMD == MEMCOPY_CMD) { /* ? */ } } #endif erase_fail = (event == EVENT_ERASE_FAILURE) && (PendingCMD[idx].CMD == ERASE_CMD); program_fail = (event == EVENT_PROGRAM_FAILURE) && ((PendingCMD[idx].CMD == WRITE_MAIN_CMD) || (PendingCMD[idx].CMD == WRITE_MAIN_SPARE_CMD)); if (erase_fail || program_fail) { for (i = 0; i < DeviceInfo.wDataBlockNum; i++) { if (PendingCMD[idx].Block == (pbt[i] & (~BAD_BLOCK))) MARK_BLOCK_AS_BAD(pbt[i]); } } } static void process_cmd(int *first_failed_cmd, u16 idx, int event) { u8 ftl_cmd; int cmd_match = 0; if (p_BTableChangesDelta->ftl_cmd_cnt == PendingCMD[idx].Tag) cmd_match = 1; if (PendingCMD[idx].Status == CMD_PASS) { process_cmd_pass(first_failed_cmd, idx); } else if ((PendingCMD[idx].Status == CMD_FAIL) || (PendingCMD[idx].Status == CMD_ABORT)) { process_cmd_fail_abort(first_failed_cmd, idx, event); } else if ((PendingCMD[idx].Status == CMD_NOT_DONE) && PendingCMD[idx].Tag) { nand_dbg_print(NAND_DBG_DEBUG, " Command no. %hu is not executed\n", (unsigned int)PendingCMD[idx].Tag); ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt; while (ftl_cmd <= PendingCMD[idx].Tag) { p_BTableChangesDelta += 1; ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt; } } } #endif static void process_cmd(int *first_failed_cmd, u16 idx, int event) { printk(KERN_ERR "temporary workaround function. " "Should not be called! \n"); } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: GLOB_FTL_Event_Status * Inputs: none * Outputs: Event Code * Description: It is called by SBD after hardware interrupt signalling * completion of commands chain * It does following things * get event status from LLD * analyze command chain status * determine last command executed * analyze results * rebuild the block table in case of uncorrectable error * return event code *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ int GLOB_FTL_Event_Status(int *first_failed_cmd) { int event_code = PASS; u16 i_P; nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); *first_failed_cmd = 0; event_code = GLOB_LLD_Event_Status(); switch (event_code) { case EVENT_PASS: nand_dbg_print(NAND_DBG_DEBUG, "Handling EVENT_PASS\n"); break; case EVENT_UNCORRECTABLE_DATA_ERROR: nand_dbg_print(NAND_DBG_DEBUG, "Handling Uncorrectable ECC!\n"); break; case EVENT_PROGRAM_FAILURE: case EVENT_ERASE_FAILURE: nand_dbg_print(NAND_DBG_WARN, "Handling Ugly case. 
" "Event code: 0x%x\n", event_code); p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta; for (i_P = MAX_CHANS; i_P < (ftl_cmd_cnt + MAX_CHANS); i_P++) process_cmd(first_failed_cmd, i_P, event_code); memcpy(g_pBlockTable, g_pBTStartingCopy, DeviceInfo.wDataBlockNum * sizeof(u32)); memcpy(g_pWearCounter, g_pWearCounterCopy, DeviceInfo.wDataBlockNum * sizeof(u8)); if (DeviceInfo.MLCDevice) memcpy(g_pReadCounter, g_pReadCounterCopy, DeviceInfo.wDataBlockNum * sizeof(u16)); #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE memcpy((void *)&Cache, (void *)&cache_start_copy, sizeof(struct flash_cache_tag)); memset((void *)&int_cache, -1, sizeof(struct flash_cache_delta_list_tag) * (MAX_DESCS + MAX_CHANS)); #endif break; default: nand_dbg_print(NAND_DBG_WARN, "Handling unexpected event code - 0x%x\n", event_code); event_code = ERR; break; } memcpy(g_pBTStartingCopy, g_pBlockTable, DeviceInfo.wDataBlockNum * sizeof(u32)); memcpy(g_pWearCounterCopy, g_pWearCounter, DeviceInfo.wDataBlockNum * sizeof(u8)); if (DeviceInfo.MLCDevice) memcpy(g_pReadCounterCopy, g_pReadCounter, DeviceInfo.wDataBlockNum * sizeof(u16)); g_pBTDelta_Free = g_pBTDelta; ftl_cmd_cnt = 0; g_pNextBlockTable = g_pBlockTableCopies; cp_back_buf_idx = 0; #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE memcpy((void *)&cache_start_copy, (void *)&Cache, sizeof(struct flash_cache_tag)); memset((void *)&int_cache, -1, sizeof(struct flash_cache_delta_list_tag) * (MAX_DESCS + MAX_CHANS)); #endif return event_code; } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: glob_ftl_execute_cmds * Inputs: none * Outputs: none * Description: pass thru to LLD ***************************************************************/ u16 glob_ftl_execute_cmds(void) { nand_dbg_print(NAND_DBG_TRACE, "glob_ftl_execute_cmds: ftl_cmd_cnt %u\n", (unsigned int)ftl_cmd_cnt); g_SBDCmdIndex = 0; return glob_lld_execute_cmds(); } #endif #if !CMD_DMA /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: GLOB_FTL_Read Immediate * Inputs: pointer to data * address of data * Outputs: PASS / FAIL * Description: Reads one page of data into RAM directly from flash without * using or disturbing cache.It is assumed this function is called * with CMD-DMA disabled. 
*****************************************************************/ int GLOB_FTL_Read_Immediate(u8 *read_data, u64 addr) { int wResult = FAIL; u32 Block; u16 Page; u32 phy_blk; u32 *pbt = (u32 *)g_pBlockTable; nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); Block = BLK_FROM_ADDR(addr); Page = PAGE_FROM_ADDR(addr, Block); if (!IS_SPARE_BLOCK(Block)) return FAIL; phy_blk = pbt[Block]; wResult = GLOB_LLD_Read_Page_Main(read_data, phy_blk, Page, 1); if (DeviceInfo.MLCDevice) { g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]++; if (g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock] >= MAX_READ_COUNTER) FTL_Read_Disturbance(phy_blk); if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) { g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE; FTL_Write_IN_Progress_Block_Table_Page(); } } return wResult; } #endif #ifdef SUPPORT_BIG_ENDIAN /********************************************************************* * Function: FTL_Invert_Block_Table * Inputs: none * Outputs: none * Description: Re-format the block table in ram based on BIG_ENDIAN and * LARGE_BLOCKNUM if necessary **********************************************************************/ static void FTL_Invert_Block_Table(void) { u32 i; u32 *pbt = (u32 *)g_pBlockTable; nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); #ifdef SUPPORT_LARGE_BLOCKNUM for (i = 0; i < DeviceInfo.wDataBlockNum; i++) { pbt[i] = INVERTUINT32(pbt[i]); g_pWearCounter[i] = INVERTUINT32(g_pWearCounter[i]); } #else for (i = 0; i < DeviceInfo.wDataBlockNum; i++) { pbt[i] = INVERTUINT16(pbt[i]); g_pWearCounter[i] = INVERTUINT16(g_pWearCounter[i]); } #endif } #endif /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: GLOB_FTL_Flash_Init * Inputs: none * Outputs: PASS=0 / FAIL=0x01 (based on read ID) * Description: The flash controller is initialized * The flash device is reset * Perform a flash READ ID command to confirm that a * valid device is attached and active. 
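 *               (the device reset and READ ID are performed inside the
 *               GLOB_LLD_Flash_Init call below)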
 *               The DeviceInfo structure gets filled in
 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int GLOB_FTL_Flash_Init(void)
{
	int status = FAIL;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	g_SBDCmdIndex = 0;

	status = GLOB_LLD_Flash_Init();

	return status;
}

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     GLOB_FTL_Flash_Release
 * Inputs:       none
 * Outputs:      PASS=0 / FAIL=0x01
 * Description:  The flash controller is released
 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int GLOB_FTL_Flash_Release(void)
{
	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	return GLOB_LLD_Flash_Release();
}

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     GLOB_FTL_Cache_Release
 * Inputs:       none
 * Outputs:      none
 * Description:  release all memory allocated in GLOB_FTL_Init
 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
void GLOB_FTL_Cache_Release(void)
{
	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	free_memory();
}

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     FTL_Cache_If_Hit
 * Inputs:       Page Address
 * Outputs:      Block number / UNHIT_CACHE_ITEM
 * Description:  Determines if the addressed page is in cache
 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
static u16 FTL_Cache_If_Hit(u64 page_addr)
{
	u16 item;
	u64 addr;
	int i;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	item = UNHIT_CACHE_ITEM;
	for (i = 0; i < CACHE_ITEM_NUM; i++) {
		addr = Cache.array[i].address;
		if ((page_addr >= addr) &&
			(page_addr < (addr + Cache.cache_item_size))) {
			item = i;
			break;
		}
	}

	return item;
}

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     FTL_Calculate_LRU
 * Inputs:       None
 * Outputs:      None
 * Description:  Calculate the least recently used cache block and record
 *               its index in the LRU field.
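 *               The item with the smallest use_cnt is chosen as the
 *               victim: use_cnt is bumped on every cache hit and is
 *               rescaled in FTL_Cache_Write, so it roughly tracks both
 *               recency and frequency of use.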
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ static void FTL_Calculate_LRU(void) { u16 i, bCurrentLRU, bTempCount; nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); bCurrentLRU = 0; bTempCount = MAX_WORD_VALUE; for (i = 0; i < CACHE_ITEM_NUM; i++) { if (Cache.array[i].use_cnt < bTempCount) { bCurrentLRU = i; bTempCount = Cache.array[i].use_cnt; } } Cache.LRU = bCurrentLRU; } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: FTL_Cache_Read_Page * Inputs: pointer to read buffer, logical address and cache item number * Outputs: None * Description: Read the page from the cached block addressed by blocknumber *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ static void FTL_Cache_Read_Page(u8 *data_buf, u64 logic_addr, u16 cache_item) { u8 *start_addr; nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); start_addr = Cache.array[cache_item].buf; start_addr += (u32)(((logic_addr - Cache.array[cache_item].address) >> DeviceInfo.nBitsInPageDataSize) * DeviceInfo.wPageDataSize); #if CMD_DMA GLOB_LLD_MemCopy_CMD(data_buf, start_addr, DeviceInfo.wPageDataSize, 0); ftl_cmd_cnt++; #else memcpy(data_buf, start_addr, DeviceInfo.wPageDataSize); #endif if (Cache.array[cache_item].use_cnt < MAX_WORD_VALUE) Cache.array[cache_item].use_cnt++; } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: FTL_Cache_Read_All * Inputs: pointer to read buffer,block address * Outputs: PASS=0 / FAIL =1 * Description: It reads pages in cache *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ static int FTL_Cache_Read_All(u8 *pData, u64 phy_addr) { int wResult = PASS; u32 Block; u32 lba; u16 Page; u16 PageCount; u32 *pbt = (u32 *)g_pBlockTable; u32 i; Block = BLK_FROM_ADDR(phy_addr); Page = PAGE_FROM_ADDR(phy_addr, Block); PageCount = Cache.pages_per_item; nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s, Block: 0x%x\n", __FILE__, __LINE__, __func__, Block); lba = 0xffffffff; for (i = 0; i < DeviceInfo.wDataBlockNum; i++) { if ((pbt[i] & (~BAD_BLOCK)) == Block) { lba = i; if (IS_SPARE_BLOCK(i) || IS_BAD_BLOCK(i) || IS_DISCARDED_BLOCK(i)) { /* Add by yunpeng -2008.12.3 */ #if CMD_DMA GLOB_LLD_MemCopy_CMD(pData, g_temp_buf, PageCount * DeviceInfo.wPageDataSize, 0); ftl_cmd_cnt++; #else memset(pData, 0xFF, PageCount * DeviceInfo.wPageDataSize); #endif return wResult; } else { continue; /* break ?? 
*/
			}
		}
	}

	if (0xffffffff == lba)
		printk(KERN_ERR "FTL_Cache_Read_All: Block is not found in BT\n");

#if CMD_DMA
	wResult = GLOB_LLD_Read_Page_Main_cdma(pData, Block, Page,
			PageCount, LLD_CMD_FLAG_MODE_CDMA);
	if (DeviceInfo.MLCDevice) {
		g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
		nand_dbg_print(NAND_DBG_DEBUG,
			"Read Counter modified in ftl_cmd_cnt %u"
			" Block %u Counter %u\n",
			ftl_cmd_cnt, (unsigned int)Block,
			g_pReadCounter[Block -
			DeviceInfo.wSpectraStartBlock]);

		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
		p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
		p_BTableChangesDelta->RC_Index =
			Block - DeviceInfo.wSpectraStartBlock;
		p_BTableChangesDelta->RC_Entry_Value =
			g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock];
		p_BTableChangesDelta->ValidFields = 0xC0;

		ftl_cmd_cnt++;

		if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
			MAX_READ_COUNTER)
			FTL_Read_Disturbance(Block);
		if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}
	} else {
		ftl_cmd_cnt++;
	}
#else
	wResult = GLOB_LLD_Read_Page_Main(pData, Block, Page, PageCount);
	if (wResult == FAIL)
		return wResult;

	if (DeviceInfo.MLCDevice) {
		g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
		if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
			MAX_READ_COUNTER)
			FTL_Read_Disturbance(Block);
		if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
			g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
			FTL_Write_IN_Progress_Block_Table_Page();
		}
	}
#endif
	return wResult;
}

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     FTL_Cache_Write_All
 * Inputs:       pointer to cache in sys memory
 *               address of free block in flash
 * Outputs:      PASS=0 / FAIL=1
 * Description:  writes all the pages of the block in cache to flash
 *
 *               NOTE: need to make sure this works ok when cache is
 *               limited to a partial block. This is where copy-back
 *               would be activated. This would require knowing which
 *               pages in the cached block are clean/dirty. Right now we
 *               only know if the whole block is clean/dirty.
 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
static int FTL_Cache_Write_All(u8 *pData, u64 blk_addr)
{
	u16 wResult = PASS;
	u32 Block;
	u16 Page;
	u16 PageCount;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);
	nand_dbg_print(NAND_DBG_DEBUG, "This block %d going to be written "
		"on %d\n", cache_block_to_write,
		(u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize));

	Block = BLK_FROM_ADDR(blk_addr);
	Page = PAGE_FROM_ADDR(blk_addr, Block);
	PageCount = Cache.pages_per_item;

#if CMD_DMA
	if (FAIL == GLOB_LLD_Write_Page_Main_cdma(pData,
		Block, Page, PageCount)) {
		nand_dbg_print(NAND_DBG_WARN,
			"NAND Program fail in %s, Line %d, "
			"Function: %s, new Bad Block %d generated! "
			"Need Bad Block replacing.\n",
			__FILE__, __LINE__, __func__, Block);
		wResult = FAIL;
	}
	ftl_cmd_cnt++;
#else
	if (FAIL == GLOB_LLD_Write_Page_Main(pData, Block, Page, PageCount)) {
		nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in %s,"
			" Line %d, Function %s, new Bad Block %d generated! "
			"Need Bad Block replacing.\n",
			__FILE__, __LINE__, __func__, Block);
		wResult = FAIL;
	}
#endif
	return wResult;
}

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     FTL_Copy_Block
 * Inputs:       source block address
 *               Destination block address
 * Outputs:      PASS=0 / FAIL=1
 * Description:  used only for static wear leveling to move the block
 *               containing static data to new blocks (more worn)
 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int FTL_Copy_Block(u64 old_blk_addr, u64 blk_addr)
{
	int i, r1, r2, wResult = PASS;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
		r1 = FTL_Cache_Read_All(g_pTempBuf,
			old_blk_addr + i * DeviceInfo.wPageDataSize);
		r2 = FTL_Cache_Write_All(g_pTempBuf,
			blk_addr + i * DeviceInfo.wPageDataSize);
		if ((ERR == r1) || (FAIL == r2)) {
			wResult = FAIL;
			break;
		}
	}

	return wResult;
}

/* Search the block table to find out the least worn block, then return it */
static u32 find_least_worn_blk_for_l2_cache(void)
{
	int i;
	u32 *pbt = (u32 *)g_pBlockTable;
	u8 least_wear_cnt = MAX_BYTE_VALUE;
	u32 least_wear_blk_idx = MAX_U32_VALUE;
	u32 phy_idx;

	for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
		if (IS_SPARE_BLOCK(i)) {
			phy_idx = (u32)((~BAD_BLOCK) & pbt[i]);
			if (phy_idx > DeviceInfo.wSpectraEndBlock)
				printk(KERN_ERR "find_least_worn_blk_for_l2_cache: "
					"Too big phy block num (%d)\n", phy_idx);
			if (g_pWearCounter[phy_idx - DeviceInfo.wSpectraStartBlock] <
				least_wear_cnt) {
				least_wear_cnt = g_pWearCounter[phy_idx -
					DeviceInfo.wSpectraStartBlock];
				least_wear_blk_idx = i;
			}
		}
	}

	nand_dbg_print(NAND_DBG_WARN,
		"find_least_worn_blk_for_l2_cache: "
		"found block %d with least wear counter (%d)\n",
		least_wear_blk_idx, least_wear_cnt);

	return least_wear_blk_idx;
}

/* Get blocks for Level2 Cache */
static int get_l2_cache_blks(void)
{
	int n;
	u32 blk;
	u32 *pbt = (u32 *)g_pBlockTable;

	for (n = 0; n < BLK_NUM_FOR_L2_CACHE; n++) {
		blk = find_least_worn_blk_for_l2_cache();
		if (blk >= DeviceInfo.wDataBlockNum) {
			nand_dbg_print(NAND_DBG_WARN,
				"get_l2_cache_blks: "
				"Not enough free NAND blocks (n: %d) for L2 Cache!\n", n);
			return FAIL;
		}
		/* Tag the free block as discarded in the block table */
		pbt[blk] = (pbt[blk] & (~BAD_BLOCK)) | DISCARD_BLOCK;
		/* Add the free block to the L2 Cache block array */
		cache_l2.blk_array[n] = pbt[blk] & (~BAD_BLOCK);
	}

	return PASS;
}

static int erase_l2_cache_blocks(void)
{
	int i, ret = PASS;
	u32 pblk, lblk = BAD_BLOCK;
	u64 addr;
	u32 *pbt = (u32 *)g_pBlockTable;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++) {
		pblk = cache_l2.blk_array[i];

		/* If the L2 cache block is invalid, then just skip it */
		if (MAX_U32_VALUE == pblk)
			continue;

		BUG_ON(pblk > DeviceInfo.wSpectraEndBlock);

		addr = (u64)pblk << DeviceInfo.nBitsInBlockDataSize;
		if (PASS == GLOB_FTL_Block_Erase(addr)) {
			/* Get logical block number of the erased block */
			lblk = FTL_Get_Block_Index(pblk);
			BUG_ON(BAD_BLOCK == lblk);
			/* Tag it as free in the block table */
			pbt[lblk] &= (u32)(~DISCARD_BLOCK);
			pbt[lblk] |= (u32)(SPARE_BLOCK);
		} else {
			MARK_BLOCK_AS_BAD(pbt[lblk]);
			ret = ERR;
		}
	}

	return ret;
}

/*
 * Merge the valid data pages in the L2 cache blocks into NAND: for each
 * node in the L2 cache table, read the current image of the logical
 * block (or start from an erased 0xff image if it holds no data yet),
 * overlay every page recorded in pages_array[] from the L2 cache
 * blocks, pick a fresh physical block via FTL_Replace_Block(), and
 * program the merged block back.
*/
static int flush_l2_cache(void)
{
	struct list_head *p;
	struct spectra_l2_cache_list *pnd, *tmp_pnd;
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 phy_blk, l2_blk;
	u64 addr;
	u16 l2_page;
	int i, ret = PASS;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (list_empty(&cache_l2.table.list)) /* No data to flush */
		return ret;

	//dump_cache_l2_table();

	if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
		g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
		FTL_Write_IN_Progress_Block_Table_Page();
	}

	list_for_each(p, &cache_l2.table.list) {
		pnd = list_entry(p, struct spectra_l2_cache_list, list);
		if (IS_SPARE_BLOCK(pnd->logical_blk_num) ||
			IS_BAD_BLOCK(pnd->logical_blk_num) ||
			IS_DISCARDED_BLOCK(pnd->logical_blk_num)) {
			nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n",
				__FILE__, __LINE__);
			memset(cache_l2_blk_buf, 0xff,
				DeviceInfo.wPagesPerBlock *
				DeviceInfo.wPageDataSize);
		} else {
			nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n",
				__FILE__, __LINE__);
			phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
			ret = GLOB_LLD_Read_Page_Main(cache_l2_blk_buf,
				phy_blk, 0, DeviceInfo.wPagesPerBlock);
			if (ret == FAIL) {
				printk(KERN_ERR "Read NAND page fail in %s, Line %d\n",
					__FILE__, __LINE__);
			}
		}

		for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
			if (pnd->pages_array[i] != MAX_U32_VALUE) {
				l2_blk = cache_l2.blk_array[
					(pnd->pages_array[i] >> 16) & 0xffff];
				l2_page = pnd->pages_array[i] & 0xffff;
				ret = GLOB_LLD_Read_Page_Main(cache_l2_page_buf,
					l2_blk, l2_page, 1);
				if (ret == FAIL) {
					printk(KERN_ERR "Read NAND page fail in %s, Line %d\n",
						__FILE__, __LINE__);
				}
				memcpy(cache_l2_blk_buf +
					i * DeviceInfo.wPageDataSize,
					cache_l2_page_buf,
					DeviceInfo.wPageDataSize);
			}
		}

		/* Find a free block and tag the original block as discarded */
		addr = (u64)pnd->logical_blk_num <<
			DeviceInfo.nBitsInBlockDataSize;
		ret = FTL_Replace_Block(addr);
		if (ret == FAIL) {
			printk(KERN_ERR "FTL_Replace_Block fail in %s, Line %d\n",
				__FILE__, __LINE__);
		}

		/* Write back the updated data into NAND */
		phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
		if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf,
			phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
			nand_dbg_print(NAND_DBG_WARN,
				"Program NAND block %d fail in %s, Line %d\n",
				phy_blk, __FILE__, __LINE__);
			/*
			 * This may not really be a bad block. So just tag it
			 * as discarded; then it has a chance to be erased
			 * during garbage collection. If it is really bad, the
			 * erase will fail and it will be marked as bad then.
			 * Otherwise it will be marked as free and can be used
			 * again.
			 */
			MARK_BLK_AS_DISCARD(pbt[pnd->logical_blk_num]);
			/* Find another free block and write it again */
			FTL_Replace_Block(addr);
			phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
			if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf,
				phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
				printk(KERN_ERR
					"Failed to write back block %d when flushing L2 cache. "
					"Some data will be lost!\n", phy_blk);
				MARK_BLOCK_AS_BAD(pbt[pnd->logical_blk_num]);
			}
		} else {
			/* Tag the new free block as a used block */
			pbt[pnd->logical_blk_num] &= (~SPARE_BLOCK);
		}
	}

	/* Destroy the L2 Cache table and free the memory of all nodes */
	list_for_each_entry_safe(pnd, tmp_pnd, &cache_l2.table.list, list) {
		list_del(&pnd->list);
		kfree(pnd);
	}

	/* Erase discarded L2 cache blocks */
	if (erase_l2_cache_blocks() != PASS)
		nand_dbg_print(NAND_DBG_WARN,
			"Erase L2 cache blocks error in %s, Line %d\n",
			__FILE__, __LINE__);

	/* Init the Level2 Cache data structure */
	for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
		cache_l2.blk_array[i] = MAX_U32_VALUE;
	cache_l2.cur_blk_idx = 0;
	cache_l2.cur_page_num = 0;
	INIT_LIST_HEAD(&cache_l2.table.list);
	cache_l2.table.logical_blk_num = MAX_U32_VALUE;

	return ret;
}

/*
 * Write back a changed victim cache item to the Level2 Cache
 * and update the L2 Cache table to map the change.
 * If the L2 Cache is full, then start the L2 Cache flush.
 */
static int write_back_to_l2_cache(u8 *buf, u64 logical_addr)
{
	u32 logical_blk_num;
	u16 logical_page_num;
	struct list_head *p;
	struct spectra_l2_cache_list *pnd, *pnd_new;
	u32 node_size;
	int i, found;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/*
	 * If the Level2 Cache table is empty, then it means either:
	 * 1. This is the first time the function is called after FTL init, or
	 * 2. The Level2 Cache has just been flushed
	 *
	 * So, 'steal' some free blocks from NAND for L2 Cache use,
	 * by just marking them as discarded in the block table
	 */
	if (list_empty(&cache_l2.table.list)) {
		BUG_ON(cache_l2.cur_blk_idx != 0);
		BUG_ON(cache_l2.cur_page_num != 0);
		BUG_ON(cache_l2.table.logical_blk_num != MAX_U32_VALUE);
		if (FAIL == get_l2_cache_blks()) {
			GLOB_FTL_Garbage_Collection();
			if (FAIL == get_l2_cache_blks()) {
				printk(KERN_ALERT "Fail to get L2 cache blks!\n");
				return FAIL;
			}
		}
	}

	logical_blk_num = BLK_FROM_ADDR(logical_addr);
	logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
	BUG_ON(logical_blk_num == MAX_U32_VALUE);

	/* Write the cache item data into the current position of L2 Cache */
#if CMD_DMA
	/*
	 * TODO
	 */
#else
	if (FAIL == GLOB_LLD_Write_Page_Main(buf,
		cache_l2.blk_array[cache_l2.cur_blk_idx],
		cache_l2.cur_page_num, 1)) {
		nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
			"%s, Line %d, new Bad Block %d generated!\n",
			__FILE__, __LINE__,
			cache_l2.blk_array[cache_l2.cur_blk_idx]);

		/* TODO: tag the current block as bad and try again */

		return FAIL;
	}
#endif

	/*
	 * Update the L2 Cache table.
	 *
	 * First search the table to see whether the logical block has
	 * already been mapped. If not, then kmalloc a new node for the
	 * logical block, fill in the data, and insert it into the list.
	 * Otherwise, just update the mapped node directly.
	 */
	found = 0;
	list_for_each(p, &cache_l2.table.list) {
		pnd = list_entry(p, struct spectra_l2_cache_list, list);
		if (pnd->logical_blk_num == logical_blk_num) {
			pnd->pages_array[logical_page_num] =
				(cache_l2.cur_blk_idx << 16) |
				cache_l2.cur_page_num;
			found = 1;
			break;
		}
	}

	if (!found) {
		/* Create a new node for the logical block here */

		/* The logical-page to L2-cache-page map array is
		 * located at the end of struct spectra_l2_cache_list,
		 * as a flexible-array-style tail:
		 *
		 *   node_size = sizeof(struct spectra_l2_cache_list) +
		 *               sizeof(u32) * DeviceInfo.wPagesPerBlock;
		 *
		 * Each pages_array[] entry packs a location as
		 * (L2 block index << 16) | L2 page number, and
		 * MAX_U32_VALUE means the page is not in the L2 cache.
*/
		node_size = sizeof(struct spectra_l2_cache_list) +
			sizeof(u32) * DeviceInfo.wPagesPerBlock;

		pnd_new = kmalloc(node_size, GFP_ATOMIC);
		if (!pnd_new) {
			printk(KERN_ERR "Failed to kmalloc in %s Line %d\n",
				__FILE__, __LINE__);
			/*
			 * TODO: Need to flush all the L2 cache into NAND ASAP
			 * since no memory available here
			 */
			/* Bail out now so the NULL node is not dereferenced */
			return FAIL;
		}

		pnd_new->logical_blk_num = logical_blk_num;
		for (i = 0; i < DeviceInfo.wPagesPerBlock; i++)
			pnd_new->pages_array[i] = MAX_U32_VALUE;
		pnd_new->pages_array[logical_page_num] =
			(cache_l2.cur_blk_idx << 16) | cache_l2.cur_page_num;
		list_add(&pnd_new->list, &cache_l2.table.list);
	}

	/* Advance the current position pointer of the L2 Cache */
	cache_l2.cur_page_num++;
	if (cache_l2.cur_page_num >= DeviceInfo.wPagesPerBlock) {
		cache_l2.cur_blk_idx++;
		if (cache_l2.cur_blk_idx >= BLK_NUM_FOR_L2_CACHE) {
			/* The L2 Cache is full. Need to flush it now */
			nand_dbg_print(NAND_DBG_WARN,
				"L2 Cache is full, will start to flush it\n");
			flush_l2_cache();
		} else {
			cache_l2.cur_page_num = 0;
		}
	}

	return PASS;
}

/*
 * Search the Level2 Cache table for the cache item.
 * If found, read the data from the NAND page of the L2 Cache;
 * otherwise, return FAIL.
 */
static int search_l2_cache(u8 *buf, u64 logical_addr)
{
	u32 logical_blk_num;
	u16 logical_page_num;
	struct list_head *p;
	struct spectra_l2_cache_list *pnd;
	u32 tmp = MAX_U32_VALUE;
	u32 phy_blk;
	u16 phy_page;
	int ret = FAIL;

	logical_blk_num = BLK_FROM_ADDR(logical_addr);
	logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);

	list_for_each(p, &cache_l2.table.list) {
		pnd = list_entry(p, struct spectra_l2_cache_list, list);
		if (pnd->logical_blk_num == logical_blk_num) {
			tmp = pnd->pages_array[logical_page_num];
			break;
		}
	}

	if (tmp != MAX_U32_VALUE) { /* Found valid map */
		phy_blk = cache_l2.blk_array[(tmp >> 16) & 0xFFFF];
		phy_page = tmp & 0xFFFF;
#if CMD_DMA
		/* TODO */
#else
		ret = GLOB_LLD_Read_Page_Main(buf, phy_blk, phy_page, 1);
#endif
	}

	return ret;
}

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     FTL_Cache_Write_Page
 * Inputs:       Pointer to buffer, page address, cache block number
 * Outputs:      none
 * Description:  It writes the data in Cache Block
 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
static void FTL_Cache_Write_Page(u8 *pData, u64 page_addr,
				u8 cache_blk, u16 flag)
{
	u8 *pDest;
	u64 addr;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	addr = Cache.array[cache_blk].address;
	pDest = Cache.array[cache_blk].buf;

	pDest += (unsigned long)(page_addr - addr);
	Cache.array[cache_blk].changed = SET;
#if CMD_DMA
#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
	int_cache[ftl_cmd_cnt].item = cache_blk;
	int_cache[ftl_cmd_cnt].cache.address =
		Cache.array[cache_blk].address;
	int_cache[ftl_cmd_cnt].cache.changed =
		Cache.array[cache_blk].changed;
#endif
	GLOB_LLD_MemCopy_CMD(pDest, pData, DeviceInfo.wPageDataSize, flag);
	ftl_cmd_cnt++;
#else
	memcpy(pDest, pData, DeviceInfo.wPageDataSize);
#endif
	if (Cache.array[cache_blk].use_cnt < MAX_WORD_VALUE)
		Cache.array[cache_blk].use_cnt++;
}

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     FTL_Cache_Write
 * Inputs:       none
 * Outputs:      PASS=0 / FAIL=1
 * Description:  It writes the least frequently used Cache block to flash
 *               if it has been changed
 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
static int FTL_Cache_Write(void)
{
	int i, bResult = PASS;
	u16 bNO, least_count = 0xFFFF;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__,
__LINE__, __func__); FTL_Calculate_LRU(); bNO = Cache.LRU; nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: " "Least used cache block is %d\n", bNO); if (Cache.array[bNO].changed != SET) return bResult; nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: Cache" " Block %d containing logical block %d is dirty\n", bNO, (u32)(Cache.array[bNO].address >> DeviceInfo.nBitsInBlockDataSize)); #if CMD_DMA #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE int_cache[ftl_cmd_cnt].item = bNO; int_cache[ftl_cmd_cnt].cache.address = Cache.array[bNO].address; int_cache[ftl_cmd_cnt].cache.changed = CLEAR; #endif #endif bResult = write_back_to_l2_cache(Cache.array[bNO].buf, Cache.array[bNO].address); if (bResult != ERR) Cache.array[bNO].changed = CLEAR; least_count = Cache.array[bNO].use_cnt; for (i = 0; i < CACHE_ITEM_NUM; i++) { if (i == bNO) continue; if (Cache.array[i].use_cnt > 0) Cache.array[i].use_cnt -= least_count; } return bResult; } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: FTL_Cache_Read * Inputs: Page address * Outputs: PASS=0 / FAIL=1 * Description: It reads the block from device in Cache Block * Set the LRU count to 1 * Mark the Cache Block as clean *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ static int FTL_Cache_Read(u64 logical_addr) { u64 item_addr, phy_addr; u16 num; int ret; nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); num = Cache.LRU; /* The LRU cache item will be overwritten */ item_addr = (u64)GLOB_u64_Div(logical_addr, Cache.cache_item_size) * Cache.cache_item_size; Cache.array[num].address = item_addr; Cache.array[num].use_cnt = 1; Cache.array[num].changed = CLEAR; #if CMD_DMA #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE int_cache[ftl_cmd_cnt].item = num; int_cache[ftl_cmd_cnt].cache.address = Cache.array[num].address; int_cache[ftl_cmd_cnt].cache.changed = Cache.array[num].changed; #endif #endif /* * Search in L2 Cache. If hit, fill data into L1 Cache item buffer, * Otherwise, read it from NAND */ ret = search_l2_cache(Cache.array[num].buf, logical_addr); if (PASS == ret) /* Hit in L2 Cache */ return ret; /* Compute the physical start address of NAND device according to */ /* the logical start address of the cache item (LRU cache item) */ phy_addr = FTL_Get_Physical_Block_Addr(item_addr) + GLOB_u64_Remainder(item_addr, 2); return FTL_Cache_Read_All(Cache.array[num].buf, phy_addr); } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: FTL_Check_Block_Table * Inputs: ? 
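 *               (wOldTable is accepted but not referenced by the
 *               current implementation)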
 * Outputs:      PASS=0 / FAIL=1
 * Description:  It checks the correctness of each block table entry
 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
static int FTL_Check_Block_Table(int wOldTable)
{
	u32 i;
	int wResult = PASS;
	u32 blk_idx;
	u32 *pbt = (u32 *)g_pBlockTable;
	u8 *pFlag = flag_check_blk_table;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (NULL != pFlag) {
		memset(pFlag, FAIL, DeviceInfo.wDataBlockNum);
		for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
			blk_idx = (u32)(pbt[i] & (~BAD_BLOCK));

			/*
			 * 20081006/KBV - Changed to pFlag[i] reference
			 * to avoid buffer overflow
			 */

			/*
			 * 2008-10-20 Yunpeng Note: This change avoids the
			 * buffer overflow, but changes what the code does,
			 * so it should be rewritten later
			 */
			if ((blk_idx > DeviceInfo.wSpectraEndBlock) ||
				PASS == pFlag[i]) {
				wResult = FAIL;
				break;
			} else {
				pFlag[i] = PASS;
			}
		}
	}

	return wResult;
}

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     FTL_Write_Block_Table
 * Inputs:       flags (wForce)
 * Outputs:      0=Block Table was updated. No write done. 1=Block write
 *               needs to happen. -1 Error
 * Description:  It writes the block table
 *               The block table is always mapped to LBA 0, which in turn
 *               is mapped to any physical block
 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
static int FTL_Write_Block_Table(int wForce)
{
	u32 *pbt = (u32 *)g_pBlockTable;
	int wSuccess = PASS;
	u32 wTempBlockTableIndex;
	u16 bt_pages, new_bt_offset;
	u8 blockchangeoccured = 0;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();

	if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus)
		return 0;

	if (PASS == wForce) {
		g_wBlockTableOffset =
			(u16)(DeviceInfo.wPagesPerBlock - bt_pages);
#if CMD_DMA
		p_BTableChangesDelta =
			(struct BTableChangesDelta *)g_pBTDelta_Free;
		g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

		p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
		p_BTableChangesDelta->g_wBlockTableOffset =
			g_wBlockTableOffset;
		p_BTableChangesDelta->ValidFields = 0x01;
#endif
	}

	nand_dbg_print(NAND_DBG_DEBUG,
		"Inside FTL_Write_Block_Table: block %d Page:%d\n",
		g_wBlockTableIndex, g_wBlockTableOffset);

	do {
		new_bt_offset = g_wBlockTableOffset + bt_pages + 1;
		if ((0 == (new_bt_offset % DeviceInfo.wPagesPerBlock)) ||
			(new_bt_offset > DeviceInfo.wPagesPerBlock) ||
			(FAIL == wSuccess)) {
			wTempBlockTableIndex = FTL_Replace_Block_Table();
			if (BAD_BLOCK == wTempBlockTableIndex)
				return ERR;
			if (!blockchangeoccured) {
				bt_block_changed = 1;
				blockchangeoccured = 1;
			}

			g_wBlockTableIndex = wTempBlockTableIndex;
			g_wBlockTableOffset = 0;
			pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
#if CMD_DMA
			p_BTableChangesDelta =
				(struct BTableChangesDelta *)g_pBTDelta_Free;
			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

			p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
			p_BTableChangesDelta->g_wBlockTableOffset =
				g_wBlockTableOffset;
			p_BTableChangesDelta->g_wBlockTableIndex =
				g_wBlockTableIndex;
			p_BTableChangesDelta->ValidFields = 0x03;

			p_BTableChangesDelta =
				(struct BTableChangesDelta *)g_pBTDelta_Free;
			g_pBTDelta_Free += sizeof(struct BTableChangesDelta);

			p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
			p_BTableChangesDelta->BT_Index = BLOCK_TABLE_INDEX;
			p_BTableChangesDelta->BT_Entry_Value =
				pbt[BLOCK_TABLE_INDEX];
			p_BTableChangesDelta->ValidFields = 0x0C;
#endif
		}

		wSuccess = FTL_Write_Block_Table_Data();
		if (FAIL == wSuccess)
			MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
	} while (FAIL == wSuccess);
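	/*
	 * The table data has been programmed successfully, so the
	 * in-memory table is no longer "in progress" relative to flash.
	 */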
	g_cBlockTableStatus = CURRENT_BLOCK_TABLE;

	return 1;
}

static int force_format_nand(void)
{
	u32 i;

	/* Force erase the whole unprotected physical partition of NAND */
	printk(KERN_ALERT "Start to force erase whole NAND device ...\n");
	printk(KERN_ALERT "From physical block %d to %d\n",
		DeviceInfo.wSpectraStartBlock, DeviceInfo.wSpectraEndBlock);
	for (i = DeviceInfo.wSpectraStartBlock; i <= DeviceInfo.wSpectraEndBlock; i++) {
		if (GLOB_LLD_Erase_Block(i))
			printk(KERN_ERR "Failed to force erase NAND block %d\n", i);
	}
	printk(KERN_ALERT "Force Erase ends. Please reboot the system ...\n");
	while (1);

	return PASS;
}

int GLOB_FTL_Flash_Format(void)
{
	//return FTL_Format_Flash(1);
	return force_format_nand();
}

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     FTL_Search_Block_Table_IN_Block
 * Inputs:       Block Number
 *               Pointer to page
 * Outputs:      PASS / FAIL
 *               Page containing the block table
 * Description:  It searches the block table in the block
 *               passed as an argument.
 *
 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
					u8 BT_Tag, u16 *Page)
{
	u16 i, j, k;
	u16 Result = PASS;
	u16 Last_IPF = 0;
	u8 BT_Found = 0;
	u8 *tagarray;
	u8 *tempbuf = tmp_buf_search_bt_in_block;
	u8 *pSpareBuf = spare_buf_search_bt_in_block;
	u8 *pSpareBufBTLastPage = spare_buf_bt_search_bt_in_block;
	u8 bt_flag_last_page = 0xFF;
	u8 search_in_previous_pages = 0;
	u16 bt_pages;

	nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	nand_dbg_print(NAND_DBG_DEBUG,
		"Searching for block table in block %u\n",
		(unsigned int)BT_Block);

	bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();

	for (i = bt_pages; i < DeviceInfo.wPagesPerBlock;
				i += (bt_pages + 1)) {
		nand_dbg_print(NAND_DBG_DEBUG,
			"Searching last IPF: %d\n", i);
		Result = GLOB_LLD_Read_Page_Main_Polling(tempbuf,
							BT_Block, i, 1);

		if (0 == memcmp(tempbuf, g_pIPF, DeviceInfo.wPageDataSize)) {
			if ((i + bt_pages + 1) < DeviceInfo.wPagesPerBlock) {
				continue;
			} else {
				search_in_previous_pages = 1;
				Last_IPF = i;
			}
		}

		if (!search_in_previous_pages) {
			if (i != bt_pages) {
				i -= (bt_pages + 1);
				Last_IPF = i;
			}
		}

		if (0 == Last_IPF)
			break;

		if (!search_in_previous_pages) {
			i = i + 1;
			nand_dbg_print(NAND_DBG_DEBUG,
				"Reading the spare area of Block %u Page %u",
				(unsigned int)BT_Block, i);
			Result = GLOB_LLD_Read_Page_Spare(pSpareBuf,
							BT_Block, i, 1);
			nand_dbg_print(NAND_DBG_DEBUG,
				"Reading the spare area of Block %u Page %u",
				(unsigned int)BT_Block, i + bt_pages - 1);
			Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
				BT_Block, i + bt_pages - 1, 1);

			k = 0;
			j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
			if (j) {
				for (; k < j; k++) {
					if (tagarray[k] == BT_Tag)
						break;
				}
			}

			if (k < j)
				bt_flag = tagarray[k];
			else
				Result = FAIL;

			if (Result == PASS) {
				k = 0;
				j = FTL_Extract_Block_Table_Tag(
					pSpareBufBTLastPage, &tagarray);
				if (j) {
					for (; k < j; k++) {
						if (tagarray[k] == BT_Tag)
							break;
					}
				}

				if (k < j)
					bt_flag_last_page = tagarray[k];
				else
					Result = FAIL;

				if (Result == PASS) {
					if (bt_flag == bt_flag_last_page) {
						nand_dbg_print(NAND_DBG_DEBUG,
							"Block table is found"
							" in page after IPF "
							"at block %d "
							"page %d\n",
							(int)BT_Block, i);
						BT_Found = 1;
						*Page = i;
						g_cBlockTableStatus =
							CURRENT_BLOCK_TABLE;
						break;
					} else {
						Result = FAIL;
					}
				}
			}
		}

		if (search_in_previous_pages)
			i = i - bt_pages;
		else
			i = i - (bt_pages + 1);

		Result = PASS;

		nand_dbg_print(NAND_DBG_DEBUG,
			"Reading the spare area of Block %d Page %d",
			(int)BT_Block, i);

		Result =
GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1); nand_dbg_print(NAND_DBG_DEBUG, "Reading the spare area of Block %u Page %u", (unsigned int)BT_Block, i + bt_pages - 1); Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage, BT_Block, i + bt_pages - 1, 1); k = 0; j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray); if (j) { for (; k < j; k++) { if (tagarray[k] == BT_Tag) break; } } if (k < j) bt_flag = tagarray[k]; else Result = FAIL; if (Result == PASS) { k = 0; j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage, &tagarray); if (j) { for (; k < j; k++) { if (tagarray[k] == BT_Tag) break; } } if (k < j) { bt_flag_last_page = tagarray[k]; } else { Result = FAIL; break; } if (Result == PASS) { if (bt_flag == bt_flag_last_page) { nand_dbg_print(NAND_DBG_DEBUG, "Block table is found " "in page prior to IPF " "at block %u page %d\n", (unsigned int)BT_Block, i); BT_Found = 1; *Page = i; g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE; break; } else { Result = FAIL; break; } } } } if (Result == FAIL) { if ((Last_IPF > bt_pages) && (i < Last_IPF) && (!BT_Found)) { BT_Found = 1; *Page = i - (bt_pages + 1); } if ((Last_IPF == bt_pages) && (i < Last_IPF) && (!BT_Found)) goto func_return; } if (Last_IPF == 0) { i = 0; Result = PASS; nand_dbg_print(NAND_DBG_DEBUG, "Reading the spare area of " "Block %u Page %u", (unsigned int)BT_Block, i); Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1); nand_dbg_print(NAND_DBG_DEBUG, "Reading the spare area of Block %u Page %u", (unsigned int)BT_Block, i + bt_pages - 1); Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage, BT_Block, i + bt_pages - 1, 1); k = 0; j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray); if (j) { for (; k < j; k++) { if (tagarray[k] == BT_Tag) break; } } if (k < j) bt_flag = tagarray[k]; else Result = FAIL; if (Result == PASS) { k = 0; j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage, &tagarray); if (j) { for (; k < j; k++) { if (tagarray[k] == BT_Tag) break; } } if (k < j) bt_flag_last_page = tagarray[k]; else Result = FAIL; if (Result == PASS) { if (bt_flag == bt_flag_last_page) { nand_dbg_print(NAND_DBG_DEBUG, "Block table is found " "in page after IPF at " "block %u page %u\n", (unsigned int)BT_Block, (unsigned int)i); BT_Found = 1; *Page = i; g_cBlockTableStatus = CURRENT_BLOCK_TABLE; goto func_return; } else { Result = FAIL; } } } if (Result == FAIL) goto func_return; } func_return: return Result; } u8 *get_blk_table_start_addr(void) { return g_pBlockTable; } unsigned long get_blk_table_len(void) { return DeviceInfo.wDataBlockNum * sizeof(u32); } u8 *get_wear_leveling_table_start_addr(void) { return g_pWearCounter; } unsigned long get_wear_leveling_table_len(void) { return DeviceInfo.wDataBlockNum * sizeof(u8); } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: FTL_Read_Block_Table * Inputs: none * Outputs: PASS / FAIL * Description: read the flash spare area and find a block containing the * most recent block table(having largest block_table_counter). * Find the last written Block table in this block. * Check the correctness of Block Table * If CDMA is enabled, this function is called in * polling mode. 
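 * (i.e. pages are fetched with GLOB_LLD_Read_Page_Main_Polling
 * instead of being queued on the CDMA command chain)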
 * We don't need to store changes in Block table in this
 * function as it is called only at initialization
 *
 * Note: Currently this function is called at initialization
 * before any read/erase/write command is issued to flash, so
 * there is no need to wait for the CDMA list to complete as of now
 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
static int FTL_Read_Block_Table(void)
{
	u16 i = 0;
	int k, j;
	u8 *tempBuf, *tagarray;
	int wResult = FAIL;
	int status = FAIL;
	u8 block_table_found = 0;
	int search_result;
	u32 Block;
	u16 Page = 0;
	u16 PageCount;
	u16 bt_pages;
	int wBytesCopied = 0, tempvar;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	tempBuf = tmp_buf1_read_blk_table;
	bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();

	for (j = DeviceInfo.wSpectraStartBlock;
		j <= (int)DeviceInfo.wSpectraEndBlock;
			j++) {
		status = GLOB_LLD_Read_Page_Spare(tempBuf, j, 0, 1);
		k = 0;
		i = FTL_Extract_Block_Table_Tag(tempBuf, &tagarray);
		if (i) {
			status = GLOB_LLD_Read_Page_Main_Polling(tempBuf,
								j, 0, 1);
			for (; k < i; k++) {
				if (tagarray[k] == tempBuf[3])
					break;
			}
		}

		if (k < i)
			k = tagarray[k];
		else
			continue;

		nand_dbg_print(NAND_DBG_DEBUG,
			"Block table is contained in Block %u (counter %u)\n",
			(unsigned int)j, (unsigned int)k);

		if (g_pBTBlocks[k - FIRST_BT_ID] == BTBLOCK_INVAL) {
			g_pBTBlocks[k - FIRST_BT_ID] = j;
			block_table_found = 1;
		} else {
			printk(KERN_ERR "FTL_Read_Block_Table - "
				"This should never happen. "
				"Two block tables have the same counter %u!\n", k);
		}
	}

	if (block_table_found) {
		if (g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL &&
		g_pBTBlocks[LAST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) {
			j = LAST_BT_ID;
			while ((j > FIRST_BT_ID) &&
			(g_pBTBlocks[j - FIRST_BT_ID] != BTBLOCK_INVAL))
				j--;
			if (j == FIRST_BT_ID) {
				j = LAST_BT_ID;
				last_erased = LAST_BT_ID;
			} else {
				last_erased = (u8)j + 1;
				while ((j > FIRST_BT_ID) && (BTBLOCK_INVAL ==
					g_pBTBlocks[j - FIRST_BT_ID]))
					j--;
			}
		} else {
			j = FIRST_BT_ID;
			while (g_pBTBlocks[j - FIRST_BT_ID] == BTBLOCK_INVAL)
				j++;
			last_erased = (u8)j;
			while ((j < LAST_BT_ID) && (BTBLOCK_INVAL !=
				g_pBTBlocks[j - FIRST_BT_ID]))
				j++;
			if (g_pBTBlocks[j - FIRST_BT_ID] == BTBLOCK_INVAL)
				j--;
		}

		if (last_erased > j)
			j += (1 + LAST_BT_ID - FIRST_BT_ID);

		for (; (j >= last_erased) && (FAIL == wResult); j--) {
			i = (j - FIRST_BT_ID) %
				(1 + LAST_BT_ID - FIRST_BT_ID);
			search_result =
			FTL_Search_Block_Table_IN_Block(g_pBTBlocks[i],
						i + FIRST_BT_ID, &Page);
			if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
				block_table_found = 0;

			while ((search_result == PASS) && (FAIL == wResult)) {
				nand_dbg_print(NAND_DBG_DEBUG,
					"FTL_Read_Block_Table: "
					"Block: %u Page: %u "
					"contains block table\n",
					(unsigned int)g_pBTBlocks[i],
					(unsigned int)Page);

				tempBuf = tmp_buf2_read_blk_table;

				for (k = 0; k < bt_pages; k++) {
					Block = g_pBTBlocks[i];
					PageCount = 1;

					status =
					GLOB_LLD_Read_Page_Main_Polling(
					tempBuf, Block, Page, PageCount);

					tempvar = k ?
0 : 4; wBytesCopied += FTL_Copy_Block_Table_From_Flash( tempBuf + tempvar, DeviceInfo.wPageDataSize - tempvar, wBytesCopied); Page++; } wResult = FTL_Check_Block_Table(FAIL); if (FAIL == wResult) { block_table_found = 0; if (Page > bt_pages) Page -= ((bt_pages<<1) + 1); else search_result = FAIL; } } } } if (PASS == wResult) { if (!block_table_found) FTL_Execute_SPL_Recovery(); if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE) g_wBlockTableOffset = (u16)Page + 1; else g_wBlockTableOffset = (u16)Page - bt_pages; g_wBlockTableIndex = (u32)g_pBTBlocks[i]; #if CMD_DMA if (DeviceInfo.MLCDevice) memcpy(g_pBTStartingCopy, g_pBlockTable, DeviceInfo.wDataBlockNum * sizeof(u32) + DeviceInfo.wDataBlockNum * sizeof(u8) + DeviceInfo.wDataBlockNum * sizeof(u16)); else memcpy(g_pBTStartingCopy, g_pBlockTable, DeviceInfo.wDataBlockNum * sizeof(u32) + DeviceInfo.wDataBlockNum * sizeof(u8)); #endif } if (FAIL == wResult) printk(KERN_ERR "Yunpeng - " "Can not find valid spectra block table!\n"); #if AUTO_FORMAT_FLASH if (FAIL == wResult) { nand_dbg_print(NAND_DBG_DEBUG, "doing auto-format\n"); wResult = FTL_Format_Flash(0); } #endif return wResult; } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: FTL_Get_Page_Num * Inputs: Size in bytes * Outputs: Size in pages * Description: It calculates the pages required for the length passed *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ static u32 FTL_Get_Page_Num(u64 length) { return (u32)((length >> DeviceInfo.nBitsInPageDataSize) + (GLOB_u64_Remainder(length , 1) > 0 ? 1 : 0)); } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: FTL_Get_Physical_Block_Addr * Inputs: Block Address (byte format) * Outputs: Physical address of the block. * Description: It translates LBA to PBA by returning address stored * at the LBA location in the block table *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ static u64 FTL_Get_Physical_Block_Addr(u64 logical_addr) { u32 *pbt; u64 physical_addr; nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); pbt = (u32 *)g_pBlockTable; physical_addr = (u64) DeviceInfo.wBlockDataSize * (pbt[BLK_FROM_ADDR(logical_addr)] & (~BAD_BLOCK)); return physical_addr; } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: FTL_Get_Block_Index * Inputs: Physical Block no. * Outputs: Logical block no. /BAD_BLOCK * Description: It returns the logical block no. 
for the PBA passed *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ static u32 FTL_Get_Block_Index(u32 wBlockNum) { u32 *pbt = (u32 *)g_pBlockTable; u32 i; nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); for (i = 0; i < DeviceInfo.wDataBlockNum; i++) if (wBlockNum == (pbt[i] & (~BAD_BLOCK))) return i; return BAD_BLOCK; } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: GLOB_FTL_Wear_Leveling * Inputs: none * Outputs: PASS=0 * Description: This is static wear leveling (done by explicit call) * do complete static wear leveling * do complete garbage collection *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ int GLOB_FTL_Wear_Leveling(void) { nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); FTL_Static_Wear_Leveling(); GLOB_FTL_Garbage_Collection(); return PASS; } static void find_least_most_worn(u8 *chg, u32 *least_idx, u8 *least_cnt, u32 *most_idx, u8 *most_cnt) { u32 *pbt = (u32 *)g_pBlockTable; u32 idx; u8 cnt; int i; for (i = BLOCK_TABLE_INDEX + 1; i < DeviceInfo.wDataBlockNum; i++) { if (IS_BAD_BLOCK(i) || PASS == chg[i]) continue; idx = (u32) ((~BAD_BLOCK) & pbt[i]); cnt = g_pWearCounter[idx - DeviceInfo.wSpectraStartBlock]; if (IS_SPARE_BLOCK(i)) { if (cnt > *most_cnt) { *most_cnt = cnt; *most_idx = idx; } } if (IS_DATA_BLOCK(i)) { if (cnt < *least_cnt) { *least_cnt = cnt; *least_idx = idx; } } if (PASS == chg[*most_idx] || PASS == chg[*least_idx]) { debug_boundary_error(*most_idx, DeviceInfo.wDataBlockNum, 0); debug_boundary_error(*least_idx, DeviceInfo.wDataBlockNum, 0); continue; } } } static int move_blks_for_wear_leveling(u8 *chg, u32 *least_idx, u32 *rep_blk_num, int *result) { u32 *pbt = (u32 *)g_pBlockTable; u32 rep_blk; int j, ret_cp_blk, ret_erase; int ret = PASS; chg[*least_idx] = PASS; debug_boundary_error(*least_idx, DeviceInfo.wDataBlockNum, 0); rep_blk = FTL_Replace_MWBlock(); if (rep_blk != BAD_BLOCK) { nand_dbg_print(NAND_DBG_DEBUG, "More than two spare blocks exist so do it\n"); nand_dbg_print(NAND_DBG_DEBUG, "Block Replaced is %d\n", rep_blk); chg[rep_blk] = PASS; if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) { g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE; FTL_Write_IN_Progress_Block_Table_Page(); } for (j = 0; j < RETRY_TIMES; j++) { ret_cp_blk = FTL_Copy_Block((u64)(*least_idx) * DeviceInfo.wBlockDataSize, (u64)rep_blk * DeviceInfo.wBlockDataSize); if (FAIL == ret_cp_blk) { ret_erase = GLOB_FTL_Block_Erase((u64)rep_blk * DeviceInfo.wBlockDataSize); if (FAIL == ret_erase) MARK_BLOCK_AS_BAD(pbt[rep_blk]); } else { nand_dbg_print(NAND_DBG_DEBUG, "FTL_Copy_Block == OK\n"); break; } } if (j < RETRY_TIMES) { u32 tmp; u32 old_idx = FTL_Get_Block_Index(*least_idx); u32 rep_idx = FTL_Get_Block_Index(rep_blk); tmp = (u32)(DISCARD_BLOCK | pbt[old_idx]); pbt[old_idx] = (u32)((~SPARE_BLOCK) & pbt[rep_idx]); pbt[rep_idx] = tmp; #if CMD_DMA p_BTableChangesDelta = (struct BTableChangesDelta *) g_pBTDelta_Free; g_pBTDelta_Free += sizeof(struct BTableChangesDelta); p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt; p_BTableChangesDelta->BT_Index = old_idx; p_BTableChangesDelta->BT_Entry_Value = pbt[old_idx]; p_BTableChangesDelta->ValidFields = 0x0C; p_BTableChangesDelta = (struct BTableChangesDelta *) g_pBTDelta_Free; g_pBTDelta_Free += sizeof(struct BTableChangesDelta); p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt; p_BTableChangesDelta->BT_Index = rep_idx; p_BTableChangesDelta->BT_Entry_Value = pbt[rep_idx]; 
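			/*
			 * ValidFields 0x0C marks BT_Index/BT_Entry_Value as
			 * the live members of this delta record.
			 */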
			p_BTableChangesDelta->ValidFields = 0x0C;
#endif
		} else {
			pbt[FTL_Get_Block_Index(rep_blk)] |= BAD_BLOCK;
#if CMD_DMA
			p_BTableChangesDelta =
				(struct BTableChangesDelta *)g_pBTDelta_Free;
			g_pBTDelta_Free +=
				sizeof(struct BTableChangesDelta);

			p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
			p_BTableChangesDelta->BT_Index =
				FTL_Get_Block_Index(rep_blk);
			p_BTableChangesDelta->BT_Entry_Value =
				pbt[FTL_Get_Block_Index(rep_blk)];
			p_BTableChangesDelta->ValidFields = 0x0C;
#endif
			*result = FAIL;
			ret = FAIL;
		}

		if (((*rep_blk_num)++) > WEAR_LEVELING_BLOCK_NUM)
			ret = FAIL;
	} else {
		printk(KERN_ERR "Less than 3 spare blocks exist so quit\n");
		ret = FAIL;
	}

	return ret;
}

/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
 * Function:     FTL_Static_Wear_Leveling
 * Inputs:       none
 * Outputs:      PASS=0 / FAIL=1
 * Description:  This is static wear leveling (done by explicit call)
 *               search for the most worn and least worn blocks
 *               if difference > GATE:
 *                   update the block table with the exchange
 *                   mark block table in flash as IN_PROGRESS
 *                   copy flash block
 *               the caller should handle GC clean up after calling
 *               this function
 *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
int FTL_Static_Wear_Leveling(void)
{
	u8 most_worn_cnt;
	u8 least_worn_cnt;
	u32 most_worn_idx;
	u32 least_worn_idx;
	int result = PASS;
	int go_on = PASS;
	u32 replaced_blks = 0;
	u8 *chang_flag = flags_static_wear_leveling;

	nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	if (!chang_flag)
		return FAIL;

	memset(chang_flag, FAIL, DeviceInfo.wDataBlockNum);
	while (go_on == PASS) {
		nand_dbg_print(NAND_DBG_DEBUG,
			"starting static wear leveling\n");
		most_worn_cnt = 0;
		least_worn_cnt = 0xFF;
		least_worn_idx = BLOCK_TABLE_INDEX;
		most_worn_idx = BLOCK_TABLE_INDEX;

		find_least_most_worn(chang_flag, &least_worn_idx,
			&least_worn_cnt, &most_worn_idx, &most_worn_cnt);

		nand_dbg_print(NAND_DBG_DEBUG,
			"Used and least worn is block %u, whose count is %u\n",
			(unsigned int)least_worn_idx,
			(unsigned int)least_worn_cnt);

		nand_dbg_print(NAND_DBG_DEBUG,
			"Free and most worn is block %u, whose count is %u\n",
			(unsigned int)most_worn_idx,
			(unsigned int)most_worn_cnt);

		if ((most_worn_cnt > least_worn_cnt) &&
			(most_worn_cnt - least_worn_cnt > WEAR_LEVELING_GATE))
			go_on = move_blks_for_wear_leveling(chang_flag,
				&least_worn_idx, &replaced_blks, &result);
		else
			go_on = FAIL;
	}

	return result;
}

#if CMD_DMA
static int do_garbage_collection(u32 discard_cnt)
{
	u32 *pbt = (u32 *)g_pBlockTable;
	u32 pba;
	u8 bt_block_erased = 0;
	int i, cnt, ret = FAIL;
	u64 addr;

	i = 0;
	while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0) &&
			((ftl_cmd_cnt + 28) < 256)) {
		if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
				(pbt[i] & DISCARD_BLOCK)) {
			if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
				g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
				FTL_Write_IN_Progress_Block_Table_Page();
			}

			addr = FTL_Get_Physical_Block_Addr((u64)i *
						DeviceInfo.wBlockDataSize);
			pba = BLK_FROM_ADDR(addr);

			for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
				if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
					nand_dbg_print(NAND_DBG_DEBUG,
						"GC will erase BT block %u\n",
						(unsigned int)pba);
					discard_cnt--;
					i++;
					bt_block_erased = 1;
					break;
				}
			}

			if (bt_block_erased) {
				bt_block_erased = 0;
				continue;
			}

			addr = FTL_Get_Physical_Block_Addr((u64)i *
						DeviceInfo.wBlockDataSize);

			if (PASS == GLOB_FTL_Block_Erase(addr)) {
				pbt[i] &= (u32)(~DISCARD_BLOCK);
				pbt[i] |= (u32)(SPARE_BLOCK);
				p_BTableChangesDelta =
					(struct BTableChangesDelta *)
					g_pBTDelta_Free;
				g_pBTDelta_Free += sizeof(struct
BTableChangesDelta); p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt - 1; p_BTableChangesDelta->BT_Index = i; p_BTableChangesDelta->BT_Entry_Value = pbt[i]; p_BTableChangesDelta->ValidFields = 0x0C; discard_cnt--; ret = PASS; } else { MARK_BLOCK_AS_BAD(pbt[i]); } } i++; } return ret; } #else static int do_garbage_collection(u32 discard_cnt) { u32 *pbt = (u32 *)g_pBlockTable; u32 pba; u8 bt_block_erased = 0; int i, cnt, ret = FAIL; u64 addr; i = 0; while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0)) { if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) && (pbt[i] & DISCARD_BLOCK)) { if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) { g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE; FTL_Write_IN_Progress_Block_Table_Page(); } addr = FTL_Get_Physical_Block_Addr((u64)i * DeviceInfo.wBlockDataSize); pba = BLK_FROM_ADDR(addr); for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) { if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) { nand_dbg_print(NAND_DBG_DEBUG, "GC will erase BT block %d\n", pba); discard_cnt--; i++; bt_block_erased = 1; break; } } if (bt_block_erased) { bt_block_erased = 0; continue; } /* If the discard block is L2 cache block, then just skip it */ for (cnt = 0; cnt < BLK_NUM_FOR_L2_CACHE; cnt++) { if (cache_l2.blk_array[cnt] == pba) { nand_dbg_print(NAND_DBG_DEBUG, "GC will erase L2 cache blk %d\n", pba); break; } } if (cnt < BLK_NUM_FOR_L2_CACHE) { /* Skip it */ discard_cnt--; i++; continue; } addr = FTL_Get_Physical_Block_Addr((u64)i * DeviceInfo.wBlockDataSize); if (PASS == GLOB_FTL_Block_Erase(addr)) { pbt[i] &= (u32)(~DISCARD_BLOCK); pbt[i] |= (u32)(SPARE_BLOCK); discard_cnt--; ret = PASS; } else { MARK_BLOCK_AS_BAD(pbt[i]); } } i++; } return ret; } #endif /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: GLOB_FTL_Garbage_Collection * Inputs: none * Outputs: PASS / FAIL (returns the number of un-erased blocks * Description: search the block table for all discarded blocks to erase * for each discarded block: * set the flash block to IN_PROGRESS * erase the block * update the block table * write the block table to flash *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ int GLOB_FTL_Garbage_Collection(void) { u32 i; u32 wDiscard = 0; int wResult = FAIL; u32 *pbt = (u32 *)g_pBlockTable; nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); if (GC_Called) { printk(KERN_ALERT "GLOB_FTL_Garbage_Collection() " "has been re-entered! 
Exit.\n"); return PASS; } GC_Called = 1; GLOB_FTL_BT_Garbage_Collection(); for (i = 0; i < DeviceInfo.wDataBlockNum; i++) { if (IS_DISCARDED_BLOCK(i)) wDiscard++; } if (wDiscard <= 0) { GC_Called = 0; return wResult; } nand_dbg_print(NAND_DBG_DEBUG, "Found %d discarded blocks\n", wDiscard); FTL_Write_Block_Table(FAIL); wResult = do_garbage_collection(wDiscard); FTL_Write_Block_Table(FAIL); GC_Called = 0; return wResult; } #if CMD_DMA static int do_bt_garbage_collection(void) { u32 pba, lba; u32 *pbt = (u32 *)g_pBlockTable; u32 *pBTBlocksNode = (u32 *)g_pBTBlocks; u64 addr; int i, ret = FAIL; nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); if (BT_GC_Called) return PASS; BT_GC_Called = 1; for (i = last_erased; (i <= LAST_BT_ID) && (g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) + FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) && ((ftl_cmd_cnt + 28)) < 256; i++) { pba = pBTBlocksNode[i - FIRST_BT_ID]; lba = FTL_Get_Block_Index(pba); nand_dbg_print(NAND_DBG_DEBUG, "do_bt_garbage_collection: pba %d, lba %d\n", pba, lba); nand_dbg_print(NAND_DBG_DEBUG, "Block Table Entry: %d", pbt[lba]); if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) && (pbt[lba] & DISCARD_BLOCK)) { nand_dbg_print(NAND_DBG_DEBUG, "do_bt_garbage_collection_cdma: " "Erasing Block tables present in block %d\n", pba); addr = FTL_Get_Physical_Block_Addr((u64)lba * DeviceInfo.wBlockDataSize); if (PASS == GLOB_FTL_Block_Erase(addr)) { pbt[lba] &= (u32)(~DISCARD_BLOCK); pbt[lba] |= (u32)(SPARE_BLOCK); p_BTableChangesDelta = (struct BTableChangesDelta *) g_pBTDelta_Free; g_pBTDelta_Free += sizeof(struct BTableChangesDelta); p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt - 1; p_BTableChangesDelta->BT_Index = lba; p_BTableChangesDelta->BT_Entry_Value = pbt[lba]; p_BTableChangesDelta->ValidFields = 0x0C; ret = PASS; pBTBlocksNode[last_erased - FIRST_BT_ID] = BTBLOCK_INVAL; nand_dbg_print(NAND_DBG_DEBUG, "resetting bt entry at index %d " "value %d\n", i, pBTBlocksNode[i - FIRST_BT_ID]); if (last_erased == LAST_BT_ID) last_erased = FIRST_BT_ID; else last_erased++; } else { MARK_BLOCK_AS_BAD(pbt[lba]); } } } BT_GC_Called = 0; return ret; } #else static int do_bt_garbage_collection(void) { u32 pba, lba; u32 *pbt = (u32 *)g_pBlockTable; u32 *pBTBlocksNode = (u32 *)g_pBTBlocks; u64 addr; int i, ret = FAIL; nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); if (BT_GC_Called) return PASS; BT_GC_Called = 1; for (i = last_erased; (i <= LAST_BT_ID) && (g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) + FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL); i++) { pba = pBTBlocksNode[i - FIRST_BT_ID]; lba = FTL_Get_Block_Index(pba); nand_dbg_print(NAND_DBG_DEBUG, "do_bt_garbage_collection_cdma: pba %d, lba %d\n", pba, lba); nand_dbg_print(NAND_DBG_DEBUG, "Block Table Entry: %d", pbt[lba]); if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) && (pbt[lba] & DISCARD_BLOCK)) { nand_dbg_print(NAND_DBG_DEBUG, "do_bt_garbage_collection: " "Erasing Block tables present in block %d\n", pba); addr = FTL_Get_Physical_Block_Addr((u64)lba * DeviceInfo.wBlockDataSize); if (PASS == GLOB_FTL_Block_Erase(addr)) { pbt[lba] &= (u32)(~DISCARD_BLOCK); pbt[lba] |= (u32)(SPARE_BLOCK); ret = PASS; pBTBlocksNode[last_erased - FIRST_BT_ID] = BTBLOCK_INVAL; nand_dbg_print(NAND_DBG_DEBUG, "resetting bt entry at index %d " "value %d\n", i, pBTBlocksNode[i - FIRST_BT_ID]); if (last_erased == LAST_BT_ID) last_erased = FIRST_BT_ID; else last_erased++; } else { MARK_BLOCK_AS_BAD(pbt[lba]); } } } BT_GC_Called = 
0; return ret; } #endif /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: GLOB_FTL_BT_Garbage_Collection * Inputs: none * Outputs: PASS / FAIL (returns the number of un-erased blocks * Description: Erases discarded blocks containing Block table * *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ int GLOB_FTL_BT_Garbage_Collection(void) { return do_bt_garbage_collection(); } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: FTL_Replace_OneBlock * Inputs: Block number 1 * Block number 2 * Outputs: Replaced Block Number * Description: Interchange block table entries at wBlockNum and wReplaceNum * *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ static u32 FTL_Replace_OneBlock(u32 blk, u32 rep_blk) { u32 tmp_blk; u32 replace_node = BAD_BLOCK; u32 *pbt = (u32 *)g_pBlockTable; nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); if (rep_blk != BAD_BLOCK) { if (IS_BAD_BLOCK(blk)) tmp_blk = pbt[blk]; else tmp_blk = DISCARD_BLOCK | (~SPARE_BLOCK & pbt[blk]); replace_node = (u32) ((~SPARE_BLOCK) & pbt[rep_blk]); pbt[blk] = replace_node; pbt[rep_blk] = tmp_blk; #if CMD_DMA p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free; g_pBTDelta_Free += sizeof(struct BTableChangesDelta); p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt; p_BTableChangesDelta->BT_Index = blk; p_BTableChangesDelta->BT_Entry_Value = pbt[blk]; p_BTableChangesDelta->ValidFields = 0x0C; p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free; g_pBTDelta_Free += sizeof(struct BTableChangesDelta); p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt; p_BTableChangesDelta->BT_Index = rep_blk; p_BTableChangesDelta->BT_Entry_Value = pbt[rep_blk]; p_BTableChangesDelta->ValidFields = 0x0C; #endif } return replace_node; } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: FTL_Write_Block_Table_Data * Inputs: Block table size in pages * Outputs: PASS=0 / FAIL=1 * Description: Write block table data in flash * If first page and last page * Write data+BT flag * else * Write data * BT flag is a counter. 
Its value is incremented for block table * write in a new Block *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ static int FTL_Write_Block_Table_Data(void) { u64 dwBlockTableAddr, pTempAddr; u32 Block; u16 Page, PageCount; u8 *tempBuf = tmp_buf_write_blk_table_data; int wBytesCopied; u16 bt_pages; nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); dwBlockTableAddr = (u64)((u64)g_wBlockTableIndex * DeviceInfo.wBlockDataSize + (u64)g_wBlockTableOffset * DeviceInfo.wPageDataSize); pTempAddr = dwBlockTableAddr; bt_pages = FTL_Get_Block_Table_Flash_Size_Pages(); nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: " "page= %d BlockTableIndex= %d " "BlockTableOffset=%d\n", bt_pages, g_wBlockTableIndex, g_wBlockTableOffset); Block = BLK_FROM_ADDR(pTempAddr); Page = PAGE_FROM_ADDR(pTempAddr, Block); PageCount = 1; if (bt_block_changed) { if (bt_flag == LAST_BT_ID) { bt_flag = FIRST_BT_ID; g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block; } else if (bt_flag < LAST_BT_ID) { bt_flag++; g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block; } if ((bt_flag > (LAST_BT_ID-4)) && g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) { bt_block_changed = 0; GLOB_FTL_BT_Garbage_Collection(); } bt_block_changed = 0; nand_dbg_print(NAND_DBG_DEBUG, "Block Table Counter is %u Block %u\n", bt_flag, (unsigned int)Block); } memset(tempBuf, 0, 3); tempBuf[3] = bt_flag; wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf + 4, DeviceInfo.wPageDataSize - 4, 0); memset(&tempBuf[wBytesCopied + 4], 0xff, DeviceInfo.wPageSize - (wBytesCopied + 4)); FTL_Insert_Block_Table_Signature(&tempBuf[DeviceInfo.wPageDataSize], bt_flag); #if CMD_DMA memcpy(g_pNextBlockTable, tempBuf, DeviceInfo.wPageSize * sizeof(u8)); nand_dbg_print(NAND_DBG_DEBUG, "Writing First Page of Block Table " "Block %u Page %u\n", (unsigned int)Block, Page); if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(g_pNextBlockTable, Block, Page, 1, LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST)) { nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in " "%s, Line %d, Function: %s, " "new Bad Block %d generated!\n", __FILE__, __LINE__, __func__, Block); goto func_return; } ftl_cmd_cnt++; g_pNextBlockTable += ((DeviceInfo.wPageSize * sizeof(u8))); #else if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf, Block, Page, 1)) { nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in %s, Line %d, Function: %s, " "new Bad Block %d generated!\n", __FILE__, __LINE__, __func__, Block); goto func_return; } #endif if (bt_pages > 1) { PageCount = bt_pages - 1; if (PageCount > 1) { wBytesCopied += FTL_Copy_Block_Table_To_Flash(tempBuf, DeviceInfo.wPageDataSize * (PageCount - 1), wBytesCopied); #if CMD_DMA memcpy(g_pNextBlockTable, tempBuf, (PageCount - 1) * DeviceInfo.wPageDataSize); if (FAIL == GLOB_LLD_Write_Page_Main_cdma( g_pNextBlockTable, Block, Page + 1, PageCount - 1)) { nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in %s, Line %d, " "Function: %s, " "new Bad Block %d generated!\n", __FILE__, __LINE__, __func__, (int)Block); goto func_return; } ftl_cmd_cnt++; g_pNextBlockTable += (PageCount - 1) * DeviceInfo.wPageDataSize * sizeof(u8); #else if (FAIL == GLOB_LLD_Write_Page_Main(tempBuf, Block, Page + 1, PageCount - 1)) { nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in %s, Line %d, " "Function: %s, " "new Bad Block %d generated!\n", __FILE__, __LINE__, __func__, (int)Block); goto func_return; } #endif } wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf, DeviceInfo.wPageDataSize, wBytesCopied); 
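/* Added comment: this is the last page of the block table. Pad the rest
 * of the page buffer with 0xFF (the erased-NAND fill value) and stamp the
 * block-table signature into the spare area before programming it. */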
memset(&tempBuf[wBytesCopied], 0xff, DeviceInfo.wPageSize-wBytesCopied); FTL_Insert_Block_Table_Signature( &tempBuf[DeviceInfo.wPageDataSize], bt_flag); #if CMD_DMA memcpy(g_pNextBlockTable, tempBuf, DeviceInfo.wPageSize * sizeof(u8)); nand_dbg_print(NAND_DBG_DEBUG, "Writing the last Page of Block Table " "Block %u Page %u\n", (unsigned int)Block, Page + bt_pages - 1); if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma( g_pNextBlockTable, Block, Page + bt_pages - 1, 1, LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST)) { nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in %s, Line %d, " "Function: %s, new Bad Block %d generated!\n", __FILE__, __LINE__, __func__, Block); goto func_return; } ftl_cmd_cnt++; #else if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf, Block, Page+bt_pages - 1, 1)) { nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in %s, Line %d, " "Function: %s, " "new Bad Block %d generated!\n", __FILE__, __LINE__, __func__, Block); goto func_return; } #endif } nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: done\n"); func_return: return PASS; } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: FTL_Replace_Block_Table * Inputs: None * Outputs: PASS=0 / FAIL=1 * Description: Get a new block to write block table *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ static u32 FTL_Replace_Block_Table(void) { u32 blk; int gc; nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc); if ((BAD_BLOCK == blk) && (PASS == gc)) { GLOB_FTL_Garbage_Collection(); blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc); } if (BAD_BLOCK == blk) printk(KERN_ERR "%s, %s: There is no spare block. " "It should never happen\n", __FILE__, __func__); nand_dbg_print(NAND_DBG_DEBUG, "New Block table Block is %d\n", blk); return blk; } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: FTL_Replace_LWBlock * Inputs: Block number * Pointer to Garbage Collect flag * Outputs: * Description: Determine the least worn block by traversing * block table * Set Garbage collection to be called if number of spare * blocks is less than the Free Block Gate count * Change Block table entry to map least worn block for current * operation *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ static u32 FTL_Replace_LWBlock(u32 wBlockNum, int *pGarbageCollect) { u32 i; u32 *pbt = (u32 *)g_pBlockTable; u8 wLeastWornCounter = 0xFF; u32 wLeastWornIndex = BAD_BLOCK; u32 wSpareBlockNum = 0; u32 wDiscardBlockNum = 0; nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); if (IS_SPARE_BLOCK(wBlockNum)) { *pGarbageCollect = FAIL; pbt[wBlockNum] = (u32)(pbt[wBlockNum] & (~SPARE_BLOCK)); #if CMD_DMA p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free; g_pBTDelta_Free += sizeof(struct BTableChangesDelta); p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt; p_BTableChangesDelta->BT_Index = (u32)(wBlockNum); p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum]; p_BTableChangesDelta->ValidFields = 0x0C; #endif return pbt[wBlockNum]; } for (i = 0; i < DeviceInfo.wDataBlockNum; i++) { if (IS_DISCARDED_BLOCK(i)) wDiscardBlockNum++; if (IS_SPARE_BLOCK(i)) { u32 wPhysicalIndex = (u32)((~BAD_BLOCK) & pbt[i]); if (wPhysicalIndex > DeviceInfo.wSpectraEndBlock) printk(KERN_ERR "FTL_Replace_LWBlock: " "This should never occur!\n"); if (g_pWearCounter[wPhysicalIndex -
DeviceInfo.wSpectraStartBlock] < wLeastWornCounter) { wLeastWornCounter = g_pWearCounter[wPhysicalIndex - DeviceInfo.wSpectraStartBlock]; wLeastWornIndex = i; } wSpareBlockNum++; } } nand_dbg_print(NAND_DBG_WARN, "FTL_Replace_LWBlock: Least Worn Counter %d\n", (int)wLeastWornCounter); if ((wDiscardBlockNum >= NUM_FREE_BLOCKS_GATE) || (wSpareBlockNum <= NUM_FREE_BLOCKS_GATE)) *pGarbageCollect = PASS; else *pGarbageCollect = FAIL; nand_dbg_print(NAND_DBG_DEBUG, "FTL_Replace_LWBlock: Discarded Blocks %u Spare" " Blocks %u\n", (unsigned int)wDiscardBlockNum, (unsigned int)wSpareBlockNum); return FTL_Replace_OneBlock(wBlockNum, wLeastWornIndex); } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: FTL_Replace_MWBlock * Inputs: None * Outputs: most worn spare block no./BAD_BLOCK * Description: It finds most worn spare block. *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ static u32 FTL_Replace_MWBlock(void) { u32 i; u32 *pbt = (u32 *)g_pBlockTable; u8 wMostWornCounter = 0; u32 wMostWornIndex = BAD_BLOCK; u32 wSpareBlockNum = 0; nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); for (i = 0; i < DeviceInfo.wDataBlockNum; i++) { if (IS_SPARE_BLOCK(i)) { u32 wPhysicalIndex = (u32)((~SPARE_BLOCK) & pbt[i]); if (g_pWearCounter[wPhysicalIndex - DeviceInfo.wSpectraStartBlock] > wMostWornCounter) { wMostWornCounter = g_pWearCounter[wPhysicalIndex - DeviceInfo.wSpectraStartBlock]; wMostWornIndex = wPhysicalIndex; } wSpareBlockNum++; } } if (wSpareBlockNum <= 2) return BAD_BLOCK; return wMostWornIndex; } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: FTL_Replace_Block * Inputs: Block Address * Outputs: PASS=0 / FAIL=1 * Description: If block specified by blk_addr parameter is not free, * replace it with the least worn block. 
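* (If the block is already a spare, the code below simply claims it by
* clearing its SPARE_BLOCK flag; the least-worn replacement path is only
* taken for non-free blocks.)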
*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ static int FTL_Replace_Block(u64 blk_addr) { u32 current_blk = BLK_FROM_ADDR(blk_addr); u32 *pbt = (u32 *)g_pBlockTable; int wResult = PASS; int GarbageCollect = FAIL; nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); if (IS_SPARE_BLOCK(current_blk)) { pbt[current_blk] = (~SPARE_BLOCK) & pbt[current_blk]; #if CMD_DMA p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free; g_pBTDelta_Free += sizeof(struct BTableChangesDelta); p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt; p_BTableChangesDelta->BT_Index = current_blk; p_BTableChangesDelta->BT_Entry_Value = pbt[current_blk]; p_BTableChangesDelta->ValidFields = 0x0C ; #endif return wResult; } FTL_Replace_LWBlock(current_blk, &GarbageCollect); if (PASS == GarbageCollect) wResult = GLOB_FTL_Garbage_Collection(); return wResult; } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: GLOB_FTL_Is_BadBlock * Inputs: block number to test * Outputs: PASS (block is BAD) / FAIL (block is not bad) * Description: test if this block number is flagged as bad *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ int GLOB_FTL_Is_BadBlock(u32 wBlockNum) { u32 *pbt = (u32 *)g_pBlockTable; nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); if (wBlockNum >= DeviceInfo.wSpectraStartBlock && BAD_BLOCK == (pbt[wBlockNum] & BAD_BLOCK)) return PASS; else return FAIL; } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: GLOB_FTL_Flush_Cache * Inputs: none * Outputs: PASS=0 / FAIL=1 * Description: flush all the cache blocks to flash * if a cache block is not dirty, don't do anything with it * else, write the block and update the block table * Note: This function should be called at shutdown/power down. * to write important data into device *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ int GLOB_FTL_Flush_Cache(void) { int i, ret; nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); for (i = 0; i < CACHE_ITEM_NUM; i++) { if (SET == Cache.array[i].changed) { #if CMD_DMA #if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE int_cache[ftl_cmd_cnt].item = i; int_cache[ftl_cmd_cnt].cache.address = Cache.array[i].address; int_cache[ftl_cmd_cnt].cache.changed = CLEAR; #endif #endif ret = write_back_to_l2_cache(Cache.array[i].buf, Cache.array[i].address); if (PASS == ret) { Cache.array[i].changed = CLEAR; } else { printk(KERN_ALERT "Failed when write back to L2 cache!\n"); /* TODO - How to handle this? 
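A failed write-back currently leaves Cache.array[i].changed set, so the
entry will be retried on the next flush; one option (an editor's
suggestion, not implemented here) would be to also propagate FAIL from
GLOB_FTL_Flush_Cache() so callers can react at shutdown.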
*/ } } } flush_l2_cache(); return FTL_Write_Block_Table(FAIL); } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: GLOB_FTL_Page_Read * Inputs: pointer to data * logical address of data (u64 is LBA * Bytes/Page) * Outputs: PASS=0 / FAIL=1 * Description: reads a page of data into RAM from the cache * if the data is not already in cache, read from flash to cache *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ int GLOB_FTL_Page_Read(u8 *data, u64 logical_addr) { u16 cache_item; int res = PASS; nand_dbg_print(NAND_DBG_DEBUG, "GLOB_FTL_Page_Read - " "page_addr: %llu\n", logical_addr); cache_item = FTL_Cache_If_Hit(logical_addr); if (UNHIT_CACHE_ITEM == cache_item) { nand_dbg_print(NAND_DBG_DEBUG, "GLOB_FTL_Page_Read: Cache not hit\n"); res = FTL_Cache_Write(); if (ERR == FTL_Cache_Read(logical_addr)) res = ERR; cache_item = Cache.LRU; } FTL_Cache_Read_Page(data, logical_addr, cache_item); return res; } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: GLOB_FTL_Page_Write * Inputs: pointer to data * address of data (ADDRESSTYPE is LBA * Bytes/Page) * Outputs: PASS=0 / FAIL=1 * Description: writes a page of data from RAM to the cache * if the data is not already in cache, write back the * least recently used block and read the addressed block * from flash to cache *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ int GLOB_FTL_Page_Write(u8 *pData, u64 dwPageAddr) { u16 cache_blk; u32 *pbt = (u32 *)g_pBlockTable; int wResult = PASS; nand_dbg_print(NAND_DBG_TRACE, "GLOB_FTL_Page_Write - " "dwPageAddr: %llu\n", dwPageAddr); cache_blk = FTL_Cache_If_Hit(dwPageAddr); if (UNHIT_CACHE_ITEM == cache_blk) { wResult = FTL_Cache_Write(); if (IS_BAD_BLOCK(BLK_FROM_ADDR(dwPageAddr))) { wResult = FTL_Replace_Block(dwPageAddr); pbt[BLK_FROM_ADDR(dwPageAddr)] |= SPARE_BLOCK; if (wResult == FAIL) return FAIL; } if (ERR == FTL_Cache_Read(dwPageAddr)) wResult = ERR; cache_blk = Cache.LRU; FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0); } else { #if CMD_DMA FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, LLD_CMD_FLAG_ORDER_BEFORE_REST); #else FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0); #endif } return wResult; } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: GLOB_FTL_Block_Erase * Inputs: address of block to erase (now in byte format, should change to * block format) * Outputs: PASS=0 / FAIL=1 * Description: erases the specified block * increments the erase count * If erase count reaches its upper limit,call function to * do the adjustment as per the relative erase count values *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ int GLOB_FTL_Block_Erase(u64 blk_addr) { int status; u32 BlkIdx; nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); BlkIdx = (u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize); if (BlkIdx < DeviceInfo.wSpectraStartBlock) { printk(KERN_ERR "GLOB_FTL_Block_Erase: " "This should never occur\n"); return FAIL; } #if CMD_DMA status = GLOB_LLD_Erase_Block_cdma(BlkIdx, LLD_CMD_FLAG_MODE_CDMA); if (status == FAIL) nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in %s, Line %d, " "Function: %s, new Bad Block %d generated!\n", __FILE__, __LINE__, __func__, BlkIdx); #else status = GLOB_LLD_Erase_Block(BlkIdx); if (status == FAIL) { nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in %s, Line %d, " "Function: %s, new Bad Block %d generated!\n", __FILE__, __LINE__, 
__func__, BlkIdx); return status; } #endif if (DeviceInfo.MLCDevice) { g_pReadCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] = 0; if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) { g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE; FTL_Write_IN_Progress_Block_Table_Page(); } } g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock]++; #if CMD_DMA p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free; g_pBTDelta_Free += sizeof(struct BTableChangesDelta); p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt; p_BTableChangesDelta->WC_Index = BlkIdx - DeviceInfo.wSpectraStartBlock; p_BTableChangesDelta->WC_Entry_Value = g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock]; p_BTableChangesDelta->ValidFields = 0x30; if (DeviceInfo.MLCDevice) { p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free; g_pBTDelta_Free += sizeof(struct BTableChangesDelta); p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt; p_BTableChangesDelta->RC_Index = BlkIdx - DeviceInfo.wSpectraStartBlock; p_BTableChangesDelta->RC_Entry_Value = g_pReadCounter[BlkIdx - DeviceInfo.wSpectraStartBlock]; p_BTableChangesDelta->ValidFields = 0xC0; } ftl_cmd_cnt++; #endif if (g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] == 0xFE) FTL_Adjust_Relative_Erase_Count(BlkIdx); return status; } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: FTL_Adjust_Relative_Erase_Count * Inputs: index to block that was just incremented and is at the max * Outputs: PASS=0 / FAIL=1 * Description: If any erase counts at MAX, adjusts erase count of every * block by subtracting least worn * counter from counter value of every entry in wear table *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX) { u8 wLeastWornCounter = MAX_BYTE_VALUE; u8 wWearCounter; u32 i, wWearIndex; u32 *pbt = (u32 *)g_pBlockTable; int wResult = PASS; nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); for (i = 0; i < DeviceInfo.wDataBlockNum; i++) { if (IS_BAD_BLOCK(i)) continue; wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK)); if ((wWearIndex - DeviceInfo.wSpectraStartBlock) < 0) printk(KERN_ERR "FTL_Adjust_Relative_Erase_Count:" "This should never occur\n"); wWearCounter = g_pWearCounter[wWearIndex - DeviceInfo.wSpectraStartBlock]; if (wWearCounter < wLeastWornCounter) wLeastWornCounter = wWearCounter; } if (wLeastWornCounter == 0) { nand_dbg_print(NAND_DBG_WARN, "Adjusting Wear Levelling Counters: Special Case\n"); g_pWearCounter[Index_of_MAX - DeviceInfo.wSpectraStartBlock]--; #if CMD_DMA p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free; g_pBTDelta_Free += sizeof(struct BTableChangesDelta); p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt; p_BTableChangesDelta->WC_Index = Index_of_MAX - DeviceInfo.wSpectraStartBlock; p_BTableChangesDelta->WC_Entry_Value = g_pWearCounter[Index_of_MAX - DeviceInfo.wSpectraStartBlock]; p_BTableChangesDelta->ValidFields = 0x30; #endif FTL_Static_Wear_Leveling(); } else { for (i = 0; i < DeviceInfo.wDataBlockNum; i++) if (!IS_BAD_BLOCK(i)) { wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK)); g_pWearCounter[wWearIndex - DeviceInfo.wSpectraStartBlock] = (u8)(g_pWearCounter [wWearIndex - DeviceInfo.wSpectraStartBlock] - wLeastWornCounter); #if CMD_DMA p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free; g_pBTDelta_Free += sizeof(struct BTableChangesDelta); p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt; p_BTableChangesDelta->WC_Index = wWearIndex - 
DeviceInfo.wSpectraStartBlock; p_BTableChangesDelta->WC_Entry_Value = g_pWearCounter[wWearIndex - DeviceInfo.wSpectraStartBlock]; p_BTableChangesDelta->ValidFields = 0x30; #endif } } return wResult; } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: FTL_Write_IN_Progress_Block_Table_Page * Inputs: None * Outputs: None * Description: It writes in-progress flag page to the page next to * block table ***********************************************************************/ static int FTL_Write_IN_Progress_Block_Table_Page(void) { int wResult = PASS; u16 bt_pages; u16 dwIPFPageAddr; #if CMD_DMA #else u32 *pbt = (u32 *)g_pBlockTable; u32 wTempBlockTableIndex; #endif nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); bt_pages = FTL_Get_Block_Table_Flash_Size_Pages(); dwIPFPageAddr = g_wBlockTableOffset + bt_pages; nand_dbg_print(NAND_DBG_DEBUG, "Writing IPF at " "Block %d Page %d\n", g_wBlockTableIndex, dwIPFPageAddr); #if CMD_DMA wResult = GLOB_LLD_Write_Page_Main_Spare_cdma(g_pIPF, g_wBlockTableIndex, dwIPFPageAddr, 1, LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST); if (wResult == FAIL) { nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in %s, Line %d, " "Function: %s, new Bad Block %d generated!\n", __FILE__, __LINE__, __func__, g_wBlockTableIndex); } g_wBlockTableOffset = dwIPFPageAddr + 1; p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free; g_pBTDelta_Free += sizeof(struct BTableChangesDelta); p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt; p_BTableChangesDelta->g_wBlockTableOffset = g_wBlockTableOffset; p_BTableChangesDelta->ValidFields = 0x01; ftl_cmd_cnt++; #else wResult = GLOB_LLD_Write_Page_Main_Spare(g_pIPF, g_wBlockTableIndex, dwIPFPageAddr, 1); if (wResult == FAIL) { nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in %s, Line %d, " "Function: %s, new Bad Block %d generated!\n", __FILE__, __LINE__, __func__, (int)g_wBlockTableIndex); MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]); wTempBlockTableIndex = FTL_Replace_Block_Table(); bt_block_changed = 1; if (BAD_BLOCK == wTempBlockTableIndex) return ERR; g_wBlockTableIndex = wTempBlockTableIndex; g_wBlockTableOffset = 0; /* Block table tag is '00'. Means it's used one */ pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex; return FAIL; } g_wBlockTableOffset = dwIPFPageAddr + 1; #endif return wResult; } /*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&& * Function: FTL_Read_Disturbance * Inputs: block address * Outputs: PASS=0 / FAIL=1 * Description: used to handle read disturbance. Data in block that * reaches its read limit is moved to new block *&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/ int FTL_Read_Disturbance(u32 blk_addr) { int wResult = FAIL; u32 *pbt = (u32 *) g_pBlockTable; u32 dwOldBlockAddr = blk_addr; u32 wBlockNum; u32 i; u32 wLeastReadCounter = 0xFFFF; u32 wLeastReadIndex = BAD_BLOCK; u32 wSpareBlockNum = 0; u32 wTempNode; u32 wReplacedNode; u8 *g_pTempBuf; nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n", __FILE__, __LINE__, __func__); #if CMD_DMA g_pTempBuf = cp_back_buf_copies[cp_back_buf_idx]; cp_back_buf_idx++; if (cp_back_buf_idx > COPY_BACK_BUF_NUM) { printk(KERN_ERR "cp_back_buf_copies overflow! Exit." "Maybe too many pending commands in your CDMA chain.\n"); return FAIL; } #else g_pTempBuf = tmp_buf_read_disturbance; #endif wBlockNum = FTL_Get_Block_Index(blk_addr); do { /* This is a bug.Here 'i' should be logical block number * and start from 1 (0 is reserved for block table). 
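* (Starting at 1 also keeps entry 0 -- the block-table mapping itself --
* from ever being picked as the least-read replacement target below.)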
* Have fixed it. - Yunpeng 2008. 12. 19 */ for (i = 1; i < DeviceInfo.wDataBlockNum; i++) { if (IS_SPARE_BLOCK(i)) { u32 wPhysicalIndex = (u32)((~SPARE_BLOCK) & pbt[i]); if (g_pReadCounter[wPhysicalIndex - DeviceInfo.wSpectraStartBlock] < wLeastReadCounter) { wLeastReadCounter = g_pReadCounter[wPhysicalIndex - DeviceInfo.wSpectraStartBlock]; wLeastReadIndex = i; } wSpareBlockNum++; } } if (wSpareBlockNum <= NUM_FREE_BLOCKS_GATE) { wResult = GLOB_FTL_Garbage_Collection(); if (PASS == wResult) continue; else break; } else { wTempNode = (u32)(DISCARD_BLOCK | pbt[wBlockNum]); wReplacedNode = (u32)((~SPARE_BLOCK) & pbt[wLeastReadIndex]); #if CMD_DMA pbt[wBlockNum] = wReplacedNode; pbt[wLeastReadIndex] = wTempNode; p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free; g_pBTDelta_Free += sizeof(struct BTableChangesDelta); p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt; p_BTableChangesDelta->BT_Index = wBlockNum; p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum]; p_BTableChangesDelta->ValidFields = 0x0C; p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free; g_pBTDelta_Free += sizeof(struct BTableChangesDelta); p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt; p_BTableChangesDelta->BT_Index = wLeastReadIndex; p_BTableChangesDelta->BT_Entry_Value = pbt[wLeastReadIndex]; p_BTableChangesDelta->ValidFields = 0x0C; wResult = GLOB_LLD_Read_Page_Main_cdma(g_pTempBuf, dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock, LLD_CMD_FLAG_MODE_CDMA); if (wResult == FAIL) return wResult; ftl_cmd_cnt++; if (wResult != FAIL) { if (FAIL == GLOB_LLD_Write_Page_Main_cdma( g_pTempBuf, pbt[wBlockNum], 0, DeviceInfo.wPagesPerBlock)) { nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in " "%s, Line %d, Function: %s, " "new Bad Block %d " "generated!\n", __FILE__, __LINE__, __func__, (int)pbt[wBlockNum]); wResult = FAIL; MARK_BLOCK_AS_BAD(pbt[wBlockNum]); } ftl_cmd_cnt++; } #else wResult = GLOB_LLD_Read_Page_Main(g_pTempBuf, dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock); if (wResult == FAIL) return wResult; if (wResult != FAIL) { /* This is a bug. At this time, pbt[wBlockNum] is still the physical address of the discarded block, and should not be written. Have fixed it as below. -- Yunpeng 2008.12.19 */ wResult = GLOB_LLD_Write_Page_Main(g_pTempBuf, wReplacedNode, 0, DeviceInfo.wPagesPerBlock); if (wResult == FAIL) { nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in " "%s, Line %d, Function: %s, " "new Bad Block %d " "generated!\n", __FILE__, __LINE__, __func__, (int)wReplacedNode); MARK_BLOCK_AS_BAD(wReplacedNode); } else { pbt[wBlockNum] = wReplacedNode; pbt[wLeastReadIndex] = wTempNode; } } if ((wResult == PASS) && (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE)) { g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE; FTL_Write_IN_Progress_Block_Table_Page(); } #endif } } while (wResult != PASS) ; #if CMD_DMA /* ... */ #endif return wResult; }
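/*
 * Illustrative sketch added by the editor (not part of the original
 * driver): the static wear-leveling pass above only swaps a spare block
 * for a data block when the wear spread exceeds WEAR_LEVELING_GATE.
 * Reduced to a standalone predicate over the u8 counters kept in
 * g_pWearCounter, the decision looks like this:
 */
#if 0 /* example only, never compiled */
static inline int wear_gap_exceeds_gate(u8 most_worn_cnt,
					u8 least_worn_cnt, u8 gate)
{
	/* Compare first so the unsigned subtraction cannot wrap. */
	return most_worn_cnt > least_worn_cnt &&
	       (u8)(most_worn_cnt - least_worn_cnt) > gate;
}
#endif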
Art-Chen/android_kernel_samsung_galaxys2plus-common
drivers/staging/spectra/flash.c
C
gpl-2.0
124,089
/* * linux/drivers/video/q40fb.c -- Q40 frame buffer device * * Copyright (C) 2001 * * Richard Zidlicky <rz@linux-m68k.org> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <asm/uaccess.h> #include <asm/setup.h> #include <asm/system.h> #include <asm/q40_master.h> #include <linux/fb.h> #include <linux/module.h> #include <asm/pgtable.h> #define Q40_PHYS_SCREEN_ADDR 0xFE800000 static struct fb_fix_screeninfo q40fb_fix __devinitdata = { .id = "Q40", .smem_len = 1024*1024, .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_TRUECOLOR, .line_length = 1024*2, .accel = FB_ACCEL_NONE, }; static struct fb_var_screeninfo q40fb_var __devinitdata = { .xres = 1024, .yres = 512, .xres_virtual = 1024, .yres_virtual = 512, .bits_per_pixel = 16, .red = {6, 5, 0}, .green = {11, 5, 0}, .blue = {0, 6, 0}, .activate = FB_ACTIVATE_NOW, .height = 230, .width = 300, .vmode = FB_VMODE_NONINTERLACED, }; static int q40fb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info) { /* * Set a single color register. The values supplied have a 16 bit * magnitude. * Return != 0 for invalid regno. */ if (regno > 255) return 1; red>>=11; green>>=11; blue>>=10; if (regno < 16) { ((u32 *)info->pseudo_palette)[regno] = ((red & 31) <<6) | ((green & 31) << 11) | (blue & 63); } return 0; } static struct fb_ops q40fb_ops = { .owner = THIS_MODULE, .fb_setcolreg = q40fb_setcolreg, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, }; static int __devinit q40fb_probe(struct platform_device *dev) { struct fb_info *info; if (!MACH_IS_Q40) return -ENXIO; /* mapped in q40/config.c */ q40fb_fix.smem_start = Q40_PHYS_SCREEN_ADDR; info = framebuffer_alloc(sizeof(u32) * 16, &dev->dev); if (!info) return -ENOMEM; info->var = q40fb_var; info->fix = q40fb_fix; info->fbops = &q40fb_ops; info->flags = FBINFO_DEFAULT; /* not as module for now */ info->pseudo_palette = info->par; info->par = NULL; info->screen_base = (char *) q40fb_fix.smem_start; if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) { framebuffer_release(info); return -ENOMEM; } master_outb(3, DISPLAY_CONTROL_REG); if (register_framebuffer(info) < 0) { printk(KERN_ERR "Unable to register Q40 frame buffer\n"); fb_dealloc_cmap(&info->cmap); framebuffer_release(info); return -EINVAL; } printk(KERN_INFO "fb%d: Q40 frame buffer alive and kicking !\n", info->node); return 0; } static struct platform_driver q40fb_driver = { .probe = q40fb_probe, .driver = { .name = "q40fb", }, }; static struct platform_device q40fb_device = { .name = "q40fb", }; int __init q40fb_init(void) { int ret = 0; if (fb_get_options("q40fb", NULL)) return -ENODEV; ret = platform_driver_register(&q40fb_driver); if (!ret) { ret = platform_device_register(&q40fb_device); if (ret) platform_driver_unregister(&q40fb_driver); } return ret; } module_init(q40fb_init); MODULE_LICENSE("GPL");
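/*
 * Editor's note (derived from the code above, not in the original): the
 * pseudo-palette packing in q40fb_setcolreg -- ((red & 31) << 6) |
 * ((green & 31) << 11) | (blue & 63) -- matches the bitfield layout
 * declared in q40fb_var (red at offset 6/length 5, green at 11/5, blue
 * at 0/6), i.e. a G5:R5:B6 layout rather than the usual R5:G6:B5.
 */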
zeroprobe/ZeroMHL-Overclocked-V3
drivers/video/q40fb.c
C
gpl-2.0
3,460
tinymce.PluginManager.add("media",function(a,b){function c(a){return-1!=a.indexOf(".mp3")?"audio/mpeg":-1!=a.indexOf(".wav")?"audio/wav":-1!=a.indexOf(".mp4")?"video/mp4":-1!=a.indexOf(".webm")?"video/webm":-1!=a.indexOf(".ogg")?"video/ogg":-1!=a.indexOf(".swf")?"application/x-shockwave-flash":""}function d(b){var c=a.settings.media_scripts;if(c)for(var d=0;d<c.length;d++)if(-1!==b.indexOf(c[d].filter))return c[d]}function e(){function b(a){var b,c,f,g;b=d.find("#width")[0],c=d.find("#height")[0],f=b.value(),g=c.value(),d.find("#constrain")[0].checked()&&e&&j&&f&&g&&(a.control==b?(g=Math.round(f/e*g),c.value(g)):(f=Math.round(g/j*f),b.value(f))),e=f,j=g}function c(){k=h(this.value()),this.parent().parent().fromJSON(k)}var d,e,j,k,l=[{name:"source1",type:"filepicker",filetype:"media",size:40,autofocus:!0,label:"Source",onchange:function(a){tinymce.each(a.meta,function(a,b){d.find("#"+b).value(a)})}}];a.settings.media_alt_source!==!1&&l.push({name:"source2",type:"filepicker",filetype:"media",size:40,label:"Alternative source"}),a.settings.media_poster!==!1&&l.push({name:"poster",type:"filepicker",filetype:"image",size:40,label:"Poster"}),a.settings.media_dimensions!==!1&&l.push({type:"container",label:"Dimensions",layout:"flex",align:"center",spacing:5,items:[{name:"width",type:"textbox",maxLength:3,size:3,onchange:b},{type:"label",text:"x"},{name:"height",type:"textbox",maxLength:3,size:3,onchange:b},{name:"constrain",type:"checkbox",checked:!0,text:"Constrain proportions"}]}),k=i(a.selection.getNode()),e=k.width,j=k.height;var n={id:"mcemediasource",type:"textbox",flex:1,name:"embed",value:f(),multiline:!0,label:"Source"};n[m]=c,d=a.windowManager.open({title:"Insert/edit video",data:k,bodyType:"tabpanel",body:[{title:"General",type:"form",onShowTab:function(){k=h(this.next().find("#embed").value()),this.fromJSON(k)},items:l},{title:"Embed",type:"panel",layout:"flex",direction:"column",align:"stretch",padding:10,spacing:10,onShowTab:function(){this.find("#embed").value(g(this.parent().toJSON()))},items:[{type:"label",text:"Paste your embed code below:",forId:"mcemediasource"},n]}],onSubmit:function(){var b,c,d,e;for(b=a.dom.select("img[data-mce-object]"),a.insertContent(g(this.toJSON())),c=a.dom.select("img[data-mce-object]"),d=0;d<b.length;d++)for(e=c.length-1;e>=0;e--)b[d]==c[e]&&c.splice(e,1);a.selection.select(c[0]),a.nodeChanged()}})}function f(){var b=a.selection.getNode();return b.getAttribute("data-mce-object")?a.selection.getContent():void 0}function g(e){var f="";if(!e.source1&&(tinymce.extend(e,h(e.embed)),!e.source1))return"";if(e.source2||(e.source2=""),e.poster||(e.poster=""),e.source1=a.convertURL(e.source1,"source"),e.source2=a.convertURL(e.source2,"source"),e.source1mime=c(e.source1),e.source2mime=c(e.source2),e.poster=a.convertURL(e.poster,"poster"),e.flashPlayerUrl=a.convertURL(b+"/moxieplayer.swf","movie"),tinymce.each(l,function(a){var b,c,d;if(b=a.regex.exec(e.source1)){for(d=a.url,c=0;b[c];c++)d=d.replace("$"+c,function(){return b[c]});e.source1=d,e.type=a.type,e.width=e.width||a.w,e.height=e.height||a.h}}),e.embed)f=k(e.embed,e,!0);else{var g=d(e.source1);g&&(e.type="script",e.width=g.width,e.height=g.height),e.width=e.width||300,e.height=e.height||150,tinymce.each(e,function(b,c){e[c]=a.dom.encode(b)}),"iframe"==e.type?f+='<iframe src="'+e.source1+'" width="'+e.width+'" height="'+e.height+'"></iframe>':"application/x-shockwave-flash"==e.source1mime?(f+='<object data="'+e.source1+'" width="'+e.width+'" height="'+e.height+'" 
type="application/x-shockwave-flash">',e.poster&&(f+='<img src="'+e.poster+'" width="'+e.width+'" height="'+e.height+'" />'),f+="</object>"):-1!=e.source1mime.indexOf("audio")?a.settings.audio_template_callback?f=a.settings.audio_template_callback(e):f+='<audio controls="controls" src="'+e.source1+'">'+(e.source2?'\n<source src="'+e.source2+'"'+(e.source2mime?' type="'+e.source2mime+'"':"")+" />\n":"")+"</audio>":"script"==e.type?f+='<script src="'+e.source1+'"></script>':f=a.settings.video_template_callback?a.settings.video_template_callback(e):'<video width="'+e.width+'" height="'+e.height+'"'+(e.poster?' poster="'+e.poster+'"':"")+' controls="controls">\n<source src="'+e.source1+'"'+(e.source1mime?' type="'+e.source1mime+'"':"")+" />\n"+(e.source2?'<source src="'+e.source2+'"'+(e.source2mime?' type="'+e.source2mime+'"':"")+" />\n":"")+"</video>"}return f}function h(a){var b={};return new tinymce.html.SaxParser({validate:!1,allow_conditional_comments:!0,special:"script,noscript",start:function(a,c){if(b.source1||"param"!=a||(b.source1=c.map.movie),("iframe"==a||"object"==a||"embed"==a||"video"==a||"audio"==a)&&(b.type||(b.type=a),b=tinymce.extend(c.map,b)),"script"==a){var e=d(c.map.src);if(!e)return;b={type:"script",source1:c.map.src,width:e.width,height:e.height}}"source"==a&&(b.source1?b.source2||(b.source2=c.map.src):b.source1=c.map.src),"img"!=a||b.poster||(b.poster=c.map.src)}}).parse(a),b.source1=b.source1||b.src||b.data,b.source2=b.source2||"",b.poster=b.poster||"",b}function i(b){return b.getAttribute("data-mce-object")?h(a.serializer.serialize(b,{selection:!0})):{}}function j(b){if(a.settings.media_filter_html===!1)return b;var c=new tinymce.html.Writer;return new tinymce.html.SaxParser({validate:!1,allow_conditional_comments:!1,special:"script,noscript",comment:function(a){c.comment(a)},cdata:function(a){c.cdata(a)},text:function(a,b){c.text(a,b)},start:function(a,b,d){if("script"!=a&&"noscript"!=a){for(var e=0;e<b.length;e++)if(0===b[e].name.indexOf("on"))return;c.start(a,b,d)}},end:function(a){"script"!=a&&"noscript"!=a&&c.end(a)}},new tinymce.html.Schema({})).parse(b),c.getContent()}function k(a,b,c){function d(a,b){var c,d,e,f;for(c in b)if(e=""+b[c],a.map[c])for(d=a.length;d--;)f=a[d],f.name==c&&(e?(a.map[c]=e,f.value=e):(delete a.map[c],a.splice(d,1)));else e&&(a.push({name:c,value:e}),a.map[c]=e)}var e,f=new tinymce.html.Writer,g=0;return new tinymce.html.SaxParser({validate:!1,allow_conditional_comments:!0,special:"script,noscript",comment:function(a){f.comment(a)},cdata:function(a){f.cdata(a)},text:function(a,b){f.text(a,b)},start:function(a,h,i){switch(a){case"video":case"object":case"embed":case"img":case"iframe":d(h,{width:b.width,height:b.height})}if(c)switch(a){case"video":d(h,{poster:b.poster,src:""}),b.source2&&d(h,{src:""});break;case"iframe":d(h,{src:b.source1});break;case"source":if(g++,2>=g&&(d(h,{src:b["source"+g],type:b["source"+g+"mime"]}),!b["source"+g]))return;break;case"img":if(!b.poster)return;e=!0}f.start(a,h,i)},end:function(a){if("video"==a&&c)for(var h=1;2>=h;h++)if(b["source"+h]){var i=[];i.map={},h>g&&(d(i,{src:b["source"+h],type:b["source"+h+"mime"]}),f.start("source",i,!0))}if(b.poster&&"object"==a&&c&&!e){var j=[];j.map={},d(j,{src:b.poster,width:b.width,height:b.height}),f.start("img",j,!0)}f.end(a)}},new tinymce.html.Schema({})).parse(a),f.getContent()}var 
l=[{regex:/youtu\.be\/([\w\-.]+)/,type:"iframe",w:425,h:350,url:"//www.youtube.com/embed/$1"},{regex:/youtube\.com(.+)v=([^&]+)/,type:"iframe",w:425,h:350,url:"//www.youtube.com/embed/$2"},{regex:/vimeo\.com\/([0-9]+)/,type:"iframe",w:425,h:350,url:"//player.vimeo.com/video/$1?title=0&byline=0&portrait=0&color=8dc7dc"},{regex:/vimeo\.com\/(.*)\/([0-9]+)/,type:"iframe",w:425,h:350,url:"//player.vimeo.com/video/$2?title=0&amp;byline=0"},{regex:/maps\.google\.([a-z]{2,3})\/maps\/(.+)msid=(.+)/,type:"iframe",w:425,h:350,url:'//maps.google.com/maps/ms?msid=$2&output=embed"'}],m=tinymce.Env.ie&&tinymce.Env.ie<=8?"onChange":"onInput";a.on("ResolveName",function(a){var b;1==a.target.nodeType&&(b=a.target.getAttribute("data-mce-object"))&&(a.name=b)}),a.on("preInit",function(){var b=a.schema.getSpecialElements();tinymce.each("video audio iframe object".split(" "),function(a){b[a]=new RegExp("</"+a+"[^>]*>","gi")});var c=a.schema.getBoolAttrs();tinymce.each("webkitallowfullscreen mozallowfullscreen allowfullscreen".split(" "),function(a){c[a]={}}),a.parser.addNodeFilter("iframe,video,audio,object,embed,script",function(b,c){for(var e,f,g,h,i,j,k,l,m=b.length;m--;)if(f=b[m],f.parent&&("script"!=f.name||(l=d(f.attr("src"))))){for(g=new tinymce.html.Node("img",1),g.shortEnded=!0,l&&(l.width&&f.attr("width",l.width.toString()),l.height&&f.attr("height",l.height.toString())),j=f.attributes,e=j.length;e--;)h=j[e].name,i=j[e].value,"width"!==h&&"height"!==h&&"style"!==h&&(("data"==h||"src"==h)&&(i=a.convertURL(i,h)),g.attr("data-mce-p-"+h,i));k=f.firstChild&&f.firstChild.value,k&&(g.attr("data-mce-html",escape(k)),g.firstChild=null),g.attr({width:f.attr("width")||"300",height:f.attr("height")||("audio"==c?"30":"150"),style:f.attr("style"),src:tinymce.Env.transparentSrc,"data-mce-object":c,"class":"mce-object mce-object-"+c}),f.replace(g)}}),a.serializer.addAttributeFilter("data-mce-object",function(a,b){for(var c,d,e,f,g,h,i,k=a.length;k--;)if(c=a[k],c.parent){for(i=c.attr(b),d=new tinymce.html.Node(i,1),"audio"!=i&&"script"!=i&&d.attr({width:c.attr("width"),height:c.attr("height")}),d.attr({style:c.attr("style")}),f=c.attributes,e=f.length;e--;){var l=f[e].name;0===l.indexOf("data-mce-p-")&&d.attr(l.substr(11),f[e].value)}"script"==i&&d.attr("type","text/javascript"),g=c.attr("data-mce-html"),g&&(h=new tinymce.html.Node("#text",3),h.raw=!0,h.value=j(unescape(g)),d.append(h)),c.replace(d)}})}),a.on("ObjectSelected",function(a){var b=a.target.getAttribute("data-mce-object");("audio"==b||"script"==b)&&a.preventDefault()}),a.on("objectResized",function(a){var b,c=a.target;c.getAttribute("data-mce-object")&&(b=c.getAttribute("data-mce-html"),b&&(b=unescape(b),c.setAttribute("data-mce-html",escape(k(b,{width:a.width,height:a.height})))))}),a.addButton("media",{tooltip:"Insert/edit video",onclick:e,stateSelector:["img[data-mce-object=video]","img[data-mce-object=iframe]"]}),a.addMenuItem("media",{icon:"media",text:"Insert video",onclick:e,context:"insert",prependToContext:!0})});
BrandonThomas84/cos_core
wp-includes/js/tinymce/plugins/media/plugin.min.js
JavaScript
gpl-2.0
9,890
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * identify.c: identify machine by looking up system identifier * * Copyright (C) 1998 Thomas Bogendoerfer * * This code is based on arch/mips/sgi/kernel/system.c, which is * * Copyright (C) 1996 David S. Miller (davem@davemloft.net) */ #include <linux/bug.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/string.h> #include <asm/sgialib.h> #include <asm/bootinfo.h> struct smatch { char *arcname; char *liname; int flags; }; static struct smatch mach_table[] = { { .arcname = "SGI-IP22", .liname = "SGI Indy", .flags = PROM_FLAG_ARCS, }, { .arcname = "SGI-IP27", .liname = "SGI Origin", .flags = PROM_FLAG_ARCS, }, { .arcname = "SGI-IP28", .liname = "SGI IP28", .flags = PROM_FLAG_ARCS, }, { .arcname = "SGI-IP30", .liname = "SGI Octane", .flags = PROM_FLAG_ARCS, }, { .arcname = "SGI-IP32", .liname = "SGI O2", .flags = PROM_FLAG_ARCS, }, { .arcname = "Microsoft-Jazz", .liname = "Jazz MIPS_Magnum_4000", .flags = 0, }, { .arcname = "PICA-61", .liname = "Jazz Acer_PICA_61", .flags = 0, }, { .arcname = "RM200PCI", .liname = "SNI RM200_PCI", .flags = PROM_FLAG_DONT_FREE_TEMP, }, { .arcname = "RM200PCI-R5K", .liname = "SNI RM200_PCI-R5K", .flags = PROM_FLAG_DONT_FREE_TEMP, } }; int prom_flags; static struct smatch * __init string_to_mach(const char *s) { int i; for (i = 0; i < ARRAY_SIZE(mach_table); i++) { if (!strcmp(s, mach_table[i].arcname)) return &mach_table[i]; } panic("Yeee, could not determine architecture type <%s>", s); } char *system_type; const char *get_system_type(void) { return system_type; } void __init prom_identify_arch(void) { pcomponent *p; struct smatch *mach; const char *iname; /* * The root component tells us what machine architecture we have here. */ p = ArcGetChild(PROM_NULL_COMPONENT); if (p == NULL) { #ifdef CONFIG_SGI_IP27 /* IP27 PROM misbehaves, seems to not implement ARC GetChild(). So we just assume it's an IP27. */ iname = "SGI-IP27"; #else iname = "Unknown"; #endif } else iname = (char *) (long) p->iname; printk("ARCH: %s\n", iname); mach = string_to_mach(iname); system_type = mach->liname; prom_flags = mach->flags; }
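/*
 * Usage sketch (hypothetical, added by the editor): the string returned
 * by get_system_type() is what MIPS platform code typically surfaces in
 * /proc/cpuinfo, e.g.:
 *
 *	seq_printf(m, "system type\t\t: %s\n", get_system_type());
 *
 * Note that string_to_mach() panics on an unknown identifier, so
 * prom_identify_arch() never returns with system_type unset.
 */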
AndroidSymmetry/android_kernel_moto_shamu
arch/mips/fw/arc/identify.c
C
gpl-2.0
2,442
/* * Xilinx XPS PS/2 device driver * * (c) 2005 MontaVista Software, Inc. * (c) 2008 Xilinx, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/serio.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/list.h> #include <linux/io.h> #include <linux/of_address.h> #include <linux/of_device.h> #include <linux/of_platform.h> #define DRIVER_NAME "xilinx_ps2" /* Register offsets for the xps2 device */ #define XPS2_SRST_OFFSET 0x00000000 /* Software Reset register */ #define XPS2_STATUS_OFFSET 0x00000004 /* Status register */ #define XPS2_RX_DATA_OFFSET 0x00000008 /* Receive Data register */ #define XPS2_TX_DATA_OFFSET 0x0000000C /* Transmit Data register */ #define XPS2_GIER_OFFSET 0x0000002C /* Global Interrupt Enable reg */ #define XPS2_IPISR_OFFSET 0x00000030 /* Interrupt Status register */ #define XPS2_IPIER_OFFSET 0x00000038 /* Interrupt Enable register */ /* Reset Register Bit Definitions */ #define XPS2_SRST_RESET 0x0000000A /* Software Reset */ /* Status Register Bit Positions */ #define XPS2_STATUS_RX_FULL 0x00000001 /* Receive Full */ #define XPS2_STATUS_TX_FULL 0x00000002 /* Transmit Full */ /* Bit definitions for ISR/IER registers. Both the registers have the same bit * definitions and are only defined once. */ #define XPS2_IPIXR_WDT_TOUT 0x00000001 /* Watchdog Timeout Interrupt */ #define XPS2_IPIXR_TX_NOACK 0x00000002 /* Transmit No ACK Interrupt */ #define XPS2_IPIXR_TX_ACK 0x00000004 /* Transmit ACK (Data) Interrupt */ #define XPS2_IPIXR_RX_OVF 0x00000008 /* Receive Overflow Interrupt */ #define XPS2_IPIXR_RX_ERR 0x00000010 /* Receive Error Interrupt */ #define XPS2_IPIXR_RX_FULL 0x00000020 /* Receive Data Interrupt */ /* Mask for all the Transmit Interrupts */ #define XPS2_IPIXR_TX_ALL (XPS2_IPIXR_TX_NOACK | XPS2_IPIXR_TX_ACK) /* Mask for all the Receive Interrupts */ #define XPS2_IPIXR_RX_ALL (XPS2_IPIXR_RX_OVF | XPS2_IPIXR_RX_ERR | \ XPS2_IPIXR_RX_FULL) /* Mask for all the Interrupts */ #define XPS2_IPIXR_ALL (XPS2_IPIXR_TX_ALL | XPS2_IPIXR_RX_ALL | \ XPS2_IPIXR_WDT_TOUT) /* Global Interrupt Enable mask */ #define XPS2_GIER_GIE_MASK 0x80000000 struct xps2data { int irq; spinlock_t lock; void __iomem *base_address; /* virt. address of control registers */ unsigned int flags; struct serio serio; /* serio */ }; /************************************/ /* XPS PS/2 data transmission calls */ /************************************/ /** * xps2_recv() - attempts to receive a byte from the PS/2 port. * @drvdata: pointer to ps2 device private data structure * @byte: address where the read data will be copied * * If there is any data available in the PS/2 receiver, this function reads * the data; otherwise it returns an error. */ static int xps2_recv(struct xps2data *drvdata, u8 *byte) { u32 sr; int status = -1; /* If there is data available in the PS/2 receiver, read it */ sr = in_be32(drvdata->base_address + XPS2_STATUS_OFFSET); if (sr & XPS2_STATUS_RX_FULL) { *byte = in_be32(drvdata->base_address + XPS2_RX_DATA_OFFSET); status = 0; } return status; } /*********************/ /* Interrupt handler */ /*********************/ static irqreturn_t xps2_interrupt(int irq, void *dev_id) { struct xps2data *drvdata = dev_id; u32 intr_sr; u8 c; int status; /* Get the PS/2 interrupts and clear them */ intr_sr = in_be32(drvdata->base_address + XPS2_IPISR_OFFSET); out_be32(drvdata->base_address + XPS2_IPISR_OFFSET, intr_sr); /* Check which interrupt is active */ if (intr_sr & XPS2_IPIXR_RX_OVF) dev_warn(drvdata->serio.dev.parent, "receive overrun error\n"); if (intr_sr & XPS2_IPIXR_RX_ERR) drvdata->flags |= SERIO_PARITY; if (intr_sr & (XPS2_IPIXR_TX_NOACK | XPS2_IPIXR_WDT_TOUT)) drvdata->flags |= SERIO_TIMEOUT; if (intr_sr & XPS2_IPIXR_RX_FULL) { status = xps2_recv(drvdata, &c); /* Error, if a byte is not received */ if (status) { dev_err(drvdata->serio.dev.parent, "wrong rcvd byte count (%d)\n", status); } else { serio_interrupt(&drvdata->serio, c, drvdata->flags); drvdata->flags = 0; } } return IRQ_HANDLED; } /*******************/ /* serio callbacks */ /*******************/ /** * sxps2_write() - sends a byte out through the PS/2 port. * @pserio: pointer to the serio structure of the PS/2 port * @c: data that needs to be written to the PS/2 port * * This function checks if the PS/2 transmitter is empty and sends a byte. * Otherwise it returns an error. Transmission fails only when nothing is connected * to the PS/2 port. That's why we do not try to resend the data in case of a * failure. */ static int sxps2_write(struct serio *pserio, unsigned char c) { struct xps2data *drvdata = pserio->port_data; unsigned long flags; u32 sr; int status = -1; spin_lock_irqsave(&drvdata->lock, flags); /* If the PS/2 transmitter is empty send a byte of data */ sr = in_be32(drvdata->base_address + XPS2_STATUS_OFFSET); if (!(sr & XPS2_STATUS_TX_FULL)) { out_be32(drvdata->base_address + XPS2_TX_DATA_OFFSET, c); status = 0; } spin_unlock_irqrestore(&drvdata->lock, flags); return status; } /** * sxps2_open() - called when a port is opened by the higher layer. * @pserio: pointer to the serio structure of the PS/2 device * * This function requests irq and enables interrupts for the PS/2 device. */ static int sxps2_open(struct serio *pserio) { struct xps2data *drvdata = pserio->port_data; int error; u8 c; error = request_irq(drvdata->irq, &xps2_interrupt, 0, DRIVER_NAME, drvdata); if (error) { dev_err(drvdata->serio.dev.parent, "Couldn't allocate interrupt %d\n", drvdata->irq); return error; } /* start reception by enabling the interrupts */ out_be32(drvdata->base_address + XPS2_GIER_OFFSET, XPS2_GIER_GIE_MASK); out_be32(drvdata->base_address + XPS2_IPIER_OFFSET, XPS2_IPIXR_RX_ALL); (void)xps2_recv(drvdata, &c); return 0; /* success */ } /** * sxps2_close() - frees the interrupt. * @pserio: pointer to the serio structure of the PS/2 device * * This function frees the irq and disables interrupts for the PS/2 device. */ static void sxps2_close(struct serio *pserio) { struct xps2data *drvdata = pserio->port_data; /* Disable the PS2 interrupts */ out_be32(drvdata->base_address + XPS2_GIER_OFFSET, 0x00); out_be32(drvdata->base_address + XPS2_IPIER_OFFSET, 0x00); free_irq(drvdata->irq, drvdata); } /** * xps2_of_probe - probe method for the PS/2 device.
* @of_dev: pointer to OF device structure * @match: pointer to the structure used for matching a device * * This function probes the PS/2 device in the device tree. * It initializes the driver data structure and the hardware. * It returns 0, if the driver is bound to the PS/2 device, or a negative * value if there is an error. */ static int __devinit xps2_of_probe(struct platform_device *ofdev) { struct resource r_irq; /* Interrupt resources */ struct resource r_mem; /* IO mem resources */ struct xps2data *drvdata; struct serio *serio; struct device *dev = &ofdev->dev; resource_size_t remap_size, phys_addr; int error; dev_info(dev, "Device Tree Probing \'%s\'\n", ofdev->dev.of_node->name); /* Get iospace for the device */ error = of_address_to_resource(ofdev->dev.of_node, 0, &r_mem); if (error) { dev_err(dev, "invalid address\n"); return error; } /* Get IRQ for the device */ if (!of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq)) { dev_err(dev, "no IRQ found\n"); return -ENODEV; } drvdata = kzalloc(sizeof(struct xps2data), GFP_KERNEL); if (!drvdata) { dev_err(dev, "Couldn't allocate device private record\n"); return -ENOMEM; } dev_set_drvdata(dev, drvdata); spin_lock_init(&drvdata->lock); drvdata->irq = r_irq.start; phys_addr = r_mem.start; remap_size = resource_size(&r_mem); if (!request_mem_region(phys_addr, remap_size, DRIVER_NAME)) { dev_err(dev, "Couldn't lock memory region at 0x%08llX\n", (unsigned long long)phys_addr); error = -EBUSY; goto failed1; } /* Fill in configuration data and add them to the list */ drvdata->base_address = ioremap(phys_addr, remap_size); if (drvdata->base_address == NULL) { dev_err(dev, "Couldn't ioremap memory at 0x%08llX\n", (unsigned long long)phys_addr); error = -EFAULT; goto failed2; } /* Disable all the interrupts, just in case */ out_be32(drvdata->base_address + XPS2_IPIER_OFFSET, 0); /* Reset the PS2 device and abort any current transaction, to make sure * we have the PS2 in a good state */ out_be32(drvdata->base_address + XPS2_SRST_OFFSET, XPS2_SRST_RESET); dev_info(dev, "Xilinx PS2 at 0x%08llX mapped to 0x%p, irq=%d\n", (unsigned long long)phys_addr, drvdata->base_address, drvdata->irq); serio = &drvdata->serio; serio->id.type = SERIO_8042; serio->write = sxps2_write; serio->open = sxps2_open; serio->close = sxps2_close; serio->port_data = drvdata; serio->dev.parent = dev; snprintf(serio->name, sizeof(serio->name), "Xilinx XPS PS/2 at %08llX", (unsigned long long)phys_addr); snprintf(serio->phys, sizeof(serio->phys), "xilinxps2/serio at %08llX", (unsigned long long)phys_addr); serio_register_port(serio); return 0; /* success */ failed2: release_mem_region(phys_addr, remap_size); failed1: kfree(drvdata); dev_set_drvdata(dev, NULL); return error; } /** * xps2_of_remove - unbinds the driver from the PS/2 device. * @of_dev: pointer to OF device structure * * This function is called if a device is physically removed from the system or * if the driver module is being unloaded. It frees any resources allocated to * the device. 
*/ static int __devexit xps2_of_remove(struct platform_device *of_dev) { struct device *dev = &of_dev->dev; struct xps2data *drvdata = dev_get_drvdata(dev); struct resource r_mem; /* IO mem resources */ serio_unregister_port(&drvdata->serio); iounmap(drvdata->base_address); /* Get iospace of the device */ if (of_address_to_resource(of_dev->dev.of_node, 0, &r_mem)) dev_err(dev, "invalid address\n"); else release_mem_region(r_mem.start, resource_size(&r_mem)); kfree(drvdata); dev_set_drvdata(dev, NULL); return 0; } /* Match table for of_platform binding */ static const struct of_device_id xps2_of_match[] __devinitconst = { { .compatible = "xlnx,xps-ps2-1.00.a", }, { /* end of list */ }, }; MODULE_DEVICE_TABLE(of, xps2_of_match); static struct platform_driver xps2_of_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, .of_match_table = xps2_of_match, }, .probe = xps2_of_probe, .remove = __devexit_p(xps2_of_remove), }; module_platform_driver(xps2_of_driver); MODULE_AUTHOR("Xilinx, Inc."); MODULE_DESCRIPTION("Xilinx XPS PS/2 driver"); MODULE_LICENSE("GPL");
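/*
 * Editor's note: an illustrative sketch, not part of the original driver.
 * sxps2_write() above deliberately gives up the moment the transmitter is
 * full; a caller wanting a bounded retry could wrap the same register
 * accesses as below.  The 100us budget is an arbitrary assumption, and a
 * real version would also take drvdata->lock as sxps2_write() does and
 * would need <linux/delay.h> for udelay().
 */
#if 0	/* example only, never compiled */
static int xps2_write_polled(struct xps2data *drvdata, u8 c)
{
	int budget = 100;	/* assumed retry budget, in microseconds */

	while (budget--) {
		u32 sr = in_be32(drvdata->base_address + XPS2_STATUS_OFFSET);

		if (!(sr & XPS2_STATUS_TX_FULL)) {
			out_be32(drvdata->base_address + XPS2_TX_DATA_OFFSET, c);
			return 0;
		}
		udelay(1);
	}
	return -ETIMEDOUT;	/* most likely nothing is connected */
}
#endif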
Joshndroid/kernel_samsung_lt03wifi
drivers/input/serio/xilinx_ps2.c
C
gpl-2.0
11,197
/* * Wireless Host Controller (WHC) qset management. * * Copyright (C) 2007 Cambridge Silicon Radio Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/uwb/umc.h> #include <linux/usb.h> #include "../../wusbcore/wusbhc.h" #include "whcd.h" struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags) { struct whc_qset *qset; dma_addr_t dma; qset = dma_pool_alloc(whc->qset_pool, mem_flags, &dma); if (qset == NULL) return NULL; memset(qset, 0, sizeof(struct whc_qset)); qset->qset_dma = dma; qset->whc = whc; INIT_LIST_HEAD(&qset->list_node); INIT_LIST_HEAD(&qset->stds); return qset; } /** * qset_fill_qh - fill the static endpoint state in a qset's QHead * @qset: the qset whose QH needs initializing with static endpoint * state * @urb: an urb for a transfer to this endpoint */ static void qset_fill_qh(struct whc *whc, struct whc_qset *qset, struct urb *urb) { struct usb_device *usb_dev = urb->dev; struct wusb_dev *wusb_dev = usb_dev->wusb_dev; struct usb_wireless_ep_comp_descriptor *epcd; bool is_out; uint8_t phy_rate; is_out = usb_pipeout(urb->pipe); qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize); epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra; if (epcd) { qset->max_seq = epcd->bMaxSequence; qset->max_burst = epcd->bMaxBurst; } else { qset->max_seq = 2; qset->max_burst = 1; } /* * Initial PHY rate is 53.3 Mbit/s for control endpoints or * the maximum supported by the device for other endpoints * (unless limited by the user). */ if (usb_pipecontrol(urb->pipe)) phy_rate = UWB_PHY_RATE_53; else { uint16_t phy_rates; phy_rates = le16_to_cpu(wusb_dev->wusb_cap_descr->wPHYRates); phy_rate = fls(phy_rates) - 1; if (phy_rate > whc->wusbhc.phy_rate) phy_rate = whc->wusbhc.phy_rate; } qset->qh.info1 = cpu_to_le32( QH_INFO1_EP(usb_pipeendpoint(urb->pipe)) | (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN) | usb_pipe_to_qh_type(urb->pipe) | QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum)) | QH_INFO1_MAX_PKT_LEN(qset->max_packet) ); qset->qh.info2 = cpu_to_le32( QH_INFO2_BURST(qset->max_burst) | QH_INFO2_DBP(0) | QH_INFO2_MAX_COUNT(3) | QH_INFO2_MAX_RETRY(3) | QH_INFO2_MAX_SEQ(qset->max_seq - 1) ); /* FIXME: where can we obtain these Tx parameters from? Why * doesn't the chip know what Tx power to use? It knows the Rx * strength and can presumably guess the Tx power required * from that? */ qset->qh.info3 = cpu_to_le32( QH_INFO3_TX_RATE(phy_rate) | QH_INFO3_TX_PWR(0) /* 0 == max power */ ); qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1); } /** * qset_clear - clear fields in a qset so it may be reinserted into a * schedule. * * The sequence number and current window are not cleared (see * qset_reset()). 
*/ void qset_clear(struct whc *whc, struct whc_qset *qset) { qset->td_start = qset->td_end = qset->ntds = 0; qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T); qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK; qset->qh.err_count = 0; qset->qh.scratch[0] = 0; qset->qh.scratch[1] = 0; qset->qh.scratch[2] = 0; memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay)); init_completion(&qset->remove_complete); } /** * qset_reset - reset endpoint state in a qset. * * Clears the sequence number and current window. This qset must not * be in the ASL or PZL. */ void qset_reset(struct whc *whc, struct whc_qset *qset) { qset->reset = 0; qset->qh.status &= ~QH_STATUS_SEQ_MASK; qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1); } /** * get_qset - get the qset for an async endpoint * * A new qset is created if one does not already exist. */ struct whc_qset *get_qset(struct whc *whc, struct urb *urb, gfp_t mem_flags) { struct whc_qset *qset; qset = urb->ep->hcpriv; if (qset == NULL) { qset = qset_alloc(whc, mem_flags); if (qset == NULL) return NULL; qset->ep = urb->ep; urb->ep->hcpriv = qset; qset_fill_qh(whc, qset, urb); } return qset; } void qset_remove_complete(struct whc *whc, struct whc_qset *qset) { qset->remove = 0; list_del_init(&qset->list_node); complete(&qset->remove_complete); } /** * qset_add_qtds - add qTDs for an URB to a qset * * Returns true if the list (ASL/PZL) must be updated because (for a * WHCI 0.95 controller) an activated qTD was pointed to by iCur. */ enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset) { struct whc_std *std; enum whc_update update = 0; list_for_each_entry(std, &qset->stds, list_node) { struct whc_qtd *qtd; uint32_t status; if (qset->ntds >= WHCI_QSET_TD_MAX || (qset->pause_after_urb && std->urb != qset->pause_after_urb)) break; if (std->qtd) continue; /* already has a qTD */ qtd = std->qtd = &qset->qtd[qset->td_end]; /* Fill in setup bytes for control transfers. */ if (usb_pipecontrol(std->urb->pipe)) memcpy(qtd->setup, std->urb->setup_packet, 8); status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len); if (whc_std_last(std) && usb_pipeout(std->urb->pipe)) status |= QTD_STS_LAST_PKT; /* * For an IN transfer the iAlt field should be set so * the h/w will automatically advance to the next * transfer. However, if there are 8 or more TDs * remaining in this transfer then iAlt cannot be set * as it could point to somewhere in this transfer. */ if (std->ntds_remaining < WHCI_QSET_TD_MAX) { int ialt; ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX; status |= QTD_STS_IALT(ialt); } else if (usb_pipein(std->urb->pipe)) qset->pause_after_urb = std->urb; if (std->num_pointers) qtd->options = cpu_to_le32(QTD_OPT_IOC); else qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL); qtd->page_list_ptr = cpu_to_le64(std->dma_addr); qtd->status = cpu_to_le32(status); if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end) update = WHC_UPDATE_UPDATED; if (++qset->td_end >= WHCI_QSET_TD_MAX) qset->td_end = 0; qset->ntds++; } return update; } /** * qset_remove_qtd - remove the first qTD from a qset. * * The qTD might still be active (if it's part of an IN URB that * resulted in a short read) so ensure it's deactivated.
*/ static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset) { qset->qtd[qset->td_start].status = 0; if (++qset->td_start >= WHCI_QSET_TD_MAX) qset->td_start = 0; qset->ntds--; } static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std) { struct scatterlist *sg; void *bounce; size_t remaining, offset; bounce = std->bounce_buf; remaining = std->len; sg = std->bounce_sg; offset = std->bounce_offset; while (remaining) { size_t len; len = min(sg->length - offset, remaining); memcpy(sg_virt(sg) + offset, bounce, len); bounce += len; remaining -= len; offset += len; if (offset >= sg->length) { sg = sg_next(sg); offset = 0; } } } /** * qset_free_std - remove an sTD and free it. * @whc: the WHCI host controller * @std: the sTD to remove and free. */ void qset_free_std(struct whc *whc, struct whc_std *std) { list_del(&std->list_node); if (std->bounce_buf) { bool is_out = usb_pipeout(std->urb->pipe); dma_addr_t dma_addr; if (std->num_pointers) dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr); else dma_addr = std->dma_addr; dma_unmap_single(whc->wusbhc.dev, dma_addr, std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE); if (!is_out) qset_copy_bounce_to_sg(whc, std); kfree(std->bounce_buf); } if (std->pl_virt) { if (std->dma_addr) dma_unmap_single(whc->wusbhc.dev, std->dma_addr, std->num_pointers * sizeof(struct whc_page_list_entry), DMA_TO_DEVICE); kfree(std->pl_virt); std->pl_virt = NULL; } kfree(std); } /** * qset_remove_qtds - remove an URB's qTDs (and sTDs). */ static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset, struct urb *urb) { struct whc_std *std, *t; list_for_each_entry_safe(std, t, &qset->stds, list_node) { if (std->urb != urb) break; if (std->qtd != NULL) qset_remove_qtd(whc, qset); qset_free_std(whc, std); } } /** * qset_free_stds - free any remaining sTDs for an URB. */ static void qset_free_stds(struct whc_qset *qset, struct urb *urb) { struct whc_std *std, *t; list_for_each_entry_safe(std, t, &qset->stds, list_node) { if (std->urb == urb) qset_free_std(qset->whc, std); } } static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags) { dma_addr_t dma_addr = std->dma_addr; dma_addr_t sp, ep; size_t pl_len; int p; /* Short buffers don't need a page list. */ if (std->len <= WHCI_PAGE_SIZE) { std->num_pointers = 0; return 0; } sp = dma_addr & ~(WHCI_PAGE_SIZE-1); ep = dma_addr + std->len; std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE); pl_len = std->num_pointers * sizeof(struct whc_page_list_entry); std->pl_virt = kmalloc(pl_len, mem_flags); if (std->pl_virt == NULL) return -ENOMEM; std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE); for (p = 0; p < std->num_pointers; p++) { std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr); dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1); } return 0; } /** * urb_dequeue_work - executes asl/pzl update and gives back the urb to the system. 
*/ static void urb_dequeue_work(struct work_struct *work) { struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work); struct whc_qset *qset = wurb->qset; struct whc *whc = qset->whc; unsigned long flags; if (wurb->is_async == true) asl_update(whc, WUSBCMD_ASYNC_UPDATED | WUSBCMD_ASYNC_SYNCED_DB | WUSBCMD_ASYNC_QSET_RM); else pzl_update(whc, WUSBCMD_PERIODIC_UPDATED | WUSBCMD_PERIODIC_SYNCED_DB | WUSBCMD_PERIODIC_QSET_RM); spin_lock_irqsave(&whc->lock, flags); qset_remove_urb(whc, qset, wurb->urb, wurb->status); spin_unlock_irqrestore(&whc->lock, flags); } static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset, struct urb *urb, gfp_t mem_flags) { struct whc_std *std; std = kzalloc(sizeof(struct whc_std), mem_flags); if (std == NULL) return NULL; std->urb = urb; std->qtd = NULL; INIT_LIST_HEAD(&std->list_node); list_add_tail(&std->list_node, &qset->stds); return std; } static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb, gfp_t mem_flags) { size_t remaining; struct scatterlist *sg; int i; int ntds = 0; struct whc_std *std = NULL; struct whc_page_list_entry *entry; dma_addr_t prev_end = 0; size_t pl_len; int p = 0; remaining = urb->transfer_buffer_length; for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) { dma_addr_t dma_addr; size_t dma_remaining; dma_addr_t sp, ep; int num_pointers; if (remaining == 0) { break; } dma_addr = sg_dma_address(sg); dma_remaining = min_t(size_t, sg_dma_len(sg), remaining); while (dma_remaining) { size_t dma_len; /* * We can use the previous std (if it exists) provided that: * - the previous one ended on a page boundary. * - the current one begins on a page boundary. * - the previous one isn't full. * * If a new std is needed but the previous one * was not a whole number of packets then this * sg list cannot be mapped onto multiple * qTDs. Return an error and let the caller * sort it out. */ if (!std || (prev_end & (WHCI_PAGE_SIZE-1)) || (dma_addr & (WHCI_PAGE_SIZE-1)) || std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) { if (std && std->len % qset->max_packet != 0) return -EINVAL; std = qset_new_std(whc, qset, urb, mem_flags); if (std == NULL) { return -ENOMEM; } ntds++; p = 0; } dma_len = dma_remaining; /* * If the remainder of this element doesn't * fit in a single qTD, limit the qTD to a * whole number of packets. This allows the * remainder to go into the next qTD. */ if (std->len + dma_len > QTD_MAX_XFER_SIZE) { dma_len = (QTD_MAX_XFER_SIZE / qset->max_packet) * qset->max_packet - std->len; } std->len += dma_len; std->ntds_remaining = -1; /* filled in later */ sp = dma_addr & ~(WHCI_PAGE_SIZE-1); ep = dma_addr + dma_len; num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE); std->num_pointers += num_pointers; pl_len = std->num_pointers * sizeof(struct whc_page_list_entry); std->pl_virt = krealloc(std->pl_virt, pl_len, mem_flags); if (std->pl_virt == NULL) { return -ENOMEM; } for (;p < std->num_pointers; p++, entry++) { std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr); dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1); } prev_end = dma_addr = ep; dma_remaining -= dma_len; remaining -= dma_len; } } /* Now the number of stds is known, go back and fill in std->ntds_remaining.
*/ list_for_each_entry(std, &qset->stds, list_node) { if (std->ntds_remaining == -1) { pl_len = std->num_pointers * sizeof(struct whc_page_list_entry); std->ntds_remaining = ntds--; std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE); } } return 0; } /** * qset_add_urb_sg_linearize - add an urb with sg list, copying the data * * If the URB contains an sg list whose elements cannot be directly * mapped to qTDs then the data must be transferred via bounce * buffers. */ static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset, struct urb *urb, gfp_t mem_flags) { bool is_out = usb_pipeout(urb->pipe); size_t max_std_len; size_t remaining; int ntds = 0; struct whc_std *std = NULL; void *bounce = NULL; struct scatterlist *sg; int i; /* limit maximum bounce buffer to 16 * 3.5 KiB ~= 28 k */ max_std_len = qset->max_burst * qset->max_packet; remaining = urb->transfer_buffer_length; for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) { size_t len; size_t sg_remaining; void *orig; if (remaining == 0) { break; } sg_remaining = min_t(size_t, remaining, sg->length); orig = sg_virt(sg); while (sg_remaining) { if (!std || std->len == max_std_len) { std = qset_new_std(whc, qset, urb, mem_flags); if (std == NULL) return -ENOMEM; std->bounce_buf = kmalloc(max_std_len, mem_flags); if (std->bounce_buf == NULL) return -ENOMEM; std->bounce_sg = sg; std->bounce_offset = orig - sg_virt(sg); bounce = std->bounce_buf; ntds++; } len = min(sg_remaining, max_std_len - std->len); if (is_out) memcpy(bounce, orig, len); std->len += len; std->ntds_remaining = -1; /* filled in later */ bounce += len; orig += len; sg_remaining -= len; remaining -= len; } } /* * For each of the new sTDs, map the bounce buffers, create * page lists (if necessary), and fill in std->ntds_remaining. */ list_for_each_entry(std, &qset->stds, list_node) { if (std->ntds_remaining != -1) continue; std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE); if (qset_fill_page_list(whc, std, mem_flags) < 0) return -ENOMEM; std->ntds_remaining = ntds--; } return 0; } /** * qset_add_urb - add an urb to the qset's queue. * * The URB is chopped into sTDs, one for each qTD that will be required. * At least one qTD (and sTD) is required even if the transfer has no * data (e.g., for some control transfers).
*/ int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb, gfp_t mem_flags) { struct whc_urb *wurb; int remaining = urb->transfer_buffer_length; u64 transfer_dma = urb->transfer_dma; int ntds_remaining; int ret; wurb = kzalloc(sizeof(struct whc_urb), mem_flags); if (wurb == NULL) goto err_no_mem; urb->hcpriv = wurb; wurb->qset = qset; wurb->urb = urb; INIT_WORK(&wurb->dequeue_work, urb_dequeue_work); if (urb->num_sgs) { ret = qset_add_urb_sg(whc, qset, urb, mem_flags); if (ret == -EINVAL) { qset_free_stds(qset, urb); ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags); } if (ret < 0) goto err_no_mem; return 0; } ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE); if (ntds_remaining == 0) ntds_remaining = 1; while (ntds_remaining) { struct whc_std *std; size_t std_len; std_len = remaining; if (std_len > QTD_MAX_XFER_SIZE) std_len = QTD_MAX_XFER_SIZE; std = qset_new_std(whc, qset, urb, mem_flags); if (std == NULL) goto err_no_mem; std->dma_addr = transfer_dma; std->len = std_len; std->ntds_remaining = ntds_remaining; if (qset_fill_page_list(whc, std, mem_flags) < 0) goto err_no_mem; ntds_remaining--; remaining -= std_len; transfer_dma += std_len; } return 0; err_no_mem: qset_free_stds(qset, urb); return -ENOMEM; } /** * qset_remove_urb - remove an URB from the urb queue. * * The URB is returned to the USB subsystem. */ void qset_remove_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb, int status) { struct wusbhc *wusbhc = &whc->wusbhc; struct whc_urb *wurb = urb->hcpriv; usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb); /* Drop the lock as urb->complete() may enqueue another urb. */ spin_unlock(&whc->lock); wusbhc_giveback_urb(wusbhc, urb, status); spin_lock(&whc->lock); kfree(wurb); } /** * get_urb_status_from_qtd - get the completed urb status from qTD status * @urb: completed urb * @status: qTD status */ static int get_urb_status_from_qtd(struct urb *urb, u32 status) { if (status & QTD_STS_HALTED) { if (status & QTD_STS_DBE) return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM; else if (status & QTD_STS_BABBLE) return -EOVERFLOW; else if (status & QTD_STS_RCE) return -ETIME; return -EPIPE; } if (usb_pipein(urb->pipe) && (urb->transfer_flags & URB_SHORT_NOT_OK) && urb->actual_length < urb->transfer_buffer_length) return -EREMOTEIO; return 0; } /** * process_inactive_qtd - process an inactive (but not halted) qTD. * * Update the urb with the transfer bytes from the qTD, if the urb is * completely transferred or (in the case of an IN only) the LPF is * set, then the transfer is complete and the urb should be returned * to the system. */ void process_inactive_qtd(struct whc *whc, struct whc_qset *qset, struct whc_qtd *qtd) { struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node); struct urb *urb = std->urb; uint32_t status; bool complete; status = le32_to_cpu(qtd->status); urb->actual_length += std->len - QTD_STS_TO_LEN(status); if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT)) complete = true; else complete = whc_std_last(std); qset_remove_qtd(whc, qset); qset_free_std(whc, std); /* * Transfers for this URB are complete? Then return it to the * USB subsystem. */ if (complete) { qset_remove_qtds(whc, qset, urb); qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status)); /* * If iAlt isn't valid then the hardware didn't * advance iCur. Adjust the start and end pointers to * match iCur. 
*/ if (!(status & QTD_STS_IALT_VALID)) qset->td_start = qset->td_end = QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status)); qset->pause_after_urb = NULL; } } /** * process_halted_qtd - process a qset with a halted qtd * * Remove all the qTDs for the failed URB and return the failed URB to * the USB subsystem. Then remove all other qTDs so the qset can be * removed. * * FIXME: this is the point where rate adaptation can be done. If a * transfer failed because it exceeded the maximum number of retries * then it could be reactivated with a slower rate without having to * remove the qset. */ void process_halted_qtd(struct whc *whc, struct whc_qset *qset, struct whc_qtd *qtd) { struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node); struct urb *urb = std->urb; int urb_status; urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status)); qset_remove_qtds(whc, qset, urb); qset_remove_urb(whc, qset, urb, urb_status); list_for_each_entry(std, &qset->stds, list_node) { if (qset->ntds == 0) break; qset_remove_qtd(whc, qset); std->qtd = NULL; } qset->remove = 1; } void qset_free(struct whc *whc, struct whc_qset *qset) { dma_pool_free(whc->qset_pool, qset, qset->qset_dma); } /** * qset_delete - wait for a qset to be unused, then free it. */ void qset_delete(struct whc *whc, struct whc_qset *qset) { wait_for_completion(&qset->remove_complete); qset_free(whc, qset); }
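/*
 * Editor's note: an illustrative sketch, not part of the original driver.
 * The sizing rule used by qset_fill_page_list() and qset_add_urb_sg()
 * counts the WHCI pages *touched* by a buffer, not len/WHCI_PAGE_SIZE;
 * restated on its own it would look like this.
 */
#if 0	/* example only, never compiled */
static unsigned int whc_example_num_page_ptrs(dma_addr_t dma_addr, size_t len)
{
	dma_addr_t sp = dma_addr & ~(WHCI_PAGE_SIZE - 1);	/* round down to page start */
	dma_addr_t ep = dma_addr + len;				/* one past the end */

	/*
	 * e.g. with 4 KiB WHCI pages, an 8 KiB buffer starting 0x100 bytes
	 * into a page touches 3 pages, so 3 page-list entries are needed.
	 */
	return DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
}
#endif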
Altaf-Mahdi/flo
drivers/usb/host/whci/qset.c
C
gpl-2.0
21,268
/* * Microblaze support for cache consistent memory. * Copyright (C) 2010 Michal Simek <monstr@monstr.eu> * Copyright (C) 2010 PetaLogix * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au> * * Based on PowerPC version derived from arch/arm/mm/consistent.c * Copyright (C) 2001 Dan Malek (dmalek@jlc.net) * Copyright (C) 2000 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/stddef.h> #include <linux/vmalloc.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/bootmem.h> #include <linux/highmem.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/gfp.h> #include <asm/pgalloc.h> #include <linux/io.h> #include <linux/hardirq.h> #include <asm/mmu_context.h> #include <asm/mmu.h> #include <linux/uaccess.h> #include <asm/pgtable.h> #include <asm/cpuinfo.h> #include <asm/tlbflush.h> #ifndef CONFIG_MMU /* I have to use dcache values because I can't rely on ram size */ # define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1) #endif /* * Consistent memory allocators. Used for DMA devices that want to * share uncached memory with the processor core. * My crufty no-MMU approach is simple. In the HW platform we can optionally * mirror the DDR up above the processor cacheable region. So, memory accessed * in this mirror region will not be cached. It's alloced from the same * pool as normal memory, but the handle we return is shifted up into the * uncached region. This will no doubt cause big problems if memory allocated * here is not also freed properly. -- JW */ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle) { unsigned long order, vaddr; void *ret; unsigned int i, err = 0; struct page *page, *end; #ifdef CONFIG_MMU phys_addr_t pa; struct vm_struct *area; unsigned long va; #endif if (in_interrupt()) BUG(); /* Only allocate page size areas. */ size = PAGE_ALIGN(size); order = get_order(size); vaddr = __get_free_pages(gfp, order); if (!vaddr) return NULL; /* * we need to ensure that there are no cachelines in use, * or worse dirty in this area. */ flush_dcache_range(virt_to_phys((void *)vaddr), virt_to_phys((void *)vaddr) + size); #ifndef CONFIG_MMU ret = (void *)vaddr; /* * Here's the magic! Note if the uncached shadow is not implemented, * it's up to the calling code to also test that condition and make * other arrangements, such as manually flushing the cache and so on. */ # ifdef CONFIG_XILINX_UNCACHED_SHADOW ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK); # endif if ((unsigned int)ret > cpuinfo.dcache_base && (unsigned int)ret < cpuinfo.dcache_high) printk(KERN_WARNING "ERROR: Your cache coherent area is CACHED!!!\n"); /* dma_handle is same as physical (shadowed) address */ *dma_handle = (dma_addr_t)ret; #else /* Allocate some common virtual space to map the new pages. */ area = get_vm_area(size, VM_ALLOC); if (!area) { free_pages(vaddr, order); return NULL; } va = (unsigned long) area->addr; ret = (void *)va; /* This gives us the real physical address of the first page. */ *dma_handle = pa = virt_to_bus((void *)vaddr); #endif /* * free wasted pages.
We skip the first page since we know * that it will have count = 1 and won't require freeing. * We also mark the pages in use as reserved so that * remap_page_range works. */ page = virt_to_page(vaddr); end = page + (1 << order); split_page(page, order); for (i = 0; i < size && err == 0; i += PAGE_SIZE) { #ifdef CONFIG_MMU /* MS: This is the whole magic - use cache inhibit pages */ err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE); #endif SetPageReserved(page); page++; } /* Free the otherwise unused pages. */ while (page < end) { __free_page(page); page++; } if (err) { free_pages(vaddr, order); return NULL; } return ret; } EXPORT_SYMBOL(consistent_alloc); /* * free page(s) as defined by the above mapping. */ void consistent_free(size_t size, void *vaddr) { struct page *page; if (in_interrupt()) BUG(); size = PAGE_ALIGN(size); #ifndef CONFIG_MMU /* Clear SHADOW_MASK bit in address, and free as per usual */ # ifdef CONFIG_XILINX_UNCACHED_SHADOW vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK); # endif page = virt_to_page(vaddr); do { ClearPageReserved(page); __free_page(page); page++; } while (size -= PAGE_SIZE); #else do { pte_t *ptep; unsigned long pfn; ptep = pte_offset_kernel(pmd_offset(pgd_offset_k( (unsigned int)vaddr), (unsigned int)vaddr), (unsigned int)vaddr); if (!pte_none(*ptep) && pte_present(*ptep)) { pfn = pte_pfn(*ptep); pte_clear(&init_mm, (unsigned int)vaddr, ptep); if (pfn_valid(pfn)) { page = pfn_to_page(pfn); ClearPageReserved(page); __free_page(page); } } vaddr += PAGE_SIZE; } while (size -= PAGE_SIZE); /* flush tlb */ flush_tlb_all(); #endif } EXPORT_SYMBOL(consistent_free); /* * make an area consistent. */ void consistent_sync(void *vaddr, size_t size, int direction) { unsigned long start; unsigned long end; start = (unsigned long)vaddr; /* Convert start address back down to unshadowed memory region */ #ifdef CONFIG_XILINX_UNCACHED_SHADOW start &= ~UNCACHED_SHADOW_MASK; #endif end = start + size; switch (direction) { case PCI_DMA_NONE: BUG(); case PCI_DMA_FROMDEVICE: /* invalidate only */ invalidate_dcache_range(start, end); break; case PCI_DMA_TODEVICE: /* writeback only */ flush_dcache_range(start, end); break; case PCI_DMA_BIDIRECTIONAL: /* writeback and invalidate */ flush_dcache_range(start, end); break; } } EXPORT_SYMBOL(consistent_sync); /* * consistent_sync_page makes memory consistent. identical * to consistent_sync, but takes a struct page instead of a * virtual address */ void consistent_sync_page(struct page *page, unsigned long offset, size_t size, int direction) { unsigned long start = (unsigned long)page_address(page) + offset; consistent_sync((void *)start, size, direction); } EXPORT_SYMBOL(consistent_sync_page);
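/*
 * Editor's note: a usage sketch, not part of this file.  A hypothetical
 * driver wanting an uncached descriptor area would pair the exports above
 * roughly as follows; the PAGE_SIZE length and GFP_KERNEL flag are just
 * illustrative choices.
 */
#if 0	/* example only, never compiled */
static void *example_desc;
static dma_addr_t example_desc_dma;

static int example_init(void)
{
	example_desc = consistent_alloc(GFP_KERNEL, PAGE_SIZE,
					&example_desc_dma);
	if (!example_desc)
		return -ENOMEM;
	/* example_desc_dma is what gets programmed into the device */
	return 0;
}

static void example_exit(void)
{
	consistent_free(PAGE_SIZE, example_desc);
}
#endif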
MassStash/htc_m8wl_kernel_sense_4.4.4
arch/microblaze/mm/consistent.c
C
gpl-2.0
6,521
/* mii.c: MII interface library Maintained by Jeff Garzik <jgarzik@pobox.com> Copyright 2001,2002 Jeff Garzik Various code came from myson803.c and other files by Donald Becker. Copyright: Written 1998-2002 by Donald Becker. This software may be used and distributed according to the terms of the GNU General Public License (GPL), incorporated herein by reference. Drivers based on or derived from this code fall under the GPL and must retain the authorship, copyright and license notice. This file is not a complete program and may only be used when the entire operating system is licensed under the GPL. The author may be reached as becker@scyld.com, or C/O Scyld Computing Corporation 410 Severn Ave., Suite 210 Annapolis MD 21403 */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/mii.h> static u32 mii_get_an(struct mii_if_info *mii, u16 addr) { int advert; advert = mii->mdio_read(mii->dev, mii->phy_id, addr); return mii_lpa_to_ethtool_lpa_t(advert); } /** * mii_ethtool_gset - get settings that are specified in @ecmd * @mii: MII interface * @ecmd: requested ethtool_cmd * * The @ecmd parameter is expected to have been cleared before calling * mii_ethtool_gset(). * * Returns 0 for success, negative on error. */ int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) { struct net_device *dev = mii->dev; u16 bmcr, bmsr, ctrl1000 = 0, stat1000 = 0; u32 nego; ecmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); if (mii->supports_gmii) ecmd->supported |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; /* only supports twisted-pair */ ecmd->port = PORT_MII; /* only supports internal transceiver */ ecmd->transceiver = XCVR_INTERNAL; /* this isn't fully supported at higher layers */ ecmd->phy_address = mii->phy_id; ecmd->mdio_support = ETH_MDIO_SUPPORTS_C22; ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII; bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); bmsr = mii->mdio_read(dev, mii->phy_id, MII_BMSR); if (mii->supports_gmii) { ctrl1000 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000); stat1000 = mii->mdio_read(dev, mii->phy_id, MII_STAT1000); } if (bmcr & BMCR_ANENABLE) { ecmd->advertising |= ADVERTISED_Autoneg; ecmd->autoneg = AUTONEG_ENABLE; ecmd->advertising |= mii_get_an(mii, MII_ADVERTISE); if (mii->supports_gmii) ecmd->advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000); if (bmsr & BMSR_ANEGCOMPLETE) { ecmd->lp_advertising = mii_get_an(mii, MII_LPA); ecmd->lp_advertising |= mii_stat1000_to_ethtool_lpa_t(stat1000); } else { ecmd->lp_advertising = 0; } nego = ecmd->advertising & ecmd->lp_advertising; if (nego & (ADVERTISED_1000baseT_Full | ADVERTISED_1000baseT_Half)) { ethtool_cmd_speed_set(ecmd, SPEED_1000); ecmd->duplex = !!(nego & ADVERTISED_1000baseT_Full); } else if (nego & (ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half)) { ethtool_cmd_speed_set(ecmd, SPEED_100); ecmd->duplex = !!(nego & ADVERTISED_100baseT_Full); } else { ethtool_cmd_speed_set(ecmd, SPEED_10); ecmd->duplex = !!(nego & ADVERTISED_10baseT_Full); } } else { ecmd->autoneg = AUTONEG_DISABLE; ethtool_cmd_speed_set(ecmd, ((bmcr & BMCR_SPEED1000 && (bmcr & BMCR_SPEED100) == 0) ? SPEED_1000 : ((bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10))); ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? 
DUPLEX_FULL : DUPLEX_HALF; } mii->full_duplex = ecmd->duplex; /* ignore maxtxpkt, maxrxpkt for now */ return 0; } /** * mii_ethtool_sset - set settings that are specified in @ecmd * @mii: MII interface * @ecmd: requested ethtool_cmd * * Returns 0 for success, negative on error. */ int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) { struct net_device *dev = mii->dev; u32 speed = ethtool_cmd_speed(ecmd); if (speed != SPEED_10 && speed != SPEED_100 && speed != SPEED_1000) return -EINVAL; if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) return -EINVAL; if (ecmd->port != PORT_MII) return -EINVAL; if (ecmd->transceiver != XCVR_INTERNAL) return -EINVAL; if (ecmd->phy_address != mii->phy_id) return -EINVAL; if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE) return -EINVAL; if ((speed == SPEED_1000) && (!mii->supports_gmii)) return -EINVAL; /* ignore supported, maxtxpkt, maxrxpkt */ if (ecmd->autoneg == AUTONEG_ENABLE) { u32 bmcr, advert, tmp; u32 advert2 = 0, tmp2 = 0; if ((ecmd->advertising & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) == 0) return -EINVAL; /* advertise only what has been requested */ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4); if (mii->supports_gmii) { advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000); tmp2 = advert2 & ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL); } tmp |= ethtool_adv_to_mii_adv_t(ecmd->advertising); if (mii->supports_gmii) tmp2 |= ethtool_adv_to_mii_ctrl1000_t(ecmd->advertising); if (advert != tmp) { mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp); mii->advertising = tmp; } if ((mii->supports_gmii) && (advert2 != tmp2)) mii->mdio_write(dev, mii->phy_id, MII_CTRL1000, tmp2); /* turn on autonegotiation, and force a renegotiate */ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr); mii->force_media = 0; } else { u32 bmcr, tmp; /* turn off auto negotiation, set speed and duplexity */ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_FULLDPLX); if (speed == SPEED_1000) tmp |= BMCR_SPEED1000; else if (speed == SPEED_100) tmp |= BMCR_SPEED100; if (ecmd->duplex == DUPLEX_FULL) { tmp |= BMCR_FULLDPLX; mii->full_duplex = 1; } else mii->full_duplex = 0; if (bmcr != tmp) mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp); mii->force_media = 1; } return 0; } /** * mii_check_gmii_support - check if the MII supports Gb interfaces * @mii: the MII interface */ int mii_check_gmii_support(struct mii_if_info *mii) { int reg; reg = mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); if (reg & BMSR_ESTATEN) { reg = mii->mdio_read(mii->dev, mii->phy_id, MII_ESTATUS); if (reg & (ESTATUS_1000_TFULL | ESTATUS_1000_THALF)) return 1; } return 0; } /** * mii_link_ok - is link status up/ok * @mii: the MII interface * * Returns 1 if the MII reports link status up/ok, 0 otherwise. */ int mii_link_ok (struct mii_if_info *mii) { /* first, a dummy read, needed to latch some MII phys */ mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS) return 1; return 0; } /** * mii_nway_restart - restart NWay (autonegotiation) for this interface * @mii: the MII interface * * Returns 0 on success, negative on error. 
*/ int mii_nway_restart (struct mii_if_info *mii) { int bmcr; int r = -EINVAL; /* if autoneg is off, it's an error */ bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR); if (bmcr & BMCR_ANENABLE) { bmcr |= BMCR_ANRESTART; mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr); r = 0; } return r; } /** * mii_check_link - check MII link status * @mii: MII interface * * If the link status changed (previous != current), call * netif_carrier_on() if current link status is Up or call * netif_carrier_off() if current link status is Down. */ void mii_check_link (struct mii_if_info *mii) { int cur_link = mii_link_ok(mii); int prev_link = netif_carrier_ok(mii->dev); if (cur_link && !prev_link) netif_carrier_on(mii->dev); else if (prev_link && !cur_link) netif_carrier_off(mii->dev); } /** * mii_check_media - check the MII interface for a duplex change * @mii: the MII interface * @ok_to_print: OK to print link up/down messages * @init_media: OK to save duplex mode in @mii * * Returns 1 if the duplex mode changed, 0 if not. * If the media type is forced, always returns 0. */ unsigned int mii_check_media (struct mii_if_info *mii, unsigned int ok_to_print, unsigned int init_media) { unsigned int old_carrier, new_carrier; int advertise, lpa, media, duplex; int lpa2 = 0; /* if forced media, go no further */ if (mii->force_media) return 0; /* duplex did not change */ /* check current and old link status */ old_carrier = netif_carrier_ok(mii->dev) ? 1 : 0; new_carrier = (unsigned int) mii_link_ok(mii); /* if carrier state did not change, this is a "bounce", * just exit as everything is already set correctly */ if ((!init_media) && (old_carrier == new_carrier)) return 0; /* duplex did not change */ /* no carrier, nothing much to do */ if (!new_carrier) { netif_carrier_off(mii->dev); if (ok_to_print) netdev_info(mii->dev, "link down\n"); return 0; /* duplex did not change */ } /* * we have carrier, see who's on the other end */ netif_carrier_on(mii->dev); /* get MII advertise and LPA values */ if ((!init_media) && (mii->advertising)) advertise = mii->advertising; else { advertise = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE); mii->advertising = advertise; } lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA); if (mii->supports_gmii) lpa2 = mii->mdio_read(mii->dev, mii->phy_id, MII_STAT1000); /* figure out media and duplex from advertise and LPA values */ media = mii_nway_result(lpa & advertise); duplex = (media & ADVERTISE_FULL) ? 1 : 0; if (lpa2 & LPA_1000FULL) duplex = 1; if (ok_to_print) netdev_info(mii->dev, "link up, %uMbps, %s-duplex, lpa 0x%04X\n", lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 : media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? 100 : 10, duplex ? "full" : "half", lpa); if ((init_media) || (mii->full_duplex != duplex)) { mii->full_duplex = duplex; return 1; /* duplex changed */ } return 0; /* duplex did not change */ } /** * generic_mii_ioctl - main MII ioctl interface * @mii_if: the MII interface * @mii_data: MII ioctl data structure * @cmd: MII ioctl command * @duplex_chg_out: pointer to @duplex_changed status if there was no * ioctl error * * Returns 0 on success, negative on error. 
*/ int generic_mii_ioctl(struct mii_if_info *mii_if, struct mii_ioctl_data *mii_data, int cmd, unsigned int *duplex_chg_out) { int rc = 0; unsigned int duplex_changed = 0; if (duplex_chg_out) *duplex_chg_out = 0; mii_data->phy_id &= mii_if->phy_id_mask; mii_data->reg_num &= mii_if->reg_num_mask; switch(cmd) { case SIOCGMIIPHY: mii_data->phy_id = mii_if->phy_id; /* fall through */ case SIOCGMIIREG: mii_data->val_out = mii_if->mdio_read(mii_if->dev, mii_data->phy_id, mii_data->reg_num); break; case SIOCSMIIREG: { u16 val = mii_data->val_in; if (mii_data->phy_id == mii_if->phy_id) { switch(mii_data->reg_num) { case MII_BMCR: { unsigned int new_duplex = 0; if (val & (BMCR_RESET|BMCR_ANENABLE)) mii_if->force_media = 0; else mii_if->force_media = 1; if (mii_if->force_media && (val & BMCR_FULLDPLX)) new_duplex = 1; if (mii_if->full_duplex != new_duplex) { duplex_changed = 1; mii_if->full_duplex = new_duplex; } break; } case MII_ADVERTISE: mii_if->advertising = val; break; default: /* do nothing */ break; } } mii_if->mdio_write(mii_if->dev, mii_data->phy_id, mii_data->reg_num, val); break; } default: rc = -EOPNOTSUPP; break; } if ((rc == 0) && (duplex_chg_out) && (duplex_changed)) *duplex_chg_out = 1; return rc; } MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>"); MODULE_DESCRIPTION ("MII hardware support library"); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(mii_link_ok); EXPORT_SYMBOL(mii_nway_restart); EXPORT_SYMBOL(mii_ethtool_gset); EXPORT_SYMBOL(mii_ethtool_sset); EXPORT_SYMBOL(mii_check_link); EXPORT_SYMBOL(mii_check_media); EXPORT_SYMBOL(mii_check_gmii_support); EXPORT_SYMBOL(generic_mii_ioctl);
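/*
 * Editor's note: a wiring sketch, not part of mii.c.  A NIC driver is
 * expected to fill a struct mii_if_info with its own MDIO accessors and
 * PHY identity before using the helpers above; my_mdio_read() and
 * my_mdio_write() are assumed driver-provided callbacks, and the PHY
 * address of 1 is only an example.
 */
#if 0	/* example only, never compiled */
static void example_mii_setup(struct net_device *dev, struct mii_if_info *mii)
{
	mii->dev = dev;
	mii->phy_id = 1;		/* assumed PHY address */
	mii->phy_id_mask = 0x1f;
	mii->reg_num_mask = 0x1f;
	mii->mdio_read = my_mdio_read;	/* int fn(struct net_device *, int, int) */
	mii->mdio_write = my_mdio_write;
	mii->supports_gmii = mii_check_gmii_support(mii);
}

/* then, e.g. from the driver's periodic link poll: */
static void example_link_poll(struct mii_if_info *mii)
{
	if (mii_check_media(mii, 1, 0))
		pr_info("%s: duplex changed\n", mii->dev->name);
}
#endif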
heaven001/android_kernel_sony_msm8974
drivers/net/mii.c
C
gpl-2.0
12,578
/* * Copyright 2010 Freescale Semiconductor, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/delay.h> #include <linux/err.h> #include <linux/mutex.h> #include <asm/processor.h> /* for cpu_relax() */ #include <mach/mxs.h> #define OCOTP_WORD_OFFSET 0x20 #define OCOTP_WORD_COUNT 0x20 #define BM_OCOTP_CTRL_BUSY (1 << 8) #define BM_OCOTP_CTRL_ERROR (1 << 9) #define BM_OCOTP_CTRL_RD_BANK_OPEN (1 << 12) static DEFINE_MUTEX(ocotp_mutex); static u32 ocotp_words[OCOTP_WORD_COUNT]; const u32 *mxs_get_ocotp(void) { void __iomem *ocotp_base = MXS_IO_ADDRESS(MXS_OCOTP_BASE_ADDR); int timeout = 0x400; size_t i; static int once = 0; if (once) return ocotp_words; mutex_lock(&ocotp_mutex); /* * clk_enable(hbus_clk) for ocotp can be skipped * as it must be on when system is running. */ /* try to clear ERROR bit */ __mxs_clrl(BM_OCOTP_CTRL_ERROR, ocotp_base); /* check both BUSY and ERROR cleared */ while ((__raw_readl(ocotp_base) & (BM_OCOTP_CTRL_BUSY | BM_OCOTP_CTRL_ERROR)) && --timeout) cpu_relax(); if (unlikely(!timeout)) goto error_unlock; /* open OCOTP banks for read */ __mxs_setl(BM_OCOTP_CTRL_RD_BANK_OPEN, ocotp_base); /* approximately wait 32 hclk cycles */ udelay(1); /* poll BUSY bit becoming cleared */ timeout = 0x400; while ((__raw_readl(ocotp_base) & BM_OCOTP_CTRL_BUSY) && --timeout) cpu_relax(); if (unlikely(!timeout)) goto error_unlock; for (i = 0; i < OCOTP_WORD_COUNT; i++) ocotp_words[i] = __raw_readl(ocotp_base + OCOTP_WORD_OFFSET + i * 0x10); /* close banks for power saving */ __mxs_clrl(BM_OCOTP_CTRL_RD_BANK_OPEN, ocotp_base); once = 1; mutex_unlock(&ocotp_mutex); return ocotp_words; error_unlock: mutex_unlock(&ocotp_mutex); pr_err("%s: timeout in reading OCOTP\n", __func__); return NULL; }
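/*
 * Editor's note: a consumer sketch, not part of this file.  mxs_get_ocotp()
 * returns the cached 32-word array (or NULL on timeout); which word holds
 * which fuse is chip-specific, so treating index 0 as interesting below is
 * only an assumption.
 */
#if 0	/* example only, never compiled */
static u32 example_read_otp_word(unsigned int idx)
{
	const u32 *otp = mxs_get_ocotp();

	if (!otp || idx >= OCOTP_WORD_COUNT)
		return 0;	/* treat a timeout as an all-zero fuse */
	return otp[idx];
}
#endif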
playfulgod/android_kernel_lge_kk_zee
arch/arm/mach-mxs/ocotp.c
C
gpl-2.0
2,304
#include <linux/module.h> #include <linux/gfp.h> #include <linux/pagemap.h> #include <linux/highmem.h> #include <linux/ceph/pagelist.h> static void ceph_pagelist_unmap_tail(struct ceph_pagelist *pl) { if (pl->mapped_tail) { struct page *page = list_entry(pl->head.prev, struct page, lru); kunmap(page); pl->mapped_tail = NULL; } } int ceph_pagelist_release(struct ceph_pagelist *pl) { ceph_pagelist_unmap_tail(pl); while (!list_empty(&pl->head)) { struct page *page = list_first_entry(&pl->head, struct page, lru); list_del(&page->lru); __free_page(page); } ceph_pagelist_free_reserve(pl); return 0; } EXPORT_SYMBOL(ceph_pagelist_release); static int ceph_pagelist_addpage(struct ceph_pagelist *pl) { struct page *page; if (!pl->num_pages_free) { page = __page_cache_alloc(GFP_NOFS); } else { page = list_first_entry(&pl->free_list, struct page, lru); list_del(&page->lru); --pl->num_pages_free; } if (!page) return -ENOMEM; pl->room += PAGE_SIZE; ceph_pagelist_unmap_tail(pl); list_add_tail(&page->lru, &pl->head); pl->mapped_tail = kmap(page); return 0; } int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len) { while (pl->room < len) { size_t bit = pl->room; int ret; memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK), buf, bit); pl->length += bit; pl->room -= bit; buf += bit; len -= bit; ret = ceph_pagelist_addpage(pl); if (ret) return ret; } memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK), buf, len); pl->length += len; pl->room -= len; return 0; } EXPORT_SYMBOL(ceph_pagelist_append); /** * Allocate enough pages for a pagelist to append the given amount * of data without allocating. * Returns: 0 on success, -ENOMEM on error. */ int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space) { if (space <= pl->room) return 0; space -= pl->room; space = (space + PAGE_SIZE - 1) >> PAGE_SHIFT; /* conv to num pages */ while (space > pl->num_pages_free) { struct page *page = __page_cache_alloc(GFP_NOFS); if (!page) return -ENOMEM; list_add_tail(&page->lru, &pl->free_list); ++pl->num_pages_free; } return 0; } EXPORT_SYMBOL(ceph_pagelist_reserve); /** * Free any pages that have been preallocated. */ int ceph_pagelist_free_reserve(struct ceph_pagelist *pl) { while (!list_empty(&pl->free_list)) { struct page *page = list_first_entry(&pl->free_list, struct page, lru); list_del(&page->lru); __free_page(page); --pl->num_pages_free; } BUG_ON(pl->num_pages_free); return 0; } EXPORT_SYMBOL(ceph_pagelist_free_reserve); /** * Create a truncation point. */ void ceph_pagelist_set_cursor(struct ceph_pagelist *pl, struct ceph_pagelist_cursor *c) { c->pl = pl; c->page_lru = pl->head.prev; c->room = pl->room; } EXPORT_SYMBOL(ceph_pagelist_set_cursor); /** * Truncate a pagelist to the given point. Move extra pages to reserve. * This won't sleep. * Returns: 0 on success, * -EINVAL if the pagelist doesn't match the trunc point pagelist */ int ceph_pagelist_truncate(struct ceph_pagelist *pl, struct ceph_pagelist_cursor *c) { struct page *page; if (pl != c->pl) return -EINVAL; ceph_pagelist_unmap_tail(pl); while (pl->head.prev != c->page_lru) { page = list_entry(pl->head.prev, struct page, lru); list_del(&page->lru); /* remove from pagelist */ list_add_tail(&page->lru, &pl->free_list); /* add to reserve */ ++pl->num_pages_free; } pl->room = c->room; if (!list_empty(&pl->head)) { page = list_entry(pl->head.prev, struct page, lru); pl->mapped_tail = kmap(page); } return 0; } EXPORT_SYMBOL(ceph_pagelist_truncate);
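/*
 * Editor's note: a usage sketch, not part of this file.  A caller that
 * must append while atomic would reserve first, mark a truncation point,
 * and roll back a failed append, roughly as below.  ceph_pagelist_init()
 * is assumed to be the inline initializer from <linux/ceph/pagelist.h>,
 * called before the pagelist is first used.
 */
#if 0	/* example only, never compiled */
static int example_fill(struct ceph_pagelist *pl, const void *buf, size_t len)
{
	struct ceph_pagelist_cursor trunc;
	int ret;

	ret = ceph_pagelist_reserve(pl, len);	/* allocate while we may sleep */
	if (ret)
		return ret;

	ceph_pagelist_set_cursor(pl, &trunc);
	ret = ceph_pagelist_append(pl, buf, len); /* draws on the reserve */
	if (ret)
		ceph_pagelist_truncate(pl, &trunc); /* undo the partial append */
	return ret;
}
#endif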
MyAOSP/kernel_asus_tf201
net/ceph/pagelist.c
C
gpl-2.0
3,704
/* * wm9713.c -- Codec touch driver for Wolfson WM9713 AC97 Codec. * * Copyright 2003, 2004, 2005, 2006, 2007, 2008 Wolfson Microelectronics PLC. * Author: Liam Girdwood <lrg@slimlogic.co.uk> * Parts Copyright : Ian Molton <spyro@f2s.com> * Andrew Zabolotny <zap@homelink.ru> * Russell King <rmk@arm.linux.org.uk> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/input.h> #include <linux/delay.h> #include <linux/bitops.h> #include <linux/wm97xx.h> #define TS_NAME "wm97xx" #define WM9713_VERSION "1.00" #define DEFAULT_PRESSURE 0xb0c0 /* * Module parameters */ /* * Set internal pull up for pen detect. * * Pull up is in the range 1.02k (least sensitive) to 64k (most sensitive) * i.e. pull up resistance = 64k Ohms / rpu. * * Adjust this value if you are having problems with pen detect not * detecting any down event. */ static int rpu = 8; module_param(rpu, int, 0); MODULE_PARM_DESC(rpu, "Set internal pull up resistor for pen detect."); /* * Set current used for pressure measurement. * * Set pil = 2 to use 400uA * pil = 1 to use 200uA and * pil = 0 to disable pressure measurement. * * This is used to increase the range of values returned by the adc * when measuring touchpanel pressure. */ static int pil; module_param(pil, int, 0); MODULE_PARM_DESC(pil, "Set current used for pressure measurement."); /* * Set threshold for pressure measurement. * * Pen down pressure below threshold is ignored. */ static int pressure = DEFAULT_PRESSURE & 0xfff; module_param(pressure, int, 0); MODULE_PARM_DESC(pressure, "Set threshold for pressure measurement."); /* * Set adc sample delay. * * For accurate touchpanel measurements, some settling time may be * required between the switch matrix applying a voltage across the * touchpanel plate and the ADC sampling the signal. * * This delay can be set by setting delay = n, where n is the array * position of the delay in the array delay_table below. * Long delays > 1ms are supported for completeness, but are not * recommended. */ static int delay = 4; module_param(delay, int, 0); MODULE_PARM_DESC(delay, "Set adc sample delay."); /* * Set five_wire = 1 to use a 5 wire touchscreen. * * NOTE: Five wire mode does not allow for readback of pressure. */ static int five_wire; module_param(five_wire, int, 0); MODULE_PARM_DESC(five_wire, "Set to '1' to use 5-wire touchscreen."); /* * Set adc mask function. * * Sources of glitch noise, such as signals driving an LCD display, may feed * through to the touch screen plates and affect measurement accuracy. In * order to minimise this, a signal may be applied to the MASK pin to delay or * synchronise the sampling. * * 0 = No delay or sync * 1 = High on pin stops conversions * 2 = Edge triggered, edge on pin delays conversion by delay param (above) * 3 = Edge triggered, edge on pin starts conversion after delay param */ static int mask; module_param(mask, int, 0); MODULE_PARM_DESC(mask, "Set adc mask function."); /* * Coordinate Polling Enable. * * Set to 1 to enable coordinate polling. e.g. x,y[,p] is sampled together * for every poll.
*/ static int coord; module_param(coord, int, 0); MODULE_PARM_DESC(coord, "Polling coordinate mode"); /* * ADC sample delay times in uS */ static const int delay_table[] = { 21, /* 1 AC97 Link frames */ 42, /* 2 */ 84, /* 4 */ 167, /* 8 */ 333, /* 16 */ 667, /* 32 */ 1000, /* 48 */ 1333, /* 64 */ 2000, /* 96 */ 2667, /* 128 */ 3333, /* 160 */ 4000, /* 192 */ 4667, /* 224 */ 5333, /* 256 */ 6000, /* 288 */ 0 /* No delay, switch matrix always on */ }; /* * Delay after issuing a POLL command. * * The delay is 3 AC97 link frames + the touchpanel settling delay */ static inline void poll_delay(int d) { udelay(3 * AC97_LINK_FRAME + delay_table[d]); } /* * set up the physical settings of the WM9713 */ static void wm9713_phy_init(struct wm97xx *wm) { u16 dig1 = 0, dig2, dig3; /* default values */ dig2 = WM97XX_DELAY(4) | WM97XX_SLT(5); dig3 = WM9712_RPU(1); /* rpu */ if (rpu) { dig3 &= 0xffc0; dig3 |= WM9712_RPU(rpu); dev_info(wm->dev, "setting pen detect pull-up to %d Ohms\n", 64000 / rpu); } /* Five wire panel? */ if (five_wire) { dig3 |= WM9713_45W; dev_info(wm->dev, "setting 5-wire touchscreen mode."); if (pil) { dev_warn(wm->dev, "Pressure measurement not supported in 5 " "wire mode, disabling\n"); pil = 0; } } /* touchpanel pressure */ if (pil == 2) { dig3 |= WM9712_PIL; dev_info(wm->dev, "setting pressure measurement current to 400uA."); } else if (pil) dev_info(wm->dev, "setting pressure measurement current to 200uA."); if (!pil) pressure = 0; /* sample settling delay */ if (delay < 0 || delay > 15) { dev_info(wm->dev, "supplied delay out of range."); delay = 4; dev_info(wm->dev, "setting adc sample delay to %d u Secs.", delay_table[delay]); } dig2 &= 0xff0f; dig2 |= WM97XX_DELAY(delay); /* mask */ dig3 |= ((mask & 0x3) << 4); if (coord) dig3 |= WM9713_WAIT; wm->misc = wm97xx_reg_read(wm, 0x5a); wm97xx_reg_write(wm, AC97_WM9713_DIG1, dig1); wm97xx_reg_write(wm, AC97_WM9713_DIG2, dig2); wm97xx_reg_write(wm, AC97_WM9713_DIG3, dig3); wm97xx_reg_write(wm, AC97_GPIO_STICKY, 0x0); } static void wm9713_dig_enable(struct wm97xx *wm, int enable) { u16 val; if (enable) { val = wm97xx_reg_read(wm, AC97_EXTENDED_MID); wm97xx_reg_write(wm, AC97_EXTENDED_MID, val & 0x7fff); wm97xx_reg_write(wm, AC97_WM9713_DIG3, wm->dig[2] | WM97XX_PRP_DET_DIG); wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD); /* dummy read */ } else { wm97xx_reg_write(wm, AC97_WM9713_DIG3, wm->dig[2] & ~WM97XX_PRP_DET_DIG); val = wm97xx_reg_read(wm, AC97_EXTENDED_MID); wm97xx_reg_write(wm, AC97_EXTENDED_MID, val | 0x8000); } } static void wm9713_dig_restore(struct wm97xx *wm) { wm97xx_reg_write(wm, AC97_WM9713_DIG1, wm->dig_save[0]); wm97xx_reg_write(wm, AC97_WM9713_DIG2, wm->dig_save[1]); wm97xx_reg_write(wm, AC97_WM9713_DIG3, wm->dig_save[2]); } static void wm9713_aux_prepare(struct wm97xx *wm) { memcpy(wm->dig_save, wm->dig, sizeof(wm->dig)); wm97xx_reg_write(wm, AC97_WM9713_DIG1, 0); wm97xx_reg_write(wm, AC97_WM9713_DIG2, 0); wm97xx_reg_write(wm, AC97_WM9713_DIG3, WM97XX_PRP_DET_DIG); } static inline int is_pden(struct wm97xx *wm) { return wm->dig[2] & WM9713_PDEN; } /* * Read a sample from the WM9713 adc in polling mode. 
*/ static int wm9713_poll_sample(struct wm97xx *wm, int adcsel, int *sample) { u16 dig1; int timeout = 5 * delay; bool wants_pen = adcsel & WM97XX_PEN_DOWN; if (wants_pen && !wm->pen_probably_down) { u16 data = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD); if (!(data & WM97XX_PEN_DOWN)) return RC_PENUP; wm->pen_probably_down = 1; } /* set up digitiser */ dig1 = wm97xx_reg_read(wm, AC97_WM9713_DIG1); dig1 &= ~WM9713_ADCSEL_MASK; /* WM97XX_ADCSEL_* channels need to be converted to WM9713 format */ dig1 |= 1 << ((adcsel & WM97XX_ADCSEL_MASK) >> 12); if (wm->mach_ops && wm->mach_ops->pre_sample) wm->mach_ops->pre_sample(adcsel); wm97xx_reg_write(wm, AC97_WM9713_DIG1, dig1 | WM9713_POLL); /* wait 3 AC97 time slots + delay for conversion */ poll_delay(delay); /* wait for POLL to go low */ while ((wm97xx_reg_read(wm, AC97_WM9713_DIG1) & WM9713_POLL) && timeout) { udelay(AC97_LINK_FRAME); timeout--; } if (timeout <= 0) { /* If PDEN is set, we can get a timeout when pen goes up */ if (is_pden(wm)) wm->pen_probably_down = 0; else dev_dbg(wm->dev, "adc sample timeout"); return RC_PENUP; } *sample = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD); if (wm->mach_ops && wm->mach_ops->post_sample) wm->mach_ops->post_sample(adcsel); /* check we have correct sample */ if ((*sample ^ adcsel) & WM97XX_ADCSEL_MASK) { dev_dbg(wm->dev, "adc wrong sample, wanted %x got %x", adcsel & WM97XX_ADCSEL_MASK, *sample & WM97XX_ADCSEL_MASK); return RC_PENUP; } if (wants_pen && !(*sample & WM97XX_PEN_DOWN)) { wm->pen_probably_down = 0; return RC_PENUP; } return RC_VALID; } /* * Read a coordinate from the WM9713 adc in polling mode. */ static int wm9713_poll_coord(struct wm97xx *wm, struct wm97xx_data *data) { u16 dig1; int timeout = 5 * delay; if (!wm->pen_probably_down) { u16 val = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD); if (!(val & WM97XX_PEN_DOWN)) return RC_PENUP; wm->pen_probably_down = 1; } /* set up digitiser */ dig1 = wm97xx_reg_read(wm, AC97_WM9713_DIG1); dig1 &= ~WM9713_ADCSEL_MASK; if (pil) dig1 |= WM9713_ADCSEL_PRES; if (wm->mach_ops && wm->mach_ops->pre_sample) wm->mach_ops->pre_sample(WM97XX_ADCSEL_X | WM97XX_ADCSEL_Y); wm97xx_reg_write(wm, AC97_WM9713_DIG1, dig1 | WM9713_POLL | WM9713_COO); /* wait 3 AC97 time slots + delay for conversion */ poll_delay(delay); data->x = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD); /* wait for POLL to go low */ while ((wm97xx_reg_read(wm, AC97_WM9713_DIG1) & WM9713_POLL) && timeout) { udelay(AC97_LINK_FRAME); timeout--; } if (timeout <= 0) { /* If PDEN is set, we can get a timeout when pen goes up */ if (is_pden(wm)) wm->pen_probably_down = 0; else dev_dbg(wm->dev, "adc sample timeout"); return RC_PENUP; } /* read back data */ data->y = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD); if (pil) data->p = wm97xx_reg_read(wm, AC97_WM97XX_DIGITISER_RD); else data->p = DEFAULT_PRESSURE; if (wm->mach_ops && wm->mach_ops->post_sample) wm->mach_ops->post_sample(WM97XX_ADCSEL_X | WM97XX_ADCSEL_Y); /* check we have correct sample */ if (!(data->x & WM97XX_ADCSEL_X) || !(data->y & WM97XX_ADCSEL_Y)) goto err; if (pil && !(data->p & WM97XX_ADCSEL_PRES)) goto err; if (!(data->x & WM97XX_PEN_DOWN) || !(data->y & WM97XX_PEN_DOWN)) { wm->pen_probably_down = 0; return RC_PENUP; } return RC_VALID; err: return 0; } /* * Sample the WM9713 touchscreen in polling mode */ static int wm9713_poll_touch(struct wm97xx *wm, struct wm97xx_data *data) { int rc; if (coord) { rc = wm9713_poll_coord(wm, data); if (rc != RC_VALID) return rc; } else { rc = wm9713_poll_sample(wm, WM97XX_ADCSEL_X | 
WM97XX_PEN_DOWN, &data->x); if (rc != RC_VALID) return rc; rc = wm9713_poll_sample(wm, WM97XX_ADCSEL_Y | WM97XX_PEN_DOWN, &data->y); if (rc != RC_VALID) return rc; if (pil) { rc = wm9713_poll_sample(wm, WM97XX_ADCSEL_PRES | WM97XX_PEN_DOWN, &data->p); if (rc != RC_VALID) return rc; } else data->p = DEFAULT_PRESSURE; } return RC_VALID; } /* * Enable WM9713 continuous mode, i.e. touch data is streamed across * an AC97 slot */ static int wm9713_acc_enable(struct wm97xx *wm, int enable) { u16 dig1, dig2, dig3; int ret = 0; dig1 = wm->dig[0]; dig2 = wm->dig[1]; dig3 = wm->dig[2]; if (enable) { /* continuous mode */ if (wm->mach_ops->acc_startup && (ret = wm->mach_ops->acc_startup(wm)) < 0) return ret; dig1 &= ~WM9713_ADCSEL_MASK; dig1 |= WM9713_CTC | WM9713_COO | WM9713_ADCSEL_X | WM9713_ADCSEL_Y; if (pil) dig1 |= WM9713_ADCSEL_PRES; dig2 &= ~(WM97XX_DELAY_MASK | WM97XX_SLT_MASK | WM97XX_CM_RATE_MASK); dig2 |= WM97XX_SLEN | WM97XX_DELAY(delay) | WM97XX_SLT(wm->acc_slot) | WM97XX_RATE(wm->acc_rate); dig3 |= WM9713_PDEN; } else { dig1 &= ~(WM9713_CTC | WM9713_COO); dig2 &= ~WM97XX_SLEN; dig3 &= ~WM9713_PDEN; if (wm->mach_ops->acc_shutdown) wm->mach_ops->acc_shutdown(wm); } wm97xx_reg_write(wm, AC97_WM9713_DIG1, dig1); wm97xx_reg_write(wm, AC97_WM9713_DIG2, dig2); wm97xx_reg_write(wm, AC97_WM9713_DIG3, dig3); return ret; } struct wm97xx_codec_drv wm9713_codec = { .id = WM9713_ID2, .name = "wm9713", .poll_sample = wm9713_poll_sample, .poll_touch = wm9713_poll_touch, .acc_enable = wm9713_acc_enable, .phy_init = wm9713_phy_init, .dig_enable = wm9713_dig_enable, .dig_restore = wm9713_dig_restore, .aux_prepare = wm9713_aux_prepare, }; EXPORT_SYMBOL_GPL(wm9713_codec); /* Module information */ MODULE_AUTHOR("Liam Girdwood <lrg@slimlogic.co.uk>"); MODULE_DESCRIPTION("WM9713 Touch Screen Driver"); MODULE_LICENSE("GPL");
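/*
 * Editor's note: a consumer sketch, not part of this file.  The wm97xx
 * core normally drives the ops table above; calling poll_touch directly
 * would look roughly like this.  The wm97xx_data field names and RC_VALID
 * come from <linux/wm97xx.h>, and masking the raw samples to 12 bits is
 * an assumption about that header's sample format.
 */
#if 0	/* example only, never compiled */
static void example_poll_once(struct wm97xx *wm)
{
	struct wm97xx_data data;

	if (wm9713_codec.poll_touch(wm, &data) == RC_VALID)
		pr_info("pen down: x=%d y=%d p=%d\n",
			data.x & 0xfff, data.y & 0xfff, data.p & 0xfff);
}
#endif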
RonGokhale/codeaurora_kernel
drivers/input/touchscreen/wm9713.c
C
gpl-2.0
12,515
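A minimal, self-contained sketch of the request/reply check used by wm9713_poll_sample() above: the digitiser echoes the selected ADC source back in the upper bits of the returned sample, so a reply can be matched against the request before the value is trusted. The constants and names below are illustrative assumptions, not the driver's actual register layout.

#include <stdio.h>

#define ADCSEL_MASK 0x7000   /* assumed position of the ADC source bits */
#define PEN_DOWN    0x8000   /* assumed pen-down flag */

/* valid only if the echoed source field equals the requested one */
static int sample_matches(unsigned int requested, unsigned int reply)
{
    return ((requested ^ reply) & ADCSEL_MASK) == 0;
}

int main(void)
{
    unsigned int req  = 0x1000 | PEN_DOWN;          /* ask for channel 1, pen down */
    unsigned int good = 0x1000 | PEN_DOWN | 0x123;  /* reply echoes channel 1 */
    unsigned int bad  = 0x2000 | PEN_DOWN | 0x123;  /* reply echoes channel 2 */

    printf("good reply valid: %d\n", sample_matches(req, good)); /* prints 1 */
    printf("bad reply valid:  %d\n", sample_matches(req, bad));  /* prints 0 */
    return 0;
}

XORing request and reply and masking the source field flags any mismatch in a single comparison, which is why the driver can cheaply discard a wrong echo and report a pen-up instead of a bogus coordinate.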
/* * arch/sh/kernel/cpu/sh5/unwind.c * * Copyright (C) 2004 Paul Mundt * Copyright (C) 2004 Richard Curnow * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/kallsyms.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <asm/page.h> #include <asm/ptrace.h> #include <asm/processor.h> #include <asm/io.h> static u8 regcache[63]; /* * Finding the previous stack frame isn't as straightforward as it is * on some other platforms. In the sh64 case, we don't have "linked" stack * frames, so we need to do a bit of work to determine the previous frame, * and in turn, the previous r14/r18 pair. * * There are generally a few cases which determine where we can find out * the r14/r18 values. In the general case, this can be determined by poking * around the prologue of the symbol PC is in (note that we absolutely must * have frame pointer support as well as the kernel symbol table mapped, * otherwise we can't even get this far). * * In other cases, such as the interrupt/exception path, we can poke around * the sp/fp. * * Notably, this entire approach is somewhat error-prone, and in the event * that the previous frame cannot be determined, that's all we can do. * Either way, this still leaves us with a more correct backtrace than what * we would be able to come up with by walking the stack (which is garbage * for anything beyond the first frame). * -- PFM. */ static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc, unsigned long *pprev_fp, unsigned long *pprev_pc, struct pt_regs *regs) { const char *sym; char namebuf[128]; unsigned long offset; unsigned long prologue = 0; unsigned long fp_displacement = 0; unsigned long fp_prev = 0; unsigned long offset_r14 = 0, offset_r18 = 0; int i, found_prologue_end = 0; sym = kallsyms_lookup(pc, NULL, &offset, NULL, namebuf); if (!sym) return -EINVAL; prologue = pc - offset; if (!prologue) return -EINVAL; /* Validate fp, to avoid risk of dereferencing a bad pointer later. Assume 128Mb since that's the amount of RAM on a Cayman. Modify when there is an SH-5 board with more. */ if ((fp < (unsigned long) phys_to_virt(__MEMORY_START)) || (fp >= (unsigned long)(phys_to_virt(__MEMORY_START)) + 128*1024*1024) || ((fp & 7) != 0)) { return -EINVAL; } /* * Depth to walk, depth is completely arbitrary. */ for (i = 0; i < 100; i++, prologue += sizeof(unsigned long)) { unsigned long op; u8 major, minor; u8 src, dest, disp; op = *(unsigned long *)prologue; major = (op >> 26) & 0x3f; src = (op >> 20) & 0x3f; minor = (op >> 16) & 0xf; disp = (op >> 10) & 0x3f; dest = (op >> 4) & 0x3f; /* * Stack frame creation happens in a number of ways..
in the * general case when the stack frame is less than 511 bytes, * it's generally created by an addi or addi.l: * * addi/addi.l r15, -FRAME_SIZE, r15 * * in the event that the frame size is bigger than this, it's * typically created using a movi/sub pair as follows: * * movi FRAME_SIZE, rX * sub r15, rX, r15 */ switch (major) { case (0x00 >> 2): switch (minor) { case 0x8: /* add.l */ case 0x9: /* add */ /* Look for r15, r63, r14 */ if (src == 15 && disp == 63 && dest == 14) found_prologue_end = 1; break; case 0xa: /* sub.l */ case 0xb: /* sub */ if (src != 15 || dest != 15) continue; fp_displacement -= regcache[disp]; fp_prev = fp - fp_displacement; break; } break; case (0xa8 >> 2): /* st.l */ if (src != 15) continue; switch (dest) { case 14: if (offset_r14 || fp_displacement == 0) continue; offset_r14 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54); offset_r14 *= sizeof(unsigned long); offset_r14 += fp_displacement; break; case 18: if (offset_r18 || fp_displacement == 0) continue; offset_r18 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54); offset_r18 *= sizeof(unsigned long); offset_r18 += fp_displacement; break; } break; case (0xcc >> 2): /* movi */ if (dest >= 63) { printk(KERN_NOTICE "%s: Invalid dest reg %d " "specified in movi handler. Failed " "opcode was 0x%lx: ", __func__, dest, op); continue; } /* Sign extend */ regcache[dest] = ((((s64)(u64)op >> 10) & 0xffff) << 54) >> 54; break; case (0xd0 >> 2): /* addi */ case (0xd4 >> 2): /* addi.l */ /* Look for r15, -FRAME_SIZE, r15 */ if (src != 15 || dest != 15) continue; /* Sign extended frame size.. */ fp_displacement += (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54); fp_prev = fp - fp_displacement; break; } if (found_prologue_end && offset_r14 && (offset_r18 || *pprev_pc) && fp_prev) break; } if (offset_r14 == 0 || fp_prev == 0) { if (!offset_r14) pr_debug("Unable to find r14 offset\n"); if (!fp_prev) pr_debug("Unable to find previous fp\n"); return -EINVAL; } /* For innermost leaf function, there might not be a offset_r18 */ if (!*pprev_pc && (offset_r18 == 0)) return -EINVAL; *pprev_fp = *(unsigned long *)(fp_prev + offset_r14); if (offset_r18) *pprev_pc = *(unsigned long *)(fp_prev + offset_r18); *pprev_pc &= ~1; return 0; } /* Don't put this on the stack since we'll want to call sh64_unwind * when we're close to underflowing the stack anyway. 
*/ static struct pt_regs here_regs; extern const char syscall_ret; extern const char ret_from_syscall; extern const char ret_from_exception; extern const char ret_from_irq; static void sh64_unwind_inner(struct pt_regs *regs); static void unwind_nested (unsigned long pc, unsigned long fp) { if ((fp >= __MEMORY_START) && ((fp & 7) == 0)) { sh64_unwind_inner((struct pt_regs *) fp); } } static void sh64_unwind_inner(struct pt_regs *regs) { unsigned long pc, fp; int ofs = 0; int first_pass; pc = regs->pc & ~1; fp = regs->regs[14]; first_pass = 1; for (;;) { int cond; unsigned long next_fp, next_pc; if (pc == ((unsigned long) &syscall_ret & ~1)) { printk("SYSCALL\n"); unwind_nested(pc,fp); return; } if (pc == ((unsigned long) &ret_from_syscall & ~1)) { printk("SYSCALL (PREEMPTED)\n"); unwind_nested(pc,fp); return; } /* In this case, the PC is discovered by lookup_prev_stack_frame but it has 4 taken off it to look like the 'caller' */ if (pc == ((unsigned long) &ret_from_exception & ~1)) { printk("EXCEPTION\n"); unwind_nested(pc,fp); return; } if (pc == ((unsigned long) &ret_from_irq & ~1)) { printk("IRQ\n"); unwind_nested(pc,fp); return; } cond = ((pc >= __MEMORY_START) && (fp >= __MEMORY_START) && ((pc & 3) == 0) && ((fp & 7) == 0)); pc -= ofs; printk("[<%08lx>] ", pc); print_symbol("%s\n", pc); if (first_pass) { /* If the innermost frame is a leaf function, it's * possible that r18 is never saved out to the stack. */ next_pc = regs->regs[18]; } else { next_pc = 0; } if (lookup_prev_stack_frame(fp, pc, &next_fp, &next_pc, regs) == 0) { ofs = sizeof(unsigned long); pc = next_pc & ~1; fp = next_fp; } else { printk("Unable to lookup previous stack frame\n"); break; } first_pass = 0; } printk("\n"); } void sh64_unwind(struct pt_regs *regs) { if (!regs) { /* * Fetch current regs if we have no other saved state to back * trace from. */ regs = &here_regs; __asm__ __volatile__ ("ori r14, 0, %0" : "=r" (regs->regs[14])); __asm__ __volatile__ ("ori r15, 0, %0" : "=r" (regs->regs[15])); __asm__ __volatile__ ("ori r18, 0, %0" : "=r" (regs->regs[18])); __asm__ __volatile__ ("gettr tr0, %0" : "=r" (regs->tregs[0])); __asm__ __volatile__ ("gettr tr1, %0" : "=r" (regs->tregs[1])); __asm__ __volatile__ ("gettr tr2, %0" : "=r" (regs->tregs[2])); __asm__ __volatile__ ("gettr tr3, %0" : "=r" (regs->tregs[3])); __asm__ __volatile__ ("gettr tr4, %0" : "=r" (regs->tregs[4])); __asm__ __volatile__ ("gettr tr5, %0" : "=r" (regs->tregs[5])); __asm__ __volatile__ ("gettr tr6, %0" : "=r" (regs->tregs[6])); __asm__ __volatile__ ("gettr tr7, %0" : "=r" (regs->tregs[7])); __asm__ __volatile__ ( "pta 0f, tr0\n\t" "blink tr0, %0\n\t" "0: nop" : "=r" (regs->pc) ); } printk("\nCall Trace:\n"); sh64_unwind_inner(regs); }
samurai0000000/linux
arch/sh/kernel/cpu/sh5/unwind.c
C
gpl-2.0
8,532
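The prologue scanner in lookup_prev_stack_frame() above repeatedly decodes signed 10-bit displacement fields with a shift-up/arithmetic-shift-down pattern. Below is a standalone sketch of that sign-extension trick; the field position (bits 10..19) matches the opcode layout used above, while the encodings in main() are made up for illustration. Like the kernel code, it relies on arithmetic right shift of negative signed values, which mainstream compilers provide.

#include <stdio.h>
#include <stdint.h>

/* sign-extend the 10-bit field found at bits 10..19 of an opcode */
static int64_t sext10(uint64_t op)
{
    uint64_t field = (op >> 10) & 0x3ff;   /* isolate the 10-bit field */
    return (int64_t)(field << 54) >> 54;   /* push bit 9 up to bit 63, shift back */
}

int main(void)
{
    uint64_t neg = ((uint64_t)(-32 & 0x3ff)) << 10;  /* encode -32 */
    uint64_t pos = ((uint64_t)31) << 10;             /* encode +31 */

    printf("%lld\n", (long long)sext10(neg));  /* prints -32 */
    printf("%lld\n", (long long)sext10(pos));  /* prints 31 */
    return 0;
}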
/* * llc_c_ac.c - actions performed during connection state transition. * * Description: * Functions in this module are implementations of connection component actions. * Details of actions can be found in the IEEE-802.2 standard document. * All functions take one connection and one event as input arguments. All of * them return 0 on success and 1 otherwise. * * Copyright (c) 1997 by Procom Technology, Inc. * 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * This program can be redistributed or modified under the terms of the * GNU General Public License as published by the Free Software Foundation. * This program is distributed without any warranty or implied warranty * of merchantability or fitness for a particular purpose. * * See the GNU General Public License for more details. */ #include <linux/netdevice.h> #include <linux/slab.h> #include <net/llc_conn.h> #include <net/llc_sap.h> #include <net/sock.h> #include <net/llc_c_ev.h> #include <net/llc_c_ac.h> #include <net/llc_c_st.h> #include <net/llc_pdu.h> #include <net/llc.h> static int llc_conn_ac_inc_vs_by_1(struct sock *sk, struct sk_buff *skb); static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb); static int llc_conn_ac_data_confirm(struct sock *sk, struct sk_buff *ev); static int llc_conn_ac_inc_npta_value(struct sock *sk, struct sk_buff *skb); static int llc_conn_ac_send_rr_rsp_f_set_ackpf(struct sock *sk, struct sk_buff *skb); static int llc_conn_ac_set_p_flag_1(struct sock *sk, struct sk_buff *skb); #define INCORRECT 0 int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); if (llc->remote_busy_flag) { u8 nr; struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc->remote_busy_flag = 0; del_timer(&llc->busy_state_timer.timer); nr = LLC_I_GET_NR(pdu); llc_conn_resend_i_pdu_as_cmd(sk, nr, 0); } return 0; } int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb) { struct llc_conn_state_ev *ev = llc_conn_ev(skb); ev->ind_prim = LLC_CONN_PRIM; return 0; } int llc_conn_ac_conn_confirm(struct sock *sk, struct sk_buff *skb) { struct llc_conn_state_ev *ev = llc_conn_ev(skb); ev->cfm_prim = LLC_CONN_PRIM; return 0; } static int llc_conn_ac_data_confirm(struct sock *sk, struct sk_buff *skb) { struct llc_conn_state_ev *ev = llc_conn_ev(skb); ev->cfm_prim = LLC_DATA_PRIM; return 0; } int llc_conn_ac_data_ind(struct sock *sk, struct sk_buff *skb) { llc_conn_rtn_pdu(sk, skb); return 0; } int llc_conn_ac_disc_ind(struct sock *sk, struct sk_buff *skb) { struct llc_conn_state_ev *ev = llc_conn_ev(skb); u8 reason = 0; int rc = 0; if (ev->type == LLC_CONN_EV_TYPE_PDU) { struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); if (LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_RSP(pdu) == LLC_2_PDU_RSP_DM) reason = LLC_DISC_REASON_RX_DM_RSP_PDU; else if (LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_CMD(pdu) == LLC_2_PDU_CMD_DISC) reason = LLC_DISC_REASON_RX_DISC_CMD_PDU; } else if (ev->type == LLC_CONN_EV_TYPE_ACK_TMR) reason = LLC_DISC_REASON_ACK_TMR_EXP; else rc = -EINVAL; if (!rc) { ev->reason = reason; ev->ind_prim = LLC_DISC_PRIM; } return rc; } int llc_conn_ac_disc_confirm(struct sock *sk, struct sk_buff *skb) { struct llc_conn_state_ev *ev = llc_conn_ev(skb); ev->reason = ev->status; ev->cfm_prim = LLC_DISC_PRIM; return 0; } int llc_conn_ac_rst_ind(struct sock *sk, struct sk_buff *skb) { u8 reason = 0; int rc = 1; struct llc_conn_state_ev *ev = llc_conn_ev(skb); struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb); struct llc_sock *llc = llc_sk(sk); switch
(ev->type) { case LLC_CONN_EV_TYPE_PDU: if (LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_RSP(pdu) == LLC_2_PDU_RSP_FRMR) { reason = LLC_RESET_REASON_LOCAL; rc = 0; } else if (LLC_PDU_IS_CMD(pdu) && LLC_PDU_TYPE_IS_U(pdu) && LLC_U_PDU_CMD(pdu) == LLC_2_PDU_CMD_SABME) { reason = LLC_RESET_REASON_REMOTE; rc = 0; } break; case LLC_CONN_EV_TYPE_ACK_TMR: case LLC_CONN_EV_TYPE_P_TMR: case LLC_CONN_EV_TYPE_REJ_TMR: case LLC_CONN_EV_TYPE_BUSY_TMR: if (llc->retry_count > llc->n2) { reason = LLC_RESET_REASON_LOCAL; rc = 0; } break; } if (!rc) { ev->reason = reason; ev->ind_prim = LLC_RESET_PRIM; } return rc; } int llc_conn_ac_rst_confirm(struct sock *sk, struct sk_buff *skb) { struct llc_conn_state_ev *ev = llc_conn_ev(skb); ev->reason = 0; ev->cfm_prim = LLC_RESET_PRIM; return 0; } int llc_conn_ac_clear_remote_busy_if_f_eq_1(struct sock *sk, struct sk_buff *skb) { struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); if (LLC_PDU_IS_RSP(pdu) && LLC_PDU_TYPE_IS_I(pdu) && LLC_I_PF_IS_1(pdu) && llc_sk(sk)->ack_pf) llc_conn_ac_clear_remote_busy(sk, skb); return 0; } int llc_conn_ac_stop_rej_tmr_if_data_flag_eq_2(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); if (llc->data_flag == 2) del_timer(&llc->rej_sent_timer.timer); return 0; } int llc_conn_ac_send_disc_cmd_p_set_x(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_disc_cmd(nskb, 1); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); llc_conn_ac_set_p_flag_1(sk, skb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_dm_rsp_f_set_p(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); if (nskb) { struct llc_sap *sap = llc->sap; u8 f_bit; llc_pdu_decode_pf_bit(skb, &f_bit); llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_dm_rsp(nskb, f_bit); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_dm_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_dm_rsp(nskb, 1); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_frmr_rsp_f_set_x(struct sock *sk, struct sk_buff *skb) { u8 f_bit; int rc = -ENOBUFS; struct sk_buff *nskb; struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); struct llc_sock *llc = llc_sk(sk); llc->rx_pdu_hdr = *((u32 *)pdu); if (LLC_PDU_IS_CMD(pdu)) llc_pdu_decode_pf_bit(skb, &f_bit); else f_bit = 0; nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, sizeof(struct llc_frmr_info)); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); 
llc_pdu_init_as_frmr_rsp(nskb, pdu, f_bit, llc->vS, llc->vR, INCORRECT); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_resend_frmr_rsp_f_set_0(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, sizeof(struct llc_frmr_info)); if (nskb) { struct llc_sap *sap = llc->sap; struct llc_pdu_sn *pdu = (struct llc_pdu_sn *)&llc->rx_pdu_hdr; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_frmr_rsp(nskb, pdu, 0, llc->vS, llc->vR, INCORRECT); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_resend_frmr_rsp_f_set_p(struct sock *sk, struct sk_buff *skb) { u8 f_bit; int rc = -ENOBUFS; struct sk_buff *nskb; struct llc_sock *llc = llc_sk(sk); llc_pdu_decode_pf_bit(skb, &f_bit); nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, sizeof(struct llc_frmr_info)); if (nskb) { struct llc_sap *sap = llc->sap; struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_frmr_rsp(nskb, pdu, f_bit, llc->vS, llc->vR, INCORRECT); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) { int rc; struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; llc_pdu_header_init(skb, LLC_PDU_TYPE_I, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_i_cmd(skb, 1, llc->vS, llc->vR); rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); if (likely(!rc)) { llc_conn_send_pdu(sk, skb); llc_conn_ac_inc_vs_by_1(sk, skb); } return rc; } static int llc_conn_ac_send_i_cmd_p_set_0(struct sock *sk, struct sk_buff *skb) { int rc; struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; llc_pdu_header_init(skb, LLC_PDU_TYPE_I, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR); rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); if (likely(!rc)) { llc_conn_send_pdu(sk, skb); llc_conn_ac_inc_vs_by_1(sk, skb); } return rc; } int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) { int rc; struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; llc_pdu_header_init(skb, LLC_PDU_TYPE_I, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR); rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); if (likely(!rc)) { llc_conn_send_pdu(sk, skb); llc_conn_ac_inc_vs_by_1(sk, skb); } return 0; } int llc_conn_ac_resend_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) { struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); u8 nr = LLC_I_GET_NR(pdu); llc_conn_resend_i_pdu_as_cmd(sk, nr, 0); return 0; } int llc_conn_ac_resend_i_xxx_x_set_0_or_send_rr(struct sock *sk, struct sk_buff *skb) { u8 nr; struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, 
sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rr_rsp(nskb, 0, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (likely(!rc)) llc_conn_send_pdu(sk, nskb); else kfree_skb(skb); } if (rc) { nr = LLC_I_GET_NR(pdu); rc = 0; llc_conn_resend_i_pdu_as_cmd(sk, nr, 0); } return rc; } int llc_conn_ac_resend_i_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) { struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); u8 nr = LLC_I_GET_NR(pdu); llc_conn_resend_i_pdu_as_rsp(sk, nr, 1); return 0; } int llc_conn_ac_send_rej_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_rej_cmd(nskb, 1, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_rej_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rej_rsp(nskb, 1, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_rej_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rej_rsp(nskb, 0, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_rnr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_rnr_cmd(nskb, 1, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_rnr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rnr_rsp(nskb, 1, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = 
llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rnr_rsp(nskb, 0, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_set_remote_busy(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); if (!llc->remote_busy_flag) { llc->remote_busy_flag = 1; mod_timer(&llc->busy_state_timer.timer, jiffies + llc->busy_state_timer.expire); } return 0; } int llc_conn_ac_opt_send_rnr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rnr_rsp(nskb, 0, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_rr_cmd_p_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_rr_cmd(nskb, 1, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_rr_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; u8 f_bit = 1; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rr_rsp(nskb, f_bit, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_ack_rsp_f_set_1(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rr_rsp(nskb, 1, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_rr_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rr_rsp(nskb, 0, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_ack_xxx_x_set_0(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, 
llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rr_rsp(nskb, 0, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } void llc_conn_set_p_flag(struct sock *sk, u8 value) { int state_changed = llc_sk(sk)->p_flag && !value; llc_sk(sk)->p_flag = value; if (state_changed) sk->sk_state_change(sk); } int llc_conn_ac_send_sabme_cmd_p_set_x(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); if (nskb) { struct llc_sap *sap = llc->sap; u8 *dmac = llc->daddr.mac; if (llc->dev->flags & IFF_LOOPBACK) dmac = llc->dev->dev_addr; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_CMD); llc_pdu_init_as_sabme_cmd(nskb, 1); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, dmac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); llc_conn_set_p_flag(sk, 1); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_send_ua_rsp_f_set_p(struct sock *sk, struct sk_buff *skb) { u8 f_bit; int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_U, 0); llc_pdu_decode_pf_bit(skb, &f_bit); if (nskb) { struct llc_sap *sap = llc->sap; nskb->dev = llc->dev; llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_ua_rsp(nskb, f_bit); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } int llc_conn_ac_set_s_flag_0(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->s_flag = 0; return 0; } int llc_conn_ac_set_s_flag_1(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->s_flag = 1; return 0; } int llc_conn_ac_start_p_timer(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); llc_conn_set_p_flag(sk, 1); mod_timer(&llc->pf_cycle_timer.timer, jiffies + llc->pf_cycle_timer.expire); return 0; } /** * llc_conn_ac_send_ack_if_needed - check if ack is needed * @sk: current connection structure * @skb: current event * * Checks the number of received PDUs which have not been acknowledged yet. * If that number reaches "npta" (Number of PDUs To Acknowledge), this action * sends an RR response as acknowledgement for them. Returns 0 for * success, 1 otherwise. */ int llc_conn_ac_send_ack_if_needed(struct sock *sk, struct sk_buff *skb) { u8 pf_bit; struct llc_sock *llc = llc_sk(sk); llc_pdu_decode_pf_bit(skb, &pf_bit); llc->ack_pf |= pf_bit & 1; if (!llc->ack_must_be_send) { llc->first_pdu_Ns = llc->vR; llc->ack_must_be_send = 1; llc->ack_pf = pf_bit & 1; } if (((llc->vR - llc->first_pdu_Ns + 1 + LLC_2_SEQ_NBR_MODULO) % LLC_2_SEQ_NBR_MODULO) >= llc->npta) { llc_conn_ac_send_rr_rsp_f_set_ackpf(sk, skb); llc->ack_must_be_send = 0; llc->ack_pf = 0; llc_conn_ac_inc_npta_value(sk, skb); } return 0; } /** * llc_conn_ac_rst_sendack_flag - resets ack_must_be_send flag * @sk: current connection structure * @skb: current event * * This action resets the ack_must_be_send flag of the given connection; this * flag indicates whether there is any PDU which has not been acknowledged yet. * Returns 0 for success, 1 otherwise.
*/ int llc_conn_ac_rst_sendack_flag(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->ack_must_be_send = llc_sk(sk)->ack_pf = 0; return 0; } /** * llc_conn_ac_send_i_rsp_f_set_ackpf - acknowledge received PDUs * @sk: current connection structure * @skb: current event * * Sends an I response PDU with the f-bit set to the ack_pf flag as * acknowledgement for all received PDUs which have not been acknowledged yet. * The ack_pf flag is set to one if a PDU with the p-bit set to one has been * received. Returns 0 for success, 1 otherwise. */ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk, struct sk_buff *skb) { int rc; struct llc_sock *llc = llc_sk(sk); struct llc_sap *sap = llc->sap; llc_pdu_header_init(skb, LLC_PDU_TYPE_I, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR); rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac); if (likely(!rc)) { llc_conn_send_pdu(sk, skb); llc_conn_ac_inc_vs_by_1(sk, skb); } return rc; } /** * llc_conn_ac_send_i_as_ack - sends an I-format PDU to acknowledge rx PDUs * @sk: current connection structure. * @skb: current event. * * This action sends an I-format PDU as acknowledgement for any received PDUs * which have not been acknowledged yet. Using this action decreases the * number of acknowledgements; this technique is called piggybacking. * Returns 0 for success, 1 otherwise. */ int llc_conn_ac_send_i_as_ack(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); if (llc->ack_must_be_send) { llc_conn_ac_send_i_rsp_f_set_ackpf(sk, skb); llc->ack_must_be_send = 0; llc->ack_pf = 0; } else llc_conn_ac_send_i_cmd_p_set_0(sk, skb); return 0; } /** * llc_conn_ac_send_rr_rsp_f_set_ackpf - ack all rx PDUs not yet acked * @sk: current connection structure. * @skb: current event. * * This action sends an RR response with the f-bit set to the ack_pf flag as * acknowledgement for any received PDUs which have not been acknowledged yet. * The ack_pf flag indicates whether a PDU has been received with the p-bit * set to one. Returns 0 for success, 1 otherwise. */ static int llc_conn_ac_send_rr_rsp_f_set_ackpf(struct sock *sk, struct sk_buff *skb) { int rc = -ENOBUFS; struct llc_sock *llc = llc_sk(sk); struct sk_buff *nskb = llc_alloc_frame(sk, llc->dev, LLC_PDU_TYPE_S, 0); if (nskb) { struct llc_sap *sap = llc->sap; llc_pdu_header_init(nskb, LLC_PDU_TYPE_S, sap->laddr.lsap, llc->daddr.lsap, LLC_PDU_RSP); llc_pdu_init_as_rr_rsp(nskb, llc->ack_pf, llc->vR); rc = llc_mac_hdr_init(nskb, llc->dev->dev_addr, llc->daddr.mac); if (unlikely(rc)) goto free; llc_conn_send_pdu(sk, nskb); } out: return rc; free: kfree_skb(nskb); goto out; } /** * llc_conn_ac_inc_npta_value - tries to make value of npta greater * @sk: current connection structure. * @skb: current event. * * After this action has been called "inc_cntr" times, "npta" increases by * one. This action tries to make the value of "npta" as large as possible, * since the number of acknowledgements decreases as "npta" increases. * Returns 0 for success, 1 otherwise. */ static int llc_conn_ac_inc_npta_value(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); if (!llc->inc_cntr) { llc->dec_step = 0; llc->dec_cntr = llc->inc_cntr = 2; ++llc->npta; if (llc->npta > (u8) ~LLC_2_SEQ_NBR_MODULO) llc->npta = (u8) ~LLC_2_SEQ_NBR_MODULO; } else --llc->inc_cntr; return 0; } /** * llc_conn_ac_adjust_npta_by_rr - decreases "npta" by one * @sk: current connection structure. * @skb: current event.
* * After receiving an RR command "dec_cntr" times, this action decreases * "npta" by one. Returns 0 for success, 1 otherwise. */ int llc_conn_ac_adjust_npta_by_rr(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); if (!llc->connect_step && !llc->remote_busy_flag) { if (!llc->dec_step) { if (!llc->dec_cntr) { llc->inc_cntr = llc->dec_cntr = 2; if (llc->npta > 0) llc->npta = llc->npta - 1; } else llc->dec_cntr -= 1; } } else llc->connect_step = 0; return 0; } /** * llc_conn_ac_adjust_npta_by_rnr - decreases "npta" by one * @sk: current connection structure. * @skb: current event. * * After receiving an RNR command "dec_cntr" times, this action decreases * "npta" by one. Returns 0 for success, 1 otherwise. */ int llc_conn_ac_adjust_npta_by_rnr(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); if (llc->remote_busy_flag) if (!llc->dec_step) { if (!llc->dec_cntr) { llc->inc_cntr = llc->dec_cntr = 2; if (llc->npta > 0) --llc->npta; } else --llc->dec_cntr; } return 0; } /** * llc_conn_ac_dec_tx_win_size - decreases tx window size * @sk: current connection structure. * @skb: current event. * * After receiving a REJ command or response, the transmit window size is * decreased by the number of PDUs which are still outstanding. Returns 0 for * success, 1 otherwise. */ int llc_conn_ac_dec_tx_win_size(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); u8 unacked_pdu = skb_queue_len(&llc->pdu_unack_q); if (llc->k - unacked_pdu < 1) llc->k = 1; else llc->k -= unacked_pdu; return 0; } /** * llc_conn_ac_inc_tx_win_size - increases tx window size by one * @sk: current connection structure. * @skb: current event. * * After receiving an RR response with the f-bit set to one, the transmit * window size is increased by one. Returns 0 for success, 1 otherwise.
*/ int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); llc->k += 1; if (llc->k > (u8) ~LLC_2_SEQ_NBR_MODULO) llc->k = (u8) ~LLC_2_SEQ_NBR_MODULO; return 0; } int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); del_timer(&llc->pf_cycle_timer.timer); del_timer(&llc->ack_timer.timer); del_timer(&llc->rej_sent_timer.timer); del_timer(&llc->busy_state_timer.timer); llc->ack_must_be_send = 0; llc->ack_pf = 0; return 0; } int llc_conn_ac_stop_other_timers(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); del_timer(&llc->rej_sent_timer.timer); del_timer(&llc->pf_cycle_timer.timer); del_timer(&llc->busy_state_timer.timer); llc->ack_must_be_send = 0; llc->ack_pf = 0; return 0; } int llc_conn_ac_start_ack_timer(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); mod_timer(&llc->ack_timer.timer, jiffies + llc->ack_timer.expire); return 0; } int llc_conn_ac_start_rej_timer(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); mod_timer(&llc->rej_sent_timer.timer, jiffies + llc->rej_sent_timer.expire); return 0; } int llc_conn_ac_start_ack_tmr_if_not_running(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); if (!timer_pending(&llc->ack_timer.timer)) mod_timer(&llc->ack_timer.timer, jiffies + llc->ack_timer.expire); return 0; } int llc_conn_ac_stop_ack_timer(struct sock *sk, struct sk_buff *skb) { del_timer(&llc_sk(sk)->ack_timer.timer); return 0; } int llc_conn_ac_stop_p_timer(struct sock *sk, struct sk_buff *skb) { struct llc_sock *llc = llc_sk(sk); del_timer(&llc->pf_cycle_timer.timer); llc_conn_set_p_flag(sk, 0); return 0; } int llc_conn_ac_stop_rej_timer(struct sock *sk, struct sk_buff *skb) { del_timer(&llc_sk(sk)->rej_sent_timer.timer); return 0; } int llc_conn_ac_upd_nr_received(struct sock *sk, struct sk_buff *skb) { int acked; u16 unacked = 0; struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); struct llc_sock *llc = llc_sk(sk); llc->last_nr = PDU_SUPV_GET_Nr(pdu); acked = llc_conn_remove_acked_pdus(sk, llc->last_nr, &unacked); /* On loopback we don't queue I frames in unack_pdu_q queue. */ if (acked > 0 || (llc->dev->flags & IFF_LOOPBACK)) { llc->retry_count = 0; del_timer(&llc->ack_timer.timer); if (llc->failed_data_req) { /* earlier, we did not accept data from the upper layer * (tx_window full or unacceptable state). Now we * can send data and must inform the upper layer.
*/ llc->failed_data_req = 0; llc_conn_ac_data_confirm(sk, skb); } if (unacked) mod_timer(&llc->ack_timer.timer, jiffies + llc->ack_timer.expire); } else if (llc->failed_data_req) { u8 f_bit; llc_pdu_decode_pf_bit(skb, &f_bit); if (f_bit == 1) { llc->failed_data_req = 0; llc_conn_ac_data_confirm(sk, skb); } } return 0; } int llc_conn_ac_upd_p_flag(struct sock *sk, struct sk_buff *skb) { struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb); if (LLC_PDU_IS_RSP(pdu)) { u8 f_bit; llc_pdu_decode_pf_bit(skb, &f_bit); if (f_bit) { llc_conn_set_p_flag(sk, 0); llc_conn_ac_stop_p_timer(sk, skb); } } return 0; } int llc_conn_ac_set_data_flag_2(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->data_flag = 2; return 0; } int llc_conn_ac_set_data_flag_0(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->data_flag = 0; return 0; } int llc_conn_ac_set_data_flag_1(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->data_flag = 1; return 0; } int llc_conn_ac_set_data_flag_1_if_data_flag_eq_0(struct sock *sk, struct sk_buff *skb) { if (!llc_sk(sk)->data_flag) llc_sk(sk)->data_flag = 1; return 0; } int llc_conn_ac_set_p_flag_0(struct sock *sk, struct sk_buff *skb) { llc_conn_set_p_flag(sk, 0); return 0; } static int llc_conn_ac_set_p_flag_1(struct sock *sk, struct sk_buff *skb) { llc_conn_set_p_flag(sk, 1); return 0; } int llc_conn_ac_set_remote_busy_0(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->remote_busy_flag = 0; return 0; } int llc_conn_ac_set_cause_flag_0(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->cause_flag = 0; return 0; } int llc_conn_ac_set_cause_flag_1(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->cause_flag = 1; return 0; } int llc_conn_ac_set_retry_cnt_0(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->retry_count = 0; return 0; } int llc_conn_ac_inc_retry_cnt_by_1(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->retry_count++; return 0; } int llc_conn_ac_set_vr_0(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->vR = 0; return 0; } int llc_conn_ac_inc_vr_by_1(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->vR = PDU_GET_NEXT_Vr(llc_sk(sk)->vR); return 0; } int llc_conn_ac_set_vs_0(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->vS = 0; return 0; } int llc_conn_ac_set_vs_nr(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->vS = llc_sk(sk)->last_nr; return 0; } static int llc_conn_ac_inc_vs_by_1(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->vS = (llc_sk(sk)->vS + 1) % LLC_2_SEQ_NBR_MODULO; return 0; } static void llc_conn_tmr_common_cb(unsigned long timeout_data, u8 type) { struct sock *sk = (struct sock *)timeout_data; struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC); bh_lock_sock(sk); if (skb) { struct llc_conn_state_ev *ev = llc_conn_ev(skb); skb_set_owner_r(skb, sk); ev->type = type; llc_process_tmr_ev(sk, skb); } bh_unlock_sock(sk); } void llc_conn_pf_cycle_tmr_cb(unsigned long timeout_data) { llc_conn_tmr_common_cb(timeout_data, LLC_CONN_EV_TYPE_P_TMR); } void llc_conn_busy_tmr_cb(unsigned long timeout_data) { llc_conn_tmr_common_cb(timeout_data, LLC_CONN_EV_TYPE_BUSY_TMR); } void llc_conn_ack_tmr_cb(unsigned long timeout_data) { llc_conn_tmr_common_cb(timeout_data, LLC_CONN_EV_TYPE_ACK_TMR); } void llc_conn_rej_tmr_cb(unsigned long timeout_data) { llc_conn_tmr_common_cb(timeout_data, LLC_CONN_EV_TYPE_REJ_TMR); } int llc_conn_ac_rst_vs(struct sock *sk, struct sk_buff *skb) { llc_sk(sk)->X = llc_sk(sk)->vS; llc_conn_ac_set_vs_nr(sk, skb); return 0; } int llc_conn_ac_upd_vs(struct sock *sk, struct sk_buff *skb) { struct llc_pdu_sn *pdu = 
llc_pdu_sn_hdr(skb); u8 nr = PDU_SUPV_GET_Nr(pdu); if (llc_circular_between(llc_sk(sk)->vS, nr, llc_sk(sk)->X)) llc_conn_ac_set_vs_nr(sk, skb); return 0; } /* * Non-standard actions; these are not contained in the IEEE specification; * for our own use */ /** * llc_conn_disc - removes connection from SAP list and frees it * @sk: closed connection * @skb: occurred event */ int llc_conn_disc(struct sock *sk, struct sk_buff *skb) { /* FIXME: this thing seems to want to die */ return 0; } /** * llc_conn_reset - resets connection * @sk : resetting connection. * @skb: occurred event. * * Stop all timers, empty all queues and reset all flags. */ int llc_conn_reset(struct sock *sk, struct sk_buff *skb) { llc_sk_reset(sk); return 0; } /** * llc_circular_between - determines whether b is between a and c * @a: lower bound * @b: element to test for being between a and c * @c: upper bound * * This function determines whether b is between a and c (for example, * 0 is between 127 and 1). Returns 1 if b is between a and c, 0 * otherwise. */ u8 llc_circular_between(u8 a, u8 b, u8 c) { b = b - a; c = c - a; return b <= c; } /** * llc_process_tmr_ev - timer backend * @sk: active connection * @skb: occurred event * * This function is called from timer callback functions. When the connection * is busy (while sending a data frame), the timer expiration event must be * queued. Otherwise this event can be sent to the connection state machine. * Queued events will be processed by the llc_backlog_rcv function after the * data frame has been sent. */ static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb) { if (llc_sk(sk)->state == LLC_CONN_OUT_OF_SVC) { printk(KERN_WARNING "%s: timer called on closed connection\n", __func__); kfree_skb(skb); } else { if (!sock_owned_by_user(sk)) llc_conn_state_process(sk, skb); else { llc_set_backlog_type(skb, LLC_EVENT); __sk_add_backlog(sk, skb); } } }
vakkov/kernel-adaptation-n900
net/llc/llc_c_ac.c
C
gpl-2.0
36,292
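llc_circular_between() above implements a modulo-256 window test: rebasing both the element and the upper bound to the lower bound turns the circular comparison into a plain unsigned one. The self-contained sketch below mirrors the kernel function's logic on unsigned bytes and demonstrates the example from its comment.

#include <stdio.h>

typedef unsigned char u8;

/* same logic as llc_circular_between(): u8 subtraction wraps modulo 256 */
static u8 circular_between(u8 a, u8 b, u8 c)
{
    b = b - a;
    c = c - a;
    return b <= c;
}

int main(void)
{
    printf("%d\n", circular_between(127, 0, 1)); /* prints 1: 0 lies between 127 and 1 */
    printf("%d\n", circular_between(1, 0, 127)); /* prints 0: 0 does not */
    return 0;
}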
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * KVM/MIPS: MIPS specific KVM APIs * * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. * Authors: Sanjay Lal <sanjayl@kymasys.com> */ #include <linux/errno.h> #include <linux/err.h> #include <linux/kdebug.h> #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/fs.h> #include <linux/bootmem.h> #include <asm/fpu.h> #include <asm/page.h> #include <asm/cacheflush.h> #include <asm/mmu_context.h> #include <asm/pgtable.h> #include <linux/kvm_host.h> #include "interrupt.h" #include "commpage.h" #define CREATE_TRACE_POINTS #include "trace.h" #ifndef VECTORSPACING #define VECTORSPACING 0x100 /* for EI/VI mode */ #endif #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x) struct kvm_stats_debugfs_item debugfs_entries[] = { { "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU }, { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU }, { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU }, { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU }, { "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU }, { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU }, { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU }, { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU }, { "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU }, { "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU }, { "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU }, { "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU }, { "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU }, { "trap_inst", VCPU_STAT(trap_inst_exits), KVM_STAT_VCPU }, { "msa_fpe", VCPU_STAT(msa_fpe_exits), KVM_STAT_VCPU }, { "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU }, { "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU }, { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU }, { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU }, { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU }, { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU }, {NULL} }; static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu) { int i; for_each_possible_cpu(i) { vcpu->arch.guest_kernel_asid[i] = 0; vcpu->arch.guest_user_asid[i] = 0; } return 0; } /* * XXXKYMA: We are simulating a processor that has the WII bit set in * Config7, so we are "runnable" if interrupts are pending */ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) { return !!(vcpu->arch.pending_exceptions); } int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) { return 1; } int kvm_arch_hardware_enable(void) { return 0; } int kvm_arch_hardware_setup(void) { return 0; } void kvm_arch_check_processor_compat(void *rtn) { *(int *)rtn = 0; } static void kvm_mips_init_tlbs(struct kvm *kvm) { unsigned long wired; /* * Add a wired entry to the TLB, it is used to map the commpage to * the Guest kernel */ wired = read_c0_wired(); write_c0_wired(wired + 1); mtc0_tlbw_hazard(); kvm->arch.commpage_tlb = wired; kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(), kvm->arch.commpage_tlb); } static void kvm_mips_init_vm_percpu(void *arg) { struct kvm *kvm = (struct kvm *)arg; kvm_mips_init_tlbs(kvm); kvm_mips_callbacks->vm_init(kvm); } int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) { if (atomic_inc_return(&kvm_mips_instance) == 1) { kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n", __func__);
on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1); } return 0; } void kvm_mips_free_vcpus(struct kvm *kvm) { unsigned int i; struct kvm_vcpu *vcpu; /* Put the pages we reserved for the guest pmap */ for (i = 0; i < kvm->arch.guest_pmap_npages; i++) { if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE) kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]); } kfree(kvm->arch.guest_pmap); kvm_for_each_vcpu(i, vcpu, kvm) { kvm_arch_vcpu_free(vcpu); } mutex_lock(&kvm->lock); for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) kvm->vcpus[i] = NULL; atomic_set(&kvm->online_vcpus, 0); mutex_unlock(&kvm->lock); } static void kvm_mips_uninit_tlbs(void *arg) { /* Restore wired count */ write_c0_wired(0); mtc0_tlbw_hazard(); /* Clear out all the TLBs */ kvm_local_flush_tlb_all(); } void kvm_arch_destroy_vm(struct kvm *kvm) { kvm_mips_free_vcpus(kvm); /* If this is the last instance, restore wired count */ if (atomic_dec_return(&kvm_mips_instance) == 0) { kvm_debug("%s: last KVM instance, restoring TLB parameters\n", __func__); on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1); } } long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { return -ENOIOCTLCMD; } int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, unsigned long npages) { return 0; } int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, const struct kvm_userspace_memory_region *mem, enum kvm_mr_change change) { return 0; } void kvm_arch_commit_memory_region(struct kvm *kvm, const struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, const struct kvm_memory_slot *new, enum kvm_mr_change change) { unsigned long npages = 0; int i; kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n", __func__, kvm, mem->slot, mem->guest_phys_addr, mem->memory_size, mem->userspace_addr); /* Setup Guest PMAP table */ if (!kvm->arch.guest_pmap) { if (mem->slot == 0) npages = mem->memory_size >> PAGE_SHIFT; if (npages) { kvm->arch.guest_pmap_npages = npages; kvm->arch.guest_pmap = kzalloc(npages * sizeof(unsigned long), GFP_KERNEL); if (!kvm->arch.guest_pmap) { kvm_err("Failed to allocate guest PMAP\n"); return; } kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n", npages, kvm->arch.guest_pmap); /* Now setup the page table */ for (i = 0; i < npages; i++) kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE; } } } struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { int err, size, offset; void *gebase; int i; struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL); if (!vcpu) { err = -ENOMEM; goto out; } err = kvm_vcpu_init(vcpu, kvm, id); if (err) goto out_free_cpu; kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu); /* * Allocate space for host mode exception handlers that handle * guest mode exits */ if (cpu_has_veic || cpu_has_vint) size = 0x200 + VECTORSPACING * 64; else size = 0x4000; /* Save Linux EBASE */ vcpu->arch.host_ebase = (void *)read_c0_ebase(); gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL); if (!gebase) { err = -ENOMEM; goto out_uninit_cpu; } kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n", ALIGN(size, PAGE_SIZE), gebase); /* Save new ebase */ vcpu->arch.guest_ebase = gebase; /* Copy L1 Guest Exception handler to correct offset */ /* TLB Refill, EXL = 0 */ memcpy(gebase, mips32_exception, mips32_exceptionEnd - mips32_exception); /* General Exception Entry point */ memcpy(gebase + 0x180, mips32_exception, mips32_exceptionEnd - mips32_exception); /* For vectored interrupts 
poke the exception code @ all offsets 0-7 */ for (i = 0; i < 8; i++) { kvm_debug("L1 Vectored handler @ %p\n", gebase + 0x200 + (i * VECTORSPACING)); memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception, mips32_exceptionEnd - mips32_exception); } /* General handler, relocate to unmapped space for sanity's sake */ offset = 0x2000; kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n", gebase + offset, mips32_GuestExceptionEnd - mips32_GuestException); memcpy(gebase + offset, mips32_GuestException, mips32_GuestExceptionEnd - mips32_GuestException); /* Invalidate the icache for these ranges */ local_flush_icache_range((unsigned long)gebase, (unsigned long)gebase + ALIGN(size, PAGE_SIZE)); /* * Allocate comm page for guest kernel, a TLB will be reserved for * mapping GVA @ 0xFFFF8000 to this page */ vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL); if (!vcpu->arch.kseg0_commpage) { err = -ENOMEM; goto out_free_gebase; } kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage); kvm_mips_commpage_init(vcpu); /* Init */ vcpu->arch.last_sched_cpu = -1; /* Start off the timer */ kvm_mips_init_count(vcpu); return vcpu; out_free_gebase: kfree(gebase); out_uninit_cpu: kvm_vcpu_uninit(vcpu); out_free_cpu: kfree(vcpu); out: return ERR_PTR(err); } void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) { hrtimer_cancel(&vcpu->arch.comparecount_timer); kvm_vcpu_uninit(vcpu); kvm_mips_dump_stats(vcpu); kfree(vcpu->arch.guest_ebase); kfree(vcpu->arch.kseg0_commpage); kfree(vcpu); } void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) { kvm_arch_vcpu_free(vcpu); } int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg) { return -ENOIOCTLCMD; } int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) { int r = 0; sigset_t sigsaved; if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); if (vcpu->mmio_needed) { if (!vcpu->mmio_is_write) kvm_mips_complete_mmio_load(vcpu, run); vcpu->mmio_needed = 0; } lose_fpu(1); local_irq_disable(); /* Check if we have any exceptions/interrupts pending */ kvm_mips_deliver_interrupts(vcpu, kvm_read_c0_guest_cause(vcpu->arch.cop0)); __kvm_guest_enter(); /* Disable hardware page table walking while in guest */ htw_stop(); r = __kvm_mips_vcpu_run(run, vcpu); /* Re-enable HTW before enabling interrupts */ htw_start(); __kvm_guest_exit(); local_irq_enable(); if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &sigsaved, NULL); return r; } int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq) { int intr = (int)irq->irq; struct kvm_vcpu *dvcpu = NULL; if (intr == 3 || intr == -3 || intr == 4 || intr == -4) kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu, (int)intr); if (irq->cpu == -1) dvcpu = vcpu; else dvcpu = vcpu->kvm->vcpus[irq->cpu]; if (intr == 2 || intr == 3 || intr == 4) { kvm_mips_callbacks->queue_io_int(dvcpu, irq); } else if (intr == -2 || intr == -3 || intr == -4) { kvm_mips_callbacks->dequeue_io_int(dvcpu, irq); } else { kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__, irq->cpu, irq->irq); return -EINVAL; } dvcpu->arch.wait = 0; if (waitqueue_active(&dvcpu->wq)) wake_up_interruptible(&dvcpu->wq); return 0; } int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { return -ENOIOCTLCMD; } int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { return -ENOIOCTLCMD; } static u64 kvm_mips_get_one_regs[] = { KVM_REG_MIPS_R0, KVM_REG_MIPS_R1, KVM_REG_MIPS_R2, 
KVM_REG_MIPS_R3, KVM_REG_MIPS_R4, KVM_REG_MIPS_R5, KVM_REG_MIPS_R6, KVM_REG_MIPS_R7, KVM_REG_MIPS_R8, KVM_REG_MIPS_R9, KVM_REG_MIPS_R10, KVM_REG_MIPS_R11, KVM_REG_MIPS_R12, KVM_REG_MIPS_R13, KVM_REG_MIPS_R14, KVM_REG_MIPS_R15, KVM_REG_MIPS_R16, KVM_REG_MIPS_R17, KVM_REG_MIPS_R18, KVM_REG_MIPS_R19, KVM_REG_MIPS_R20, KVM_REG_MIPS_R21, KVM_REG_MIPS_R22, KVM_REG_MIPS_R23, KVM_REG_MIPS_R24, KVM_REG_MIPS_R25, KVM_REG_MIPS_R26, KVM_REG_MIPS_R27, KVM_REG_MIPS_R28, KVM_REG_MIPS_R29, KVM_REG_MIPS_R30, KVM_REG_MIPS_R31, KVM_REG_MIPS_HI, KVM_REG_MIPS_LO, KVM_REG_MIPS_PC, KVM_REG_MIPS_CP0_INDEX, KVM_REG_MIPS_CP0_CONTEXT, KVM_REG_MIPS_CP0_USERLOCAL, KVM_REG_MIPS_CP0_PAGEMASK, KVM_REG_MIPS_CP0_WIRED, KVM_REG_MIPS_CP0_HWRENA, KVM_REG_MIPS_CP0_BADVADDR, KVM_REG_MIPS_CP0_COUNT, KVM_REG_MIPS_CP0_ENTRYHI, KVM_REG_MIPS_CP0_COMPARE, KVM_REG_MIPS_CP0_STATUS, KVM_REG_MIPS_CP0_CAUSE, KVM_REG_MIPS_CP0_EPC, KVM_REG_MIPS_CP0_PRID, KVM_REG_MIPS_CP0_CONFIG, KVM_REG_MIPS_CP0_CONFIG1, KVM_REG_MIPS_CP0_CONFIG2, KVM_REG_MIPS_CP0_CONFIG3, KVM_REG_MIPS_CP0_CONFIG4, KVM_REG_MIPS_CP0_CONFIG5, KVM_REG_MIPS_CP0_CONFIG7, KVM_REG_MIPS_CP0_ERROREPC, KVM_REG_MIPS_COUNT_CTL, KVM_REG_MIPS_COUNT_RESUME, KVM_REG_MIPS_COUNT_HZ, }; static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_fpu_struct *fpu = &vcpu->arch.fpu; int ret; s64 v; s64 vs[2]; unsigned int idx; switch (reg->id) { /* General purpose registers */ case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31: v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0]; break; case KVM_REG_MIPS_HI: v = (long)vcpu->arch.hi; break; case KVM_REG_MIPS_LO: v = (long)vcpu->arch.lo; break; case KVM_REG_MIPS_PC: v = (long)vcpu->arch.pc; break; /* Floating point registers */ case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31): if (!kvm_mips_guest_has_fpu(&vcpu->arch)) return -EINVAL; idx = reg->id - KVM_REG_MIPS_FPR_32(0); /* Odd singles in top of even double when FR=0 */ if (kvm_read_c0_guest_status(cop0) & ST0_FR) v = get_fpr32(&fpu->fpr[idx], 0); else v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1); break; case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31): if (!kvm_mips_guest_has_fpu(&vcpu->arch)) return -EINVAL; idx = reg->id - KVM_REG_MIPS_FPR_64(0); /* Can't access odd doubles in FR=0 mode */ if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR)) return -EINVAL; v = get_fpr64(&fpu->fpr[idx], 0); break; case KVM_REG_MIPS_FCR_IR: if (!kvm_mips_guest_has_fpu(&vcpu->arch)) return -EINVAL; v = boot_cpu_data.fpu_id; break; case KVM_REG_MIPS_FCR_CSR: if (!kvm_mips_guest_has_fpu(&vcpu->arch)) return -EINVAL; v = fpu->fcr31; break; /* MIPS SIMD Architecture (MSA) registers */ case KVM_REG_MIPS_VEC_128(0) ... 
KVM_REG_MIPS_VEC_128(31): if (!kvm_mips_guest_has_msa(&vcpu->arch)) return -EINVAL; /* Can't access MSA registers in FR=0 mode */ if (!(kvm_read_c0_guest_status(cop0) & ST0_FR)) return -EINVAL; idx = reg->id - KVM_REG_MIPS_VEC_128(0); #ifdef CONFIG_CPU_LITTLE_ENDIAN /* least significant byte first */ vs[0] = get_fpr64(&fpu->fpr[idx], 0); vs[1] = get_fpr64(&fpu->fpr[idx], 1); #else /* most significant byte first */ vs[0] = get_fpr64(&fpu->fpr[idx], 1); vs[1] = get_fpr64(&fpu->fpr[idx], 0); #endif break; case KVM_REG_MIPS_MSA_IR: if (!kvm_mips_guest_has_msa(&vcpu->arch)) return -EINVAL; v = boot_cpu_data.msa_id; break; case KVM_REG_MIPS_MSA_CSR: if (!kvm_mips_guest_has_msa(&vcpu->arch)) return -EINVAL; v = fpu->msacsr; break; /* Co-processor 0 registers */ case KVM_REG_MIPS_CP0_INDEX: v = (long)kvm_read_c0_guest_index(cop0); break; case KVM_REG_MIPS_CP0_CONTEXT: v = (long)kvm_read_c0_guest_context(cop0); break; case KVM_REG_MIPS_CP0_USERLOCAL: v = (long)kvm_read_c0_guest_userlocal(cop0); break; case KVM_REG_MIPS_CP0_PAGEMASK: v = (long)kvm_read_c0_guest_pagemask(cop0); break; case KVM_REG_MIPS_CP0_WIRED: v = (long)kvm_read_c0_guest_wired(cop0); break; case KVM_REG_MIPS_CP0_HWRENA: v = (long)kvm_read_c0_guest_hwrena(cop0); break; case KVM_REG_MIPS_CP0_BADVADDR: v = (long)kvm_read_c0_guest_badvaddr(cop0); break; case KVM_REG_MIPS_CP0_ENTRYHI: v = (long)kvm_read_c0_guest_entryhi(cop0); break; case KVM_REG_MIPS_CP0_COMPARE: v = (long)kvm_read_c0_guest_compare(cop0); break; case KVM_REG_MIPS_CP0_STATUS: v = (long)kvm_read_c0_guest_status(cop0); break; case KVM_REG_MIPS_CP0_CAUSE: v = (long)kvm_read_c0_guest_cause(cop0); break; case KVM_REG_MIPS_CP0_EPC: v = (long)kvm_read_c0_guest_epc(cop0); break; case KVM_REG_MIPS_CP0_PRID: v = (long)kvm_read_c0_guest_prid(cop0); break; case KVM_REG_MIPS_CP0_CONFIG: v = (long)kvm_read_c0_guest_config(cop0); break; case KVM_REG_MIPS_CP0_CONFIG1: v = (long)kvm_read_c0_guest_config1(cop0); break; case KVM_REG_MIPS_CP0_CONFIG2: v = (long)kvm_read_c0_guest_config2(cop0); break; case KVM_REG_MIPS_CP0_CONFIG3: v = (long)kvm_read_c0_guest_config3(cop0); break; case KVM_REG_MIPS_CP0_CONFIG4: v = (long)kvm_read_c0_guest_config4(cop0); break; case KVM_REG_MIPS_CP0_CONFIG5: v = (long)kvm_read_c0_guest_config5(cop0); break; case KVM_REG_MIPS_CP0_CONFIG7: v = (long)kvm_read_c0_guest_config7(cop0); break; case KVM_REG_MIPS_CP0_ERROREPC: v = (long)kvm_read_c0_guest_errorepc(cop0); break; /* registers to be handled specially */ case KVM_REG_MIPS_CP0_COUNT: case KVM_REG_MIPS_COUNT_CTL: case KVM_REG_MIPS_COUNT_RESUME: case KVM_REG_MIPS_COUNT_HZ: ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v); if (ret) return ret; break; default: return -EINVAL; } if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; return put_user(v, uaddr64); } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; u32 v32 = (u32)v; return put_user(v32, uaddr32); } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { void __user *uaddr = (void __user *)(long)reg->addr; return copy_to_user(uaddr, vs, 16); } else { return -EINVAL; } } static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { struct mips_coproc *cop0 = vcpu->arch.cop0; struct mips_fpu_struct *fpu = &vcpu->arch.fpu; s64 v; s64 vs[2]; unsigned int idx; if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; if (get_user(v, uaddr64) != 0) 
return -EFAULT; } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; s32 v32; if (get_user(v32, uaddr32) != 0) return -EFAULT; v = (s64)v32; } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { void __user *uaddr = (void __user *)(long)reg->addr; return copy_from_user(vs, uaddr, 16); } else { return -EINVAL; } switch (reg->id) { /* General purpose registers */ case KVM_REG_MIPS_R0: /* Silently ignore requests to set $0 */ break; case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31: vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v; break; case KVM_REG_MIPS_HI: vcpu->arch.hi = v; break; case KVM_REG_MIPS_LO: vcpu->arch.lo = v; break; case KVM_REG_MIPS_PC: vcpu->arch.pc = v; break; /* Floating point registers */ case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31): if (!kvm_mips_guest_has_fpu(&vcpu->arch)) return -EINVAL; idx = reg->id - KVM_REG_MIPS_FPR_32(0); /* Odd singles in top of even double when FR=0 */ if (kvm_read_c0_guest_status(cop0) & ST0_FR) set_fpr32(&fpu->fpr[idx], 0, v); else set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v); break; case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31): if (!kvm_mips_guest_has_fpu(&vcpu->arch)) return -EINVAL; idx = reg->id - KVM_REG_MIPS_FPR_64(0); /* Can't access odd doubles in FR=0 mode */ if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR)) return -EINVAL; set_fpr64(&fpu->fpr[idx], 0, v); break; case KVM_REG_MIPS_FCR_IR: if (!kvm_mips_guest_has_fpu(&vcpu->arch)) return -EINVAL; /* Read-only */ break; case KVM_REG_MIPS_FCR_CSR: if (!kvm_mips_guest_has_fpu(&vcpu->arch)) return -EINVAL; fpu->fcr31 = v; break; /* MIPS SIMD Architecture (MSA) registers */ case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31): if (!kvm_mips_guest_has_msa(&vcpu->arch)) return -EINVAL; idx = reg->id - KVM_REG_MIPS_VEC_128(0); #ifdef CONFIG_CPU_LITTLE_ENDIAN /* least significant byte first */ set_fpr64(&fpu->fpr[idx], 0, vs[0]); set_fpr64(&fpu->fpr[idx], 1, vs[1]); #else /* most significant byte first */ set_fpr64(&fpu->fpr[idx], 1, vs[0]); set_fpr64(&fpu->fpr[idx], 0, vs[1]); #endif break; case KVM_REG_MIPS_MSA_IR: if (!kvm_mips_guest_has_msa(&vcpu->arch)) return -EINVAL; /* Read-only */ break; case KVM_REG_MIPS_MSA_CSR: if (!kvm_mips_guest_has_msa(&vcpu->arch)) return -EINVAL; fpu->msacsr = v; break; /* Co-processor 0 registers */ case KVM_REG_MIPS_CP0_INDEX: kvm_write_c0_guest_index(cop0, v); break; case KVM_REG_MIPS_CP0_CONTEXT: kvm_write_c0_guest_context(cop0, v); break; case KVM_REG_MIPS_CP0_USERLOCAL: kvm_write_c0_guest_userlocal(cop0, v); break; case KVM_REG_MIPS_CP0_PAGEMASK: kvm_write_c0_guest_pagemask(cop0, v); break; case KVM_REG_MIPS_CP0_WIRED: kvm_write_c0_guest_wired(cop0, v); break; case KVM_REG_MIPS_CP0_HWRENA: kvm_write_c0_guest_hwrena(cop0, v); break; case KVM_REG_MIPS_CP0_BADVADDR: kvm_write_c0_guest_badvaddr(cop0, v); break; case KVM_REG_MIPS_CP0_ENTRYHI: kvm_write_c0_guest_entryhi(cop0, v); break; case KVM_REG_MIPS_CP0_STATUS: kvm_write_c0_guest_status(cop0, v); break; case KVM_REG_MIPS_CP0_EPC: kvm_write_c0_guest_epc(cop0, v); break; case KVM_REG_MIPS_CP0_PRID: kvm_write_c0_guest_prid(cop0, v); break; case KVM_REG_MIPS_CP0_ERROREPC: kvm_write_c0_guest_errorepc(cop0, v); break; /* registers to be handled specially */ case KVM_REG_MIPS_CP0_COUNT: case KVM_REG_MIPS_CP0_COMPARE: case KVM_REG_MIPS_CP0_CAUSE: case KVM_REG_MIPS_CP0_CONFIG: case KVM_REG_MIPS_CP0_CONFIG1: case KVM_REG_MIPS_CP0_CONFIG2: case KVM_REG_MIPS_CP0_CONFIG3: case KVM_REG_MIPS_CP0_CONFIG4: case 
KVM_REG_MIPS_CP0_CONFIG5: case KVM_REG_MIPS_COUNT_CTL: case KVM_REG_MIPS_COUNT_RESUME: case KVM_REG_MIPS_COUNT_HZ: return kvm_mips_callbacks->set_one_reg(vcpu, reg, v); default: return -EINVAL; } return 0; } static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, struct kvm_enable_cap *cap) { int r = 0; if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap)) return -EINVAL; if (cap->flags) return -EINVAL; if (cap->args[0]) return -EINVAL; switch (cap->cap) { case KVM_CAP_MIPS_FPU: vcpu->arch.fpu_enabled = true; break; case KVM_CAP_MIPS_MSA: vcpu->arch.msa_enabled = true; break; default: r = -EINVAL; break; } return r; } long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { struct kvm_vcpu *vcpu = filp->private_data; void __user *argp = (void __user *)arg; long r; switch (ioctl) { case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) return kvm_mips_set_reg(vcpu, &reg); else return kvm_mips_get_reg(vcpu, &reg); } case KVM_GET_REG_LIST: { struct kvm_reg_list __user *user_list = argp; u64 __user *reg_dest; struct kvm_reg_list reg_list; unsigned n; if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n; reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs); if (copy_to_user(user_list, &reg_list, sizeof(reg_list))) return -EFAULT; if (n < reg_list.n) return -E2BIG; reg_dest = user_list->reg; if (copy_to_user(reg_dest, kvm_mips_get_one_regs, sizeof(kvm_mips_get_one_regs))) return -EFAULT; return 0; } case KVM_NMI: /* Treat the NMI as a CPU reset */ r = kvm_mips_reset_vcpu(vcpu); break; case KVM_INTERRUPT: { struct kvm_mips_interrupt irq; r = -EFAULT; if (copy_from_user(&irq, argp, sizeof(irq))) goto out; kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq); r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); break; } case KVM_ENABLE_CAP: { struct kvm_enable_cap cap; r = -EFAULT; if (copy_from_user(&cap, argp, sizeof(cap))) goto out; r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); break; } default: r = -ENOIOCTLCMD; } out: return r; } /* Get (and clear) the dirty memory log for a memory slot. */ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { struct kvm_memslots *slots; struct kvm_memory_slot *memslot; unsigned long ga, ga_end; int is_dirty = 0; int r; unsigned long n; mutex_lock(&kvm->slots_lock); r = kvm_get_dirty_log(kvm, log, &is_dirty); if (r) goto out; /* If nothing is dirty, don't bother messing with page tables. 
*/ if (is_dirty) { slots = kvm_memslots(kvm); memslot = id_to_memslot(slots, log->slot); ga = memslot->base_gfn << PAGE_SHIFT; ga_end = ga + (memslot->npages << PAGE_SHIFT); kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga, ga_end); n = kvm_dirty_bitmap_bytes(memslot); memset(memslot->dirty_bitmap, 0, n); } r = 0; out: mutex_unlock(&kvm->slots_lock); return r; } long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { long r; switch (ioctl) { default: r = -ENOIOCTLCMD; } return r; } int kvm_arch_init(void *opaque) { if (kvm_mips_callbacks) { kvm_err("kvm: module already exists\n"); return -EEXIST; } return kvm_mips_emulation_init(&kvm_mips_callbacks); } void kvm_arch_exit(void) { kvm_mips_callbacks = NULL; } int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { return -ENOIOCTLCMD; } int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { return -ENOIOCTLCMD; } void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) { } int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { return -ENOIOCTLCMD; } int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { return -ENOIOCTLCMD; } int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) { return VM_FAULT_SIGBUS; } int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) { int r; switch (ext) { case KVM_CAP_ONE_REG: case KVM_CAP_ENABLE_CAP: r = 1; break; case KVM_CAP_COALESCED_MMIO: r = KVM_COALESCED_MMIO_PAGE_OFFSET; break; case KVM_CAP_MIPS_FPU: r = !!cpu_has_fpu; break; case KVM_CAP_MIPS_MSA: /* * We don't support MSA vector partitioning yet: * 1) It would require explicit support which can't be tested * yet due to lack of support in current hardware. * 2) It extends the state that would need to be saved/restored * by e.g. QEMU for migration. * * When vector partitioning hardware becomes available, support * could be added by requiring a flag when enabling * KVM_CAP_MIPS_MSA capability to indicate that userland knows * to save/restore the appropriate extra state. */ r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF); break; default: r = 0; break; } return r; } int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) { return kvm_mips_pending_timer(vcpu); } int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) { int i; struct mips_coproc *cop0; if (!vcpu) return -1; kvm_debug("VCPU Register Dump:\n"); kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc); kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions); for (i = 0; i < 32; i += 4) { kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i, vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1], vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); } kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi); kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo); cop0 = vcpu->arch.cop0; kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n", kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0)); kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0)); return 0; } int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) vcpu->arch.gprs[i] = regs->gpr[i]; vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. 
*/ vcpu->arch.hi = regs->hi; vcpu->arch.lo = regs->lo; vcpu->arch.pc = regs->pc; return 0; } int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) { int i; for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) regs->gpr[i] = vcpu->arch.gprs[i]; regs->hi = vcpu->arch.hi; regs->lo = vcpu->arch.lo; regs->pc = vcpu->arch.pc; return 0; } static void kvm_mips_comparecount_func(unsigned long data) { struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; kvm_mips_callbacks->queue_timer_int(vcpu); vcpu->arch.wait = 0; if (waitqueue_active(&vcpu->wq)) wake_up_interruptible(&vcpu->wq); } /* low level hrtimer wake routine */ static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer) { struct kvm_vcpu *vcpu; vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer); kvm_mips_comparecount_func((unsigned long) vcpu); return kvm_mips_count_timeout(vcpu); } int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) { kvm_mips_callbacks->vcpu_init(vcpu); hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup; return 0; } int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) { return 0; } /* Initial guest state */ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { return kvm_mips_callbacks->vcpu_setup(vcpu); } static void kvm_mips_set_c0_status(void) { uint32_t status = read_c0_status(); if (cpu_has_dsp) status |= (ST0_MX); write_c0_status(status); ehb(); } /* * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV) */ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) { uint32_t cause = vcpu->arch.host_cp0_cause; uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; enum emulation_result er = EMULATE_DONE; int ret = RESUME_GUEST; /* re-enable HTW before enabling interrupts */ htw_start(); /* Set a default exit reason */ run->exit_reason = KVM_EXIT_UNKNOWN; run->ready_for_interrupt_injection = 1; /* * Set the appropriate status bits based on host CPU features, * before we hit the scheduler */ kvm_mips_set_c0_status(); local_irq_enable(); kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n", cause, opc, run, vcpu); /* * Do a privilege check, if in UM most of these exit conditions end up * causing an exception to be delivered to the Guest Kernel */ er = kvm_mips_check_privilege(cause, opc, run, vcpu); if (er == EMULATE_PRIV_FAIL) { goto skip_emul; } else if (er == EMULATE_FAIL) { run->exit_reason = KVM_EXIT_INTERNAL_ERROR; ret = RESUME_HOST; goto skip_emul; } switch (exccode) { case EXCCODE_INT: kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc); ++vcpu->stat.int_exits; trace_kvm_exit(vcpu, INT_EXITS); if (need_resched()) cond_resched(); ret = RESUME_GUEST; break; case EXCCODE_CPU: kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc); ++vcpu->stat.cop_unusable_exits; trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS); ret = kvm_mips_callbacks->handle_cop_unusable(vcpu); /* XXXKYMA: Might need to return to user space */ if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) ret = RESUME_HOST; break; case EXCCODE_MOD: ++vcpu->stat.tlbmod_exits; trace_kvm_exit(vcpu, TLBMOD_EXITS); ret = kvm_mips_callbacks->handle_tlb_mod(vcpu); break; case EXCCODE_TLBS: kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n", cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc, badvaddr); 
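		/*
		 * The TLB and address-error cases below only dispatch and
		 * update the vcpu->stat counters; the actual emulation is
		 * reached through kvm_mips_callbacks, which kvm_arch_init()
		 * obtained from kvm_mips_emulation_init() (presumably the
		 * trap-and-emulate backend in this tree).
		 */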
++vcpu->stat.tlbmiss_st_exits; trace_kvm_exit(vcpu, TLBMISS_ST_EXITS); ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu); break; case EXCCODE_TLBL: kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n", cause, opc, badvaddr); ++vcpu->stat.tlbmiss_ld_exits; trace_kvm_exit(vcpu, TLBMISS_LD_EXITS); ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu); break; case EXCCODE_ADES: ++vcpu->stat.addrerr_st_exits; trace_kvm_exit(vcpu, ADDRERR_ST_EXITS); ret = kvm_mips_callbacks->handle_addr_err_st(vcpu); break; case EXCCODE_ADEL: ++vcpu->stat.addrerr_ld_exits; trace_kvm_exit(vcpu, ADDRERR_LD_EXITS); ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu); break; case EXCCODE_SYS: ++vcpu->stat.syscall_exits; trace_kvm_exit(vcpu, SYSCALL_EXITS); ret = kvm_mips_callbacks->handle_syscall(vcpu); break; case EXCCODE_RI: ++vcpu->stat.resvd_inst_exits; trace_kvm_exit(vcpu, RESVD_INST_EXITS); ret = kvm_mips_callbacks->handle_res_inst(vcpu); break; case EXCCODE_BP: ++vcpu->stat.break_inst_exits; trace_kvm_exit(vcpu, BREAK_INST_EXITS); ret = kvm_mips_callbacks->handle_break(vcpu); break; case EXCCODE_TR: ++vcpu->stat.trap_inst_exits; trace_kvm_exit(vcpu, TRAP_INST_EXITS); ret = kvm_mips_callbacks->handle_trap(vcpu); break; case EXCCODE_MSAFPE: ++vcpu->stat.msa_fpe_exits; trace_kvm_exit(vcpu, MSA_FPE_EXITS); ret = kvm_mips_callbacks->handle_msa_fpe(vcpu); break; case EXCCODE_FPE: ++vcpu->stat.fpe_exits; trace_kvm_exit(vcpu, FPE_EXITS); ret = kvm_mips_callbacks->handle_fpe(vcpu); break; case EXCCODE_MSADIS: ++vcpu->stat.msa_disabled_exits; trace_kvm_exit(vcpu, MSA_DISABLED_EXITS); ret = kvm_mips_callbacks->handle_msa_disabled(vcpu); break; default: kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n", exccode, opc, kvm_get_inst(opc, vcpu), badvaddr, kvm_read_c0_guest_status(vcpu->arch.cop0)); kvm_arch_vcpu_dump_regs(vcpu); run->exit_reason = KVM_EXIT_INTERNAL_ERROR; ret = RESUME_HOST; break; } skip_emul: local_irq_disable(); if (er == EMULATE_DONE && !(ret & RESUME_HOST)) kvm_mips_deliver_interrupts(vcpu, cause); if (!(ret & RESUME_HOST)) { /* Only check for signals if not already exiting to userspace */ if (signal_pending(current)) { run->exit_reason = KVM_EXIT_INTR; ret = (-EINTR << 2) | RESUME_HOST; ++vcpu->stat.signal_exits; trace_kvm_exit(vcpu, SIGNAL_EXITS); } } if (ret == RESUME_GUEST) { /* * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context * is live), restore FCR31 / MSACSR. * * This should be before returning to the guest exception * vector, as it may well cause an [MSA] FP exception if there * are pending exception bits unmasked. (see * kvm_mips_csr_die_notifier() for how that is handled). */ if (kvm_mips_guest_has_fpu(&vcpu->arch) && read_c0_status() & ST0_CU1) __kvm_restore_fcsr(&vcpu->arch); if (kvm_mips_guest_has_msa(&vcpu->arch) && read_c0_config5() & MIPS_CONF5_MSAEN) __kvm_restore_msacsr(&vcpu->arch); } /* Disable HTW before returning to guest or host */ htw_stop(); return ret; } /* Enable FPU for guest and restore context */ void kvm_own_fpu(struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; unsigned int sr, cfg5; preempt_disable(); sr = kvm_read_c0_guest_status(cop0); /* * If MSA state is already live, it is undefined how it interacts with * FR=0 FPU state, and we don't want to hit reserved instruction * exceptions trying to save the MSA state later when CU=1 && FR=1, so * play it safe and save it first. 
* * In theory we shouldn't ever hit this case since kvm_lose_fpu() should * get called when guest CU1 is set, however we can't trust the guest * not to clobber the status register directly via the commpage. */ if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) kvm_lose_fpu(vcpu); /* * Enable FPU for guest * We set FR and FRE according to guest context */ change_c0_status(ST0_CU1 | ST0_FR, sr); if (cpu_has_fre) { cfg5 = kvm_read_c0_guest_config5(cop0); change_c0_config5(MIPS_CONF5_FRE, cfg5); } enable_fpu_hazard(); /* If guest FPU state not active, restore it now */ if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) { __kvm_restore_fpu(&vcpu->arch); vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU; } preempt_enable(); } #ifdef CONFIG_CPU_HAS_MSA /* Enable MSA for guest and restore context */ void kvm_own_msa(struct kvm_vcpu *vcpu) { struct mips_coproc *cop0 = vcpu->arch.cop0; unsigned int sr, cfg5; preempt_disable(); /* * Enable FPU if enabled in guest, since we're restoring FPU context * anyway. We set FR and FRE according to guest context. */ if (kvm_mips_guest_has_fpu(&vcpu->arch)) { sr = kvm_read_c0_guest_status(cop0); /* * If FR=0 FPU state is already live, it is undefined how it * interacts with MSA state, so play it safe and save it first. */ if (!(sr & ST0_FR) && (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) == KVM_MIPS_FPU_FPU) kvm_lose_fpu(vcpu); change_c0_status(ST0_CU1 | ST0_FR, sr); if (sr & ST0_CU1 && cpu_has_fre) { cfg5 = kvm_read_c0_guest_config5(cop0); change_c0_config5(MIPS_CONF5_FRE, cfg5); } } /* Enable MSA for guest */ set_c0_config5(MIPS_CONF5_MSAEN); enable_fpu_hazard(); switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) { case KVM_MIPS_FPU_FPU: /* * Guest FPU state already loaded, only restore upper MSA state */ __kvm_restore_msa_upper(&vcpu->arch); vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA; break; case 0: /* Neither FPU or MSA already active, restore full MSA state */ __kvm_restore_msa(&vcpu->arch); vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA; if (kvm_mips_guest_has_fpu(&vcpu->arch)) vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU; break; default: break; } preempt_enable(); } #endif /* Drop FPU & MSA without saving it */ void kvm_drop_fpu(struct kvm_vcpu *vcpu) { preempt_disable(); if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) { disable_msa(); vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA; } if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) { clear_c0_status(ST0_CU1 | ST0_FR); vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU; } preempt_enable(); } /* Save and disable FPU & MSA */ void kvm_lose_fpu(struct kvm_vcpu *vcpu) { /* * FPU & MSA get disabled in root context (hardware) when it is disabled * in guest context (software), but the register state in the hardware * may still be in use. This is why we explicitly re-enable the hardware * before saving. 
 */
	preempt_disable();

	if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
		set_c0_config5(MIPS_CONF5_MSAEN);
		enable_fpu_hazard();

		__kvm_save_msa(&vcpu->arch);

		/* Disable MSA & FPU */
		disable_msa();
		if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
			clear_c0_status(ST0_CU1 | ST0_FR);
		vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA);
	} else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
		set_c0_status(ST0_CU1);
		enable_fpu_hazard();

		__kvm_save_fpu(&vcpu->arch);
		vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;

		/* Disable FPU */
		clear_c0_status(ST0_CU1 | ST0_FR);
	}
	preempt_enable();
}

/*
 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
 * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
 * exception if cause bits are set in the value being written.
 */
static int kvm_mips_csr_die_notify(struct notifier_block *self,
				   unsigned long cmd, void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;
	unsigned long pc;

	/* Only interested in FPE and MSAFPE */
	if (cmd != DIE_FP && cmd != DIE_MSAFP)
		return NOTIFY_DONE;

	/* Return immediately if guest context isn't active */
	if (!(current->flags & PF_VCPU))
		return NOTIFY_DONE;

	/* Should never get here from user mode */
	BUG_ON(user_mode(regs));

	pc = instruction_pointer(regs);
	switch (cmd) {
	case DIE_FP:
		/* match 2nd instruction in __kvm_restore_fcsr */
		if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
			return NOTIFY_DONE;
		break;
	case DIE_MSAFP:
		/* match 2nd/3rd instruction in __kvm_restore_msacsr */
		if (!cpu_has_msa ||
		    pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
		    pc > (unsigned long)&__kvm_restore_msacsr + 8)
			return NOTIFY_DONE;
		break;
	}

	/* Move PC forward a little and continue executing */
	instruction_pointer(regs) += 4;

	return NOTIFY_STOP;
}

static struct notifier_block kvm_mips_csr_die_notifier = {
	.notifier_call = kvm_mips_csr_die_notify,
};

static int __init kvm_mips_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	register_die_notifier(&kvm_mips_csr_die_notifier);

	/*
	 * On MIPS, kernel modules are executed from "mapped space", which
	 * requires TLBs. The TLB handling code is statically linked with
	 * the rest of the kernel (tlb.c) to avoid the possibility of
	 * double faulting. The issue is that the TLB code references
	 * routines that are part of the KVM module, which are only
	 * available once the module is loaded.
	 */
	kvm_mips_gfn_to_pfn = gfn_to_pfn;
	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
	kvm_mips_is_error_pfn = is_error_pfn;

	return 0;
}

static void __exit kvm_mips_exit(void)
{
	kvm_exit();

	kvm_mips_gfn_to_pfn = NULL;
	kvm_mips_release_pfn_clean = NULL;
	kvm_mips_is_error_pfn = NULL;

	unregister_die_notifier(&kvm_mips_csr_die_notifier);
}

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
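The KVM_GET_REG_LIST branch of kvm_arch_vcpu_ioctl() above copies the required
count back into reg_list.n before failing with -E2BIG, which is what makes the
usual two-call pattern work from userspace. Below is a minimal sketch of that
pattern; it is not part of mips.c, the helper name is invented, and it assumes
a vcpu file descriptor already obtained through the standard /dev/kvm,
KVM_CREATE_VM, KVM_CREATE_VCPU sequence:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Two-call pattern for KVM_GET_REG_LIST: the probe call with n == 0 is
 * expected to fail with E2BIG, but by then the kernel has already copied
 * the real register count back into probe.n (see the copy_to_user() that
 * precedes the -E2BIG return above). */
static struct kvm_reg_list *fetch_reg_list(int vcpu_fd)
{
	struct kvm_reg_list probe = { .n = 0 };
	struct kvm_reg_list *list;

	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

	list = malloc(sizeof(*list) + probe.n * sizeof(list->reg[0]));
	if (!list)
		return NULL;

	list->n = probe.n;
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
		free(list);
		return NULL;
	}
	return list;	/* caller frees */
}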
gpkulkarni/linux-arm64
arch/mips/kvm/mips.c
C
gpl-2.0
41,449
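Individual registers advertised that way are then transferred one at a time
with the KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls handled by kvm_mips_get_reg()
and kvm_mips_set_reg() above. A minimal sketch for one 64-bit register, again
with an invented helper name, assuming a MIPS host where <linux/kvm.h> pulls
in the KVM_REG_MIPS_* IDs from asm/kvm.h:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read the guest PC. KVM_REG_MIPS_PC encodes KVM_REG_SIZE_U64, so
 * kvm_mips_get_reg() takes the put_user(v, uaddr64) path and a uint64_t
 * buffer is exactly the right width. */
static int read_guest_pc(int vcpu_fd, uint64_t *pc)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_MIPS_PC,	/* which register */
		.addr = (uintptr_t)pc,		/* where to copy it */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

Writes are symmetric through KVM_SET_ONE_REG; note from the switch in
kvm_mips_set_reg() that writes to $0 and to read-only IDs such as
KVM_REG_MIPS_FCR_IR are silently ignored rather than rejected.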
/* vi: set sw=4 ts=4: */
/*
 * awk implementation for busybox
 *
 * Copyright (C) 2002 by Dmitry Zakharov <dmit@crp.bank.gov.ua>
 *
 * Licensed under GPLv2 or later, see file LICENSE in this source tree.
 */

//usage:#define awk_trivial_usage
//usage:       "[OPTIONS] [AWK_PROGRAM] [FILE]..."
//usage:#define awk_full_usage "\n\n"
//usage:       " -v VAR=VAL Set variable"
//usage:       "\n -F SEP Use SEP as field separator"
//usage:       "\n -f FILE Read program from FILE"

#include "libbb.h"
#include "xregex.h"
#include <math.h>

/* This is a NOEXEC applet. Be very careful! */

/* If you comment out one of these below, it will be #defined later
 * to perform debug printfs to stderr: */
#define debug_printf_walker(...) do {} while (0)
#define debug_printf_eval(...) do {} while (0)
#define debug_printf_parse(...) do {} while (0)

#ifndef debug_printf_walker
# define debug_printf_walker(...) (fprintf(stderr, __VA_ARGS__))
#endif
#ifndef debug_printf_eval
# define debug_printf_eval(...) (fprintf(stderr, __VA_ARGS__))
#endif
#ifndef debug_printf_parse
# define debug_printf_parse(...) (fprintf(stderr, __VA_ARGS__))
#endif

#define MAXVARFMT 240
#define MINNVBLOCK 64

/* variable flags */
#define VF_NUMBER  0x0001	/* 1 = primary type is number */
#define VF_ARRAY   0x0002	/* 1 = it's an array */
#define VF_CACHED  0x0100	/* 1 = num/str value has cached str/num eq */
#define VF_USER    0x0200	/* 1 = user input (may be numeric string) */
#define VF_SPECIAL 0x0400	/* 1 = requires extra handling when changed */
#define VF_WALK    0x0800	/* 1 = variable has alloc'd x.walker list */
#define VF_FSTR    0x1000	/* 1 = var::string points to fstring buffer */
#define VF_CHILD   0x2000	/* 1 = function arg; x.parent points to source */
#define VF_DIRTY   0x4000	/* 1 = variable was set explicitly */

/* these flags are static, don't change them when value is changed */
#define VF_DONTTOUCH (VF_ARRAY | VF_SPECIAL | VF_WALK | VF_CHILD | VF_DIRTY)

typedef struct walker_list {
	char *end;
	char *cur;
	struct walker_list *prev;
	char wbuf[1];
} walker_list;

/* Variable */
typedef struct var_s {
	unsigned type;            /* flags */
	double number;
	char *string;
	union {
		int aidx;               /* func arg idx (for compilation stage) */
		struct xhash_s *array;  /* array ptr */
		struct var_s *parent;   /* for func args, ptr to actual parameter */
		walker_list *walker;    /* list of array elements (for..in) */
	} x;
} var;

/* Node chain (pattern-action chain, BEGIN, END, function bodies) */
typedef struct chain_s {
	struct node_s *first;
	struct node_s *last;
	const char *programname;
} chain;

/* Function */
typedef struct func_s {
	unsigned nargs;
	struct chain_s body;
} func;

/* I/O stream */
typedef struct rstream_s {
	FILE *F;
	char *buffer;
	int adv;
	int size;
	int pos;
	smallint is_pipe;
} rstream;

typedef struct hash_item_s {
	union {
		struct var_s v;         /* variable/array hash */
		struct rstream_s rs;    /* redirect streams hash */
		struct func_s f;        /* functions hash */
	} data;
	struct hash_item_s *next;       /* next in chain */
	char name[1];                   /* really it's longer */
} hash_item;

typedef struct xhash_s {
	unsigned nel;           /* num of elements */
	unsigned csize;         /* current hash size */
	unsigned nprime;        /* next hash size in PRIMES[] */
	unsigned glen;          /* summary length of item names */
	struct hash_item_s **items;
} xhash;

/* Tree node */
typedef struct node_s {
	uint32_t info;
	unsigned lineno;
	union {
		struct node_s *n;
		var *v;
		int aidx;
		char *new_progname;
		regex_t *re;
	} l;
	union {
		struct node_s *n;
		regex_t *ire;
		func *f;
	} r;
	union {
		struct node_s *n;
	} a;
} node;

/* Block of temporary variables */
typedef struct nvblock_s {
	int size;
	var
*pos; struct nvblock_s *prev; struct nvblock_s *next; var nv[]; } nvblock; typedef struct tsplitter_s { node n; regex_t re[2]; } tsplitter; /* simple token classes */ /* Order and hex values are very important!!! See next_token() */ #define TC_SEQSTART 1 /* ( */ #define TC_SEQTERM (1 << 1) /* ) */ #define TC_REGEXP (1 << 2) /* /.../ */ #define TC_OUTRDR (1 << 3) /* | > >> */ #define TC_UOPPOST (1 << 4) /* unary postfix operator */ #define TC_UOPPRE1 (1 << 5) /* unary prefix operator */ #define TC_BINOPX (1 << 6) /* two-opnd operator */ #define TC_IN (1 << 7) #define TC_COMMA (1 << 8) #define TC_PIPE (1 << 9) /* input redirection pipe */ #define TC_UOPPRE2 (1 << 10) /* unary prefix operator */ #define TC_ARRTERM (1 << 11) /* ] */ #define TC_GRPSTART (1 << 12) /* { */ #define TC_GRPTERM (1 << 13) /* } */ #define TC_SEMICOL (1 << 14) #define TC_NEWLINE (1 << 15) #define TC_STATX (1 << 16) /* ctl statement (for, next...) */ #define TC_WHILE (1 << 17) #define TC_ELSE (1 << 18) #define TC_BUILTIN (1 << 19) #define TC_GETLINE (1 << 20) #define TC_FUNCDECL (1 << 21) /* `function' `func' */ #define TC_BEGIN (1 << 22) #define TC_END (1 << 23) #define TC_EOF (1 << 24) #define TC_VARIABLE (1 << 25) #define TC_ARRAY (1 << 26) #define TC_FUNCTION (1 << 27) #define TC_STRING (1 << 28) #define TC_NUMBER (1 << 29) #define TC_UOPPRE (TC_UOPPRE1 | TC_UOPPRE2) /* combined token classes */ #define TC_BINOP (TC_BINOPX | TC_COMMA | TC_PIPE | TC_IN) #define TC_UNARYOP (TC_UOPPRE | TC_UOPPOST) #define TC_OPERAND (TC_VARIABLE | TC_ARRAY | TC_FUNCTION \ | TC_BUILTIN | TC_GETLINE | TC_SEQSTART | TC_STRING | TC_NUMBER) #define TC_STATEMNT (TC_STATX | TC_WHILE) #define TC_OPTERM (TC_SEMICOL | TC_NEWLINE) /* word tokens, cannot mean something else if not expected */ #define TC_WORD (TC_IN | TC_STATEMNT | TC_ELSE | TC_BUILTIN \ | TC_GETLINE | TC_FUNCDECL | TC_BEGIN | TC_END) /* discard newlines after these */ #define TC_NOTERM (TC_COMMA | TC_GRPSTART | TC_GRPTERM \ | TC_BINOP | TC_OPTERM) /* what can expression begin with */ #define TC_OPSEQ (TC_OPERAND | TC_UOPPRE | TC_REGEXP) /* what can group begin with */ #define TC_GRPSEQ (TC_OPSEQ | TC_OPTERM | TC_STATEMNT | TC_GRPSTART) /* if previous token class is CONCAT1 and next is CONCAT2, concatenation */ /* operator is inserted between them */ #define TC_CONCAT1 (TC_VARIABLE | TC_ARRTERM | TC_SEQTERM \ | TC_STRING | TC_NUMBER | TC_UOPPOST) #define TC_CONCAT2 (TC_OPERAND | TC_UOPPRE) #define OF_RES1 0x010000 #define OF_RES2 0x020000 #define OF_STR1 0x040000 #define OF_STR2 0x080000 #define OF_NUM1 0x100000 #define OF_CHECKED 0x200000 /* combined operator flags */ #define xx 0 #define xV OF_RES2 #define xS (OF_RES2 | OF_STR2) #define Vx OF_RES1 #define VV (OF_RES1 | OF_RES2) #define Nx (OF_RES1 | OF_NUM1) #define NV (OF_RES1 | OF_NUM1 | OF_RES2) #define Sx (OF_RES1 | OF_STR1) #define SV (OF_RES1 | OF_STR1 | OF_RES2) #define SS (OF_RES1 | OF_STR1 | OF_RES2 | OF_STR2) #define OPCLSMASK 0xFF00 #define OPNMASK 0x007F /* operator priority is a highest byte (even: r->l, odd: l->r grouping) * For builtins it has different meaning: n n s3 s2 s1 v3 v2 v1, * n - min. 
number of args, vN - resolve Nth arg to var, sN - resolve to string */ #undef P #undef PRIMASK #undef PRIMASK2 #define P(x) (x << 24) #define PRIMASK 0x7F000000 #define PRIMASK2 0x7E000000 /* Operation classes */ #define SHIFT_TIL_THIS 0x0600 #define RECUR_FROM_THIS 0x1000 enum { OC_DELETE = 0x0100, OC_EXEC = 0x0200, OC_NEWSOURCE = 0x0300, OC_PRINT = 0x0400, OC_PRINTF = 0x0500, OC_WALKINIT = 0x0600, OC_BR = 0x0700, OC_BREAK = 0x0800, OC_CONTINUE = 0x0900, OC_EXIT = 0x0a00, OC_NEXT = 0x0b00, OC_NEXTFILE = 0x0c00, OC_TEST = 0x0d00, OC_WALKNEXT = 0x0e00, OC_BINARY = 0x1000, OC_BUILTIN = 0x1100, OC_COLON = 0x1200, OC_COMMA = 0x1300, OC_COMPARE = 0x1400, OC_CONCAT = 0x1500, OC_FBLTIN = 0x1600, OC_FIELD = 0x1700, OC_FNARG = 0x1800, OC_FUNC = 0x1900, OC_GETLINE = 0x1a00, OC_IN = 0x1b00, OC_LAND = 0x1c00, OC_LOR = 0x1d00, OC_MATCH = 0x1e00, OC_MOVE = 0x1f00, OC_PGETLINE = 0x2000, OC_REGEXP = 0x2100, OC_REPLACE = 0x2200, OC_RETURN = 0x2300, OC_SPRINTF = 0x2400, OC_TERNARY = 0x2500, OC_UNARY = 0x2600, OC_VAR = 0x2700, OC_DONE = 0x2800, ST_IF = 0x3000, ST_DO = 0x3100, ST_FOR = 0x3200, ST_WHILE = 0x3300 }; /* simple builtins */ enum { F_in, F_rn, F_co, F_ex, F_lg, F_si, F_sq, F_sr, F_ti, F_le, F_sy, F_ff, F_cl }; /* builtins */ enum { B_a2, B_ix, B_ma, B_sp, B_ss, B_ti, B_mt, B_lo, B_up, B_ge, B_gs, B_su, B_an, B_co, B_ls, B_or, B_rs, B_xo, }; /* tokens and their corresponding info values */ #define NTC "\377" /* switch to next token class (tc<<1) */ #define NTCC '\377' #define OC_B OC_BUILTIN static const char tokenlist[] ALIGN1 = "\1(" NTC "\1)" NTC "\1/" NTC /* REGEXP */ "\2>>" "\1>" "\1|" NTC /* OUTRDR */ "\2++" "\2--" NTC /* UOPPOST */ "\2++" "\2--" "\1$" NTC /* UOPPRE1 */ "\2==" "\1=" "\2+=" "\2-=" /* BINOPX */ "\2*=" "\2/=" "\2%=" "\2^=" "\1+" "\1-" "\3**=" "\2**" "\1/" "\1%" "\1^" "\1*" "\2!=" "\2>=" "\2<=" "\1>" "\1<" "\2!~" "\1~" "\2&&" "\2||" "\1?" "\1:" NTC "\2in" NTC "\1," NTC "\1|" NTC "\1+" "\1-" "\1!" 
NTC /* UOPPRE2 */ "\1]" NTC "\1{" NTC "\1}" NTC "\1;" NTC "\1\n" NTC "\2if" "\2do" "\3for" "\5break" /* STATX */ "\10continue" "\6delete" "\5print" "\6printf" "\4next" "\10nextfile" "\6return" "\4exit" NTC "\5while" NTC "\4else" NTC "\3and" "\5compl" "\6lshift" "\2or" "\6rshift" "\3xor" "\5close" "\6system" "\6fflush" "\5atan2" /* BUILTIN */ "\3cos" "\3exp" "\3int" "\3log" "\4rand" "\3sin" "\4sqrt" "\5srand" "\6gensub" "\4gsub" "\5index" "\6length" "\5match" "\5split" "\7sprintf" "\3sub" "\6substr" "\7systime" "\10strftime" "\6mktime" "\7tolower" "\7toupper" NTC "\7getline" NTC "\4func" "\10function" NTC "\5BEGIN" NTC "\3END" /* compiler adds trailing "\0" */ ; static const uint32_t tokeninfo[] = { 0, 0, OC_REGEXP, xS|'a', xS|'w', xS|'|', OC_UNARY|xV|P(9)|'p', OC_UNARY|xV|P(9)|'m', OC_UNARY|xV|P(9)|'P', OC_UNARY|xV|P(9)|'M', OC_FIELD|xV|P(5), OC_COMPARE|VV|P(39)|5, OC_MOVE|VV|P(74), OC_REPLACE|NV|P(74)|'+', OC_REPLACE|NV|P(74)|'-', OC_REPLACE|NV|P(74)|'*', OC_REPLACE|NV|P(74)|'/', OC_REPLACE|NV|P(74)|'%', OC_REPLACE|NV|P(74)|'&', OC_BINARY|NV|P(29)|'+', OC_BINARY|NV|P(29)|'-', OC_REPLACE|NV|P(74)|'&', OC_BINARY|NV|P(15)|'&', OC_BINARY|NV|P(25)|'/', OC_BINARY|NV|P(25)|'%', OC_BINARY|NV|P(15)|'&', OC_BINARY|NV|P(25)|'*', OC_COMPARE|VV|P(39)|4, OC_COMPARE|VV|P(39)|3, OC_COMPARE|VV|P(39)|0, OC_COMPARE|VV|P(39)|1, OC_COMPARE|VV|P(39)|2, OC_MATCH|Sx|P(45)|'!', OC_MATCH|Sx|P(45)|'~', OC_LAND|Vx|P(55), OC_LOR|Vx|P(59), OC_TERNARY|Vx|P(64)|'?', OC_COLON|xx|P(67)|':', OC_IN|SV|P(49), /* in */ OC_COMMA|SS|P(80), OC_PGETLINE|SV|P(37), OC_UNARY|xV|P(19)|'+', OC_UNARY|xV|P(19)|'-', OC_UNARY|xV|P(19)|'!', 0, /* ] */ 0, 0, 0, 0, /* \n */ ST_IF, ST_DO, ST_FOR, OC_BREAK, OC_CONTINUE, OC_DELETE|Vx, OC_PRINT, OC_PRINTF, OC_NEXT, OC_NEXTFILE, OC_RETURN|Vx, OC_EXIT|Nx, ST_WHILE, 0, /* else */ OC_B|B_an|P(0x83), OC_B|B_co|P(0x41), OC_B|B_ls|P(0x83), OC_B|B_or|P(0x83), OC_B|B_rs|P(0x83), OC_B|B_xo|P(0x83), OC_FBLTIN|Sx|F_cl, OC_FBLTIN|Sx|F_sy, OC_FBLTIN|Sx|F_ff, OC_B|B_a2|P(0x83), OC_FBLTIN|Nx|F_co, OC_FBLTIN|Nx|F_ex, OC_FBLTIN|Nx|F_in, OC_FBLTIN|Nx|F_lg, OC_FBLTIN|F_rn, OC_FBLTIN|Nx|F_si, OC_FBLTIN|Nx|F_sq, OC_FBLTIN|Nx|F_sr, OC_B|B_ge|P(0xd6), OC_B|B_gs|P(0xb6), OC_B|B_ix|P(0x9b), OC_FBLTIN|Sx|F_le, OC_B|B_ma|P(0x89), OC_B|B_sp|P(0x8b), OC_SPRINTF, OC_B|B_su|P(0xb6), OC_B|B_ss|P(0x8f), OC_FBLTIN|F_ti, OC_B|B_ti|P(0x0b), OC_B|B_mt|P(0x0b), OC_B|B_lo|P(0x49), OC_B|B_up|P(0x49), OC_GETLINE|SV|P(0), 0, 0, 0, 0 /* END */ }; /* internal variable names and their initial values */ /* asterisk marks SPECIAL vars; $ is just no-named Field0 */ enum { CONVFMT, OFMT, FS, OFS, ORS, RS, RT, FILENAME, SUBSEP, F0, ARGIND, ARGC, ARGV, ERRNO, FNR, NR, NF, IGNORECASE, ENVIRON, NUM_INTERNAL_VARS }; static const char vNames[] ALIGN1 = "CONVFMT\0" "OFMT\0" "FS\0*" "OFS\0" "ORS\0" "RS\0*" "RT\0" "FILENAME\0" "SUBSEP\0" "$\0*" "ARGIND\0" "ARGC\0" "ARGV\0" "ERRNO\0" "FNR\0" "NR\0" "NF\0*" "IGNORECASE\0*" "ENVIRON\0" "\0"; static const char vValues[] ALIGN1 = "%.6g\0" "%.6g\0" " \0" " \0" "\n\0" "\n\0" "\0" "\0" "\034\0" "\0" "\377"; /* hash size may grow to these values */ #define FIRST_PRIME 61 static const uint16_t PRIMES[] ALIGN2 = { 251, 1021, 4093, 16381, 65521 }; /* Globals. Split in two parts so that first one is addressed * with (mostly short) negative offsets. * NB: it's unsafe to put members of type "double" * into globals2 (gcc may fail to align them). 
*/ struct globals { double t_double; chain beginseq, mainseq, endseq; chain *seq; node *break_ptr, *continue_ptr; rstream *iF; xhash *vhash, *ahash, *fdhash, *fnhash; const char *g_progname; int g_lineno; int nfields; int maxfields; /* used in fsrealloc() only */ var *Fields; nvblock *g_cb; char *g_pos; char *g_buf; smallint icase; smallint exiting; smallint nextrec; smallint nextfile; smallint is_f0_split; smallint t_rollback; }; struct globals2 { uint32_t t_info; /* often used */ uint32_t t_tclass; char *t_string; int t_lineno; var *intvar[NUM_INTERNAL_VARS]; /* often used */ /* former statics from various functions */ char *split_f0__fstrings; uint32_t next_token__save_tclass; uint32_t next_token__save_info; uint32_t next_token__ltclass; smallint next_token__concat_inserted; smallint next_input_file__files_happen; rstream next_input_file__rsm; var *evaluate__fnargs; unsigned evaluate__seed; regex_t evaluate__sreg; var ptest__v; tsplitter exec_builtin__tspl; /* biggest and least used members go last */ tsplitter fsplitter, rsplitter; }; #define G1 (ptr_to_globals[-1]) #define G (*(struct globals2 *)ptr_to_globals) /* For debug. nm --size-sort awk.o | grep -vi ' [tr] ' */ /*char G1size[sizeof(G1)]; - 0x74 */ /*char Gsize[sizeof(G)]; - 0x1c4 */ /* Trying to keep most of members accessible with short offsets: */ /*char Gofs_seed[offsetof(struct globals2, evaluate__seed)]; - 0x90 */ #define t_double (G1.t_double ) #define beginseq (G1.beginseq ) #define mainseq (G1.mainseq ) #define endseq (G1.endseq ) #define seq (G1.seq ) #define break_ptr (G1.break_ptr ) #define continue_ptr (G1.continue_ptr) #define iF (G1.iF ) #define vhash (G1.vhash ) #define ahash (G1.ahash ) #define fdhash (G1.fdhash ) #define fnhash (G1.fnhash ) #define g_progname (G1.g_progname ) #define g_lineno (G1.g_lineno ) #define nfields (G1.nfields ) #define maxfields (G1.maxfields ) #define Fields (G1.Fields ) #define g_cb (G1.g_cb ) #define g_pos (G1.g_pos ) #define g_buf (G1.g_buf ) #define icase (G1.icase ) #define exiting (G1.exiting ) #define nextrec (G1.nextrec ) #define nextfile (G1.nextfile ) #define is_f0_split (G1.is_f0_split ) #define t_rollback (G1.t_rollback ) #define t_info (G.t_info ) #define t_tclass (G.t_tclass ) #define t_string (G.t_string ) #define t_lineno (G.t_lineno ) #define intvar (G.intvar ) #define fsplitter (G.fsplitter ) #define rsplitter (G.rsplitter ) #define INIT_G() do { \ SET_PTR_TO_GLOBALS((char*)xzalloc(sizeof(G1)+sizeof(G)) + sizeof(G1)); \ G.next_token__ltclass = TC_OPTERM; \ G.evaluate__seed = 1; \ } while (0) /* function prototypes */ static void handle_special(var *); static node *parse_expr(uint32_t); static void chain_group(void); static var *evaluate(node *, var *); static rstream *next_input_file(void); static int fmt_num(char *, int, const char *, double, int); static int awk_exit(int) NORETURN; /* ---- error handling ---- */ static const char EMSG_INTERNAL_ERROR[] ALIGN1 = "Internal error"; static const char EMSG_UNEXP_EOS[] ALIGN1 = "Unexpected end of string"; static const char EMSG_UNEXP_TOKEN[] ALIGN1 = "Unexpected token"; static const char EMSG_DIV_BY_ZERO[] ALIGN1 = "Division by zero"; static const char EMSG_INV_FMT[] ALIGN1 = "Invalid format specifier"; static const char EMSG_TOO_FEW_ARGS[] ALIGN1 = "Too few arguments for builtin"; static const char EMSG_NOT_ARRAY[] ALIGN1 = "Not an array"; static const char EMSG_POSSIBLE_ERROR[] ALIGN1 = "Possible syntax error"; static const char EMSG_UNDEF_FUNC[] ALIGN1 = "Call to undefined function"; static const char EMSG_NO_MATH[] 
ALIGN1 = "Math support is not compiled in"; static void zero_out_var(var *vp) { memset(vp, 0, sizeof(*vp)); } static void syntax_error(const char *message) NORETURN; static void syntax_error(const char *message) { bb_error_msg_and_die("%s:%i: %s", g_progname, g_lineno, message); } /* ---- hash stuff ---- */ static unsigned hashidx(const char *name) { unsigned idx = 0; while (*name) idx = *name++ + (idx << 6) - idx; return idx; } /* create new hash */ static xhash *hash_init(void) { xhash *newhash; newhash = xzalloc(sizeof(*newhash)); newhash->csize = FIRST_PRIME; newhash->items = xzalloc(FIRST_PRIME * sizeof(newhash->items[0])); return newhash; } /* find item in hash, return ptr to data, NULL if not found */ static void *hash_search(xhash *hash, const char *name) { hash_item *hi; hi = hash->items[hashidx(name) % hash->csize]; while (hi) { if (strcmp(hi->name, name) == 0) return &hi->data; hi = hi->next; } return NULL; } /* grow hash if it becomes too big */ static void hash_rebuild(xhash *hash) { unsigned newsize, i, idx; hash_item **newitems, *hi, *thi; if (hash->nprime == ARRAY_SIZE(PRIMES)) return; newsize = PRIMES[hash->nprime++]; newitems = xzalloc(newsize * sizeof(newitems[0])); for (i = 0; i < hash->csize; i++) { hi = hash->items[i]; while (hi) { thi = hi; hi = thi->next; idx = hashidx(thi->name) % newsize; thi->next = newitems[idx]; newitems[idx] = thi; } } free(hash->items); hash->csize = newsize; hash->items = newitems; } /* find item in hash, add it if necessary. Return ptr to data */ static void *hash_find(xhash *hash, const char *name) { hash_item *hi; unsigned idx; int l; hi = hash_search(hash, name); if (!hi) { if (++hash->nel / hash->csize > 10) hash_rebuild(hash); l = strlen(name) + 1; hi = xzalloc(sizeof(*hi) + l); strcpy(hi->name, name); idx = hashidx(name) % hash->csize; hi->next = hash->items[idx]; hash->items[idx] = hi; hash->glen += l; } return &hi->data; } #define findvar(hash, name) ((var*) hash_find((hash), (name))) #define newvar(name) ((var*) hash_find(vhash, (name))) #define newfile(name) ((rstream*)hash_find(fdhash, (name))) #define newfunc(name) ((func*) hash_find(fnhash, (name))) static void hash_remove(xhash *hash, const char *name) { hash_item *hi, **phi; phi = &hash->items[hashidx(name) % hash->csize]; while (*phi) { hi = *phi; if (strcmp(hi->name, name) == 0) { hash->glen -= (strlen(name) + 1); hash->nel--; *phi = hi->next; free(hi); break; } phi = &hi->next; } } /* ------ some useful functions ------ */ static char *skip_spaces(char *p) { while (1) { if (*p == '\\' && p[1] == '\n') { p++; t_lineno++; } else if (*p != ' ' && *p != '\t') { break; } p++; } return p; } /* returns old *s, advances *s past word and terminating NUL */ static char *nextword(char **s) { char *p = *s; while (*(*s)++ != '\0') continue; return p; } static char nextchar(char **s) { char c, *pps; c = *(*s)++; pps = *s; if (c == '\\') c = bb_process_escape_sequence((const char**)s); /* Example awk statement: * s = "abc\"def" * we must treat \" as " */ if (c == '\\' && *s == pps) { /* unrecognized \z? */ c = *(*s); /* yes, fetch z */ if (c) (*s)++; /* advance unless z = NUL */ } return c; } /* TODO: merge with strcpy_and_process_escape_sequences()? 
*/ static void unescape_string_in_place(char *s1) { char *s = s1; while ((*s1 = nextchar(&s)) != '\0') s1++; } static ALWAYS_INLINE int isalnum_(int c) { return (isalnum(c) || c == '_'); } static double my_strtod(char **pp) { char *cp = *pp; if (ENABLE_DESKTOP && cp[0] == '0') { /* Might be hex or octal integer: 0x123abc or 07777 */ char c = (cp[1] | 0x20); if (c == 'x' || isdigit(cp[1])) { unsigned long long ull = strtoull(cp, pp, 0); if (c == 'x') return ull; c = **pp; if (!isdigit(c) && c != '.') return ull; /* else: it may be a floating number. Examples: * 009.123 (*pp points to '9') * 000.123 (*pp points to '.') * fall through to strtod. */ } } return strtod(cp, pp); } /* -------- working with variables (set/get/copy/etc) -------- */ static xhash *iamarray(var *v) { var *a = v; while (a->type & VF_CHILD) a = a->x.parent; if (!(a->type & VF_ARRAY)) { a->type |= VF_ARRAY; a->x.array = hash_init(); } return a->x.array; } static void clear_array(xhash *array) { unsigned i; hash_item *hi, *thi; for (i = 0; i < array->csize; i++) { hi = array->items[i]; while (hi) { thi = hi; hi = hi->next; free(thi->data.v.string); free(thi); } array->items[i] = NULL; } array->glen = array->nel = 0; } /* clear a variable */ static var *clrvar(var *v) { if (!(v->type & VF_FSTR)) free(v->string); v->type &= VF_DONTTOUCH; v->type |= VF_DIRTY; v->string = NULL; return v; } /* assign string value to variable */ static var *setvar_p(var *v, char *value) { clrvar(v); v->string = value; handle_special(v); return v; } /* same as setvar_p but make a copy of string */ static var *setvar_s(var *v, const char *value) { return setvar_p(v, (value && *value) ? xstrdup(value) : NULL); } /* same as setvar_s but sets USER flag */ static var *setvar_u(var *v, const char *value) { v = setvar_s(v, value); v->type |= VF_USER; return v; } /* set array element to user string */ static void setari_u(var *a, int idx, const char *s) { var *v; v = findvar(iamarray(a), itoa(idx)); setvar_u(v, s); } /* assign numeric value to variable */ static var *setvar_i(var *v, double value) { clrvar(v); v->type |= VF_NUMBER; v->number = value; handle_special(v); return v; } static const char *getvar_s(var *v) { /* if v is numeric and has no cached string, convert it to string */ if ((v->type & (VF_NUMBER | VF_CACHED)) == VF_NUMBER) { fmt_num(g_buf, MAXVARFMT, getvar_s(intvar[CONVFMT]), v->number, TRUE); v->string = xstrdup(g_buf); v->type |= VF_CACHED; } return (v->string == NULL) ? "" : v->string; } static double getvar_i(var *v) { char *s; if ((v->type & (VF_NUMBER | VF_CACHED)) == 0) { v->number = 0; s = v->string; if (s && *s) { debug_printf_eval("getvar_i: '%s'->", s); v->number = my_strtod(&s); debug_printf_eval("%f (s:'%s')\n", v->number, s); if (v->type & VF_USER) { s = skip_spaces(s); if (*s != '\0') v->type &= ~VF_USER; } } else { debug_printf_eval("getvar_i: '%s'->zero\n", s); v->type &= ~VF_USER; } v->type |= VF_CACHED; } debug_printf_eval("getvar_i: %f\n", v->number); return v->number; } /* Used for operands of bitwise ops */ static unsigned long getvar_i_int(var *v) { double d = getvar_i(v); /* Casting doubles to longs is undefined for values outside * of target type range. Try to widen it as much as possible */ if (d >= 0) return (unsigned long)d; /* Why? 
Think about d == -4294967295.0 (assuming 32bit longs) */ return - (long) (unsigned long) (-d); } static var *copyvar(var *dest, const var *src) { if (dest != src) { clrvar(dest); dest->type |= (src->type & ~(VF_DONTTOUCH | VF_FSTR)); debug_printf_eval("copyvar: number:%f string:'%s'\n", src->number, src->string); dest->number = src->number; if (src->string) dest->string = xstrdup(src->string); } handle_special(dest); return dest; } static var *incvar(var *v) { return setvar_i(v, getvar_i(v) + 1.0); } /* return true if v is number or numeric string */ static int is_numeric(var *v) { getvar_i(v); return ((v->type ^ VF_DIRTY) & (VF_NUMBER | VF_USER | VF_DIRTY)); } /* return 1 when value of v corresponds to true, 0 otherwise */ static int istrue(var *v) { if (is_numeric(v)) return (v->number != 0); return (v->string && v->string[0]); } /* temporary variables allocator. Last allocated should be first freed */ static var *nvalloc(int n) { nvblock *pb = NULL; var *v, *r; int size; while (g_cb) { pb = g_cb; if ((g_cb->pos - g_cb->nv) + n <= g_cb->size) break; g_cb = g_cb->next; } if (!g_cb) { size = (n <= MINNVBLOCK) ? MINNVBLOCK : n; g_cb = xzalloc(sizeof(nvblock) + size * sizeof(var)); g_cb->size = size; g_cb->pos = g_cb->nv; g_cb->prev = pb; /*g_cb->next = NULL; - xzalloc did it */ if (pb) pb->next = g_cb; } v = r = g_cb->pos; g_cb->pos += n; while (v < g_cb->pos) { v->type = 0; v->string = NULL; v++; } return r; } static void nvfree(var *v) { var *p; if (v < g_cb->nv || v >= g_cb->pos) syntax_error(EMSG_INTERNAL_ERROR); for (p = v; p < g_cb->pos; p++) { if ((p->type & (VF_ARRAY | VF_CHILD)) == VF_ARRAY) { clear_array(iamarray(p)); free(p->x.array->items); free(p->x.array); } if (p->type & VF_WALK) { walker_list *n; walker_list *w = p->x.walker; debug_printf_walker("nvfree: freeing walker @%p\n", &p->x.walker); p->x.walker = NULL; while (w) { n = w->prev; debug_printf_walker(" free(%p)\n", w); free(w); w = n; } } clrvar(p); } g_cb->pos = v; while (g_cb->prev && g_cb->pos == g_cb->nv) { g_cb = g_cb->prev; } } /* ------- awk program text parsing ------- */ /* Parse next token pointed by global pos, place results into global ttt. * If token isn't expected, give away. 
Return token class */ static uint32_t next_token(uint32_t expected) { #define concat_inserted (G.next_token__concat_inserted) #define save_tclass (G.next_token__save_tclass) #define save_info (G.next_token__save_info) /* Initialized to TC_OPTERM: */ #define ltclass (G.next_token__ltclass) char *p, *s; const char *tl; uint32_t tc; const uint32_t *ti; if (t_rollback) { t_rollback = FALSE; } else if (concat_inserted) { concat_inserted = FALSE; t_tclass = save_tclass; t_info = save_info; } else { p = g_pos; readnext: p = skip_spaces(p); g_lineno = t_lineno; if (*p == '#') while (*p != '\n' && *p != '\0') p++; if (*p == '\n') t_lineno++; if (*p == '\0') { tc = TC_EOF; debug_printf_parse("%s: token found: TC_EOF\n", __func__); } else if (*p == '\"') { /* it's a string */ t_string = s = ++p; while (*p != '\"') { char *pp; if (*p == '\0' || *p == '\n') syntax_error(EMSG_UNEXP_EOS); pp = p; *s++ = nextchar(&pp); p = pp; } p++; *s = '\0'; tc = TC_STRING; debug_printf_parse("%s: token found:'%s' TC_STRING\n", __func__, t_string); } else if ((expected & TC_REGEXP) && *p == '/') { /* it's regexp */ t_string = s = ++p; while (*p != '/') { if (*p == '\0' || *p == '\n') syntax_error(EMSG_UNEXP_EOS); *s = *p++; if (*s++ == '\\') { char *pp = p; s[-1] = bb_process_escape_sequence((const char **)&pp); if (*p == '\\') *s++ = '\\'; if (pp == p) *s++ = *p++; else p = pp; } } p++; *s = '\0'; tc = TC_REGEXP; debug_printf_parse("%s: token found:'%s' TC_REGEXP\n", __func__, t_string); } else if (*p == '.' || isdigit(*p)) { /* it's a number */ char *pp = p; t_double = my_strtod(&pp); p = pp; if (*p == '.') syntax_error(EMSG_UNEXP_TOKEN); tc = TC_NUMBER; debug_printf_parse("%s: token found:%f TC_NUMBER\n", __func__, t_double); } else { /* search for something known */ tl = tokenlist; tc = 0x00000001; ti = tokeninfo; while (*tl) { int l = (unsigned char) *tl++; if (l == (unsigned char) NTCC) { tc <<= 1; continue; } /* if token class is expected, * token matches, * and it's not a longer word, */ if ((tc & (expected | TC_WORD | TC_NEWLINE)) && strncmp(p, tl, l) == 0 && !((tc & TC_WORD) && isalnum_(p[l])) ) { /* then this is what we are looking for */ t_info = *ti; debug_printf_parse("%s: token found:'%.*s' t_info:%x\n", __func__, l, p, t_info); p += l; goto token_found; } ti++; tl += l; } /* not a known token */ /* is it a name? (var/array/function) */ if (!isalnum_(*p)) syntax_error(EMSG_UNEXP_TOKEN); /* no */ /* yes */ t_string = --p; while (isalnum_(*++p)) { p[-1] = *p; } p[-1] = '\0'; tc = TC_VARIABLE; /* also consume whitespace between functionname and bracket */ if (!(expected & TC_VARIABLE) || (expected & TC_ARRAY)) p = skip_spaces(p); if (*p == '(') { tc = TC_FUNCTION; debug_printf_parse("%s: token found:'%s' TC_FUNCTION\n", __func__, t_string); } else { if (*p == '[') { p++; tc = TC_ARRAY; debug_printf_parse("%s: token found:'%s' TC_ARRAY\n", __func__, t_string); } else debug_printf_parse("%s: token found:'%s' TC_VARIABLE\n", __func__, t_string); } } token_found: g_pos = p; /* skipping newlines in some cases */ if ((ltclass & TC_NOTERM) && (tc & TC_NEWLINE)) goto readnext; /* insert concatenation operator when needed */ if ((ltclass & TC_CONCAT1) && (tc & TC_CONCAT2) && (expected & TC_BINOP)) { concat_inserted = TRUE; save_tclass = tc; save_info = t_info; tc = TC_BINOP; t_info = OC_CONCAT | SS | P(35); } t_tclass = tc; } ltclass = t_tclass; /* Are we ready for this? */ if (!(ltclass & expected)) syntax_error((ltclass & (TC_NEWLINE | TC_EOF)) ? 
EMSG_UNEXP_EOS : EMSG_UNEXP_TOKEN); return ltclass; #undef concat_inserted #undef save_tclass #undef save_info #undef ltclass } static void rollback_token(void) { t_rollback = TRUE; } static node *new_node(uint32_t info) { node *n; n = xzalloc(sizeof(node)); n->info = info; n->lineno = g_lineno; return n; } static void mk_re_node(const char *s, node *n, regex_t *re) { n->info = OC_REGEXP; n->l.re = re; n->r.ire = re + 1; xregcomp(re, s, REG_EXTENDED); xregcomp(re + 1, s, REG_EXTENDED | REG_ICASE); } static node *condition(void) { next_token(TC_SEQSTART); return parse_expr(TC_SEQTERM); } /* parse expression terminated by given argument, return ptr * to built subtree. Terminator is eaten by parse_expr */ static node *parse_expr(uint32_t iexp) { node sn; node *cn = &sn; node *vn, *glptr; uint32_t tc, xtc; var *v; debug_printf_parse("%s(%x)\n", __func__, iexp); sn.info = PRIMASK; sn.r.n = glptr = NULL; xtc = TC_OPERAND | TC_UOPPRE | TC_REGEXP | iexp; while (!((tc = next_token(xtc)) & iexp)) { if (glptr && (t_info == (OC_COMPARE | VV | P(39) | 2))) { /* input redirection (<) attached to glptr node */ debug_printf_parse("%s: input redir\n", __func__); cn = glptr->l.n = new_node(OC_CONCAT | SS | P(37)); cn->a.n = glptr; xtc = TC_OPERAND | TC_UOPPRE; glptr = NULL; } else if (tc & (TC_BINOP | TC_UOPPOST)) { debug_printf_parse("%s: TC_BINOP | TC_UOPPOST\n", __func__); /* for binary and postfix-unary operators, jump back over * previous operators with higher priority */ vn = cn; while (((t_info & PRIMASK) > (vn->a.n->info & PRIMASK2)) || ((t_info == vn->info) && ((t_info & OPCLSMASK) == OC_COLON)) ) { vn = vn->a.n; } if ((t_info & OPCLSMASK) == OC_TERNARY) t_info += P(6); cn = vn->a.n->r.n = new_node(t_info); cn->a.n = vn->a.n; if (tc & TC_BINOP) { cn->l.n = vn; xtc = TC_OPERAND | TC_UOPPRE | TC_REGEXP; if ((t_info & OPCLSMASK) == OC_PGETLINE) { /* it's a pipe */ next_token(TC_GETLINE); /* give maximum priority to this pipe */ cn->info &= ~PRIMASK; xtc = TC_OPERAND | TC_UOPPRE | TC_BINOP | iexp; } } else { cn->r.n = vn; xtc = TC_OPERAND | TC_UOPPRE | TC_BINOP | iexp; } vn->a.n = cn; } else { debug_printf_parse("%s: other\n", __func__); /* for operands and prefix-unary operators, attach them * to last node */ vn = cn; cn = vn->r.n = new_node(t_info); cn->a.n = vn; xtc = TC_OPERAND | TC_UOPPRE | TC_REGEXP; if (tc & (TC_OPERAND | TC_REGEXP)) { debug_printf_parse("%s: TC_OPERAND | TC_REGEXP\n", __func__); xtc = TC_UOPPRE | TC_UOPPOST | TC_BINOP | TC_OPERAND | iexp; /* one should be very careful with switch on tclass - * only simple tclasses should be used! 
*/ switch (tc) { case TC_VARIABLE: case TC_ARRAY: debug_printf_parse("%s: TC_VARIABLE | TC_ARRAY\n", __func__); cn->info = OC_VAR; v = hash_search(ahash, t_string); if (v != NULL) { cn->info = OC_FNARG; cn->l.aidx = v->x.aidx; } else { cn->l.v = newvar(t_string); } if (tc & TC_ARRAY) { cn->info |= xS; cn->r.n = parse_expr(TC_ARRTERM); } break; case TC_NUMBER: case TC_STRING: debug_printf_parse("%s: TC_NUMBER | TC_STRING\n", __func__); cn->info = OC_VAR; v = cn->l.v = xzalloc(sizeof(var)); if (tc & TC_NUMBER) setvar_i(v, t_double); else setvar_s(v, t_string); break; case TC_REGEXP: debug_printf_parse("%s: TC_REGEXP\n", __func__); mk_re_node(t_string, cn, xzalloc(sizeof(regex_t)*2)); break; case TC_FUNCTION: debug_printf_parse("%s: TC_FUNCTION\n", __func__); cn->info = OC_FUNC; cn->r.f = newfunc(t_string); cn->l.n = condition(); break; case TC_SEQSTART: debug_printf_parse("%s: TC_SEQSTART\n", __func__); cn = vn->r.n = parse_expr(TC_SEQTERM); if (!cn) syntax_error("Empty sequence"); cn->a.n = vn; break; case TC_GETLINE: debug_printf_parse("%s: TC_GETLINE\n", __func__); glptr = cn; xtc = TC_OPERAND | TC_UOPPRE | TC_BINOP | iexp; break; case TC_BUILTIN: debug_printf_parse("%s: TC_BUILTIN\n", __func__); cn->l.n = condition(); break; } } } } debug_printf_parse("%s() returns %p\n", __func__, sn.r.n); return sn.r.n; } /* add node to chain. Return ptr to alloc'd node */ static node *chain_node(uint32_t info) { node *n; if (!seq->first) seq->first = seq->last = new_node(0); if (seq->programname != g_progname) { seq->programname = g_progname; n = chain_node(OC_NEWSOURCE); n->l.new_progname = xstrdup(g_progname); } n = seq->last; n->info = info; seq->last = n->a.n = new_node(OC_DONE); return n; } static void chain_expr(uint32_t info) { node *n; n = chain_node(info); n->l.n = parse_expr(TC_OPTERM | TC_GRPTERM); if (t_tclass & TC_GRPTERM) rollback_token(); } static node *chain_loop(node *nn) { node *n, *n2, *save_brk, *save_cont; save_brk = break_ptr; save_cont = continue_ptr; n = chain_node(OC_BR | Vx); continue_ptr = new_node(OC_EXEC); break_ptr = new_node(OC_EXEC); chain_group(); n2 = chain_node(OC_EXEC | Vx); n2->l.n = nn; n2->a.n = n; continue_ptr->a.n = n2; break_ptr->a.n = n->r.n = seq->last; continue_ptr = save_cont; break_ptr = save_brk; return n; } /* parse group and attach it to chain */ static void chain_group(void) { uint32_t c; node *n, *n2, *n3; do { c = next_token(TC_GRPSEQ); } while (c & TC_NEWLINE); if (c & TC_GRPSTART) { debug_printf_parse("%s: TC_GRPSTART\n", __func__); while (next_token(TC_GRPSEQ | TC_GRPTERM) != TC_GRPTERM) { debug_printf_parse("%s: !TC_GRPTERM\n", __func__); if (t_tclass & TC_NEWLINE) continue; rollback_token(); chain_group(); } debug_printf_parse("%s: TC_GRPTERM\n", __func__); } else if (c & (TC_OPSEQ | TC_OPTERM)) { debug_printf_parse("%s: TC_OPSEQ | TC_OPTERM\n", __func__); rollback_token(); chain_expr(OC_EXEC | Vx); } else { /* TC_STATEMNT */ debug_printf_parse("%s: TC_STATEMNT(?)\n", __func__); switch (t_info & OPCLSMASK) { case ST_IF: debug_printf_parse("%s: ST_IF\n", __func__); n = chain_node(OC_BR | Vx); n->l.n = condition(); chain_group(); n2 = chain_node(OC_EXEC); n->r.n = seq->last; if (next_token(TC_GRPSEQ | TC_GRPTERM | TC_ELSE) == TC_ELSE) { chain_group(); n2->a.n = seq->last; } else { rollback_token(); } break; case ST_WHILE: debug_printf_parse("%s: ST_WHILE\n", __func__); n2 = condition(); n = chain_loop(NULL); n->l.n = n2; break; case ST_DO: debug_printf_parse("%s: ST_DO\n", __func__); n2 = chain_node(OC_EXEC); n = chain_loop(NULL); n2->a.n = 
n->a.n; next_token(TC_WHILE); n->l.n = condition(); break; case ST_FOR: debug_printf_parse("%s: ST_FOR\n", __func__); next_token(TC_SEQSTART); n2 = parse_expr(TC_SEMICOL | TC_SEQTERM); if (t_tclass & TC_SEQTERM) { /* for-in */ if ((n2->info & OPCLSMASK) != OC_IN) syntax_error(EMSG_UNEXP_TOKEN); n = chain_node(OC_WALKINIT | VV); n->l.n = n2->l.n; n->r.n = n2->r.n; n = chain_loop(NULL); n->info = OC_WALKNEXT | Vx; n->l.n = n2->l.n; } else { /* for (;;) */ n = chain_node(OC_EXEC | Vx); n->l.n = n2; n2 = parse_expr(TC_SEMICOL); n3 = parse_expr(TC_SEQTERM); n = chain_loop(n3); n->l.n = n2; if (!n2) n->info = OC_EXEC; } break; case OC_PRINT: case OC_PRINTF: debug_printf_parse("%s: OC_PRINT[F]\n", __func__); n = chain_node(t_info); n->l.n = parse_expr(TC_OPTERM | TC_OUTRDR | TC_GRPTERM); if (t_tclass & TC_OUTRDR) { n->info |= t_info; n->r.n = parse_expr(TC_OPTERM | TC_GRPTERM); } if (t_tclass & TC_GRPTERM) rollback_token(); break; case OC_BREAK: debug_printf_parse("%s: OC_BREAK\n", __func__); n = chain_node(OC_EXEC); n->a.n = break_ptr; break; case OC_CONTINUE: debug_printf_parse("%s: OC_CONTINUE\n", __func__); n = chain_node(OC_EXEC); n->a.n = continue_ptr; break; /* delete, next, nextfile, return, exit */ default: debug_printf_parse("%s: default\n", __func__); chain_expr(t_info); } } } static void parse_program(char *p) { uint32_t tclass; node *cn; func *f; var *v; g_pos = p; t_lineno = 1; while ((tclass = next_token(TC_EOF | TC_OPSEQ | TC_GRPSTART | TC_OPTERM | TC_BEGIN | TC_END | TC_FUNCDECL)) != TC_EOF) { if (tclass & TC_OPTERM) { debug_printf_parse("%s: TC_OPTERM\n", __func__); continue; } seq = &mainseq; if (tclass & TC_BEGIN) { debug_printf_parse("%s: TC_BEGIN\n", __func__); seq = &beginseq; chain_group(); } else if (tclass & TC_END) { debug_printf_parse("%s: TC_END\n", __func__); seq = &endseq; chain_group(); } else if (tclass & TC_FUNCDECL) { debug_printf_parse("%s: TC_FUNCDECL\n", __func__); next_token(TC_FUNCTION); g_pos++; f = newfunc(t_string); f->body.first = NULL; f->nargs = 0; while (next_token(TC_VARIABLE | TC_SEQTERM) & TC_VARIABLE) { v = findvar(ahash, t_string); v->x.aidx = f->nargs++; if (next_token(TC_COMMA | TC_SEQTERM) & TC_SEQTERM) break; } seq = &f->body; chain_group(); clear_array(ahash); } else if (tclass & TC_OPSEQ) { debug_printf_parse("%s: TC_OPSEQ\n", __func__); rollback_token(); cn = chain_node(OC_TEST); cn->l.n = parse_expr(TC_OPTERM | TC_EOF | TC_GRPSTART); if (t_tclass & TC_GRPSTART) { debug_printf_parse("%s: TC_GRPSTART\n", __func__); rollback_token(); chain_group(); } else { debug_printf_parse("%s: !TC_GRPSTART\n", __func__); chain_node(OC_PRINT); } cn->r.n = mainseq.last; } else /* if (tclass & TC_GRPSTART) */ { debug_printf_parse("%s: TC_GRPSTART(?)\n", __func__); rollback_token(); chain_group(); } } debug_printf_parse("%s: TC_EOF\n", __func__); } /* -------- program execution part -------- */ static node *mk_splitter(const char *s, tsplitter *spl) { regex_t *re, *ire; node *n; re = &spl->re[0]; ire = &spl->re[1]; n = &spl->n; if ((n->info & OPCLSMASK) == OC_REGEXP) { regfree(re); regfree(ire); // TODO: nuke ire, use re+1? } if (s[0] && s[1]) { /* strlen(s) > 1 */ mk_re_node(s, n, re); } else { n->info = (uint32_t) s[0]; } return n; } /* use node as a regular expression. Supplied with node ptr and regex_t * storage space. Return ptr to regex (if result points to preg, it should * be later regfree'd manually */ static regex_t *as_regex(node *op, regex_t *preg) { int cflags; var *v; const char *s; if ((op->info & OPCLSMASK) == OC_REGEXP) { return icase ? 
op->r.ire : op->l.re; } v = nvalloc(1); s = getvar_s(evaluate(op, v)); cflags = icase ? REG_EXTENDED | REG_ICASE : REG_EXTENDED; /* Testcase where REG_EXTENDED fails (unpaired '{'): * echo Hi | awk 'gsub("@(samp|code|file)\{","");' * gawk 3.1.5 eats this. We revert to ~REG_EXTENDED * (maybe gsub is not supposed to use REG_EXTENDED?). */ if (regcomp(preg, s, cflags)) { cflags &= ~REG_EXTENDED; xregcomp(preg, s, cflags); } nvfree(v); return preg; } /* gradually increasing buffer. * note that we reallocate even if n == old_size, * and thus there is at least one extra allocated byte. */ static char* qrealloc(char *b, int n, int *size) { if (!b || n >= *size) { *size = n + (n>>1) + 80; b = xrealloc(b, *size); } return b; } /* resize field storage space */ static void fsrealloc(int size) { int i; if (size >= maxfields) { i = maxfields; maxfields = size + 16; Fields = xrealloc(Fields, maxfields * sizeof(Fields[0])); for (; i < maxfields; i++) { Fields[i].type = VF_SPECIAL; Fields[i].string = NULL; } } /* if size < nfields, clear extra field variables */ for (i = size; i < nfields; i++) { clrvar(Fields + i); } nfields = size; } static int awk_split(const char *s, node *spl, char **slist) { int l, n; char c[4]; char *s1; regmatch_t pmatch[2]; // TODO: why [2]? [1] is enough... /* in worst case, each char would be a separate field */ *slist = s1 = xzalloc(strlen(s) * 2 + 3); strcpy(s1, s); c[0] = c[1] = (char)spl->info; c[2] = c[3] = '\0'; if (*getvar_s(intvar[RS]) == '\0') c[2] = '\n'; n = 0; if ((spl->info & OPCLSMASK) == OC_REGEXP) { /* regex split */ if (!*s) return n; /* "": zero fields */ n++; /* at least one field will be there */ do { l = strcspn(s, c+2); /* len till next NUL or \n */ if (regexec(icase ? spl->r.ire : spl->l.re, s, 1, pmatch, 0) == 0 && pmatch[0].rm_so <= l ) { l = pmatch[0].rm_so; if (pmatch[0].rm_eo == 0) { l++; pmatch[0].rm_eo++; } n++; /* we saw yet another delimiter */ } else { pmatch[0].rm_eo = l; if (s[l]) pmatch[0].rm_eo++; } memcpy(s1, s, l); /* make sure we remove *all* of the separator chars */ do { s1[l] = '\0'; } while (++l < pmatch[0].rm_eo); nextword(&s1); s += pmatch[0].rm_eo; } while (*s); return n; } if (c[0] == '\0') { /* null split */ while (*s) { *s1++ = *s++; *s1++ = '\0'; n++; } return n; } if (c[0] != ' ') { /* single-character split */ if (icase) { c[0] = toupper(c[0]); c[1] = tolower(c[1]); } if (*s1) n++; while ((s1 = strpbrk(s1, c)) != NULL) { *s1++ = '\0'; n++; } return n; } /* space split */ while (*s) { s = skip_whitespace(s); if (!*s) break; n++; while (*s && !isspace(*s)) *s1++ = *s++; *s1++ = '\0'; } return n; } static void split_f0(void) { /* static char *fstrings; */ #define fstrings (G.split_f0__fstrings) int i, n; char *s; if (is_f0_split) return; is_f0_split = TRUE; free(fstrings); fsrealloc(0); n = awk_split(getvar_s(intvar[F0]), &fsplitter.n, &fstrings); fsrealloc(n); s = fstrings; for (i = 0; i < n; i++) { Fields[i].string = nextword(&s); Fields[i].type |= (VF_FSTR | VF_USER | VF_DIRTY); } /* set NF manually to avoid side effects */ clrvar(intvar[NF]); intvar[NF]->type = VF_NUMBER | VF_SPECIAL; intvar[NF]->number = nfields; #undef fstrings } /* perform additional actions when some internal variables changed */ static void handle_special(var *v) { int n; char *b; const char *sep, *s; int sl, l, len, i, bsize; if (!(v->type & VF_SPECIAL)) return; if (v == intvar[NF]) { n = (int)getvar_i(v); fsrealloc(n); /* recalculate $0 */ sep = getvar_s(intvar[OFS]); sl = strlen(sep); b = NULL; len = 0; for (i = 0; i < n; i++) { s = 
getvar_s(&Fields[i]); l = strlen(s); if (b) { memcpy(b+len, sep, sl); len += sl; } b = qrealloc(b, len+l+sl, &bsize); memcpy(b+len, s, l); len += l; } if (b) b[len] = '\0'; setvar_p(intvar[F0], b); is_f0_split = TRUE; } else if (v == intvar[F0]) { is_f0_split = FALSE; } else if (v == intvar[FS]) { /* * The POSIX-2008 standard says that changing FS should have no effect on the * current input line, but only on the next one. The language is: * * > Before the first reference to a field in the record is evaluated, the record * > shall be split into fields, according to the rules in Regular Expressions, * > using the value of FS that was current at the time the record was read. * * So, split up current line before assignment to FS: */ split_f0(); mk_splitter(getvar_s(v), &fsplitter); } else if (v == intvar[RS]) { mk_splitter(getvar_s(v), &rsplitter); } else if (v == intvar[IGNORECASE]) { icase = istrue(v); } else { /* $n */ n = getvar_i(intvar[NF]); setvar_i(intvar[NF], n > v-Fields ? n : v-Fields+1); /* right here v is invalid. Just to note... */ } } /* step through func/builtin/etc arguments */ static node *nextarg(node **pn) { node *n; n = *pn; if (n && (n->info & OPCLSMASK) == OC_COMMA) { *pn = n->r.n; n = n->l.n; } else { *pn = NULL; } return n; } static void hashwalk_init(var *v, xhash *array) { hash_item *hi; unsigned i; walker_list *w; walker_list *prev_walker; if (v->type & VF_WALK) { prev_walker = v->x.walker; } else { v->type |= VF_WALK; prev_walker = NULL; } debug_printf_walker("hashwalk_init: prev_walker:%p\n", prev_walker); w = v->x.walker = xzalloc(sizeof(*w) + array->glen + 1); /* why + 1? */ debug_printf_walker(" walker@%p=%p\n", &v->x.walker, w); w->cur = w->end = w->wbuf; w->prev = prev_walker; for (i = 0; i < array->csize; i++) { hi = array->items[i]; while (hi) { strcpy(w->end, hi->name); nextword(&w->end); hi = hi->next; } } } static int hashwalk_next(var *v) { walker_list *w = v->x.walker; if (w->cur >= w->end) { walker_list *prev_walker = w->prev; debug_printf_walker("end of iteration, free(walker@%p:%p), prev_walker:%p\n", &v->x.walker, w, prev_walker); free(w); v->x.walker = prev_walker; return FALSE; } setvar_s(v, nextword(&w->cur)); return TRUE; } /* evaluate node, return 1 when result is true, 0 otherwise */ static int ptest(node *pattern) { /* ptest__v is "static": to save stack space? */ return istrue(evaluate(pattern, &G.ptest__v)); } /* read next record from stream rsm into a variable v */ static int awk_getline(rstream *rsm, var *v) { char *b; regmatch_t pmatch[2]; int size, a, p, pp = 0; int fd, so, eo, r, rp; char c, *m, *s; debug_printf_eval("entered %s()\n", __func__); /* we're using our own buffer since we need access to accumulating * characters */ fd = fileno(rsm->F); m = rsm->buffer; a = rsm->adv; p = rsm->pos; size = rsm->size; c = (char) rsplitter.n.info; rp = 0; if (!m) m = qrealloc(m, 256, &size); do { b = m + a; so = eo = p; r = 1; if (p > 0) { if ((rsplitter.n.info & OPCLSMASK) == OC_REGEXP) { if (regexec(icase ? 
rsplitter.n.r.ire : rsplitter.n.l.re, b, 1, pmatch, 0) == 0) { so = pmatch[0].rm_so; eo = pmatch[0].rm_eo; if (b[eo] != '\0') break; } } else if (c != '\0') { s = strchr(b+pp, c); if (!s) s = memchr(b+pp, '\0', p - pp); if (s) { so = eo = s-b; eo++; break; } } else { while (b[rp] == '\n') rp++; s = strstr(b+rp, "\n\n"); if (s) { so = eo = s-b; while (b[eo] == '\n') eo++; if (b[eo] != '\0') break; } } } if (a > 0) { memmove(m, m+a, p+1); b = m; a = 0; } m = qrealloc(m, a+p+128, &size); b = m + a; pp = p; p += safe_read(fd, b+p, size-p-1); if (p < pp) { p = 0; r = 0; setvar_i(intvar[ERRNO], errno); } b[p] = '\0'; } while (p > pp); if (p == 0) { r--; } else { c = b[so]; b[so] = '\0'; setvar_s(v, b+rp); v->type |= VF_USER; b[so] = c; c = b[eo]; b[eo] = '\0'; setvar_s(intvar[RT], b+so); b[eo] = c; } rsm->buffer = m; rsm->adv = a + eo; rsm->pos = p - eo; rsm->size = size; debug_printf_eval("returning from %s(): %d\n", __func__, r); return r; } static int fmt_num(char *b, int size, const char *format, double n, int int_as_int) { int r = 0; char c; const char *s = format; if (int_as_int && n == (int)n) { r = snprintf(b, size, "%d", (int)n); } else { do { c = *s; } while (c && *++s); if (strchr("diouxX", c)) { r = snprintf(b, size, format, (int)n); } else if (strchr("eEfgG", c)) { r = snprintf(b, size, format, n); } else { syntax_error(EMSG_INV_FMT); } } return r; } /* formatted output into an allocated buffer, return ptr to buffer */ static char *awk_printf(node *n) { char *b = NULL; char *fmt, *s, *f; const char *s1; int i, j, incr, bsize; char c, c1; var *v, *arg; v = nvalloc(1); fmt = f = xstrdup(getvar_s(evaluate(nextarg(&n), v))); i = 0; while (*f) { s = f; while (*f && (*f != '%' || *++f == '%')) f++; while (*f && !isalpha(*f)) { if (*f == '*') syntax_error("%*x formats are not supported"); f++; } incr = (f - s) + MAXVARFMT; b = qrealloc(b, incr + i, &bsize); c = *f; if (c != '\0') f++; c1 = *f; *f = '\0'; arg = evaluate(nextarg(&n), v); j = i; if (c == 'c' || !c) { i += sprintf(b+i, s, is_numeric(arg) ? (char)getvar_i(arg) : *getvar_s(arg)); } else if (c == 's') { s1 = getvar_s(arg); b = qrealloc(b, incr+i+strlen(s1), &bsize); i += sprintf(b+i, s, s1); } else { i += fmt_num(b+i, incr, s, getvar_i(arg), FALSE); } *f = c1; /* if there was an error while sprintf, return value is negative */ if (i < j) i = j; } free(fmt); nvfree(v); b = xrealloc(b, i + 1); b[i] = '\0'; return b; } /* Common substitution routine. * Replace (nm)'th substring of (src) that matches (rn) with (repl), * store result into (dest), return number of substitutions. * If nm = 0, replace all matches. * If src or dst is NULL, use $0. * If subexp != 0, enable subexpression matching (\1-\9). */ static int awk_sub(node *rn, const char *repl, int nm, var *src, var *dest, int subexp) { char *resbuf; const char *sp; int match_no, residx, replen, resbufsize; int regexec_flags; regmatch_t pmatch[10]; regex_t sreg, *regex; resbuf = NULL; residx = 0; match_no = 0; regexec_flags = 0; regex = as_regex(rn, &sreg); sp = getvar_s(src ? 
src : intvar[F0]); replen = strlen(repl); while (regexec(regex, sp, 10, pmatch, regexec_flags) == 0) { int so = pmatch[0].rm_so; int eo = pmatch[0].rm_eo; //bb_error_msg("match %u: [%u,%u] '%s'%p", match_no+1, so, eo, sp,sp); resbuf = qrealloc(resbuf, residx + eo + replen, &resbufsize); memcpy(resbuf + residx, sp, eo); residx += eo; if (++match_no >= nm) { const char *s; int nbs; /* replace */ residx -= (eo - so); nbs = 0; for (s = repl; *s; s++) { char c = resbuf[residx++] = *s; if (c == '\\') { nbs++; continue; } if (c == '&' || (subexp && c >= '0' && c <= '9')) { int j; residx -= ((nbs + 3) >> 1); j = 0; if (c != '&') { j = c - '0'; nbs++; } if (nbs % 2) { resbuf[residx++] = c; } else { int n = pmatch[j].rm_eo - pmatch[j].rm_so; resbuf = qrealloc(resbuf, residx + replen + n, &resbufsize); memcpy(resbuf + residx, sp + pmatch[j].rm_so, n); residx += n; } } nbs = 0; } } regexec_flags = REG_NOTBOL; sp += eo; if (match_no == nm) break; if (eo == so) { /* Empty match (e.g. "b*" will match anywhere). * Advance by one char. */ //BUG (bug 1333): //gsub(/\<b*/,"") on "abc" will reach this point, advance to "bc" //... and will erroneously match "b" even though it is NOT at the word start. //we need REG_NOTBOW but it does not exist... //TODO: if EXTRA_COMPAT=y, use GNU matching and re_search, //it should be able to do it correctly. /* Subtle: this is safe only because * qrealloc allocated at least one extra byte */ resbuf[residx] = *sp; if (*sp == '\0') goto ret; sp++; residx++; } } resbuf = qrealloc(resbuf, residx + strlen(sp), &resbufsize); strcpy(resbuf + residx, sp); ret: //bb_error_msg("end sp:'%s'%p", sp,sp); setvar_p(dest ? dest : intvar[F0], resbuf); if (regex == &sreg) regfree(regex); return match_no; } static NOINLINE int do_mktime(const char *ds) { struct tm then; int count; /*memset(&then, 0, sizeof(then)); - not needed */ then.tm_isdst = -1; /* default is unknown */ /* manpage of mktime says these fields are ints, * so we can sscanf stuff directly into them */ count = sscanf(ds, "%u %u %u %u %u %u %d", &then.tm_year, &then.tm_mon, &then.tm_mday, &then.tm_hour, &then.tm_min, &then.tm_sec, &then.tm_isdst); if (count < 6 || (unsigned)then.tm_mon < 1 || (unsigned)then.tm_year < 1900 ) { return -1; } then.tm_mon -= 1; then.tm_year -= 1900; return mktime(&then); } static NOINLINE var *exec_builtin(node *op, var *res) { #define tspl (G.exec_builtin__tspl) var *tv; node *an[4]; var *av[4]; const char *as[4]; regmatch_t pmatch[2]; regex_t sreg, *re; node *spl; uint32_t isr, info; int nargs; time_t tt; int i, l, ll, n; tv = nvalloc(4); isr = info = op->info; op = op->l.n; av[2] = av[3] = NULL; for (i = 0; i < 4 && op; i++) { an[i] = nextarg(&op); if (isr & 0x09000000) av[i] = evaluate(an[i], &tv[i]); if (isr & 0x08000000) as[i] = getvar_s(av[i]); isr >>= 1; } nargs = i; if ((uint32_t)nargs < (info >> 30)) syntax_error(EMSG_TOO_FEW_ARGS); info &= OPNMASK; switch (info) { case B_a2: if (ENABLE_FEATURE_AWK_LIBM) setvar_i(res, atan2(getvar_i(av[0]), getvar_i(av[1]))); else syntax_error(EMSG_NO_MATH); break; case B_sp: { char *s, *s1; if (nargs > 2) { spl = (an[2]->info & OPCLSMASK) == OC_REGEXP ? an[2] : mk_splitter(getvar_s(evaluate(an[2], &tv[2])), &tspl); } else { spl = &fsplitter.n; } n = awk_split(as[0], spl, &s); s1 = s; clear_array(iamarray(av[1])); for (i = 1; i <= n; i++) setari_u(av[1], i, nextword(&s)); free(s1); setvar_i(res, n); break; } case B_ss: { char *s; l = strlen(as[0]); i = getvar_i(av[1]) - 1; if (i > l) i = l; if (i < 0) i = 0; n = (nargs > 2) ? 
getvar_i(av[2]) : l-i; if (n < 0) n = 0; s = xstrndup(as[0]+i, n); setvar_p(res, s); break; } /* Bitwise ops must assume that operands are unsigned. GNU Awk 3.1.5: * awk '{ print or(-1,1) }' gives "4.29497e+09", not "-2.xxxe+09" */ case B_an: setvar_i(res, getvar_i_int(av[0]) & getvar_i_int(av[1])); break; case B_co: setvar_i(res, ~getvar_i_int(av[0])); break; case B_ls: setvar_i(res, getvar_i_int(av[0]) << getvar_i_int(av[1])); break; case B_or: setvar_i(res, getvar_i_int(av[0]) | getvar_i_int(av[1])); break; case B_rs: setvar_i(res, getvar_i_int(av[0]) >> getvar_i_int(av[1])); break; case B_xo: setvar_i(res, getvar_i_int(av[0]) ^ getvar_i_int(av[1])); break; case B_lo: case B_up: { char *s, *s1; s1 = s = xstrdup(as[0]); while (*s1) { //*s1 = (info == B_up) ? toupper(*s1) : tolower(*s1); if ((unsigned char)((*s1 | 0x20) - 'a') <= ('z' - 'a')) *s1 = (info == B_up) ? (*s1 & 0xdf) : (*s1 | 0x20); s1++; } setvar_p(res, s); break; } case B_ix: n = 0; ll = strlen(as[1]); l = strlen(as[0]) - ll; if (ll > 0 && l >= 0) { if (!icase) { char *s = strstr(as[0], as[1]); if (s) n = (s - as[0]) + 1; } else { /* this piece of code is terribly slow and * really should be rewritten */ for (i = 0; i <= l; i++) { if (strncasecmp(as[0]+i, as[1], ll) == 0) { n = i+1; break; } } } } setvar_i(res, n); break; case B_ti: if (nargs > 1) tt = getvar_i(av[1]); else time(&tt); //s = (nargs > 0) ? as[0] : "%a %b %d %H:%M:%S %Z %Y"; i = strftime(g_buf, MAXVARFMT, ((nargs > 0) ? as[0] : "%a %b %d %H:%M:%S %Z %Y"), localtime(&tt)); g_buf[i] = '\0'; setvar_s(res, g_buf); break; case B_mt: setvar_i(res, do_mktime(as[0])); break; case B_ma: re = as_regex(an[1], &sreg); n = regexec(re, as[0], 1, pmatch, 0); if (n == 0) { pmatch[0].rm_so++; pmatch[0].rm_eo++; } else { pmatch[0].rm_so = 0; pmatch[0].rm_eo = -1; } setvar_i(newvar("RSTART"), pmatch[0].rm_so); setvar_i(newvar("RLENGTH"), pmatch[0].rm_eo - pmatch[0].rm_so); setvar_i(res, pmatch[0].rm_so); if (re == &sreg) regfree(re); break; case B_ge: awk_sub(an[0], as[1], getvar_i(av[2]), av[3], res, TRUE); break; case B_gs: setvar_i(res, awk_sub(an[0], as[1], 0, av[2], av[2], FALSE)); break; case B_su: setvar_i(res, awk_sub(an[0], as[1], 1, av[2], av[2], FALSE)); break; } nvfree(tv); return res; #undef tspl } /* * Evaluate node - the heart of the program. Supplied with subtree * and place where to store result. returns ptr to result. 
*/ #define XC(n) ((n) >> 8) static var *evaluate(node *op, var *res) { /* This procedure is recursive so we should count every byte */ #define fnargs (G.evaluate__fnargs) /* seed is initialized to 1 */ #define seed (G.evaluate__seed) #define sreg (G.evaluate__sreg) var *v1; if (!op) return setvar_s(res, NULL); debug_printf_eval("entered %s()\n", __func__); v1 = nvalloc(2); while (op) { struct { var *v; const char *s; } L; struct { var *v; const char *s; } R; static double L_d; uint32_t opinfo; int opn; node *op1; opinfo = op->info; opn = (opinfo & OPNMASK); g_lineno = op->lineno; op1 = op->l.n; debug_printf_eval("opinfo:%08x opn:%08x\n", opinfo, opn); /* execute inevitable things */ if (opinfo & OF_RES1) L.v = evaluate(op1, v1); if (opinfo & OF_RES2) R.v = evaluate(op->r.n, v1+1); if (opinfo & OF_STR1) { L.s = getvar_s(L.v); debug_printf_eval("L.s:'%s'\n", L.s); } if (opinfo & OF_STR2) { R.s = getvar_s(R.v); debug_printf_eval("R.s:'%s'\n", R.s); } if (opinfo & OF_NUM1) { L_d = getvar_i(L.v); debug_printf_eval("L_d:%f\n", L_d); } debug_printf_eval("switch(0x%x)\n", XC(opinfo & OPCLSMASK)); switch (XC(opinfo & OPCLSMASK)) { /* -- iterative node type -- */ /* test pattern */ case XC( OC_TEST ): if ((op1->info & OPCLSMASK) == OC_COMMA) { /* it's range pattern */ if ((opinfo & OF_CHECKED) || ptest(op1->l.n)) { op->info |= OF_CHECKED; if (ptest(op1->r.n)) op->info &= ~OF_CHECKED; op = op->a.n; } else { op = op->r.n; } } else { op = ptest(op1) ? op->a.n : op->r.n; } break; /* just evaluate an expression, also used as unconditional jump */ case XC( OC_EXEC ): break; /* branch, used in if-else and various loops */ case XC( OC_BR ): op = istrue(L.v) ? op->a.n : op->r.n; break; /* initialize for-in loop */ case XC( OC_WALKINIT ): hashwalk_init(L.v, iamarray(R.v)); break; /* get next array item */ case XC( OC_WALKNEXT ): op = hashwalk_next(L.v) ? op->a.n : op->r.n; break; case XC( OC_PRINT ): case XC( OC_PRINTF ): { FILE *F = stdout; if (op->r.n) { rstream *rsm = newfile(R.s); if (!rsm->F) { if (opn == '|') { rsm->F = popen(R.s, "w"); if (rsm->F == NULL) bb_perror_msg_and_die("popen"); rsm->is_pipe = 1; } else { rsm->F = xfopen(R.s, opn=='w' ? "w" : "a"); } } F = rsm->F; } if ((opinfo & OPCLSMASK) == OC_PRINT) { if (!op1) { fputs(getvar_s(intvar[F0]), F); } else { while (op1) { var *v = evaluate(nextarg(&op1), v1); if (v->type & VF_NUMBER) { fmt_num(g_buf, MAXVARFMT, getvar_s(intvar[OFMT]), getvar_i(v), TRUE); fputs(g_buf, F); } else { fputs(getvar_s(v), F); } if (op1) fputs(getvar_s(intvar[OFS]), F); } } fputs(getvar_s(intvar[ORS]), F); } else { /* OC_PRINTF */ char *s = awk_printf(op1); fputs(s, F); free(s); } fflush(F); break; } case XC( OC_DELETE ): { uint32_t info = op1->info & OPCLSMASK; var *v; if (info == OC_VAR) { v = op1->l.v; } else if (info == OC_FNARG) { v = &fnargs[op1->l.aidx]; } else { syntax_error(EMSG_NOT_ARRAY); } if (op1->r.n) { const char *s; clrvar(L.v); s = getvar_s(evaluate(op1->r.n, v1)); hash_remove(iamarray(v), s); } else { clear_array(iamarray(v)); } break; } case XC( OC_NEWSOURCE ): g_progname = op->l.new_progname; break; case XC( OC_RETURN ): copyvar(res, L.v); break; case XC( OC_NEXTFILE ): nextfile = TRUE; case XC( OC_NEXT ): nextrec = TRUE; case XC( OC_DONE ): clrvar(res); break; case XC( OC_EXIT ): awk_exit(L_d); /* -- recursive node type -- */ case XC( OC_VAR ): L.v = op->l.v; if (L.v == intvar[NF]) split_f0(); goto v_cont; case XC( OC_FNARG ): L.v = &fnargs[op->l.aidx]; v_cont: res = op->r.n ? 
findvar(iamarray(L.v), R.s) : L.v; break; case XC( OC_IN ): setvar_i(res, hash_search(iamarray(R.v), L.s) ? 1 : 0); break; case XC( OC_REGEXP ): op1 = op; L.s = getvar_s(intvar[F0]); goto re_cont; case XC( OC_MATCH ): op1 = op->r.n; re_cont: { regex_t *re = as_regex(op1, &sreg); int i = regexec(re, L.s, 0, NULL, 0); if (re == &sreg) regfree(re); setvar_i(res, (i == 0) ^ (opn == '!')); } break; case XC( OC_MOVE ): debug_printf_eval("MOVE\n"); /* if source is a temporary string, jusk relink it to dest */ //Disabled: if R.v is numeric but happens to have cached R.v->string, //then L.v ends up being a string, which is wrong // if (R.v == v1+1 && R.v->string) { // res = setvar_p(L.v, R.v->string); // R.v->string = NULL; // } else { res = copyvar(L.v, R.v); // } break; case XC( OC_TERNARY ): if ((op->r.n->info & OPCLSMASK) != OC_COLON) syntax_error(EMSG_POSSIBLE_ERROR); res = evaluate(istrue(L.v) ? op->r.n->l.n : op->r.n->r.n, res); break; case XC( OC_FUNC ): { var *vbeg, *v; const char *sv_progname; if (!op->r.f->body.first) syntax_error(EMSG_UNDEF_FUNC); vbeg = v = nvalloc(op->r.f->nargs + 1); while (op1) { var *arg = evaluate(nextarg(&op1), v1); copyvar(v, arg); v->type |= VF_CHILD; v->x.parent = arg; if (++v - vbeg >= (int) op->r.f->nargs) break; } v = fnargs; fnargs = vbeg; sv_progname = g_progname; res = evaluate(op->r.f->body.first, res); g_progname = sv_progname; nvfree(fnargs); fnargs = v; break; } case XC( OC_GETLINE ): case XC( OC_PGETLINE ): { rstream *rsm; int i; if (op1) { rsm = newfile(L.s); if (!rsm->F) { if ((opinfo & OPCLSMASK) == OC_PGETLINE) { rsm->F = popen(L.s, "r"); rsm->is_pipe = TRUE; } else { rsm->F = fopen_for_read(L.s); /* not xfopen! */ } } } else { if (!iF) iF = next_input_file(); rsm = iF; } if (!rsm || !rsm->F) { setvar_i(intvar[ERRNO], errno); setvar_i(res, -1); break; } if (!op->r.n) R.v = intvar[F0]; i = awk_getline(rsm, R.v); if (i > 0 && !op1) { incvar(intvar[FNR]); incvar(intvar[NR]); } setvar_i(res, i); break; } /* simple builtins */ case XC( OC_FBLTIN ): { static double R_d; switch (opn) { case F_in: R_d = (int)L_d; break; case F_rn: R_d = (double)rand() / (double)RAND_MAX; break; case F_co: if (ENABLE_FEATURE_AWK_LIBM) { R_d = cos(L_d); break; } case F_ex: if (ENABLE_FEATURE_AWK_LIBM) { R_d = exp(L_d); break; } case F_lg: if (ENABLE_FEATURE_AWK_LIBM) { R_d = log(L_d); break; } case F_si: if (ENABLE_FEATURE_AWK_LIBM) { R_d = sin(L_d); break; } case F_sq: if (ENABLE_FEATURE_AWK_LIBM) { R_d = sqrt(L_d); break; } syntax_error(EMSG_NO_MATH); break; case F_sr: R_d = (double)seed; seed = op1 ? (unsigned)L_d : (unsigned)time(NULL); srand(seed); break; case F_ti: R_d = time(NULL); break; case F_le: if (!op1) L.s = getvar_s(intvar[F0]); R_d = strlen(L.s); break; case F_sy: fflush_all(); R_d = (ENABLE_FEATURE_ALLOW_EXEC && L.s && *L.s) ? (system(L.s) >> 8) : 0; break; case F_ff: if (!op1) { fflush(stdout); } else if (L.s && *L.s) { rstream *rsm = newfile(L.s); fflush(rsm->F); } else { fflush_all(); } break; case F_cl: { rstream *rsm; int err = 0; rsm = (rstream *)hash_search(fdhash, L.s); debug_printf_eval("OC_FBLTIN F_cl rsm:%p\n", rsm); if (rsm) { debug_printf_eval("OC_FBLTIN F_cl " "rsm->is_pipe:%d, ->F:%p\n", rsm->is_pipe, rsm->F); /* Can be NULL if open failed. Example: * getline line <"doesnt_exist"; * close("doesnt_exist"); <--- here rsm->F is NULL */ if (rsm->F) err = rsm->is_pipe ? 
pclose(rsm->F) : fclose(rsm->F); free(rsm->buffer); hash_remove(fdhash, L.s); } if (err) setvar_i(intvar[ERRNO], errno); R_d = (double)err; break; } } /* switch */ setvar_i(res, R_d); break; } case XC( OC_BUILTIN ): res = exec_builtin(op, res); break; case XC( OC_SPRINTF ): setvar_p(res, awk_printf(op1)); break; case XC( OC_UNARY ): { double Ld, R_d; Ld = R_d = getvar_i(R.v); switch (opn) { case 'P': Ld = ++R_d; goto r_op_change; case 'p': R_d++; goto r_op_change; case 'M': Ld = --R_d; goto r_op_change; case 'm': R_d--; r_op_change: setvar_i(R.v, R_d); break; case '!': Ld = !istrue(R.v); break; case '-': Ld = -R_d; break; } setvar_i(res, Ld); break; } case XC( OC_FIELD ): { int i = (int)getvar_i(R.v); if (i == 0) { res = intvar[F0]; } else { split_f0(); if (i > nfields) fsrealloc(i); res = &Fields[i - 1]; } break; } /* concatenation (" ") and index joining (",") */ case XC( OC_CONCAT ): case XC( OC_COMMA ): { const char *sep = ""; if ((opinfo & OPCLSMASK) == OC_COMMA) sep = getvar_s(intvar[SUBSEP]); setvar_p(res, xasprintf("%s%s%s", L.s, sep, R.s)); break; } case XC( OC_LAND ): setvar_i(res, istrue(L.v) ? ptest(op->r.n) : 0); break; case XC( OC_LOR ): setvar_i(res, istrue(L.v) ? 1 : ptest(op->r.n)); break; case XC( OC_BINARY ): case XC( OC_REPLACE ): { double R_d = getvar_i(R.v); debug_printf_eval("BINARY/REPLACE: R_d:%f opn:%c\n", R_d, opn); switch (opn) { case '+': L_d += R_d; break; case '-': L_d -= R_d; break; case '*': L_d *= R_d; break; case '/': if (R_d == 0) syntax_error(EMSG_DIV_BY_ZERO); L_d /= R_d; break; case '&': if (ENABLE_FEATURE_AWK_LIBM) L_d = pow(L_d, R_d); else syntax_error(EMSG_NO_MATH); break; case '%': if (R_d == 0) syntax_error(EMSG_DIV_BY_ZERO); L_d -= (int)(L_d / R_d) * R_d; break; } debug_printf_eval("BINARY/REPLACE result:%f\n", L_d); res = setvar_i(((opinfo & OPCLSMASK) == OC_BINARY) ? res : L.v, L_d); break; } case XC( OC_COMPARE ): { static int i; double Ld; if (is_numeric(L.v) && is_numeric(R.v)) { Ld = getvar_i(L.v) - getvar_i(R.v); } else { const char *l = getvar_s(L.v); const char *r = getvar_s(R.v); Ld = icase ? strcasecmp(l, r) : strcmp(l, r); } switch (opn & 0xfe) { case 0: i = (Ld > 0); break; case 2: i = (Ld >= 0); break; case 4: i = (Ld == 0); break; } setvar_i(res, (i == 0) ^ (opn & 1)); break; } default: syntax_error(EMSG_POSSIBLE_ERROR); } if ((opinfo & OPCLSMASK) <= SHIFT_TIL_THIS) op = op->a.n; if ((opinfo & OPCLSMASK) >= RECUR_FROM_THIS) break; if (nextrec) break; } /* while (op) */ nvfree(v1); debug_printf_eval("returning from %s(): %p\n", __func__, res); return res; #undef fnargs #undef seed #undef sreg } /* -------- main & co. 
-------- */ static int awk_exit(int r) { var tv; unsigned i; hash_item *hi; zero_out_var(&tv); if (!exiting) { exiting = TRUE; nextrec = FALSE; evaluate(endseq.first, &tv); } /* waiting for children */ for (i = 0; i < fdhash->csize; i++) { hi = fdhash->items[i]; while (hi) { if (hi->data.rs.F && hi->data.rs.is_pipe) pclose(hi->data.rs.F); hi = hi->next; } } exit(r); } /* if expr looks like "var=value", perform assignment and return 1, * otherwise return 0 */ static int is_assignment(const char *expr) { char *exprc, *val; if (!isalnum_(*expr) || (val = strchr(expr, '=')) == NULL) { return FALSE; } exprc = xstrdup(expr); val = exprc + (val - expr); *val++ = '\0'; unescape_string_in_place(val); setvar_u(newvar(exprc), val); free(exprc); return TRUE; } /* switch to next input file */ static rstream *next_input_file(void) { #define rsm (G.next_input_file__rsm) #define files_happen (G.next_input_file__files_happen) FILE *F; const char *fname, *ind; if (rsm.F) fclose(rsm.F); rsm.F = NULL; rsm.pos = rsm.adv = 0; for (;;) { if (getvar_i(intvar[ARGIND])+1 >= getvar_i(intvar[ARGC])) { if (files_happen) return NULL; fname = "-"; F = stdin; break; } ind = getvar_s(incvar(intvar[ARGIND])); fname = getvar_s(findvar(iamarray(intvar[ARGV]), ind)); if (fname && *fname && !is_assignment(fname)) { F = xfopen_stdin(fname); break; } } files_happen = TRUE; setvar_s(intvar[FILENAME], fname); rsm.F = F; return &rsm; #undef rsm #undef files_happen } int awk_main(int argc, char **argv) MAIN_EXTERNALLY_VISIBLE; int awk_main(int argc, char **argv) { unsigned opt; char *opt_F; llist_t *list_v = NULL; llist_t *list_f = NULL; int i, j; var *v; var tv; char **envp; char *vnames = (char *)vNames; /* cheat */ char *vvalues = (char *)vValues; INIT_G(); /* Undo busybox.c, or else strtod may eat ','! This breaks parsing: * $1,$2 == '$1,' '$2', NOT '$1' ',' '$2' */ if (ENABLE_LOCALE_SUPPORT) setlocale(LC_NUMERIC, "C"); zero_out_var(&tv); /* allocate global buffer */ g_buf = xmalloc(MAXVARFMT + 1); vhash = hash_init(); ahash = hash_init(); fdhash = hash_init(); fnhash = hash_init(); /* initialize variables */ for (i = 0; *vnames; i++) { intvar[i] = v = newvar(nextword(&vnames)); if (*vvalues != '\377') setvar_s(v, nextword(&vvalues)); else setvar_i(v, 0); if (*vnames == '*') { v->type |= VF_SPECIAL; vnames++; } } handle_special(intvar[FS]); handle_special(intvar[RS]); newfile("/dev/stdin")->F = stdin; newfile("/dev/stdout")->F = stdout; newfile("/dev/stderr")->F = stderr; /* Huh, people report that sometimes environ is NULL. Oh well. 
*/ if (environ) for (envp = environ; *envp; envp++) { /* environ is writable, thus we don't strdup it needlessly */ char *s = *envp; char *s1 = strchr(s, '='); if (s1) { *s1 = '\0'; /* Both findvar and setvar_u take const char* * as 2nd arg -> environment is not trashed */ setvar_u(findvar(iamarray(intvar[ENVIRON]), s), s1 + 1); *s1 = '='; } } opt_complementary = "v::f::"; /* -v and -f can occur multiple times */ opt = getopt32(argv, "F:v:f:W:", &opt_F, &list_v, &list_f, NULL); argv += optind; argc -= optind; if (opt & 0x1) { /* -F */ unescape_string_in_place(opt_F); setvar_s(intvar[FS], opt_F); } while (list_v) { /* -v */ if (!is_assignment(llist_pop(&list_v))) bb_show_usage(); } if (list_f) { /* -f */ do { char *s = NULL; FILE *from_file; g_progname = llist_pop(&list_f); from_file = xfopen_stdin(g_progname); /* one byte is reserved for some trick in next_token */ for (i = j = 1; j > 0; i += j) { s = xrealloc(s, i + 4096); j = fread(s + i, 1, 4094, from_file); } s[i] = '\0'; fclose(from_file); parse_program(s + 1); free(s); } while (list_f); argc++; } else { // no -f: take program from 1st parameter if (!argc) bb_show_usage(); g_progname = "cmd. line"; parse_program(*argv++); } if (opt & 0x8) // -W bb_error_msg("warning: option -W is ignored"); /* fill in ARGV array */ setvar_i(intvar[ARGC], argc); setari_u(intvar[ARGV], 0, "awk"); i = 0; while (*argv) setari_u(intvar[ARGV], ++i, *argv++); evaluate(beginseq.first, &tv); if (!mainseq.first && !endseq.first) awk_exit(EXIT_SUCCESS); /* input file could already be opened in BEGIN block */ if (!iF) iF = next_input_file(); /* passing through input files */ while (iF) { nextfile = FALSE; setvar_i(intvar[FNR], 0); while ((i = awk_getline(iF, intvar[F0])) > 0) { nextrec = FALSE; incvar(intvar[NR]); incvar(intvar[FNR]); evaluate(mainseq.first, &tv); if (nextfile) break; } if (i < 0) syntax_error(strerror(errno)); iF = next_input_file(); } awk_exit(EXIT_SUCCESS); /*return 0;*/ }
MWisBest/external_busybox
editors/awk.c
C
gpl-2.0
74,466
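
A minimal standalone sketch of the field-splitting idiom used by awk_split() in the awk.c record above: fields are packed into one buffer as NUL-separated words and walked pointer-by-pointer, the same layout its nextword() helper consumes. The names below (space_split, buf) are illustrative, not busybox's.

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy each whitespace-delimited field of s into out, NUL-terminating
 * every field in place; return the field count. */
static int space_split(const char *s, char *out)
{
    int n = 0;
    while (*s) {
        while (*s && isspace((unsigned char)*s))   /* skip separators */
            s++;
        if (!*s)
            break;
        n++;
        while (*s && !isspace((unsigned char)*s))  /* copy one field */
            *out++ = *s++;
        *out++ = '\0';
    }
    return n;
}

int main(void)
{
    const char *line = "  one   two three ";
    char *buf = malloc(strlen(line) * 2 + 3);  /* worst case, as in awk.c */
    int i, n = space_split(line, buf);
    char *p = buf;
    for (i = 1; i <= n; i++) {
        printf("$%d = \"%s\"\n", i, p);
        p += strlen(p) + 1;                    /* step over the field's NUL */
    }
    free(buf);
    return 0;
}

Packing fields this way costs one allocation per record instead of one per field, which is why the real code sizes the buffer up front for the worst case of every character being its own field.
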
/* claed8.f -- translated by f2c (version 20061008). You must link the resulting object file with libf2c: on Microsoft Windows system, link with libf2c.lib; on Linux or Unix systems, link with .../path/to/libf2c.a -lm or, if you install libf2c.a in a standard place, with -lf2c -lm -- in that order, at the end of the command line, as in cc *.o -lf2c -lm Source for libf2c is in /netlib/f2c/libf2c.zip, e.g., http://www.netlib.org/f2c/libf2c.zip */ #include "f2c.h" #include "blaswrap.h" /* Table of constant values */ static real c_b3 = -1.f; static integer c__1 = 1; /* Subroutine */ int claed8_(integer *k, integer *n, integer *qsiz, complex * q, integer *ldq, real *d__, real *rho, integer *cutpnt, real *z__, real *dlamda, complex *q2, integer *ldq2, real *w, integer *indxp, integer *indx, integer *indxq, integer *perm, integer *givptr, integer *givcol, real *givnum, integer *info) { /* System generated locals */ integer q_dim1, q_offset, q2_dim1, q2_offset, i__1; real r__1; /* Builtin functions */ double sqrt(doublereal); /* Local variables */ real c__; integer i__, j; real s, t; integer k2, n1, n2, jp, n1p1; real eps, tau, tol; integer jlam, imax, jmax; extern /* Subroutine */ int sscal_(integer *, real *, real *, integer *), ccopy_(integer *, complex *, integer *, complex *, integer *), csrot_(integer *, complex *, integer *, complex *, integer *, real *, real *), scopy_(integer *, real *, integer *, real *, integer *); extern doublereal slapy2_(real *, real *), slamch_(char *); extern /* Subroutine */ int clacpy_(char *, integer *, integer *, complex *, integer *, complex *, integer *), xerbla_(char *, integer *); extern integer isamax_(integer *, real *, integer *); extern /* Subroutine */ int slamrg_(integer *, integer *, real *, integer *, integer *, integer *); /* -- LAPACK routine (version 3.2) -- */ /* Univ. of Tennessee, Univ. of California Berkeley and NAG Ltd.. */ /* November 2006 */ /* .. Scalar Arguments .. */ /* .. */ /* .. Array Arguments .. */ /* .. */ /* Purpose */ /* ======= */ /* CLAED8 merges the two sets of eigenvalues together into a single */ /* sorted set. Then it tries to deflate the size of the problem. */ /* There are two ways in which deflation can occur: when two or more */ /* eigenvalues are close together or if there is a tiny element in the */ /* Z vector. For each such occurrence the order of the related secular */ /* equation problem is reduced by one. */ /* Arguments */ /* ========= */ /* K (output) INTEGER */ /* Contains the number of non-deflated eigenvalues. */ /* This is the order of the related secular equation. */ /* N (input) INTEGER */ /* The dimension of the symmetric tridiagonal matrix. N >= 0. */ /* QSIZ (input) INTEGER */ /* The dimension of the unitary matrix used to reduce */ /* the dense or band matrix to tridiagonal form. */ /* QSIZ >= N if ICOMPQ = 1. */ /* Q (input/output) COMPLEX array, dimension (LDQ,N) */ /* On entry, Q contains the eigenvectors of the partially solved */ /* system which has been previously updated in matrix */ /* multiplies with other partially solved eigensystems. */ /* On exit, Q contains the trailing (N-K) updated eigenvectors */ /* (those which were deflated) in its last N-K columns. */ /* LDQ (input) INTEGER */ /* The leading dimension of the array Q. LDQ >= max( 1, N ). */ /* D (input/output) REAL array, dimension (N) */ /* On entry, D contains the eigenvalues of the two submatrices to */ /* be combined. 
On exit, D contains the trailing (N-K) updated */ /* eigenvalues (those which were deflated) sorted into increasing */ /* order. */ /* RHO (input/output) REAL */ /* Contains the off diagonal element associated with the rank-1 */ /* cut which originally split the two submatrices which are now */ /* being recombined. RHO is modified during the computation to */ /* the value required by SLAED3. */ /* CUTPNT (input) INTEGER */ /* Contains the location of the last eigenvalue in the leading */ /* sub-matrix. MIN(1,N) <= CUTPNT <= N. */ /* Z (input) REAL array, dimension (N) */ /* On input this vector contains the updating vector (the last */ /* row of the first sub-eigenvector matrix and the first row of */ /* the second sub-eigenvector matrix). The contents of Z are */ /* destroyed during the updating process. */ /* DLAMDA (output) REAL array, dimension (N) */ /* Contains a copy of the first K eigenvalues which will be used */ /* by SLAED3 to form the secular equation. */ /* Q2 (output) COMPLEX array, dimension (LDQ2,N) */ /* If ICOMPQ = 0, Q2 is not referenced. Otherwise, */ /* Contains a copy of the first K eigenvectors which will be used */ /* by SLAED7 in a matrix multiply (SGEMM) to update the new */ /* eigenvectors. */ /* LDQ2 (input) INTEGER */ /* The leading dimension of the array Q2. LDQ2 >= max( 1, N ). */ /* W (output) REAL array, dimension (N) */ /* This will hold the first k values of the final */ /* deflation-altered z-vector and will be passed to SLAED3. */ /* INDXP (workspace) INTEGER array, dimension (N) */ /* This will contain the permutation used to place deflated */ /* values of D at the end of the array. On output INDXP(1:K) */ /* points to the nondeflated D-values and INDXP(K+1:N) */ /* points to the deflated eigenvalues. */ /* INDX (workspace) INTEGER array, dimension (N) */ /* This will contain the permutation used to sort the contents of */ /* D into ascending order. */ /* INDXQ (input) INTEGER array, dimension (N) */ /* This contains the permutation which separately sorts the two */ /* sub-problems in D into ascending order. Note that elements in */ /* the second half of this permutation must first have CUTPNT */ /* added to their values in order to be accurate. */ /* PERM (output) INTEGER array, dimension (N) */ /* Contains the permutations (from deflation and sorting) to be */ /* applied to each eigenblock. */ /* GIVPTR (output) INTEGER */ /* Contains the number of Givens rotations which took place in */ /* this subproblem. */ /* GIVCOL (output) INTEGER array, dimension (2, N) */ /* Each pair of numbers indicates a pair of columns to take place */ /* in a Givens rotation. */ /* GIVNUM (output) REAL array, dimension (2, N) */ /* Each number indicates the S value to be used in the */ /* corresponding Givens rotation. */ /* INFO (output) INTEGER */ /* = 0: successful exit. */ /* < 0: if INFO = -i, the i-th argument had an illegal value. */ /* ===================================================================== */ /* .. Parameters .. */ /* .. */ /* .. Local Scalars .. */ /* .. */ /* .. External Functions .. */ /* .. */ /* .. External Subroutines .. */ /* .. */ /* .. Intrinsic Functions .. */ /* .. */ /* .. Executable Statements .. */ /* Test the input parameters. 
*/ /* Parameter adjustments */ q_dim1 = *ldq; q_offset = 1 + q_dim1; q -= q_offset; --d__; --z__; --dlamda; q2_dim1 = *ldq2; q2_offset = 1 + q2_dim1; q2 -= q2_offset; --w; --indxp; --indx; --indxq; --perm; givcol -= 3; givnum -= 3; /* Function Body */ *info = 0; if (*n < 0) { *info = -2; } else if (*qsiz < *n) { *info = -3; } else if (*ldq < max(1,*n)) { *info = -5; } else if (*cutpnt < min(1,*n) || *cutpnt > *n) { *info = -8; } else if (*ldq2 < max(1,*n)) { *info = -12; } if (*info != 0) { i__1 = -(*info); xerbla_("CLAED8", &i__1); return 0; } /* Quick return if possible */ if (*n == 0) { return 0; } n1 = *cutpnt; n2 = *n - n1; n1p1 = n1 + 1; if (*rho < 0.f) { sscal_(&n2, &c_b3, &z__[n1p1], &c__1); } /* Normalize z so that norm(z) = 1 */ t = 1.f / sqrt(2.f); i__1 = *n; for (j = 1; j <= i__1; ++j) { indx[j] = j; /* L10: */ } sscal_(n, &t, &z__[1], &c__1); *rho = (r__1 = *rho * 2.f, dabs(r__1)); /* Sort the eigenvalues into increasing order */ i__1 = *n; for (i__ = *cutpnt + 1; i__ <= i__1; ++i__) { indxq[i__] += *cutpnt; /* L20: */ } i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { dlamda[i__] = d__[indxq[i__]]; w[i__] = z__[indxq[i__]]; /* L30: */ } i__ = 1; j = *cutpnt + 1; slamrg_(&n1, &n2, &dlamda[1], &c__1, &c__1, &indx[1]); i__1 = *n; for (i__ = 1; i__ <= i__1; ++i__) { d__[i__] = dlamda[indx[i__]]; z__[i__] = w[indx[i__]]; /* L40: */ } /* Calculate the allowable deflation tolerance */ imax = isamax_(n, &z__[1], &c__1); jmax = isamax_(n, &d__[1], &c__1); eps = slamch_("Epsilon"); tol = eps * 8.f * (r__1 = d__[jmax], dabs(r__1)); /* If the rank-1 modifier is small enough, no more needs to be done */ /* -- except to reorganize Q so that its columns correspond with the */ /* elements in D. */ if (*rho * (r__1 = z__[imax], dabs(r__1)) <= tol) { *k = 0; i__1 = *n; for (j = 1; j <= i__1; ++j) { perm[j] = indxq[indx[j]]; ccopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1 + 1] , &c__1); /* L50: */ } clacpy_("A", qsiz, n, &q2[q2_dim1 + 1], ldq2, &q[q_dim1 + 1], ldq); return 0; } /* If there are multiple eigenvalues then the problem deflates. Here */ /* the number of equal eigenvalues are found. As each equal */ /* eigenvalue is found, an elementary reflector is computed to rotate */ /* the corresponding eigensubspace so that the corresponding */ /* components of Z are zero in this new basis. */ *k = 0; *givptr = 0; k2 = *n + 1; i__1 = *n; for (j = 1; j <= i__1; ++j) { if (*rho * (r__1 = z__[j], dabs(r__1)) <= tol) { /* Deflate due to small z component. */ --k2; indxp[k2] = j; if (j == *n) { goto L100; } } else { jlam = j; goto L70; } /* L60: */ } L70: ++j; if (j > *n) { goto L90; } if (*rho * (r__1 = z__[j], dabs(r__1)) <= tol) { /* Deflate due to small z component. */ --k2; indxp[k2] = j; } else { /* Check if eigenvalues are close enough to allow deflation. */ s = z__[jlam]; c__ = z__[j]; /* Find sqrt(a**2+b**2) without overflow or */ /* destructive underflow. */ tau = slapy2_(&c__, &s); t = d__[j] - d__[jlam]; c__ /= tau; s = -s / tau; if ((r__1 = t * c__ * s, dabs(r__1)) <= tol) { /* Deflation is possible. 
*/ z__[j] = tau; z__[jlam] = 0.f; /* Record the appropriate Givens rotation */ ++(*givptr); givcol[(*givptr << 1) + 1] = indxq[indx[jlam]]; givcol[(*givptr << 1) + 2] = indxq[indx[j]]; givnum[(*givptr << 1) + 1] = c__; givnum[(*givptr << 1) + 2] = s; csrot_(qsiz, &q[indxq[indx[jlam]] * q_dim1 + 1], &c__1, &q[indxq[ indx[j]] * q_dim1 + 1], &c__1, &c__, &s); t = d__[jlam] * c__ * c__ + d__[j] * s * s; d__[j] = d__[jlam] * s * s + d__[j] * c__ * c__; d__[jlam] = t; --k2; i__ = 1; L80: if (k2 + i__ <= *n) { if (d__[jlam] < d__[indxp[k2 + i__]]) { indxp[k2 + i__ - 1] = indxp[k2 + i__]; indxp[k2 + i__] = jlam; ++i__; goto L80; } else { indxp[k2 + i__ - 1] = jlam; } } else { indxp[k2 + i__ - 1] = jlam; } jlam = j; } else { ++(*k); w[*k] = z__[jlam]; dlamda[*k] = d__[jlam]; indxp[*k] = jlam; jlam = j; } } goto L70; L90: /* Record the last eigenvalue. */ ++(*k); w[*k] = z__[jlam]; dlamda[*k] = d__[jlam]; indxp[*k] = jlam; L100: /* Sort the eigenvalues and corresponding eigenvectors into DLAMDA */ /* and Q2 respectively. The eigenvalues/vectors which were not */ /* deflated go into the first K slots of DLAMDA and Q2 respectively, */ /* while those which were deflated go into the last N - K slots. */ i__1 = *n; for (j = 1; j <= i__1; ++j) { jp = indxp[j]; dlamda[j] = d__[jp]; perm[j] = indxq[indx[jp]]; ccopy_(qsiz, &q[perm[j] * q_dim1 + 1], &c__1, &q2[j * q2_dim1 + 1], & c__1); /* L110: */ } /* The deflated eigenvalues and their corresponding vectors go back */ /* into the last N - K slots of D and Q respectively. */ if (*k < *n) { i__1 = *n - *k; scopy_(&i__1, &dlamda[*k + 1], &c__1, &d__[*k + 1], &c__1); i__1 = *n - *k; clacpy_("A", qsiz, &i__1, &q2[(*k + 1) * q2_dim1 + 1], ldq2, &q[(*k + 1) * q_dim1 + 1], ldq); } return 0; /* End of CLAED8 */ } /* claed8_ */
CreativeCimmons/ORB-SLAM-Android-app
slam_ext/external/clapack-3.2.1-CMAKE/SRC/claed8.c
C
gpl-2.0
13,065
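
The deflation test in the claed8.c record above leans on slapy2_(), which its own comment describes as computing sqrt(a**2+b**2) "without overflow or destructive underflow". A hedged sketch of that technique — factor out the larger magnitude so the squared term stays bounded — follows; py2 is an illustrative stand-in, not LAPACK's source.

#include <math.h>
#include <stdio.h>

/* sqrt(a*a + b*b) without overflowing on large inputs: with w the larger
 * and z the smaller magnitude, compute w * sqrt(1 + (z/w)^2), where
 * z/w <= 1, so its square cannot overflow. */
static float py2(float a, float b)
{
    float fa = fabsf(a), fb = fabsf(b);
    float w = fa > fb ? fa : fb;
    float z = fa > fb ? fb : fa;
    if (z == 0.0f)
        return w;              /* also avoids 0/0 when both are zero */
    float r = z / w;
    return w * sqrtf(1.0f + r * r);
}

int main(void)
{
    /* a naive sqrtf(a*a + b*b) overflows float here; this stays finite */
    printf("%g\n", py2(3e20f, 4e20f));   /* prints ~5e+20 */
    return 0;
}

The same property protects the c__ /= tau and s = -s / tau normalizations in the Givens-rotation setup: tau is never smaller than the larger of its two inputs, so both quotients stay at most 1 in magnitude.
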
/* * Sun RPC is a product of Sun Microsystems, Inc. and is provided for * unrestricted use provided that this legend is included on all tape * media and as a part of the software program in whole or part. Users * may copy or modify Sun RPC without charge, but are not authorized * to license or distribute it to anyone else except as part of a product or * program developed by the user. * * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE. * * Sun RPC is provided with no support and without any obligation on the * part of Sun Microsystems, Inc. to assist in its use, correction, * modification or enhancement. * * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC * OR ANY PART THEREOF. * * In no event will Sun Microsystems, Inc. be liable for any lost revenue * or profits or other special, indirect and consequential damages, even if * Sun has been advised of the possibility of such damages. * * Sun Microsystems, Inc. * 2550 Garcia Avenue * Mountain View, California 94043 */ #if defined(LIBC_SCCS) && !defined(lint) /*static char *sccsid = "from: @(#)xdr_rec.c 1.21 87/08/11 Copyr 1984 Sun Micro";*/ /*static char *sccsid = "from: @(#)xdr_rec.c 2.2 88/08/01 4.0 RPCSRC";*/ static char *rcsid = "$FreeBSD: src/lib/libc/xdr/xdr_rec.c,v 1.12 2000/01/19 06:12:32 wpaul Exp $"; #endif /* * xdr_rec.c, Implements TCP/IP based XDR streams with a "record marking" * layer above tcp (for rpc's use). * * Copyright (C) 1984, Sun Microsystems, Inc. * * These routines interface XDRSTREAMS to a tcp/ip connection. * There is a record marking layer between the xdr stream * and the tcp transport level. A record is composed on one or more * record fragments. A record fragment is a thirty-two bit header followed * by n bytes of data, where n is contained in the header. The header * is represented as a htonl(u_long). Thegh order bit encodes * whether or not the fragment is the last fragment of the record * (1 => fragment is last, 0 => more fragments to follow. * The other 31 bits encode the byte length of the fragment. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <stdlib.h> #include <string.h> #include <rpc/types.h> #include <rpc/xdr.h> #include <netinet/in.h> #include <unistd.h> /* for lseek() */ typedef struct rec_strm RECSTREAM; static u_int fix_buf_size(u_int); static bool_t flush_out(RECSTREAM *rstrm, bool_t eor); static bool_t get_input_bytes(RECSTREAM *rstrm, caddr_t addr, int len); static bool_t set_input_fragment(RECSTREAM *rstrm); static bool_t skip_input_bytes(RECSTREAM *rstrm, long cnt); static bool_t xdrrec_getlong(XDR *xdrs, long *lp); static bool_t xdrrec_putlong(XDR *xdrs, const long *lp); static bool_t xdrrec_getbytes(XDR *xdrs, caddr_t addr, u_int len); static bool_t xdrrec_putbytes(XDR *xdrs, const char *addr, u_int len); static u_int xdrrec_getpos(XDR *xdrs); static bool_t xdrrec_setpos(XDR *xdrs, u_int pos); static int32_t *xdrrec_inline(XDR *xdrs, u_int len); static void xdrrec_destroy(XDR *xdrs); static struct xdr_ops xdrrec_ops = { xdrrec_getlong, xdrrec_putlong, xdrrec_getbytes, xdrrec_putbytes, xdrrec_getpos, xdrrec_setpos, xdrrec_inline, xdrrec_destroy }; /* * A record is composed of one or more record fragments. * A record fragment is a two-byte header followed by zero to * 2**32-1 bytes. 
The header is treated as a long unsigned and is * encode/decoded to the network via htonl/ntohl. The low order 31 bits * are a byte count of the fragment. The highest order bit is a boolean: * 1 => this fragment is the last fragment of the record, * 0 => this fragment is followed by more fragment(s). * * The fragment/record machinery is not general; it is constructed to * meet the needs of xdr and rpc based on tcp. */ #define LAST_FRAG ((u_int32_t)(1L << 31)) struct rec_strm { caddr_t tcp_handle; caddr_t the_buffer; /* * out-goung bits */ int (*writeit) (caddr_t, caddr_t, int); caddr_t out_base; /* output buffer (points to frag header) */ caddr_t out_finger; /* next output position */ caddr_t out_boundry; /* data cannot up to this address */ u_int32_t *frag_header; /* beginning of current fragment */ bool_t frag_sent; /* true if buffer sent in middle of record */ /* * in-coming bits */ int (*readit) (caddr_t, caddr_t, int); u_long in_size; /* fixed size of the input buffer */ caddr_t in_base; caddr_t in_finger; /* location of next byte to be had */ caddr_t in_boundry; /* can read up to this location */ long fbtbc; /* fragment bytes to be consumed */ bool_t last_frag; u_int sendsize; u_int recvsize; }; /* * Create an xdr handle for xdrrec * xdrrec_create fills in xdrs. Sendsize and recvsize are * send and recv buffer sizes (0 => use default). * tcp_handle is an opaque handle that is passed as the first parameter to * the procedures readit and writeit. Readit and writeit are read and * write respectively. They are like the system * calls expect that they take an opaque handle rather than an fd. */ void xdrrec_create( XDR *xdrs, u_int sendsize, u_int recvsize, caddr_t tcp_handle, int (*readit)(char*, char*, int), /* like read, but pass it a tcp_handle, not sock */ int (*writeit)(char*, char*, int) /* like write, but pass it a tcp_handle, not sock */ ) { RECSTREAM *rstrm = (RECSTREAM *)mem_alloc(sizeof(RECSTREAM)); if (rstrm == NULL) { (void)fprintf(stderr, "xdrrec_create: out of memory\n"); /* * This is bad. Should rework xdrrec_create to * return a handle, and in this case return NULL */ return; } /* * adjust sizes and allocate buffer quad byte aligned */ rstrm->sendsize = sendsize = fix_buf_size(sendsize); rstrm->recvsize = recvsize = fix_buf_size(recvsize); rstrm->the_buffer = mem_alloc(sendsize + recvsize + BYTES_PER_XDR_UNIT); if (rstrm->the_buffer == NULL) { (void)fprintf(stderr, "xdrrec_create: out of memory\n"); return; } for (rstrm->out_base = rstrm->the_buffer; (uintptr_t)rstrm->out_base % BYTES_PER_XDR_UNIT != 0; rstrm->out_base++); rstrm->in_base = rstrm->out_base + sendsize; /* * now the rest ... */ xdrs->x_ops = &xdrrec_ops; xdrs->x_private = (caddr_t)rstrm; rstrm->tcp_handle = tcp_handle; rstrm->readit = readit; rstrm->writeit = writeit; rstrm->out_finger = rstrm->out_boundry = rstrm->out_base; rstrm->frag_header = (u_int32_t *)rstrm->out_base; rstrm->out_finger += sizeof(u_int32_t); rstrm->out_boundry += sendsize; rstrm->frag_sent = FALSE; rstrm->in_size = recvsize; rstrm->in_boundry = rstrm->in_base; rstrm->in_finger = (rstrm->in_boundry += recvsize); rstrm->fbtbc = 0; rstrm->last_frag = TRUE; } /* * The reoutines defined below are the xdr ops which will go into the * xdr handle filled in by xdrrec_create. 
*/ static bool_t xdrrec_getlong( XDR *xdrs, long *lp) { RECSTREAM *rstrm = (RECSTREAM *)(xdrs->x_private); int32_t *buflp = (int32_t *)(rstrm->in_finger); int32_t mylong; /* first try the inline, fast case */ if ((rstrm->fbtbc >= sizeof(int32_t)) && (((intptr_t)rstrm->in_boundry - (intptr_t)buflp) >= sizeof(int32_t))) { *lp = (long)ntohl((u_int32_t)(*buflp)); rstrm->fbtbc -= sizeof(int32_t); rstrm->in_finger += sizeof(int32_t); } else { if (! xdrrec_getbytes(xdrs, (caddr_t)&mylong, sizeof(int32_t))) return (FALSE); *lp = (long)ntohl((u_int32_t)mylong); } return (TRUE); } static bool_t xdrrec_putlong( XDR *xdrs, const long *lp) { RECSTREAM *rstrm = (RECSTREAM *)(xdrs->x_private); int32_t *dest_lp = ((int32_t *)(rstrm->out_finger)); if ((rstrm->out_finger += sizeof(int32_t)) > rstrm->out_boundry) { /* * this case should almost never happen so the code is * inefficient */ rstrm->out_finger -= sizeof(int32_t); rstrm->frag_sent = TRUE; if (! flush_out(rstrm, FALSE)) return (FALSE); dest_lp = ((int32_t *)(rstrm->out_finger)); rstrm->out_finger += sizeof(int32_t); } *dest_lp = (int32_t)htonl((u_int32_t)(*lp)); return (TRUE); } static bool_t /* must manage buffers, fragments, and records */ xdrrec_getbytes( XDR *xdrs, caddr_t addr, u_int len) { RECSTREAM *rstrm = (RECSTREAM *)(xdrs->x_private); int current; while (len > 0) { current = rstrm->fbtbc; if (current == 0) { if (rstrm->last_frag) return (FALSE); if (! set_input_fragment(rstrm)) return (FALSE); continue; } current = (len < current) ? len : current; if (! get_input_bytes(rstrm, addr, current)) return (FALSE); addr += current; rstrm->fbtbc -= current; len -= current; } return (TRUE); } static bool_t xdrrec_putbytes( XDR *xdrs, const char *addr, u_int len) { RECSTREAM *rstrm = (RECSTREAM *)(xdrs->x_private); long current; while (len > 0) { current = (intptr_t)rstrm->out_boundry - (intptr_t)rstrm->out_finger; current = (len < current) ? len : current; memcpy(rstrm->out_finger, addr, current); rstrm->out_finger += current; addr += current; len -= current; if (rstrm->out_finger == rstrm->out_boundry) { rstrm->frag_sent = TRUE; if (! 
flush_out(rstrm, FALSE)) return (FALSE); } } return (TRUE); } static u_int xdrrec_getpos( XDR *xdrs) { RECSTREAM *rstrm = (RECSTREAM *)xdrs->x_private; long pos; pos = lseek((intptr_t)rstrm->tcp_handle, (off_t) 0, 1); if (pos != -1) switch (xdrs->x_op) { case XDR_ENCODE: pos += rstrm->out_finger - rstrm->out_base; break; case XDR_DECODE: pos -= rstrm->in_boundry - rstrm->in_finger; break; default: pos = -1; break; } return ((u_int) pos); } static bool_t xdrrec_setpos( XDR *xdrs, u_int pos) { RECSTREAM *rstrm = (RECSTREAM *)xdrs->x_private; u_int currpos = xdrrec_getpos(xdrs); int delta = currpos - pos; caddr_t newpos; if ((int)currpos != -1) switch (xdrs->x_op) { case XDR_ENCODE: newpos = rstrm->out_finger - delta; if ((newpos > (caddr_t)(rstrm->frag_header)) && (newpos < rstrm->out_boundry)) { rstrm->out_finger = newpos; return (TRUE); } break; case XDR_DECODE: newpos = rstrm->in_finger - delta; if ((delta < (int)(rstrm->fbtbc)) && (newpos <= rstrm->in_boundry) && (newpos >= rstrm->in_base)) { rstrm->in_finger = newpos; rstrm->fbtbc -= delta; return (TRUE); } break; case XDR_FREE: /* to avoid warning */ break; } return (FALSE); } static int32_t * xdrrec_inline( XDR *xdrs, u_int len) { RECSTREAM *rstrm = (RECSTREAM *)xdrs->x_private; int32_t * buf = NULL; switch (xdrs->x_op) { case XDR_ENCODE: if ((rstrm->out_finger + len) <= rstrm->out_boundry) { buf = (int32_t *) rstrm->out_finger; rstrm->out_finger += len; } break; case XDR_DECODE: if ((len <= rstrm->fbtbc) && ((rstrm->in_finger + len) <= rstrm->in_boundry)) { buf = (int32_t *) rstrm->in_finger; rstrm->fbtbc -= len; rstrm->in_finger += len; } break; case XDR_FREE: /* to avoid warning */ break; } return (buf); } static void xdrrec_destroy( XDR *xdrs) { RECSTREAM *rstrm = (RECSTREAM *)xdrs->x_private; mem_free(rstrm->the_buffer, rstrm->sendsize + rstrm->recvsize + BYTES_PER_XDR_UNIT); mem_free((caddr_t)rstrm, sizeof(RECSTREAM)); } /* * Exported routines to manage xdr records */ /* * Before reading (deserializing from the stream, one should always call * this procedure to guarantee proper record alignment. */ bool_t xdrrec_skiprecord( XDR *xdrs) { RECSTREAM *rstrm = (RECSTREAM *)(xdrs->x_private); while (rstrm->fbtbc > 0 || (! rstrm->last_frag)) { if (! skip_input_bytes(rstrm, rstrm->fbtbc)) return (FALSE); rstrm->fbtbc = 0; if ((! rstrm->last_frag) && (! set_input_fragment(rstrm))) return (FALSE); } rstrm->last_frag = FALSE; return (TRUE); } /* * Look ahead fuction. * Returns TRUE iff there is no more input in the buffer * after consuming the rest of the current record. */ bool_t xdrrec_eof( XDR *xdrs) { RECSTREAM *rstrm = (RECSTREAM *)(xdrs->x_private); while (rstrm->fbtbc > 0 || (! rstrm->last_frag)) { if (! skip_input_bytes(rstrm, rstrm->fbtbc)) return (TRUE); rstrm->fbtbc = 0; if ((! rstrm->last_frag) && (! set_input_fragment(rstrm))) return (TRUE); } if (rstrm->in_finger == rstrm->in_boundry) return (TRUE); return (FALSE); } /* * The client must tell the package when an end-of-record has occurred. * The second paraemters tells whether the record should be flushed to the * (output) tcp stream. (This let's the package support batched or * pipelined procedure calls.) TRUE => immmediate flush to tcp connection. 
*/ bool_t xdrrec_endofrecord( XDR *xdrs, bool_t sendnow) { RECSTREAM *rstrm = (RECSTREAM *)(xdrs->x_private); u_long len; /* fragment length */ if (sendnow || rstrm->frag_sent || ((uintptr_t)rstrm->out_finger + sizeof(u_int32_t) >= (uintptr_t)rstrm->out_boundry)) { rstrm->frag_sent = FALSE; return (flush_out(rstrm, TRUE)); } len = (uintptr_t)(rstrm->out_finger) - (uintptr_t)(rstrm->frag_header) - sizeof(u_int32_t); *(rstrm->frag_header) = htonl((u_int32_t)len | LAST_FRAG); rstrm->frag_header = (u_int32_t *)rstrm->out_finger; rstrm->out_finger += sizeof(u_int32_t); return (TRUE); } /* * Internal useful routines */ static bool_t flush_out( RECSTREAM *rstrm, bool_t eor) { u_int32_t eormask = (eor == TRUE) ? LAST_FRAG : 0; u_int32_t len = (uintptr_t)(rstrm->out_finger) - (uintptr_t)(rstrm->frag_header) - sizeof(u_int32_t); *(rstrm->frag_header) = htonl(len | eormask); len = (uintptr_t)(rstrm->out_finger) - (uintptr_t)(rstrm->out_base); if ((*(rstrm->writeit))(rstrm->tcp_handle, rstrm->out_base, (int)len) != (int)len) return (FALSE); rstrm->frag_header = (u_int32_t *)rstrm->out_base; rstrm->out_finger = (caddr_t)rstrm->out_base + sizeof(u_int32_t); return (TRUE); } static bool_t /* knows nothing about records! Only about input buffers */ fill_input_buf( RECSTREAM *rstrm) { caddr_t where; u_long i; long len; where = rstrm->in_base; i = (uintptr_t)rstrm->in_boundry % BYTES_PER_XDR_UNIT; where += i; len = rstrm->in_size - i; if ((len = (*(rstrm->readit))(rstrm->tcp_handle, where, len)) == -1) return (FALSE); rstrm->in_finger = where; where += len; rstrm->in_boundry = where; return (TRUE); } static bool_t /* knows nothing about records! Only about input buffers */ get_input_bytes( RECSTREAM *rstrm, caddr_t addr, int len) { long current; while (len > 0) { current = (intptr_t)rstrm->in_boundry - (intptr_t)rstrm->in_finger; if (current == 0) { if (! fill_input_buf(rstrm)) return (FALSE); continue; } current = (len < current) ? len : current; memcpy(addr, rstrm->in_finger, current); rstrm->in_finger += current; addr += current; len -= current; } return (TRUE); } static bool_t /* next two bytes of the input stream are treated as a header */ set_input_fragment( RECSTREAM *rstrm) { u_int32_t header; if (! get_input_bytes(rstrm, (caddr_t)&header, sizeof(header))) return (FALSE); header = (long)ntohl(header); rstrm->last_frag = ((header & LAST_FRAG) == 0) ? FALSE : TRUE; /* * Sanity check. Try not to accept wildly incorrect * record sizes. Unfortunately, the only record size * we can positively identify as being 'wildly incorrect' * is zero. Ridiculously large record sizes may look wrong, * but we don't have any way to be certain that they aren't * what the client actually intended to send us. */ if (header == 0) return(FALSE); rstrm->fbtbc = header & (~LAST_FRAG); return (TRUE); } static bool_t /* consumes input bytes; knows nothing about records! */ skip_input_bytes( RECSTREAM *rstrm, long cnt) { long current; while (cnt > 0) { current = (intptr_t)rstrm->in_boundry - (intptr_t)rstrm->in_finger; if (current == 0) { if (! fill_input_buf(rstrm)) return (FALSE); continue; } current = (cnt < current) ? cnt : current; rstrm->in_finger += current; cnt -= current; } return (TRUE); } static u_int fix_buf_size( u_int s) { if (s < 100) s = 4000; return (RNDUP(s)); }
atgreen/RTEMS
cpukit/librpc/src/xdr/xdr_rec.c
C
gpl-2.0
16,201
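
The xdr_rec.c record above documents its record-marking layer precisely: each fragment is preceded by a 32-bit header sent through htonl(), whose high-order bit (the LAST_FRAG constant) flags the final fragment and whose low 31 bits carry the fragment byte count. A self-contained encode/decode sketch of just that header word — the two helper names are illustrative:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define LAST_FRAG ((uint32_t)1u << 31)

/* Build the on-wire header for a fragment of len bytes (len < 2^31). */
static uint32_t make_frag_header(uint32_t len, int is_last)
{
    return htonl(len | (is_last ? LAST_FRAG : 0));
}

/* Recover the length and last-fragment flag from an on-wire header. */
static void parse_frag_header(uint32_t wire, uint32_t *len, int *is_last)
{
    uint32_t h = ntohl(wire);
    *is_last = (h & LAST_FRAG) != 0;
    *len = h & ~LAST_FRAG;
}

int main(void)
{
    uint32_t len;
    int last;
    parse_frag_header(make_frag_header(516, 1), &len, &last);
    printf("len=%u last=%d\n", len, last);   /* len=516 last=1 */
    return 0;
}

Note the sanity rule set_input_fragment() applies on the receive side: a header word of zero (no length, no flag) can never be valid, which is the one "wildly incorrect" record size the reader can reject outright.
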
/*
 * Copyright (C) 2005  Ole André Vadla Ravnås <oleavr@gmail.com>
 * Copyright (C) 2008  Ramiro Polla
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#include "avcodec.h"
#include "internal.h"
#include "get_bits.h"
#include "bytestream.h"
#include "dsputil.h"
#include "thread.h"

#define MIMIC_HEADER_SIZE   20

typedef struct {
    AVCodecContext *avctx;

    int             num_vblocks[3];
    int             num_hblocks[3];

    void           *swap_buf;
    int             swap_buf_size;

    int             cur_index;
    int             prev_index;

    AVFrame         buf_ptrs    [16];
    AVPicture       flipped_ptrs[16];

    DECLARE_ALIGNED(16, DCTELEM, dct_block)[64];

    GetBitContext   gb;
    ScanTable       scantable;
    DSPContext      dsp;
    VLC             vlc;

    /* Kept in the context so multithreading can have a constant to read from */
    int             next_cur_index;
    int             next_prev_index;
} MimicContext;

static const uint32_t huffcodes[] = {
    0x0000000a, 0x00000000, 0x00000000, 0x00000000,
    0x00000000, 0x00000000, 0x00000000, 0x00000000,
    0x00000000, 0x00000000, 0x00000000, 0x00000000,
    0x00000000, 0x00000000, 0x00000000, 0x00000000,
    0x00000000, 0x0000000b, 0x0000001b, 0x00000038,
    0x00000078, 0x00000079, 0x0000007a, 0x000000f9,
    0x000000fa, 0x000003fb, 0x000007f8, 0x000007f9,
    0x000007fa, 0x000007fb, 0x00000ff8, 0x00000ff9,
    0x00000001, 0x00000039, 0x0000007b, 0x000000fb,
    0x000001f8, 0x000001f9, 0x00000ffa, 0x00000ffb,
    0x00001ff8, 0x00001ff9, 0x00001ffa, 0x00001ffb,
    0x00003ff8, 0x00003ff9, 0x00003ffa, 0x00000000,
    0x00000004, 0x0000003a, 0x000001fa, 0x00003ffb,
    0x00007ff8, 0x00007ff9, 0x00007ffa, 0x00007ffb,
    0x0000fff8, 0x0000fff9, 0x0000fffa, 0x0000fffb,
    0x0001fff8, 0x0001fff9, 0x0001fffa, 0x00000000,
    0x0000000c, 0x000000f8, 0x000001fb, 0x0001fffb,
    0x0003fff8, 0x0003fff9, 0x0003fffa, 0x0003fffb,
    0x0007fff8, 0x0007fff9, 0x0007fffa, 0x0007fffb,
    0x000ffff8, 0x000ffff9, 0x000ffffa, 0x00000000,
    0x0000001a, 0x000003f8, 0x000ffffb, 0x001ffff8,
    0x001ffff9, 0x001ffffa, 0x001ffffb, 0x003ffff8,
    0x003ffff9, 0x003ffffa, 0x003ffffb, 0x007ffff8,
    0x007ffff9, 0x007ffffa, 0x007ffffb, 0x00000000,
    0x0000003b, 0x000003f9, 0x00fffff8, 0x00fffff9,
    0x00fffffa, 0x00fffffb, 0x01fffff8, 0x01fffff9,
    0x01fffffa, 0x01fffffb, 0x03fffff8, 0x03fffff9,
    0x03fffffa, 0x03fffffb, 0x07fffff8, 0x00000000,
    0x000003fa, 0x07fffff9, 0x07fffffa, 0x07fffffb,
    0x0ffffff8, 0x0ffffff9, 0x0ffffffa, 0x0ffffffb,
    0x1ffffff8, 0x1ffffff9, 0x1ffffffa, 0x1ffffffb,
    0x3ffffff8, 0x3ffffff9, 0x3ffffffa,
};

static const uint8_t huffbits[] = {
     4,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
     2,  4,  5,  6,  7,  7,  7,  8,  8, 10, 11, 11, 11, 11, 12, 12,
     2,  6,  7,  8,  9,  9, 12, 12, 13, 13, 13, 13, 14, 14, 14,  0,
     3,  6,  9, 14, 15, 15, 15, 15, 16, 16, 16, 16, 17, 17, 17,  0,
     4,  8,  9, 17, 18, 18, 18, 18, 19, 19, 19, 19, 20, 20, 20,  0,
     5, 10, 20, 21, 21, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23,  0,
     6, 10, 24, 24, 24, 24, 25, 25, 25, 25, 26, 26, 26, 26, 27,  0,
    10, 27, 27, 27, 28, 28, 28, 28, 29, 29, 29, 29, 30, 30, 30,
};

static const uint8_t col_zag[64] = {
     0,  8,  1,  2,  9, 16, 24, 17,
    10,  3,  4, 11, 18, 25, 32, 40,
    33, 26, 19, 12,  5,  6, 13, 20,
    27, 34, 41, 48, 56, 49, 42, 35,
    28, 21, 14,  7, 15, 22, 29, 36,
    43, 50, 57, 58, 51, 44, 37, 30,
    23, 31, 38, 45, 52, 59, 39, 46,
    53, 60, 61, 54, 47, 55, 62, 63,
};

static av_cold int mimic_decode_init(AVCodecContext *avctx)
{
    MimicContext *ctx = avctx->priv_data;

    ctx->prev_index = 0;
    ctx->cur_index  = 15;

    if(init_vlc(&ctx->vlc, 11, FF_ARRAY_ELEMS(huffbits),
                huffbits, 1, 1, huffcodes, 4, 4, 0)) {
        av_log(avctx, AV_LOG_ERROR, "error initializing vlc table\n");
        return -1;
    }
    dsputil_init(&ctx->dsp, avctx);
    ff_init_scantable(ctx->dsp.idct_permutation, &ctx->scantable, col_zag);

    return 0;
}

static int mimic_decode_update_thread_context(AVCodecContext *avctx,
                                              const AVCodecContext *avctx_from)
{
    MimicContext *dst = avctx->priv_data, *src = avctx_from->priv_data;

    if (avctx == avctx_from)
        return 0;

    dst->cur_index  = src->next_cur_index;
    dst->prev_index = src->next_prev_index;

    memcpy(dst->buf_ptrs, src->buf_ptrs, sizeof(src->buf_ptrs));
    memcpy(dst->flipped_ptrs, src->flipped_ptrs, sizeof(src->flipped_ptrs));

    memset(&dst->buf_ptrs[dst->cur_index], 0, sizeof(AVFrame));

    return 0;
}

static const int8_t vlcdec_lookup[9][64] = {
    {    0, },
    {   -1,   1, },
    {   -3,   3,   -2,   2, },
    {   -7,   7,   -6,   6,   -5,   5,   -4,   4, },
    {  -15,  15,  -14,  14,  -13,  13,  -12,  12,
       -11,  11,  -10,  10,   -9,   9,   -8,   8, },
    {  -31,  31,  -30,  30,  -29,  29,  -28,  28,
       -27,  27,  -26,  26,  -25,  25,  -24,  24,
       -23,  23,  -22,  22,  -21,  21,  -20,  20,
       -19,  19,  -18,  18,  -17,  17,  -16,  16, },
    {  -63,  63,  -62,  62,  -61,  61,  -60,  60,
       -59,  59,  -58,  58,  -57,  57,  -56,  56,
       -55,  55,  -54,  54,  -53,  53,  -52,  52,
       -51,  51,  -50,  50,  -49,  49,  -48,  48,
       -47,  47,  -46,  46,  -45,  45,  -44,  44,
       -43,  43,  -42,  42,  -41,  41,  -40,  40,
       -39,  39,  -38,  38,  -37,  37,  -36,  36,
       -35,  35,  -34,  34,  -33,  33,  -32,  32, },
    { -127, 127, -126, 126, -125, 125, -124, 124,
      -123, 123, -122, 122, -121, 121, -120, 120,
      -119, 119, -118, 118, -117, 117, -116, 116,
      -115, 115, -114, 114, -113, 113, -112, 112,
      -111, 111, -110, 110, -109, 109, -108, 108,
      -107, 107, -106, 106, -105, 105, -104, 104,
      -103, 103, -102, 102, -101, 101, -100, 100,
       -99,  99,  -98,  98,  -97,  97,  -96,  96, },
    {  -95,  95,  -94,  94,  -93,  93,  -92,  92,
       -91,  91,  -90,  90,  -89,  89,  -88,  88,
       -87,  87,  -86,  86,  -85,  85,  -84,  84,
       -83,  83,  -82,  82,  -81,  81,  -80,  80,
       -79,  79,  -78,  78,  -77,  77,  -76,  76,
       -75,  75,  -74,  74,  -73,  73,  -72,  72,
       -71,  71,  -70,  70,  -69,  69,  -68,  68,
       -67,  67,  -66,  66,  -65,  65,  -64,  64, },
};

static int vlc_decode_block(MimicContext *ctx, int num_coeffs, int qscale)
{
    DCTELEM *block = ctx->dct_block;
    unsigned int pos;

    ctx->dsp.clear_block(block);
    block[0] = get_bits(&ctx->gb, 8) << 3;

    for(pos = 1; pos < num_coeffs; pos++) {
        uint32_t vlc, num_bits;
        int value;
        int coeff;

        vlc = get_vlc2(&ctx->gb, ctx->vlc.table, ctx->vlc.bits, 3);
        if(!vlc) /* end-of-block code */
            return 1;
        if(vlc == -1)
            return 0;

        /* pos_add and num_bits are coded in the vlc code */
        pos     += vlc&15; // pos_add
        num_bits = vlc>>4; // num_bits

        if(pos >= 64)
            return 0;

        value = get_bits(&ctx->gb, num_bits);

        /* FFmpeg's IDCT behaves somewhat differently from the original code,
         * so a factor of 4 was added to the input */
        coeff = vlcdec_lookup[num_bits][value];
        if(pos < 3)
            coeff <<= 4;
        else /* TODO Use >> 10 instead of / 1001 */
            coeff = (coeff * qscale) / 1001;

        block[ctx->scantable.permutated[pos]] = coeff;
    }

    return 1;
}

static int decode(MimicContext *ctx, int quality, int num_coeffs,
                  int is_iframe)
{
    int y, x, plane, cur_row = 0;

    for(plane = 0; plane < 3; plane++) {
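        /*
         * Editorial note (not from the original source): the qscale formula
         * below maps the header's 16-bit quality value to a quantiser,
         * clamped to a per-plane floor. E.g. for quality == 9000 on the luma
         * plane: 10000 - 9000 = 1000, clipped to [2000, 10000] gives 2000,
         * then << 2 yields qscale == 8000.
         */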
        const int is_chroma = !!plane;
        const int qscale    = av_clip(10000-quality,is_chroma?1000:2000,10000)<<2;
        const int stride    = ctx->flipped_ptrs[ctx->cur_index].linesize[plane];
        const uint8_t *src  = ctx->flipped_ptrs[ctx->prev_index].data[plane];
        uint8_t       *dst  = ctx->flipped_ptrs[ctx->cur_index ].data[plane];

        for(y = 0; y < ctx->num_vblocks[plane]; y++) {
            for(x = 0; x < ctx->num_hblocks[plane]; x++) {
                /* Check for a change condition in the current block.
                 * - iframes always change.
                 * - Luma plane changes on get_bits1 == 0
                 * - Chroma planes change on get_bits1 == 1 */
                if(is_iframe || get_bits1(&ctx->gb) == is_chroma) {
                    /* Luma planes may use a backreference from the last 15
                     * frames preceding the previous one. (get_bits1 == 1)
                     * Chroma planes don't use backreferences. */
                    if(is_chroma || is_iframe || !get_bits1(&ctx->gb)) {
                        if(!vlc_decode_block(ctx, num_coeffs, qscale))
                            return 0;
                        ctx->dsp.idct_put(dst, stride, ctx->dct_block);
                    } else {
                        unsigned int backref = get_bits(&ctx->gb, 4);
                        int index            = (ctx->cur_index+backref)&15;
                        uint8_t *p           = ctx->flipped_ptrs[index].data[0];

                        ff_thread_await_progress(&ctx->buf_ptrs[index], cur_row, 0);
                        if(p) {
                            p += src - ctx->flipped_ptrs[ctx->prev_index].data[plane];
                            ctx->dsp.put_pixels_tab[1][0](dst, p, stride, 8);
                        } else {
                            av_log(ctx->avctx, AV_LOG_ERROR,
                                   "No such backreference! Buggy sample.\n");
                        }
                    }
                } else {
                    ff_thread_await_progress(&ctx->buf_ptrs[ctx->prev_index], cur_row, 0);
                    ctx->dsp.put_pixels_tab[1][0](dst, src, stride, 8);
                }
                src += 8;
                dst += 8;
            }
            src += (stride - ctx->num_hblocks[plane])<<3;
            dst += (stride - ctx->num_hblocks[plane])<<3;

            ff_thread_report_progress(&ctx->buf_ptrs[ctx->cur_index], cur_row++, 0);
        }
    }

    return 1;
}

/**
 * Flip the buffer upside-down and put it in the YVU order to match the
 * way Mimic encodes frames.
 */
static void prepare_avpic(MimicContext *ctx, AVPicture *dst, AVPicture *src)
{
    int i;
    dst->data[0] = src->data[0]+( ctx->avctx->height    -1)*src->linesize[0];
    dst->data[1] = src->data[2]+((ctx->avctx->height>>1)-1)*src->linesize[2];
    dst->data[2] = src->data[1]+((ctx->avctx->height>>1)-1)*src->linesize[1];
    for(i = 0; i < 3; i++)
        dst->linesize[i] = -src->linesize[i];
}

static int mimic_decode_frame(AVCodecContext *avctx, void *data,
                              int *data_size, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    MimicContext *ctx  = avctx->priv_data;
    int is_pframe;
    int width, height;
    int quality, num_coeffs;
    int swap_buf_size = buf_size - MIMIC_HEADER_SIZE;

    if(buf_size < MIMIC_HEADER_SIZE) {
        av_log(avctx, AV_LOG_ERROR, "insufficient data\n");
        return -1;
    }

    buf       += 2; /* some constant (always 256) */
    quality    = bytestream_get_le16(&buf);
    width      = bytestream_get_le16(&buf);
    height     = bytestream_get_le16(&buf);
    buf       += 4; /* some constant */
    is_pframe  = bytestream_get_le32(&buf);
    num_coeffs = bytestream_get_byte(&buf);
    buf       += 3; /* some constant */

    if(!ctx->avctx) {
        int i;

        if(!(width == 160 && height == 120) &&
           !(width == 320 && height == 240)) {
            av_log(avctx, AV_LOG_ERROR, "invalid width/height!\n");
            return -1;
        }

        ctx->avctx     = avctx;
        avctx->width   = width;
        avctx->height  = height;
        avctx->pix_fmt = PIX_FMT_YUV420P;
        for(i = 0; i < 3; i++) {
            ctx->num_vblocks[i] = -((-height) >> (3 + !!i));
            ctx->num_hblocks[i] =     width   >> (3 + !!i) ;
        }
    } else if(width != ctx->avctx->width || height != ctx->avctx->height) {
        av_log(avctx, AV_LOG_ERROR, "resolution changing is not supported\n");
        return -1;
    }

    if(is_pframe && !ctx->buf_ptrs[ctx->prev_index].data[0]) {
        av_log(avctx, AV_LOG_ERROR, "decoding must start with keyframe\n");
        return -1;
    }
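    /*
     * Editorial sketch of the 20-byte header layout, inferred purely from
     * the bytestream reads above (MIMIC_HEADER_SIZE == 20); the field names
     * are descriptive only, not from an official specification:
     *
     *   offset  size  field
     *        0     2  constant (always 256)
     *        2     2  quality     (LE16)
     *        4     2  width       (LE16)
     *        6     2  height      (LE16)
     *        8     4  constant
     *       12     4  is_pframe   (LE32)
     *       16     1  num_coeffs
     *       17     3  constant
     */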
    ctx->buf_ptrs[ctx->cur_index].reference = 3;
    ctx->buf_ptrs[ctx->cur_index].pict_type = is_pframe ? AV_PICTURE_TYPE_P:AV_PICTURE_TYPE_I;
    if(ff_thread_get_buffer(avctx, &ctx->buf_ptrs[ctx->cur_index])) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    ctx->next_prev_index = ctx->cur_index;
    ctx->next_cur_index  = (ctx->cur_index - 1) & 15;

    prepare_avpic(ctx, &ctx->flipped_ptrs[ctx->cur_index],
                  (AVPicture*) &ctx->buf_ptrs[ctx->cur_index]);

    ff_thread_finish_setup(avctx);

    av_fast_padded_malloc(&ctx->swap_buf, &ctx->swap_buf_size, swap_buf_size);
    if(!ctx->swap_buf)
        return AVERROR(ENOMEM);

    ctx->dsp.bswap_buf(ctx->swap_buf,
                       (const uint32_t*) buf,
                       swap_buf_size>>2);
    init_get_bits(&ctx->gb, ctx->swap_buf, swap_buf_size << 3);

    if(!decode(ctx, quality, num_coeffs, !is_pframe)) {
        if (avctx->active_thread_type&FF_THREAD_FRAME)
            ff_thread_report_progress(&ctx->buf_ptrs[ctx->cur_index], INT_MAX, 0);
        else {
            ff_thread_release_buffer(avctx, &ctx->buf_ptrs[ctx->cur_index]);
            return -1;
        }
    }

    *(AVFrame*)data = ctx->buf_ptrs[ctx->cur_index];
    *data_size = sizeof(AVFrame);

    ctx->prev_index = ctx->next_prev_index;
    ctx->cur_index  = ctx->next_cur_index;

    /* Only release frames that aren't used for backreferences anymore */
    if(ctx->buf_ptrs[ctx->cur_index].data[0])
        ff_thread_release_buffer(avctx, &ctx->buf_ptrs[ctx->cur_index]);

    return buf_size;
}

static av_cold int mimic_decode_end(AVCodecContext *avctx)
{
    MimicContext *ctx = avctx->priv_data;
    int i;

    av_free(ctx->swap_buf);

    if (avctx->internal->is_copy)
        return 0;

    for(i = 0; i < 16; i++)
        if(ctx->buf_ptrs[i].data[0])
            ff_thread_release_buffer(avctx, &ctx->buf_ptrs[i]);
    free_vlc(&ctx->vlc);

    return 0;
}

AVCodec ff_mimic_decoder = {
    .name                  = "mimic",
    .type                  = AVMEDIA_TYPE_VIDEO,
    .id                    = CODEC_ID_MIMIC,
    .priv_data_size        = sizeof(MimicContext),
    .init                  = mimic_decode_init,
    .close                 = mimic_decode_end,
    .decode                = mimic_decode_frame,
    .capabilities          = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
    .long_name             = NULL_IF_CONFIG_SMALL("Mimic"),
    .update_thread_context = ONLY_IF_THREADS_ENABLED(mimic_decode_update_thread_context)
};
stayupthetree/pewp
lib/ffmpeg/libavcodec/mimic.c
C
gpl-2.0
15,587
/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

/*
 * @test
 * @bug 8031321
 * @library /testlibrary /testlibrary/whitebox /compiler/whitebox ..
 * @build BlsiTestI
 * @run main ClassFileInstaller sun.hotspot.WhiteBox
 * @run main/othervm -Xbootclasspath/a:. -Xbatch -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
 *                   -XX:+IgnoreUnrecognizedVMOptions -XX:+UseBMI1Instructions BlsiTestI
 */

import java.lang.reflect.Method;

public class BlsiTestI extends BmiIntrinsicBase.BmiTestCase {

    protected BlsiTestI(Method method) {
        super(method);
        // from intel manual VEX.NDD.LZ.0F38.W0 F3 /3
        instrMask = new byte[]{
                (byte) 0xFF,
                (byte) 0x1F,
                (byte) 0x00,
                (byte) 0xFF,
                (byte) 0b0011_1000};
        instrPattern = new byte[]{
                (byte) 0xC4, // prefix for 3-byte VEX instruction
                (byte) 0x02, // 00010 implied 0F 38 leading opcode bytes
                (byte) 0x00,
                (byte) 0xF3,
                (byte) 0b0001_1000}; // bits 543 == 011 (3)
    }

    public static void main(String[] args) throws Exception {
        BmiIntrinsicBase.verifyTestCase(BlsiTestI::new, TestBlsiI.BlsiIExpr.class.getDeclaredMethods());
        BmiIntrinsicBase.verifyTestCase(BlsiTestI::new, TestBlsiI.BlsiICommutativeExpr.class.getDeclaredMethods());
    }
}
infobip/infobip-open-jdk-8
hotspot/test/compiler/intrinsics/bmi/verifycode/BlsiTestI.java
Java
gpl-2.0
2,402
// Copyright 2010 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.

#include <cstring>

#include "Core/DSP/DSPAnalyzer.h"
#include "Core/DSP/DSPCore.h"
#include "Core/DSP/DSPEmitter.h"
#include "Core/DSP/DSPHost.h"
#include "Core/DSP/DSPInterpreter.h"
#include "Core/DSP/DSPMemoryMap.h"

#define MAX_BLOCK_SIZE 250
#define DSP_IDLE_SKIP_CYCLES 0x1000

using namespace Gen;

DSPEmitter::DSPEmitter() : gpr(*this), storeIndex(-1), storeIndex2(-1)
{
	m_compiledCode = nullptr;

	AllocCodeSpace(COMPILED_CODE_SIZE);

	blocks = new DSPCompiledCode[MAX_BLOCKS];
	blockLinks = new Block[MAX_BLOCKS];
	blockSize = new u16[MAX_BLOCKS];

	compileSR = 0;
	compileSR |= SR_INT_ENABLE;
	compileSR |= SR_EXT_INT_ENABLE;

	CompileDispatcher();
	stubEntryPoint = CompileStub();

	// Clear all of the block references.
	for (int i = 0x0000; i < MAX_BLOCKS; i++)
	{
		blocks[i] = (DSPCompiledCode)stubEntryPoint;
		blockLinks[i] = nullptr;
		blockSize[i] = 0;
	}
}

DSPEmitter::~DSPEmitter()
{
	delete[] blocks;
	delete[] blockLinks;
	delete[] blockSize;
	FreeCodeSpace();
}

void DSPEmitter::ClearIRAM()
{
	for (int i = 0x0000; i < 0x1000; i++)
	{
		blocks[i] = (DSPCompiledCode)stubEntryPoint;
		blockLinks[i] = nullptr;
		blockSize[i] = 0;
		unresolvedJumps[i].clear();
	}
	g_dsp.reset_dspjit_codespace = true;
}

void DSPEmitter::ClearIRAMandDSPJITCodespaceReset()
{
	ClearCodeSpace();
	CompileDispatcher();
	stubEntryPoint = CompileStub();

	for (int i = 0x0000; i < 0x10000; i++)
	{
		blocks[i] = (DSPCompiledCode)stubEntryPoint;
		blockLinks[i] = nullptr;
		blockSize[i] = 0;
		unresolvedJumps[i].clear();
	}
	g_dsp.reset_dspjit_codespace = false;
}

// Must go out of block if exception is detected
void DSPEmitter::checkExceptions(u32 retval)
{
	// Check for interrupts and exceptions
	TEST(8, M(&g_dsp.exceptions), Imm8(0xff));
	FixupBranch skipCheck = J_CC(CC_Z, true);

	MOV(16, M(&(g_dsp.pc)), Imm16(compilePC));

	DSPJitRegCache c(gpr);
	gpr.saveRegs();
	ABI_CallFunction((void *)&DSPCore_CheckExceptions);
	MOV(32, R(EAX), Imm32(retval));
	JMP(returnDispatcher, true);
	gpr.loadRegs(false);
	gpr.flushRegs(c,false);

	SetJumpTarget(skipCheck);
}

bool DSPEmitter::FlagsNeeded()
{
	if (!(DSPAnalyzer::code_flags[compilePC] & DSPAnalyzer::CODE_START_OF_INST) ||
	    (DSPAnalyzer::code_flags[compilePC] & DSPAnalyzer::CODE_UPDATE_SR))
		return true;
	else
		return false;
}

void DSPEmitter::Default(UDSPInstruction inst)
{
	if (opTable[inst]->reads_pc)
	{
		// Increment PC - we shouldn't need to do this for every instruction,
		// only for branches and end of block. Fallbacks to interpreter need
		// this for fetching immediate values.
		MOV(16, M(&(g_dsp.pc)), Imm16(compilePC + 1));
	}

	// Fall back to interpreter
	gpr.pushRegs();
	_assert_msg_(DSPLLE, opTable[inst]->intFunc, "No function for %04x",inst);
	ABI_CallFunctionC16((void*)opTable[inst]->intFunc, inst);
	gpr.popRegs();
}

void DSPEmitter::EmitInstruction(UDSPInstruction inst)
{
	const DSPOPCTemplate *tinst = GetOpTemplate(inst);
	bool ext_is_jit = false;

	// Call extended
	if (tinst->extended)
	{
		if ((inst >> 12) == 0x3)
		{
			if (!extOpTable[inst & 0x7F]->jitFunc)
			{
				// Fall back to interpreter
				gpr.pushRegs();
				ABI_CallFunctionC16((void*)extOpTable[inst & 0x7F]->intFunc, inst);
				gpr.popRegs();
				INFO_LOG(DSPLLE, "Instruction not JITed(ext part): %04x\n", inst);
				ext_is_jit = false;
			}
			else
			{
				(this->*extOpTable[inst & 0x7F]->jitFunc)(inst);
				ext_is_jit = true;
			}
		}
		else
		{
			if (!extOpTable[inst & 0xFF]->jitFunc)
			{
				// Fall back to interpreter
				gpr.pushRegs();
				ABI_CallFunctionC16((void*)extOpTable[inst & 0xFF]->intFunc, inst);
				gpr.popRegs();
				INFO_LOG(DSPLLE, "Instruction not JITed(ext part): %04x\n", inst);
				ext_is_jit = false;
			}
			else
			{
				(this->*extOpTable[inst & 0xFF]->jitFunc)(inst);
				ext_is_jit = true;
			}
		}
	}

	// Main instruction
	if (!opTable[inst]->jitFunc)
	{
		Default(inst);
		INFO_LOG(DSPLLE, "Instruction not JITed(main part): %04x\n", inst);
	}
	else
	{
		(this->*opTable[inst]->jitFunc)(inst);
	}

	// Backlog
	if (tinst->extended)
	{
		if (!ext_is_jit)
		{
			// Need to call the online cleanup function because
			// the writeBackLog gets populated at runtime.
			gpr.pushRegs();
			ABI_CallFunction((void*)::applyWriteBackLog);
			gpr.popRegs();
		}
		else
		{
			popExtValueToReg();
		}
	}
}

void DSPEmitter::Compile(u16 start_addr)
{
	// Remember the current block address for later
	startAddr = start_addr;
	unresolvedJumps[start_addr].clear();

	const u8 *entryPoint = AlignCode16();

	/*
	// Check for other exceptions
	if (dsp_SR_is_flag_set(SR_INT_ENABLE))
		return;

	if (g_dsp.exceptions == 0)
		return;
	*/

	gpr.loadRegs();

	blockLinkEntry = GetCodePtr();

	compilePC = start_addr;
	bool fixup_pc = false;
	blockSize[start_addr] = 0;

	while (compilePC < start_addr + MAX_BLOCK_SIZE)
	{
		if (DSPAnalyzer::code_flags[compilePC] & DSPAnalyzer::CODE_CHECK_INT)
			checkExceptions(blockSize[start_addr]);

		UDSPInstruction inst = dsp_imem_read(compilePC);
		const DSPOPCTemplate *opcode = GetOpTemplate(inst);

		EmitInstruction(inst);

		blockSize[start_addr]++;
		compilePC += opcode->size;

		// If the block was trying to link into itself, remove the link
		unresolvedJumps[start_addr].remove(compilePC);

		fixup_pc = true;

		// Handle loop condition, only if current instruction was flagged
		// as a loop destination by the analyzer.
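		// Editorial note, inferred from this file rather than from DSP
		// documentation: g_dsp.r.st[2] and g_dsp.r.st[3] appear to act as
		// the hardware loop address and loop counter stacks; the two checks
		// below skip the loop epilogue when either top-of-stack value is
		// not positive, i.e. when no hardware loop is active.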
		if (DSPAnalyzer::code_flags[compilePC-1] & DSPAnalyzer::CODE_LOOP_END)
		{
			MOVZX(32, 16, EAX, M(&(g_dsp.r.st[2])));
			TEST(32, R(EAX), R(EAX));
			FixupBranch rLoopAddressExit = J_CC(CC_LE, true);
			MOVZX(32, 16, EAX, M(&g_dsp.r.st[3]));
			TEST(32, R(EAX), R(EAX));
			FixupBranch rLoopCounterExit = J_CC(CC_LE, true);

			if (!opcode->branch)
			{
				// branch insns update the g_dsp.pc
				MOV(16, M(&(g_dsp.pc)), Imm16(compilePC));
			}

			// These functions branch and therefore only need to be called in the
			// end of each block and in this order
			DSPJitRegCache c(gpr);
			HandleLoop();
			gpr.saveRegs();
			if (!DSPHost::OnThread() && DSPAnalyzer::code_flags[start_addr] & DSPAnalyzer::CODE_IDLE_SKIP)
			{
				MOV(16, R(EAX), Imm16(DSP_IDLE_SKIP_CYCLES));
			}
			else
			{
				MOV(16, R(EAX), Imm16(blockSize[start_addr]));
			}
			JMP(returnDispatcher, true);
			gpr.loadRegs(false);
			gpr.flushRegs(c,false);

			SetJumpTarget(rLoopAddressExit);
			SetJumpTarget(rLoopCounterExit);
		}

		if (opcode->branch)
		{
			// don't update g_dsp.pc -- the branch insn already did
			fixup_pc = false;
			if (opcode->uncond_branch)
			{
				break;
			}
			else if (!opcode->jitFunc)
			{
				// look at g_dsp.pc if we actually branched
				MOV(16, R(AX), M(&g_dsp.pc));
				CMP(16, R(AX), Imm16(compilePC));
				FixupBranch rNoBranch = J_CC(CC_Z, true);

				DSPJitRegCache c(gpr);
				// don't update g_dsp.pc -- the branch insn already did
				gpr.saveRegs();
				if (!DSPHost::OnThread() && DSPAnalyzer::code_flags[start_addr] & DSPAnalyzer::CODE_IDLE_SKIP)
				{
					MOV(16, R(EAX), Imm16(DSP_IDLE_SKIP_CYCLES));
				}
				else
				{
					MOV(16, R(EAX), Imm16(blockSize[start_addr]));
				}
				JMP(returnDispatcher, true);
				gpr.loadRegs(false);
				gpr.flushRegs(c,false);

				SetJumpTarget(rNoBranch);
			}
		}

		// End the block if we're before an idle skip address
		if (DSPAnalyzer::code_flags[compilePC] & DSPAnalyzer::CODE_IDLE_SKIP)
		{
			break;
		}
	}

	if (fixup_pc)
	{
		MOV(16, M(&(g_dsp.pc)), Imm16(compilePC));
	}

	blocks[start_addr] = (DSPCompiledCode)entryPoint;

	// Mark this block as a linkable destination if it does not contain
	// any unresolved CALL's
	if (unresolvedJumps[start_addr].empty())
	{
		blockLinks[start_addr] = blockLinkEntry;

		for (u16 i = 0x0000; i < 0xffff; ++i)
		{
			if (!unresolvedJumps[i].empty())
			{
				// Check if there were any blocks waiting for this block to be linkable
				size_t size = unresolvedJumps[i].size();
				unresolvedJumps[i].remove(start_addr);
				if (unresolvedJumps[i].size() < size)
				{
					// Mark the block to be recompiled again
					blocks[i] = (DSPCompiledCode)stubEntryPoint;
					blockLinks[i] = nullptr;
					blockSize[i] = 0;
				}
			}
		}
	}

	if (blockSize[start_addr] == 0)
	{
		// just a safeguard, should never happen anymore.
		// if it does we might get stuck over in RunForCycles.
		ERROR_LOG(DSPLLE, "Block at 0x%04x has zero size", start_addr);
		blockSize[start_addr] = 1;
	}

	gpr.saveRegs();
	if (!DSPHost::OnThread() && DSPAnalyzer::code_flags[start_addr] & DSPAnalyzer::CODE_IDLE_SKIP)
	{
		MOV(16, R(EAX), Imm16(DSP_IDLE_SKIP_CYCLES));
	}
	else
	{
		MOV(16, R(EAX), Imm16(blockSize[start_addr]));
	}
	JMP(returnDispatcher, true);
}

const u8 *DSPEmitter::CompileStub()
{
	const u8 *entryPoint = AlignCode16();
	ABI_CallFunction((void *)&CompileCurrent);
	XOR(32, R(EAX), R(EAX)); // Return 0 cycles executed
	JMP(returnDispatcher);
	return entryPoint;
}

void DSPEmitter::CompileDispatcher()
{
	enterDispatcher = AlignCode16();
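	// Editorial summary of the dispatcher emitted below (a description of
	// this file's own code, not of any external API): save callee-saved
	// registers, then loop: exit on a pending external interrupt (dual-core
	// mode) or on CR_HALT, otherwise jump into blocks[g_dsp.pc]; each block
	// returns its executed cycle count in EAX, which is subtracted from
	// cyclesLeft until the budget is exhausted.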
	// We don't use floating point (high 16 bits).
	BitSet32 registers_used = ABI_ALL_CALLEE_SAVED & BitSet32(0xffff);
	ABI_PushRegistersAndAdjustStack(registers_used, 8);

	const u8 *dispatcherLoop = GetCodePtr();

	FixupBranch exceptionExit;
	if (DSPHost::OnThread())
	{
		CMP(8, M(const_cast<bool*>(&g_dsp.external_interrupt_waiting)), Imm8(0));
		exceptionExit = J_CC(CC_NE);
	}

	// Check for DSP halt
	TEST(8, M(&g_dsp.cr), Imm8(CR_HALT));
	FixupBranch _halt = J_CC(CC_NE);

	// Execute block. Cycles executed returned in EAX.
	MOVZX(64, 16, ECX, M(&g_dsp.pc));
	MOV(64, R(RBX), ImmPtr(blocks));
	JMPptr(MComplex(RBX, RCX, SCALE_8, 0));

	returnDispatcher = GetCodePtr();

	// Decrement cyclesLeft
	SUB(16, M(&cyclesLeft), R(EAX));

	J_CC(CC_A, dispatcherLoop);

	// DSP gave up the remaining cycles.
	SetJumpTarget(_halt);
	if (DSPHost::OnThread())
	{
		SetJumpTarget(exceptionExit);
	}
	//MOV(32, M(&cyclesLeft), Imm32(0));
	ABI_PopRegistersAndAdjustStack(registers_used, 8);
	RET();
}
moncefmechri/dolphin
Source/Core/Core/DSP/DSPEmitter.cpp
C++
gpl-2.0
10,029
/*
 * Copyright (c) 2006-2008 Intel Corporation
 * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
 * Copyright (c) 2008 Red Hat Inc.
 *
 * DRM core CRTC related functions
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  The copyright holders make no representations
 * about the suitability of this software for any purpose.  It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 *
 * Authors:
 *	Keith Packard
 *	Eric Anholt <eric@anholt.net>
 *	Dave Airlie <airlied@linux.ie>
 *	Jesse Barnes <jesse.barnes@intel.com>
 */
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_lock.h>
#include <drm/drm_atomic.h>

#include "drm_crtc_internal.h"
#include "drm_internal.h"

static struct drm_framebuffer *
internal_framebuffer_create(struct drm_device *dev,
			    const struct drm_mode_fb_cmd2 *r,
			    struct drm_file *file_priv);

/* Avoid boilerplate.  I'm tired of typing. */
#define DRM_ENUM_NAME_FN(fnname, list)				\
	const char *fnname(int val)				\
	{							\
		int i;						\
		for (i = 0; i < ARRAY_SIZE(list); i++) {	\
			if (list[i].type == val)		\
				return list[i].name;		\
		}						\
		return "(unknown)";				\
	}

/*
 * Global properties
 */
static const struct drm_prop_enum_list drm_dpms_enum_list[] = {
	{ DRM_MODE_DPMS_ON, "On" },
	{ DRM_MODE_DPMS_STANDBY, "Standby" },
	{ DRM_MODE_DPMS_SUSPEND, "Suspend" },
	{ DRM_MODE_DPMS_OFF, "Off" }
};

DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)

static const struct drm_prop_enum_list drm_plane_type_enum_list[] = {
	{ DRM_PLANE_TYPE_OVERLAY, "Overlay" },
	{ DRM_PLANE_TYPE_PRIMARY, "Primary" },
	{ DRM_PLANE_TYPE_CURSOR, "Cursor" },
};

/*
 * Optional properties
 */
static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] = {
	{ DRM_MODE_SCALE_NONE, "None" },
	{ DRM_MODE_SCALE_FULLSCREEN, "Full" },
	{ DRM_MODE_SCALE_CENTER, "Center" },
	{ DRM_MODE_SCALE_ASPECT, "Full aspect" },
};

static const struct drm_prop_enum_list drm_aspect_ratio_enum_list[] = {
	{ DRM_MODE_PICTURE_ASPECT_NONE, "Automatic" },
	{ DRM_MODE_PICTURE_ASPECT_4_3, "4:3" },
	{ DRM_MODE_PICTURE_ASPECT_16_9, "16:9" },
};

/*
 * Non-global properties, but "required" for certain connectors.
 */
static const struct drm_prop_enum_list drm_dvi_i_select_enum_list[] = {
	{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
	{ DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
	{ DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
};

DRM_ENUM_NAME_FN(drm_get_dvi_i_select_name, drm_dvi_i_select_enum_list)

static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = {
	{ DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
	{ DRM_MODE_SUBCONNECTOR_DVID,      "DVI-D"     }, /* DVI-I  */
	{ DRM_MODE_SUBCONNECTOR_DVIA,      "DVI-A"     }, /* DVI-I  */
};

DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
		 drm_dvi_i_subconnector_enum_list)

static const struct drm_prop_enum_list drm_tv_select_enum_list[] = {
	{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
	{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
	{ DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
	{ DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
	{ DRM_MODE_SUBCONNECTOR_SCART,     "SCART"     }, /* TV-out */
};

DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list)

static const struct drm_prop_enum_list drm_tv_subconnector_enum_list[] = {
	{ DRM_MODE_SUBCONNECTOR_Unknown,   "Unknown"   }, /* DVI-I and TV-out */
	{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
	{ DRM_MODE_SUBCONNECTOR_SVIDEO,    "SVIDEO"    }, /* TV-out */
	{ DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */
	{ DRM_MODE_SUBCONNECTOR_SCART,     "SCART"     }, /* TV-out */
};

DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
		 drm_tv_subconnector_enum_list)

static const struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
	{ DRM_MODE_DIRTY_OFF,      "Off"      },
	{ DRM_MODE_DIRTY_ON,       "On"       },
	{ DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
};

struct drm_conn_prop_enum_list {
	int type;
	const char *name;
	struct ida ida;
};

/*
 * Connector and encoder types.
 */
static struct drm_conn_prop_enum_list drm_connector_enum_list[] = {
	{ DRM_MODE_CONNECTOR_Unknown, "Unknown" },
	{ DRM_MODE_CONNECTOR_VGA, "VGA" },
	{ DRM_MODE_CONNECTOR_DVII, "DVI-I" },
	{ DRM_MODE_CONNECTOR_DVID, "DVI-D" },
	{ DRM_MODE_CONNECTOR_DVIA, "DVI-A" },
	{ DRM_MODE_CONNECTOR_Composite, "Composite" },
	{ DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO" },
	{ DRM_MODE_CONNECTOR_LVDS, "LVDS" },
	{ DRM_MODE_CONNECTOR_Component, "Component" },
	{ DRM_MODE_CONNECTOR_9PinDIN, "DIN" },
	{ DRM_MODE_CONNECTOR_DisplayPort, "DP" },
	{ DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" },
	{ DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" },
	{ DRM_MODE_CONNECTOR_TV, "TV" },
	{ DRM_MODE_CONNECTOR_eDP, "eDP" },
	{ DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
	{ DRM_MODE_CONNECTOR_DSI, "DSI" },
};

static const struct drm_prop_enum_list drm_encoder_enum_list[] = {
	{ DRM_MODE_ENCODER_NONE, "None" },
	{ DRM_MODE_ENCODER_DAC, "DAC" },
	{ DRM_MODE_ENCODER_TMDS, "TMDS" },
	{ DRM_MODE_ENCODER_LVDS, "LVDS" },
	{ DRM_MODE_ENCODER_TVDAC, "TV" },
	{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
	{ DRM_MODE_ENCODER_DSI, "DSI" },
	{ DRM_MODE_ENCODER_DPMST, "DP MST" },
};

static const struct drm_prop_enum_list drm_subpixel_enum_list[] = {
	{ SubPixelUnknown, "Unknown" },
	{ SubPixelHorizontalRGB, "Horizontal RGB" },
	{ SubPixelHorizontalBGR, "Horizontal BGR" },
	{ SubPixelVerticalRGB, "Vertical RGB" },
	{ SubPixelVerticalBGR, "Vertical BGR" },
	{ SubPixelNone, "None" },
};

void drm_connector_ida_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
		ida_init(&drm_connector_enum_list[i].ida);
}

void drm_connector_ida_destroy(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
		ida_destroy(&drm_connector_enum_list[i].ida);
}

/**
 * drm_get_connector_status_name - return a string for connector status
 * @status: connector status to compute name of
 *
 * In contrast to the other drm_get_*_name functions this one here returns a
 * const pointer and hence is threadsafe.
 */
const char *drm_get_connector_status_name(enum drm_connector_status status)
{
	if (status == connector_status_connected)
		return "connected";
	else if (status == connector_status_disconnected)
		return "disconnected";
	else
		return "unknown";
}
EXPORT_SYMBOL(drm_get_connector_status_name);

/**
 * drm_get_subpixel_order_name - return a string for a given subpixel enum
 * @order: enum of subpixel_order
 *
 * Note you could abuse this and return something out of bounds, but that
 * would be a caller error.  No unscrubbed user data should make it here.
 */
const char *drm_get_subpixel_order_name(enum subpixel_order order)
{
	return drm_subpixel_enum_list[order].name;
}
EXPORT_SYMBOL(drm_get_subpixel_order_name);

static char printable_char(int c)
{
	return isascii(c) && isprint(c) ? c : '?';
}

/**
 * drm_get_format_name - return a string for drm fourcc format
 * @format: format to compute name of
 *
 * Note that the buffer used by this function is globally shared and owned by
 * the function itself.
 *
 * FIXME: This isn't really multithreading safe.
 */
const char *drm_get_format_name(uint32_t format)
{
	static char buf[32];

	snprintf(buf, sizeof(buf),
		 "%c%c%c%c %s-endian (0x%08x)",
		 printable_char(format & 0xff),
		 printable_char((format >> 8) & 0xff),
		 printable_char((format >> 16) & 0xff),
		 printable_char((format >> 24) & 0x7f),
		 format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little",
		 format);

	return buf;
}
EXPORT_SYMBOL(drm_get_format_name);
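/*
 * Example added for illustration, not part of the original file: for
 * DRM_FORMAT_XRGB8888, whose fourcc bytes are 'X' 'R' '2' '4'
 * (0x34325258), drm_get_format_name() should return
 * "XR24 little-endian (0x34325258)".
 */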
/*
 * Internal function to assign a slot in the object idr and optionally
 * register the object into the idr.
 */
static int drm_mode_object_get_reg(struct drm_device *dev,
				   struct drm_mode_object *obj,
				   uint32_t obj_type,
				   bool register_obj)
{
	int ret;

	mutex_lock(&dev->mode_config.idr_mutex);
	ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL, 1, 0, GFP_KERNEL);
	if (ret >= 0) {
		/*
		 * Set up the object linking under the protection of the idr
		 * lock so that other users can't see inconsistent state.
		 */
		obj->id = ret;
		obj->type = obj_type;
	}
	mutex_unlock(&dev->mode_config.idr_mutex);

	return ret < 0 ? ret : 0;
}

/**
 * drm_mode_object_get - allocate a new modeset identifier
 * @dev: DRM device
 * @obj: object pointer, used to generate unique ID
 * @obj_type: object type
 *
 * Create a unique identifier based on @ptr in @dev's identifier space.  Used
 * for tracking modes, CRTCs and connectors. Note that despite the _get postfix
 * modeset identifiers are _not_ reference counted. Hence don't use this for
 * reference counted modeset objects like framebuffers.
 *
 * Returns:
 * Zero on success, error code on failure.
 */
int drm_mode_object_get(struct drm_device *dev,
			struct drm_mode_object *obj, uint32_t obj_type)
{
	return drm_mode_object_get_reg(dev, obj, obj_type, true);
}

static void drm_mode_object_register(struct drm_device *dev,
				     struct drm_mode_object *obj)
{
	mutex_lock(&dev->mode_config.idr_mutex);
	idr_replace(&dev->mode_config.crtc_idr, obj, obj->id);
	mutex_unlock(&dev->mode_config.idr_mutex);
}

/**
 * drm_mode_object_put - free a modeset identifier
 * @dev: DRM device
 * @object: object to free
 *
 * Free @id from @dev's unique identifier pool. Note that despite the _get
 * postfix modeset identifiers are _not_ reference counted. Hence don't use this
 * for reference counted modeset objects like framebuffers.
 */
void drm_mode_object_put(struct drm_device *dev,
			 struct drm_mode_object *object)
{
	mutex_lock(&dev->mode_config.idr_mutex);
	idr_remove(&dev->mode_config.crtc_idr, object->id);
	mutex_unlock(&dev->mode_config.idr_mutex);
}

static struct drm_mode_object *_object_find(struct drm_device *dev,
		uint32_t id, uint32_t type)
{
	struct drm_mode_object *obj = NULL;

	mutex_lock(&dev->mode_config.idr_mutex);
	obj = idr_find(&dev->mode_config.crtc_idr, id);
	if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type)
		obj = NULL;
	if (obj && obj->id != id)
		obj = NULL;
	/* don't leak out unref'd fb's */
	if (obj &&
	    (obj->type == DRM_MODE_OBJECT_FB ||
	     obj->type == DRM_MODE_OBJECT_BLOB))
		obj = NULL;
	mutex_unlock(&dev->mode_config.idr_mutex);

	return obj;
}

/**
 * drm_mode_object_find - look up a drm object with static lifetime
 * @dev: drm device
 * @id: id of the mode object
 * @type: type of the mode object
 *
 * Note that framebuffers cannot be looked up with this function - since those
 * are reference counted, they need special treatment.  Even with
 * DRM_MODE_OBJECT_ANY (although that will simply return NULL
 * rather than WARN_ON()).
 */
struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
		uint32_t id, uint32_t type)
{
	struct drm_mode_object *obj = NULL;

	/* Framebuffers are reference counted and need their own lookup
	 * function.*/
	WARN_ON(type == DRM_MODE_OBJECT_FB || type == DRM_MODE_OBJECT_BLOB);
	obj = _object_find(dev, id, type);
	return obj;
}
EXPORT_SYMBOL(drm_mode_object_find);

/**
 * drm_framebuffer_init - initialize a framebuffer
 * @dev: DRM device
 * @fb: framebuffer to be initialized
 * @funcs: ... with these functions
 *
 * Allocates an ID for the framebuffer's parent mode object, sets its mode
 * functions & device file and adds it to the master fd list.
 *
 * IMPORTANT:
 * This function publishes the fb and makes it available for concurrent access
 * by other users. Which means by this point the fb _must_ be fully set up -
 * since all the fb attributes are invariant over its lifetime, no further
 * locking but only correct reference counting is required.
 *
 * Returns:
 * Zero on success, error code on failure.
 */
int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
			 const struct drm_framebuffer_funcs *funcs)
{
	int ret;

	mutex_lock(&dev->mode_config.fb_lock);
	kref_init(&fb->refcount);
	INIT_LIST_HEAD(&fb->filp_head);
	fb->dev = dev;
	fb->funcs = funcs;

	ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
	if (ret)
		goto out;

	dev->mode_config.num_fb++;
	list_add(&fb->head, &dev->mode_config.fb_list);
out:
	mutex_unlock(&dev->mode_config.fb_lock);

	return ret;
}
EXPORT_SYMBOL(drm_framebuffer_init);

/* dev->mode_config.fb_lock must be held! */
static void __drm_framebuffer_unregister(struct drm_device *dev,
					 struct drm_framebuffer *fb)
{
	mutex_lock(&dev->mode_config.idr_mutex);
	idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
	mutex_unlock(&dev->mode_config.idr_mutex);

	fb->base.id = 0;
}

static void drm_framebuffer_free(struct kref *kref)
{
	struct drm_framebuffer *fb =
			container_of(kref, struct drm_framebuffer, refcount);
	struct drm_device *dev = fb->dev;

	/*
	 * The lookup idr holds a weak reference, which has not necessarily been
	 * removed at this point. Check for that.
	 */
	mutex_lock(&dev->mode_config.fb_lock);
	if (fb->base.id) {
		/* Mark fb as reaped and drop idr ref. */
		__drm_framebuffer_unregister(dev, fb);
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	fb->funcs->destroy(fb);
}

static struct drm_framebuffer *__drm_framebuffer_lookup(struct drm_device *dev,
							uint32_t id)
{
	struct drm_mode_object *obj = NULL;
	struct drm_framebuffer *fb;

	mutex_lock(&dev->mode_config.idr_mutex);
	obj = idr_find(&dev->mode_config.crtc_idr, id);
	if (!obj || (obj->type != DRM_MODE_OBJECT_FB) || (obj->id != id))
		fb = NULL;
	else
		fb = obj_to_fb(obj);
	mutex_unlock(&dev->mode_config.idr_mutex);

	return fb;
}

/**
 * drm_framebuffer_lookup - look up a drm framebuffer and grab a reference
 * @dev: drm device
 * @id: id of the fb object
 *
 * If successful, this grabs an additional reference to the framebuffer -
 * callers need to make sure to eventually unreference the returned framebuffer
 * again, using @drm_framebuffer_unreference.
 */
struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
					       uint32_t id)
{
	struct drm_framebuffer *fb;

	mutex_lock(&dev->mode_config.fb_lock);
	fb = __drm_framebuffer_lookup(dev, id);
	if (fb) {
		if (!kref_get_unless_zero(&fb->refcount))
			fb = NULL;
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return fb;
}
EXPORT_SYMBOL(drm_framebuffer_lookup);

/**
 * drm_framebuffer_unreference - unref a framebuffer
 * @fb: framebuffer to unref
 *
 * This function decrements the fb's refcount and frees it if it drops to zero.
 */
void drm_framebuffer_unreference(struct drm_framebuffer *fb)
{
	DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
	kref_put(&fb->refcount, drm_framebuffer_free);
}
EXPORT_SYMBOL(drm_framebuffer_unreference);

/**
 * drm_framebuffer_reference - incr the fb refcnt
 * @fb: framebuffer
 *
 * This function increments the fb's refcount.
 */
void drm_framebuffer_reference(struct drm_framebuffer *fb)
{
	DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount));
	kref_get(&fb->refcount);
}
EXPORT_SYMBOL(drm_framebuffer_reference);

/**
 * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
 * @fb: fb to unregister
 *
 * Drivers need to call this when cleaning up driver-private framebuffers, e.g.
 * those used for fbdev. Note that the caller must hold a reference of its own,
 * i.e. the object may not be destroyed through this call (since it'll lead to a
 * locking inversion).
 */
void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
{
	struct drm_device *dev;

	if (!fb)
		return;

	dev = fb->dev;

	mutex_lock(&dev->mode_config.fb_lock);
	/* Mark fb as reaped and drop idr ref. */
	__drm_framebuffer_unregister(dev, fb);
	mutex_unlock(&dev->mode_config.fb_lock);
}
EXPORT_SYMBOL(drm_framebuffer_unregister_private);

/**
 * drm_framebuffer_cleanup - remove a framebuffer object
 * @fb: framebuffer to remove
 *
 * Cleanup framebuffer. This function is intended to be used from the drivers
 * ->destroy callback. It can also be used to clean up driver private
 * framebuffers embedded into a larger structure.
 *
 * Note that this function does not remove the fb from active usage - if it is
 * still used anywhere, hilarity can ensue since userspace could call getfb on
 * the id and get back -EINVAL. Obviously no concern at driver unload time.
 *
 * Also, the framebuffer will not be removed from the lookup idr - for
 * user-created framebuffers this will happen in the rmfb ioctl. For
 * driver-private objects (e.g. for fbdev) drivers need to explicitly call
 * drm_framebuffer_unregister_private.
 */
void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
{
	struct drm_device *dev = fb->dev;

	mutex_lock(&dev->mode_config.fb_lock);
	list_del(&fb->head);
	dev->mode_config.num_fb--;
	mutex_unlock(&dev->mode_config.fb_lock);
}
EXPORT_SYMBOL(drm_framebuffer_cleanup);

/**
 * drm_framebuffer_remove - remove and unreference a framebuffer object
 * @fb: framebuffer to remove
 *
 * Scans all the CRTCs and planes in @dev's mode_config.  If they're
 * using @fb, removes it, setting it to NULL. Then drops the reference to the
 * passed-in framebuffer. Might take the modeset locks.
 *
 * Note that this function optimizes the cleanup away if the caller holds the
 * last reference to the framebuffer. It is also guaranteed to not take the
 * modeset locks in this case.
 */
void drm_framebuffer_remove(struct drm_framebuffer *fb)
{
	struct drm_device *dev;
	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_mode_set set;
	int ret;

	if (!fb)
		return;

	dev = fb->dev;

	WARN_ON(!list_empty(&fb->filp_head));

	/*
	 * drm ABI mandates that we remove any deleted framebuffers from active
	 * usage. But since most sane clients only remove framebuffers they no
	 * longer need, try to optimize this away.
	 *
	 * Since we're holding a reference ourselves, observing a refcount of 1
	 * means that we're the last holder and can skip it. Also, the refcount
	 * can never increase from 1 again, so we don't need any barriers or
	 * locks.
	 *
	 * Note that userspace could try to race with use and instate a new
	 * usage _after_ we've cleared all current ones. End result will be an
	 * in-use fb with fb-id == 0. Userspace is allowed to shoot its own foot
	 * in this manner.
	 */
	if (atomic_read(&fb->refcount.refcount) > 1) {
		drm_modeset_lock_all(dev);
		/* remove from any CRTC */
		drm_for_each_crtc(crtc, dev) {
			if (crtc->primary->fb == fb) {
				/* should turn off the crtc */
				memset(&set, 0, sizeof(struct drm_mode_set));
				set.crtc = crtc;
				set.fb = NULL;
				ret = drm_mode_set_config_internal(&set);
				if (ret)
					DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
			}
		}

		drm_for_each_plane(plane, dev) {
			if (plane->fb == fb)
				drm_plane_force_disable(plane);
		}
		drm_modeset_unlock_all(dev);
	}

	drm_framebuffer_unreference(fb);
}
EXPORT_SYMBOL(drm_framebuffer_remove);

DEFINE_WW_CLASS(crtc_ww_class);

static unsigned int drm_num_crtcs(struct drm_device *dev)
{
	unsigned int num = 0;
	struct drm_crtc *tmp;

	drm_for_each_crtc(tmp, dev) {
		num++;
	}

	return num;
}

/**
 * drm_crtc_init_with_planes - Initialise a new CRTC object with
 *    specified primary and cursor planes.
 * @dev: DRM device
 * @crtc: CRTC object to init
 * @primary: Primary plane for CRTC
 * @cursor: Cursor plane for CRTC
 * @funcs: callbacks for the new CRTC
 * @name: printf style format string for the CRTC name, or NULL for default name
 *
 * Inits a new object created as base part of a driver crtc object.
 *
 * Returns:
 * Zero on success, error code on failure.
 */
int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
			      struct drm_plane *primary,
			      struct drm_plane *cursor,
			      const struct drm_crtc_funcs *funcs,
			      const char *name, ...)
{
	struct drm_mode_config *config = &dev->mode_config;
	int ret;

	WARN_ON(primary && primary->type != DRM_PLANE_TYPE_PRIMARY);
	WARN_ON(cursor && cursor->type != DRM_PLANE_TYPE_CURSOR);

	crtc->dev = dev;
	crtc->funcs = funcs;

	drm_modeset_lock_init(&crtc->mutex);
	ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
	if (ret)
		return ret;

	if (name) {
		va_list ap;

		va_start(ap, name);
		crtc->name = kvasprintf(GFP_KERNEL, name, ap);
		va_end(ap);
	} else {
		crtc->name = kasprintf(GFP_KERNEL, "crtc-%d",
				       drm_num_crtcs(dev));
	}
	if (!crtc->name) {
		drm_mode_object_put(dev, &crtc->base);
		return -ENOMEM;
	}

	crtc->base.properties = &crtc->properties;

	list_add_tail(&crtc->head, &config->crtc_list);
	config->num_crtc++;

	crtc->primary = primary;
	crtc->cursor = cursor;
	if (primary)
		primary->possible_crtcs = 1 << drm_crtc_index(crtc);
	if (cursor)
		cursor->possible_crtcs = 1 << drm_crtc_index(crtc);

	if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
		drm_object_attach_property(&crtc->base, config->prop_active, 0);
		drm_object_attach_property(&crtc->base, config->prop_mode_id, 0);
	}

	return 0;
}
EXPORT_SYMBOL(drm_crtc_init_with_planes);

/**
 * drm_crtc_cleanup - Clean up the core crtc usage
 * @crtc: CRTC to cleanup
 *
 * This function cleans up @crtc and removes it from the DRM mode setting
 * core. Note that the function does *not* free the crtc structure itself,
 * this is the responsibility of the caller.
 */
void drm_crtc_cleanup(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;

	kfree(crtc->gamma_store);
	crtc->gamma_store = NULL;

	drm_modeset_lock_fini(&crtc->mutex);

	drm_mode_object_put(dev, &crtc->base);
	list_del(&crtc->head);
	dev->mode_config.num_crtc--;

	WARN_ON(crtc->state && !crtc->funcs->atomic_destroy_state);
	if (crtc->state && crtc->funcs->atomic_destroy_state)
		crtc->funcs->atomic_destroy_state(crtc, crtc->state);

	kfree(crtc->name);

	memset(crtc, 0, sizeof(*crtc));
}
EXPORT_SYMBOL(drm_crtc_cleanup);
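/*
 * Hypothetical driver-side usage of the init/cleanup pair above, added for
 * illustration only (mycrtc and my_crtc_funcs are invented names):
 *
 *	ret = drm_crtc_init_with_planes(dev, &mycrtc->base,
 *					&mycrtc->primary->base, NULL,
 *					&my_crtc_funcs, "my-crtc-%d", 0);
 *	...
 *	drm_crtc_cleanup(&mycrtc->base);
 */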
/**
 * drm_crtc_index - find the index of a registered CRTC
 * @crtc: CRTC to find index for
 *
 * Given a registered CRTC, return the index of that CRTC within a DRM
 * device's list of CRTCs.
 */
unsigned int drm_crtc_index(struct drm_crtc *crtc)
{
	unsigned int index = 0;
	struct drm_crtc *tmp;

	drm_for_each_crtc(tmp, crtc->dev) {
		if (tmp == crtc)
			return index;

		index++;
	}

	BUG();
}
EXPORT_SYMBOL(drm_crtc_index);

/*
 * drm_mode_remove - remove and free a mode
 * @connector: connector list to modify
 * @mode: mode to remove
 *
 * Remove @mode from @connector's mode list, then free it.
 */
static void drm_mode_remove(struct drm_connector *connector,
			    struct drm_display_mode *mode)
{
	list_del(&mode->head);
	drm_mode_destroy(connector->dev, mode);
}

/**
 * drm_display_info_set_bus_formats - set the supported bus formats
 * @info: display info to store bus formats in
 * @formats: array containing the supported bus formats
 * @num_formats: the number of entries in the formats array
 *
 * Store the supported bus formats in display info structure.
 * See MEDIA_BUS_FMT_* definitions in include/uapi/linux/media-bus-format.h for
 * a full list of available formats.
 */
int drm_display_info_set_bus_formats(struct drm_display_info *info,
				     const u32 *formats,
				     unsigned int num_formats)
{
	u32 *fmts = NULL;

	if (!formats && num_formats)
		return -EINVAL;

	if (formats && num_formats) {
		fmts = kmemdup(formats, sizeof(*formats) * num_formats,
			       GFP_KERNEL);
		if (!fmts)
			return -ENOMEM;
	}

	kfree(info->bus_formats);
	info->bus_formats = fmts;
	info->num_bus_formats = num_formats;

	return 0;
}
EXPORT_SYMBOL(drm_display_info_set_bus_formats);

/**
 * drm_connector_get_cmdline_mode - reads the user's cmdline mode
 * @connector: connector to query
 *
 * The kernel supports per-connector configuration of its consoles through
 * use of the video= parameter. This function parses that option and
 * extracts the user's specified mode (or enable/disable status) for a
 * particular connector. This is typically only used during the early fbdev
 * setup.
 */
static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
{
	struct drm_cmdline_mode *mode = &connector->cmdline_mode;
	char *option = NULL;

	if (fb_get_options(connector->name, &option))
		return;

	if (!drm_mode_parse_command_line_for_connector(option,
						       connector,
						       mode))
		return;

	if (mode->force) {
		const char *s;

		switch (mode->force) {
		case DRM_FORCE_OFF:
			s = "OFF";
			break;
		case DRM_FORCE_ON_DIGITAL:
			s = "ON - dig";
			break;
		default:
		case DRM_FORCE_ON:
			s = "ON";
			break;
		}

		DRM_INFO("forcing %s connector %s\n", connector->name, s);
		connector->force = mode->force;
	}

	DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
		      connector->name,
		      mode->xres, mode->yres,
		      mode->refresh_specified ? mode->refresh : 60,
		      mode->rb ? " reduced blanking" : "",
		      mode->margins ? " with margins" : "",
		      mode->interlace ?  " interlaced" : "");
}

/**
 * drm_connector_init - Init a preallocated connector
 * @dev: DRM device
 * @connector: the connector to init
 * @funcs: callbacks for this connector
 * @connector_type: user visible type of the connector
 *
 * Initialises a preallocated connector. Connectors should be
 * subclassed as part of driver connector objects.
 *
 * Returns:
 * Zero on success, error code on failure.
 */
int drm_connector_init(struct drm_device *dev,
		       struct drm_connector *connector,
		       const struct drm_connector_funcs *funcs,
		       int connector_type)
{
	struct drm_mode_config *config = &dev->mode_config;
	int ret;
	struct ida *connector_ida =
		&drm_connector_enum_list[connector_type].ida;

	drm_modeset_lock_all(dev);

	ret = drm_mode_object_get_reg(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR, false);
	if (ret)
		goto out_unlock;

	connector->base.properties = &connector->properties;
	connector->dev = dev;
	connector->funcs = funcs;
	connector->connector_type = connector_type;
	connector->connector_type_id =
		ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
	if (connector->connector_type_id < 0) {
		ret = connector->connector_type_id;
		goto out_put;
	}
	connector->name =
		kasprintf(GFP_KERNEL, "%s-%d",
			  drm_connector_enum_list[connector_type].name,
			  connector->connector_type_id);
	if (!connector->name) {
		ret = -ENOMEM;
		goto out_put;
	}

	INIT_LIST_HEAD(&connector->probed_modes);
	INIT_LIST_HEAD(&connector->modes);
	connector->edid_blob_ptr = NULL;
	connector->status = connector_status_unknown;

	drm_connector_get_cmdline_mode(connector);

	/* We should add connectors at the end to avoid upsetting the connector
	 * index too much. */
	list_add_tail(&connector->head, &config->connector_list);
	config->num_connector++;

	if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
		drm_object_attach_property(&connector->base,
					   config->edid_property,
					   0);

	drm_object_attach_property(&connector->base,
				   config->dpms_property, 0);

	if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
		drm_object_attach_property(&connector->base, config->prop_crtc_id, 0);
	}

	connector->debugfs_entry = NULL;

out_put:
	if (ret)
		drm_mode_object_put(dev, &connector->base);

out_unlock:
	drm_modeset_unlock_all(dev);

	return ret;
}
EXPORT_SYMBOL(drm_connector_init);

/**
 * drm_connector_cleanup - cleans up an initialised connector
 * @connector: connector to cleanup
 *
 * Cleans up the connector but doesn't free the object.
 */
void drm_connector_cleanup(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct drm_display_mode *mode, *t;

	if (connector->tile_group) {
		drm_mode_put_tile_group(dev, connector->tile_group);
		connector->tile_group = NULL;
	}

	list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
		drm_mode_remove(connector, mode);

	list_for_each_entry_safe(mode, t, &connector->modes, head)
		drm_mode_remove(connector, mode);

	ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
		   connector->connector_type_id);

	kfree(connector->display_info.bus_formats);
	drm_mode_object_put(dev, &connector->base);
	kfree(connector->name);
	connector->name = NULL;
	list_del(&connector->head);
	dev->mode_config.num_connector--;

	WARN_ON(connector->state && !connector->funcs->atomic_destroy_state);
	if (connector->state && connector->funcs->atomic_destroy_state)
		connector->funcs->atomic_destroy_state(connector,
						       connector->state);

	memset(connector, 0, sizeof(*connector));
}
EXPORT_SYMBOL(drm_connector_cleanup);
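/*
 * For orientation (added for illustration, not part of the original file),
 * a typical driver-side lifecycle pairing of the connector API defined in
 * this file, using an invented my_connector/my_funcs:
 *
 *	drm_connector_init(dev, &my_connector->base, &my_funcs,
 *			   DRM_MODE_CONNECTOR_HDMIA);
 *	drm_connector_register(&my_connector->base);
 *	...
 *	drm_connector_unregister(&my_connector->base);
 *	drm_connector_cleanup(&my_connector->base);
 */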
/**
 * drm_connector_index - find the index of a registered connector
 * @connector: connector to find index for
 *
 * Given a registered connector, return the index of that connector within a DRM
 * device's list of connectors.
 */
unsigned int drm_connector_index(struct drm_connector *connector)
{
	unsigned int index = 0;
	struct drm_connector *tmp;
	struct drm_mode_config *config = &connector->dev->mode_config;

	WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));

	drm_for_each_connector(tmp, connector->dev) {
		if (tmp == connector)
			return index;

		index++;
	}

	BUG();
}
EXPORT_SYMBOL(drm_connector_index);

/**
 * drm_connector_register - register a connector
 * @connector: the connector to register
 *
 * Register userspace interfaces for a connector
 *
 * Returns:
 * Zero on success, error code on failure.
 */
int drm_connector_register(struct drm_connector *connector)
{
	int ret;

	drm_mode_object_register(connector->dev, &connector->base);

	ret = drm_sysfs_connector_add(connector);
	if (ret)
		return ret;

	ret = drm_debugfs_connector_add(connector);
	if (ret) {
		drm_sysfs_connector_remove(connector);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(drm_connector_register);

/**
 * drm_connector_unregister - unregister a connector
 * @connector: the connector to unregister
 *
 * Unregister userspace interfaces for a connector
 */
void drm_connector_unregister(struct drm_connector *connector)
{
	drm_sysfs_connector_remove(connector);
	drm_debugfs_connector_remove(connector);
}
EXPORT_SYMBOL(drm_connector_unregister);

/**
 * drm_connector_unplug_all - unregister connector userspace interfaces
 * @dev: drm device
 *
 * This function unregisters all connector userspace interfaces in sysfs. Should
 * be called when the device is disconnected, e.g. from a usb driver's
 * ->disconnect callback.
 */
void drm_connector_unplug_all(struct drm_device *dev)
{
	struct drm_connector *connector;

	/* FIXME: taking the mode config mutex ends up in a clash with sysfs */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
		drm_connector_unregister(connector);

}
EXPORT_SYMBOL(drm_connector_unplug_all);

/**
 * drm_encoder_init - Init a preallocated encoder
 * @dev: drm device
 * @encoder: the encoder to init
 * @funcs: callbacks for this encoder
 * @encoder_type: user visible type of the encoder
 * @name: printf style format string for the encoder name, or NULL for default name
 *
 * Initialises a preallocated encoder. Encoder should be
 * subclassed as part of driver encoder objects.
 *
 * Returns:
 * Zero on success, error code on failure.
 */
int drm_encoder_init(struct drm_device *dev,
		     struct drm_encoder *encoder,
		     const struct drm_encoder_funcs *funcs,
		     int encoder_type, const char *name, ...)
{
	int ret;

	drm_modeset_lock_all(dev);

	ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
	if (ret)
		goto out_unlock;

	encoder->dev = dev;
	encoder->encoder_type = encoder_type;
	encoder->funcs = funcs;
	if (name) {
		va_list ap;

		va_start(ap, name);
		encoder->name = kvasprintf(GFP_KERNEL, name, ap);
		va_end(ap);
	} else {
		encoder->name = kasprintf(GFP_KERNEL, "%s-%d",
					  drm_encoder_enum_list[encoder_type].name,
					  encoder->base.id);
	}
	if (!encoder->name) {
		ret = -ENOMEM;
		goto out_put;
	}

	list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
	dev->mode_config.num_encoder++;

out_put:
	if (ret)
		drm_mode_object_put(dev, &encoder->base);

out_unlock:
	drm_modeset_unlock_all(dev);

	return ret;
}
EXPORT_SYMBOL(drm_encoder_init);

/**
 * drm_encoder_cleanup - cleans up an initialised encoder
 * @encoder: encoder to cleanup
 *
 * Cleans up the encoder but doesn't free the object.
 */
void drm_encoder_cleanup(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;

	drm_modeset_lock_all(dev);
	drm_mode_object_put(dev, &encoder->base);
	kfree(encoder->name);
	list_del(&encoder->head);
	dev->mode_config.num_encoder--;
	drm_modeset_unlock_all(dev);

	memset(encoder, 0, sizeof(*encoder));
}
EXPORT_SYMBOL(drm_encoder_cleanup);

static unsigned int drm_num_planes(struct drm_device *dev)
{
	unsigned int num = 0;
	struct drm_plane *tmp;

	drm_for_each_plane(tmp, dev) {
		num++;
	}

	return num;
}

/**
 * drm_universal_plane_init - Initialize a new universal plane object
 * @dev: DRM device
 * @plane: plane object to init
 * @possible_crtcs: bitmask of possible CRTCs
 * @funcs: callbacks for the new plane
 * @formats: array of supported formats (%DRM_FORMAT_*)
 * @format_count: number of elements in @formats
 * @type: type of plane (overlay, primary, cursor)
 * @name: printf style format string for the plane name, or NULL for default name
 *
 * Initializes a plane object of type @type.
 *
 * Returns:
 * Zero on success, error code on failure.
 */
int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
			     unsigned long possible_crtcs,
			     const struct drm_plane_funcs *funcs,
			     const uint32_t *formats, unsigned int format_count,
			     enum drm_plane_type type,
			     const char *name, ...)
{
	struct drm_mode_config *config = &dev->mode_config;
	int ret;

	ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
	if (ret)
		return ret;

	drm_modeset_lock_init(&plane->mutex);

	plane->base.properties = &plane->properties;
	plane->dev = dev;
	plane->funcs = funcs;
	plane->format_types = kmalloc_array(format_count, sizeof(uint32_t),
					    GFP_KERNEL);
	if (!plane->format_types) {
		DRM_DEBUG_KMS("out of memory when allocating plane\n");
		drm_mode_object_put(dev, &plane->base);
		return -ENOMEM;
	}

	if (name) {
		va_list ap;

		va_start(ap, name);
		plane->name = kvasprintf(GFP_KERNEL, name, ap);
		va_end(ap);
	} else {
		plane->name = kasprintf(GFP_KERNEL, "plane-%d",
					drm_num_planes(dev));
	}
	if (!plane->name) {
		kfree(plane->format_types);
		drm_mode_object_put(dev, &plane->base);
		return -ENOMEM;
	}

	memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
	plane->format_count = format_count;
	plane->possible_crtcs = possible_crtcs;
	plane->type = type;

	list_add_tail(&plane->head, &config->plane_list);
	config->num_total_plane++;
	if (plane->type == DRM_PLANE_TYPE_OVERLAY)
		config->num_overlay_plane++;

	drm_object_attach_property(&plane->base,
				   config->plane_type_property,
				   plane->type);

	if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
		drm_object_attach_property(&plane->base, config->prop_fb_id, 0);
		drm_object_attach_property(&plane->base, config->prop_crtc_id, 0);
		drm_object_attach_property(&plane->base, config->prop_crtc_x, 0);
		drm_object_attach_property(&plane->base, config->prop_crtc_y, 0);
		drm_object_attach_property(&plane->base, config->prop_crtc_w, 0);
		drm_object_attach_property(&plane->base, config->prop_crtc_h, 0);
		drm_object_attach_property(&plane->base, config->prop_src_x, 0);
		drm_object_attach_property(&plane->base, config->prop_src_y, 0);
		drm_object_attach_property(&plane->base, config->prop_src_w, 0);
		drm_object_attach_property(&plane->base, config->prop_src_h, 0);
	}

	return 0;
}
EXPORT_SYMBOL(drm_universal_plane_init);
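/*
 * Hypothetical driver-side usage of drm_universal_plane_init(), added for
 * illustration only (the format list and my_* names are invented):
 *
 *	static const uint32_t my_formats[] = { DRM_FORMAT_XRGB8888 };
 *
 *	ret = drm_universal_plane_init(dev, &myplane->base, 1 << pipe,
 *				       &my_plane_funcs, my_formats,
 *				       ARRAY_SIZE(my_formats),
 *				       DRM_PLANE_TYPE_PRIMARY, NULL);
 */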
/**
 * drm_plane_init - Initialize a legacy plane
 * @dev: DRM device
 * @plane: plane object to init
 * @possible_crtcs: bitmask of possible CRTCs
 * @funcs: callbacks for the new plane
 * @formats: array of supported formats (%DRM_FORMAT_*)
 * @format_count: number of elements in @formats
 * @is_primary: plane type (primary vs overlay)
 *
 * Legacy API to initialize a DRM plane.
 *
 * New drivers should call drm_universal_plane_init() instead.
 *
 * Returns:
 * Zero on success, error code on failure.
 */
int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
		   unsigned long possible_crtcs,
		   const struct drm_plane_funcs *funcs,
		   const uint32_t *formats, unsigned int format_count,
		   bool is_primary)
{
	enum drm_plane_type type;

	type = is_primary ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
	return drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
					formats, format_count, type, NULL);
}
EXPORT_SYMBOL(drm_plane_init);

/**
 * drm_plane_cleanup - Clean up the core plane usage
 * @plane: plane to cleanup
 *
 * This function cleans up @plane and removes it from the DRM mode setting
 * core. Note that the function does *not* free the plane structure itself,
 * this is the responsibility of the caller.
 */
void drm_plane_cleanup(struct drm_plane *plane)
{
	struct drm_device *dev = plane->dev;

	drm_modeset_lock_all(dev);
	kfree(plane->format_types);
	drm_mode_object_put(dev, &plane->base);

	BUG_ON(list_empty(&plane->head));

	list_del(&plane->head);
	dev->mode_config.num_total_plane--;
	if (plane->type == DRM_PLANE_TYPE_OVERLAY)
		dev->mode_config.num_overlay_plane--;
	drm_modeset_unlock_all(dev);

	WARN_ON(plane->state && !plane->funcs->atomic_destroy_state);
	if (plane->state && plane->funcs->atomic_destroy_state)
		plane->funcs->atomic_destroy_state(plane, plane->state);

	kfree(plane->name);

	memset(plane, 0, sizeof(*plane));
}
EXPORT_SYMBOL(drm_plane_cleanup);

/**
 * drm_plane_index - find the index of a registered plane
 * @plane: plane to find index for
 *
 * Given a registered plane, return the index of that plane within a DRM
 * device's list of planes.
 */
unsigned int drm_plane_index(struct drm_plane *plane)
{
	unsigned int index = 0;
	struct drm_plane *tmp;

	drm_for_each_plane(tmp, plane->dev) {
		if (tmp == plane)
			return index;

		index++;
	}

	BUG();
}
EXPORT_SYMBOL(drm_plane_index);

/**
 * drm_plane_from_index - find the registered plane at an index
 * @dev: DRM device
 * @idx: index of registered plane to find for
 *
 * Given a plane index, return the registered plane from DRM device's
 * list of planes with matching index.
 */
struct drm_plane *
drm_plane_from_index(struct drm_device *dev, int idx)
{
	struct drm_plane *plane;
	unsigned int i = 0;

	drm_for_each_plane(plane, dev) {
		if (i == idx)
			return plane;
		i++;
	}
	return NULL;
}
EXPORT_SYMBOL(drm_plane_from_index);
*/ void drm_plane_force_disable(struct drm_plane *plane) { int ret; if (!plane->fb) return; plane->old_fb = plane->fb; ret = plane->funcs->disable_plane(plane); if (ret) { DRM_ERROR("failed to disable plane with busy fb\n"); plane->old_fb = NULL; return; } /* disconnect the plane from the fb and crtc: */ drm_framebuffer_unreference(plane->old_fb); plane->old_fb = NULL; plane->fb = NULL; plane->crtc = NULL; } EXPORT_SYMBOL(drm_plane_force_disable); static int drm_mode_create_standard_properties(struct drm_device *dev) { struct drm_property *prop; /* * Standard properties (apply to all connectors) */ prop = drm_property_create(dev, DRM_MODE_PROP_BLOB | DRM_MODE_PROP_IMMUTABLE, "EDID", 0); if (!prop) return -ENOMEM; dev->mode_config.edid_property = prop; prop = drm_property_create_enum(dev, 0, "DPMS", drm_dpms_enum_list, ARRAY_SIZE(drm_dpms_enum_list)); if (!prop) return -ENOMEM; dev->mode_config.dpms_property = prop; prop = drm_property_create(dev, DRM_MODE_PROP_BLOB | DRM_MODE_PROP_IMMUTABLE, "PATH", 0); if (!prop) return -ENOMEM; dev->mode_config.path_property = prop; prop = drm_property_create(dev, DRM_MODE_PROP_BLOB | DRM_MODE_PROP_IMMUTABLE, "TILE", 0); if (!prop) return -ENOMEM; dev->mode_config.tile_property = prop; prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE, "type", drm_plane_type_enum_list, ARRAY_SIZE(drm_plane_type_enum_list)); if (!prop) return -ENOMEM; dev->mode_config.plane_type_property = prop; prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "SRC_X", 0, UINT_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_src_x = prop; prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "SRC_Y", 0, UINT_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_src_y = prop; prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "SRC_W", 0, UINT_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_src_w = prop; prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "SRC_H", 0, UINT_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_src_h = prop; prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC, "CRTC_X", INT_MIN, INT_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_crtc_x = prop; prop = drm_property_create_signed_range(dev, DRM_MODE_PROP_ATOMIC, "CRTC_Y", INT_MIN, INT_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_crtc_y = prop; prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "CRTC_W", 0, INT_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_crtc_w = prop; prop = drm_property_create_range(dev, DRM_MODE_PROP_ATOMIC, "CRTC_H", 0, INT_MAX); if (!prop) return -ENOMEM; dev->mode_config.prop_crtc_h = prop; prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC, "FB_ID", DRM_MODE_OBJECT_FB); if (!prop) return -ENOMEM; dev->mode_config.prop_fb_id = prop; prop = drm_property_create_object(dev, DRM_MODE_PROP_ATOMIC, "CRTC_ID", DRM_MODE_OBJECT_CRTC); if (!prop) return -ENOMEM; dev->mode_config.prop_crtc_id = prop; prop = drm_property_create_bool(dev, DRM_MODE_PROP_ATOMIC, "ACTIVE"); if (!prop) return -ENOMEM; dev->mode_config.prop_active = prop; prop = drm_property_create(dev, DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB, "MODE_ID", 0); if (!prop) return -ENOMEM; dev->mode_config.prop_mode_id = prop; return 0; } /** * drm_mode_create_dvi_i_properties - create DVI-I specific connector properties * @dev: DRM device * * Called by a driver the first time a DVI-I connector is made. 
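 *
 * A minimal illustrative sketch of the expected pairing (not from this
 * file; "connector" stands in for the driver's DVI-I connector, and the
 * initial value 0 selects the automatic subconnector):
 *
 *      drm_mode_create_dvi_i_properties(dev);
 *      drm_object_attach_property(&connector->base,
 *                      dev->mode_config.dvi_i_select_subconnector_property,
 *                      0);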
*/ int drm_mode_create_dvi_i_properties(struct drm_device *dev) { struct drm_property *dvi_i_selector; struct drm_property *dvi_i_subconnector; if (dev->mode_config.dvi_i_select_subconnector_property) return 0; dvi_i_selector = drm_property_create_enum(dev, 0, "select subconnector", drm_dvi_i_select_enum_list, ARRAY_SIZE(drm_dvi_i_select_enum_list)); dev->mode_config.dvi_i_select_subconnector_property = dvi_i_selector; dvi_i_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE, "subconnector", drm_dvi_i_subconnector_enum_list, ARRAY_SIZE(drm_dvi_i_subconnector_enum_list)); dev->mode_config.dvi_i_subconnector_property = dvi_i_subconnector; return 0; } EXPORT_SYMBOL(drm_mode_create_dvi_i_properties); /** * drm_create_tv_properties - create TV specific connector properties * @dev: DRM device * @num_modes: number of different TV formats (modes) supported * @modes: array of pointers to strings containing name of each format * * Called by a driver's TV initialization routine, this function creates * the TV specific connector properties for a given device. Caller is * responsible for allocating a list of format names and passing them to * this routine. */ int drm_mode_create_tv_properties(struct drm_device *dev, unsigned int num_modes, const char * const modes[]) { struct drm_property *tv_selector; struct drm_property *tv_subconnector; unsigned int i; if (dev->mode_config.tv_select_subconnector_property) return 0; /* * Basic connector properties */ tv_selector = drm_property_create_enum(dev, 0, "select subconnector", drm_tv_select_enum_list, ARRAY_SIZE(drm_tv_select_enum_list)); if (!tv_selector) goto nomem; dev->mode_config.tv_select_subconnector_property = tv_selector; tv_subconnector = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE, "subconnector", drm_tv_subconnector_enum_list, ARRAY_SIZE(drm_tv_subconnector_enum_list)); if (!tv_subconnector) goto nomem; dev->mode_config.tv_subconnector_property = tv_subconnector; /* * Other, TV specific properties: margins & TV modes. 
*/ dev->mode_config.tv_left_margin_property = drm_property_create_range(dev, 0, "left margin", 0, 100); if (!dev->mode_config.tv_left_margin_property) goto nomem; dev->mode_config.tv_right_margin_property = drm_property_create_range(dev, 0, "right margin", 0, 100); if (!dev->mode_config.tv_right_margin_property) goto nomem; dev->mode_config.tv_top_margin_property = drm_property_create_range(dev, 0, "top margin", 0, 100); if (!dev->mode_config.tv_top_margin_property) goto nomem; dev->mode_config.tv_bottom_margin_property = drm_property_create_range(dev, 0, "bottom margin", 0, 100); if (!dev->mode_config.tv_bottom_margin_property) goto nomem; dev->mode_config.tv_mode_property = drm_property_create(dev, DRM_MODE_PROP_ENUM, "mode", num_modes); if (!dev->mode_config.tv_mode_property) goto nomem; for (i = 0; i < num_modes; i++) drm_property_add_enum(dev->mode_config.tv_mode_property, i, i, modes[i]); dev->mode_config.tv_brightness_property = drm_property_create_range(dev, 0, "brightness", 0, 100); if (!dev->mode_config.tv_brightness_property) goto nomem; dev->mode_config.tv_contrast_property = drm_property_create_range(dev, 0, "contrast", 0, 100); if (!dev->mode_config.tv_contrast_property) goto nomem; dev->mode_config.tv_flicker_reduction_property = drm_property_create_range(dev, 0, "flicker reduction", 0, 100); if (!dev->mode_config.tv_flicker_reduction_property) goto nomem; dev->mode_config.tv_overscan_property = drm_property_create_range(dev, 0, "overscan", 0, 100); if (!dev->mode_config.tv_overscan_property) goto nomem; dev->mode_config.tv_saturation_property = drm_property_create_range(dev, 0, "saturation", 0, 100); if (!dev->mode_config.tv_saturation_property) goto nomem; dev->mode_config.tv_hue_property = drm_property_create_range(dev, 0, "hue", 0, 100); if (!dev->mode_config.tv_hue_property) goto nomem; return 0; nomem: return -ENOMEM; } EXPORT_SYMBOL(drm_mode_create_tv_properties); /** * drm_mode_create_scaling_mode_property - create scaling mode property * @dev: DRM device * * Called by a driver the first time it's needed, must be attached to desired * connectors. */ int drm_mode_create_scaling_mode_property(struct drm_device *dev) { struct drm_property *scaling_mode; if (dev->mode_config.scaling_mode_property) return 0; scaling_mode = drm_property_create_enum(dev, 0, "scaling mode", drm_scaling_mode_enum_list, ARRAY_SIZE(drm_scaling_mode_enum_list)); dev->mode_config.scaling_mode_property = scaling_mode; return 0; } EXPORT_SYMBOL(drm_mode_create_scaling_mode_property); /** * drm_mode_create_aspect_ratio_property - create aspect ratio property * @dev: DRM device * * Called by a driver the first time it's needed, must be attached to desired * connectors. * * Returns: * Zero on success, negative errno on failure. */ int drm_mode_create_aspect_ratio_property(struct drm_device *dev) { if (dev->mode_config.aspect_ratio_property) return 0; dev->mode_config.aspect_ratio_property = drm_property_create_enum(dev, 0, "aspect ratio", drm_aspect_ratio_enum_list, ARRAY_SIZE(drm_aspect_ratio_enum_list)); if (dev->mode_config.aspect_ratio_property == NULL) return -ENOMEM; return 0; } EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property); /** * drm_mode_create_dirty_property - create dirty property * @dev: DRM device * * Called by a driver the first time it's needed, must be attached to desired * connectors. 
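 *
 * A hedged driver-side sketch of the expected pairing ("connector" is a
 * stand-in and the initial value is illustrative only):
 *
 *      drm_mode_create_dirty_info_property(dev);
 *      drm_object_attach_property(&connector->base,
 *                                 dev->mode_config.dirty_info_property,
 *                                 DRM_MODE_DIRTY_ON);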
 */
int drm_mode_create_dirty_info_property(struct drm_device *dev)
{
        struct drm_property *dirty_info;

        if (dev->mode_config.dirty_info_property)
                return 0;

        dirty_info =
                drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
                                         "dirty",
                                         drm_dirty_info_enum_list,
                                         ARRAY_SIZE(drm_dirty_info_enum_list));
        dev->mode_config.dirty_info_property = dirty_info;

        return 0;
}
EXPORT_SYMBOL(drm_mode_create_dirty_info_property);

/**
 * drm_mode_create_suggested_offset_properties - create suggested offset properties
 * @dev: DRM device
 *
 * Create the suggested x/y offset property for connectors.
 */
int drm_mode_create_suggested_offset_properties(struct drm_device *dev)
{
        if (dev->mode_config.suggested_x_property &&
            dev->mode_config.suggested_y_property)
                return 0;

        dev->mode_config.suggested_x_property =
                drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE,
                                          "suggested X", 0, 0xffffffff);

        dev->mode_config.suggested_y_property =
                drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE,
                                          "suggested Y", 0, 0xffffffff);

        if (dev->mode_config.suggested_x_property == NULL ||
            dev->mode_config.suggested_y_property == NULL)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);

/**
 * drm_mode_getresources - get graphics configuration
 * @dev: drm device for the ioctl
 * @data: data pointer for the ioctl
 * @file_priv: drm file for the ioctl call
 *
 * Construct a set of configuration description structures and return
 * them to the user, including CRTC, connector and framebuffer configuration.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int drm_mode_getresources(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct drm_mode_card_res *card_res = data;
        struct list_head *lh;
        struct drm_framebuffer *fb;
        struct drm_connector *connector;
        struct drm_crtc *crtc;
        struct drm_encoder *encoder;
        int ret = 0;
        int connector_count = 0;
        int crtc_count = 0;
        int fb_count = 0;
        int encoder_count = 0;
        int copied = 0;
        uint32_t __user *fb_id;
        uint32_t __user *crtc_id;
        uint32_t __user *connector_id;
        uint32_t __user *encoder_id;

        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                return -EINVAL;

        mutex_lock(&file_priv->fbs_lock);
        /*
         * For the non-control nodes we need to limit the list of resources
         * by IDs in the group list for this node
         */
        list_for_each(lh, &file_priv->fbs)
                fb_count++;

        /* handle this in 4 parts */
        /* FBs */
        if (card_res->count_fbs >= fb_count) {
                copied = 0;
                fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
                list_for_each_entry(fb, &file_priv->fbs, filp_head) {
                        if (put_user(fb->base.id, fb_id + copied)) {
                                mutex_unlock(&file_priv->fbs_lock);
                                return -EFAULT;
                        }
                        copied++;
                }
        }
        card_res->count_fbs = fb_count;
        mutex_unlock(&file_priv->fbs_lock);

        /* mode_config.mutex protects the connector list against e.g. DP MST
         * connector hot-adding. CRTC/Plane lists are invariant.
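         * Walking the crtc and encoder lists below without further locking
         * is therefore safe; only the connector walk relies on this mutex.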
*/ mutex_lock(&dev->mode_config.mutex); drm_for_each_crtc(crtc, dev) crtc_count++; drm_for_each_connector(connector, dev) connector_count++; drm_for_each_encoder(encoder, dev) encoder_count++; card_res->max_height = dev->mode_config.max_height; card_res->min_height = dev->mode_config.min_height; card_res->max_width = dev->mode_config.max_width; card_res->min_width = dev->mode_config.min_width; /* CRTCs */ if (card_res->count_crtcs >= crtc_count) { copied = 0; crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr; drm_for_each_crtc(crtc, dev) { DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); if (put_user(crtc->base.id, crtc_id + copied)) { ret = -EFAULT; goto out; } copied++; } } card_res->count_crtcs = crtc_count; /* Encoders */ if (card_res->count_encoders >= encoder_count) { copied = 0; encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr; drm_for_each_encoder(encoder, dev) { DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id, encoder->name); if (put_user(encoder->base.id, encoder_id + copied)) { ret = -EFAULT; goto out; } copied++; } } card_res->count_encoders = encoder_count; /* Connectors */ if (card_res->count_connectors >= connector_count) { copied = 0; connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr; drm_for_each_connector(connector, dev) { DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); if (put_user(connector->base.id, connector_id + copied)) { ret = -EFAULT; goto out; } copied++; } } card_res->count_connectors = connector_count; DRM_DEBUG_KMS("CRTC[%d] CONNECTORS[%d] ENCODERS[%d]\n", card_res->count_crtcs, card_res->count_connectors, card_res->count_encoders); out: mutex_unlock(&dev->mode_config.mutex); return ret; } /** * drm_mode_getcrtc - get CRTC configuration * @dev: drm device for the ioctl * @data: data pointer for the ioctl * @file_priv: drm file for the ioctl call * * Construct a CRTC configuration structure to return to the user. * * Called by the user via ioctl. * * Returns: * Zero on success, negative errno on failure. */ int drm_mode_getcrtc(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_crtc *crtc_resp = data; struct drm_crtc *crtc; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; crtc = drm_crtc_find(dev, crtc_resp->crtc_id); if (!crtc) return -ENOENT; drm_modeset_lock_crtc(crtc, crtc->primary); crtc_resp->gamma_size = crtc->gamma_size; if (crtc->primary->fb) crtc_resp->fb_id = crtc->primary->fb->base.id; else crtc_resp->fb_id = 0; if (crtc->state) { crtc_resp->x = crtc->primary->state->src_x >> 16; crtc_resp->y = crtc->primary->state->src_y >> 16; if (crtc->state->enable) { drm_mode_convert_to_umode(&crtc_resp->mode, &crtc->state->mode); crtc_resp->mode_valid = 1; } else { crtc_resp->mode_valid = 0; } } else { crtc_resp->x = crtc->x; crtc_resp->y = crtc->y; if (crtc->enabled) { drm_mode_convert_to_umode(&crtc_resp->mode, &crtc->mode); crtc_resp->mode_valid = 1; } else { crtc_resp->mode_valid = 0; } } drm_modeset_unlock_crtc(crtc); return 0; } static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode, const struct drm_file *file_priv) { /* * If user-space hasn't configured the driver to expose the stereo 3D * modes, don't expose them. 
*/ if (!file_priv->stereo_allowed && drm_mode_is_stereo(mode)) return false; return true; } static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *connector) { /* For atomic drivers only state objects are synchronously updated and * protected by modeset locks, so check those first. */ if (connector->state) return connector->state->best_encoder; return connector->encoder; } /* helper for getconnector and getproperties ioctls */ static int get_properties(struct drm_mode_object *obj, bool atomic, uint32_t __user *prop_ptr, uint64_t __user *prop_values, uint32_t *arg_count_props) { int props_count; int i, ret, copied; props_count = obj->properties->count; if (!atomic) props_count -= obj->properties->atomic_count; if ((*arg_count_props >= props_count) && props_count) { for (i = 0, copied = 0; copied < props_count; i++) { struct drm_property *prop = obj->properties->properties[i]; uint64_t val; if ((prop->flags & DRM_MODE_PROP_ATOMIC) && !atomic) continue; ret = drm_object_property_get_value(obj, prop, &val); if (ret) return ret; if (put_user(prop->base.id, prop_ptr + copied)) return -EFAULT; if (put_user(val, prop_values + copied)) return -EFAULT; copied++; } } *arg_count_props = props_count; return 0; } /** * drm_mode_getconnector - get connector configuration * @dev: drm device for the ioctl * @data: data pointer for the ioctl * @file_priv: drm file for the ioctl call * * Construct a connector configuration structure to return to the user. * * Called by the user via ioctl. * * Returns: * Zero on success, negative errno on failure. */ int drm_mode_getconnector(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_get_connector *out_resp = data; struct drm_connector *connector; struct drm_encoder *encoder; struct drm_display_mode *mode; int mode_count = 0; int encoders_count = 0; int ret = 0; int copied = 0; int i; struct drm_mode_modeinfo u_mode; struct drm_mode_modeinfo __user *mode_ptr; uint32_t __user *encoder_ptr; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo)); DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id); mutex_lock(&dev->mode_config.mutex); connector = drm_connector_find(dev, out_resp->connector_id); if (!connector) { ret = -ENOENT; goto out_unlock; } for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) if (connector->encoder_ids[i] != 0) encoders_count++; if (out_resp->count_modes == 0) { connector->funcs->fill_modes(connector, dev->mode_config.max_width, dev->mode_config.max_height); } /* delayed so we get modes regardless of pre-fill_modes state */ list_for_each_entry(mode, &connector->modes, head) if (drm_mode_expose_to_userspace(mode, file_priv)) mode_count++; out_resp->connector_id = connector->base.id; out_resp->connector_type = connector->connector_type; out_resp->connector_type_id = connector->connector_type_id; out_resp->mm_width = connector->display_info.width_mm; out_resp->mm_height = connector->display_info.height_mm; out_resp->subpixel = connector->display_info.subpixel_order; out_resp->connection = connector->status; drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); encoder = drm_connector_get_encoder(connector); if (encoder) out_resp->encoder_id = encoder->base.id; else out_resp->encoder_id = 0; /* * This ioctl is called twice, once to determine how much space is * needed, and the 2nd time to fill it. 
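         *
         * An illustrative userspace counterpart (hedged sketch; "fd" and
         * "id" are stand-ins, error handling and the property/encoder
         * arrays are omitted, drmIoctl() comes from libdrm):
         *
         *      struct drm_mode_get_connector arg = { .connector_id = id };
         *
         *      drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &arg); // counts only
         *      arg.modes_ptr = (uintptr_t)calloc(arg.count_modes,
         *                      sizeof(struct drm_mode_modeinfo));
         *      drmIoctl(fd, DRM_IOCTL_MODE_GETCONNECTOR, &arg); // fill modes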
*/ if ((out_resp->count_modes >= mode_count) && mode_count) { copied = 0; mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr; list_for_each_entry(mode, &connector->modes, head) { if (!drm_mode_expose_to_userspace(mode, file_priv)) continue; drm_mode_convert_to_umode(&u_mode, mode); if (copy_to_user(mode_ptr + copied, &u_mode, sizeof(u_mode))) { ret = -EFAULT; goto out; } copied++; } } out_resp->count_modes = mode_count; ret = get_properties(&connector->base, file_priv->atomic, (uint32_t __user *)(unsigned long)(out_resp->props_ptr), (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr), &out_resp->count_props); if (ret) goto out; if ((out_resp->count_encoders >= encoders_count) && encoders_count) { copied = 0; encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr); for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { if (connector->encoder_ids[i] != 0) { if (put_user(connector->encoder_ids[i], encoder_ptr + copied)) { ret = -EFAULT; goto out; } copied++; } } } out_resp->count_encoders = encoders_count; out: drm_modeset_unlock(&dev->mode_config.connection_mutex); out_unlock: mutex_unlock(&dev->mode_config.mutex); return ret; } static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder) { struct drm_connector *connector; struct drm_device *dev = encoder->dev; bool uses_atomic = false; /* For atomic drivers only state objects are synchronously updated and * protected by modeset locks, so check those first. */ drm_for_each_connector(connector, dev) { if (!connector->state) continue; uses_atomic = true; if (connector->state->best_encoder != encoder) continue; return connector->state->crtc; } /* Don't return stale data (e.g. pending async disable). */ if (uses_atomic) return NULL; return encoder->crtc; } /** * drm_mode_getencoder - get encoder configuration * @dev: drm device for the ioctl * @data: data pointer for the ioctl * @file_priv: drm file for the ioctl call * * Construct a encoder configuration structure to return to the user. * * Called by the user via ioctl. * * Returns: * Zero on success, negative errno on failure. */ int drm_mode_getencoder(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_get_encoder *enc_resp = data; struct drm_encoder *encoder; struct drm_crtc *crtc; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; encoder = drm_encoder_find(dev, enc_resp->encoder_id); if (!encoder) return -ENOENT; drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); crtc = drm_encoder_get_crtc(encoder); if (crtc) enc_resp->crtc_id = crtc->base.id; else enc_resp->crtc_id = 0; drm_modeset_unlock(&dev->mode_config.connection_mutex); enc_resp->encoder_type = encoder->encoder_type; enc_resp->encoder_id = encoder->base.id; enc_resp->possible_crtcs = encoder->possible_crtcs; enc_resp->possible_clones = encoder->possible_clones; return 0; } /** * drm_mode_getplane_res - enumerate all plane resources * @dev: DRM device * @data: ioctl data * @file_priv: DRM file info * * Construct a list of plane ids to return to the user. * * Called by the user via ioctl. * * Returns: * Zero on success, negative errno on failure. 
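 *
 * Note that only overlay planes are returned unless the client first opts
 * in to universal planes; an illustrative libdrm call (not part of this
 * file):
 *
 *      drmSetClientCap(fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);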
*/ int drm_mode_getplane_res(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_get_plane_res *plane_resp = data; struct drm_mode_config *config; struct drm_plane *plane; uint32_t __user *plane_ptr; int copied = 0; unsigned num_planes; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; config = &dev->mode_config; if (file_priv->universal_planes) num_planes = config->num_total_plane; else num_planes = config->num_overlay_plane; /* * This ioctl is called twice, once to determine how much space is * needed, and the 2nd time to fill it. */ if (num_planes && (plane_resp->count_planes >= num_planes)) { plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr; /* Plane lists are invariant, no locking needed. */ drm_for_each_plane(plane, dev) { /* * Unless userspace set the 'universal planes' * capability bit, only advertise overlays. */ if (plane->type != DRM_PLANE_TYPE_OVERLAY && !file_priv->universal_planes) continue; if (put_user(plane->base.id, plane_ptr + copied)) return -EFAULT; copied++; } } plane_resp->count_planes = num_planes; return 0; } /** * drm_mode_getplane - get plane configuration * @dev: DRM device * @data: ioctl data * @file_priv: DRM file info * * Construct a plane configuration structure to return to the user. * * Called by the user via ioctl. * * Returns: * Zero on success, negative errno on failure. */ int drm_mode_getplane(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_get_plane *plane_resp = data; struct drm_plane *plane; uint32_t __user *format_ptr; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; plane = drm_plane_find(dev, plane_resp->plane_id); if (!plane) return -ENOENT; drm_modeset_lock(&plane->mutex, NULL); if (plane->crtc) plane_resp->crtc_id = plane->crtc->base.id; else plane_resp->crtc_id = 0; if (plane->fb) plane_resp->fb_id = plane->fb->base.id; else plane_resp->fb_id = 0; drm_modeset_unlock(&plane->mutex); plane_resp->plane_id = plane->base.id; plane_resp->possible_crtcs = plane->possible_crtcs; plane_resp->gamma_size = 0; /* * This ioctl is called twice, once to determine how much space is * needed, and the 2nd time to fill it. */ if (plane->format_count && (plane_resp->count_format_types >= plane->format_count)) { format_ptr = (uint32_t __user *)(unsigned long)plane_resp->format_type_ptr; if (copy_to_user(format_ptr, plane->format_types, sizeof(uint32_t) * plane->format_count)) { return -EFAULT; } } plane_resp->count_format_types = plane->format_count; return 0; } /** * drm_plane_check_pixel_format - Check if the plane supports the pixel format * @plane: plane to check for format support * @format: the pixel format * * Returns: * Zero of @plane has @format in its list of supported pixel formats, -EINVAL * otherwise. */ int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format) { unsigned int i; for (i = 0; i < plane->format_count; i++) { if (format == plane->format_types[i]) return 0; } return -EINVAL; } static int check_src_coords(uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h, const struct drm_framebuffer *fb) { unsigned int fb_width, fb_height; fb_width = fb->width << 16; fb_height = fb->height << 16; /* Make sure source coordinates are inside the fb. 
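         *
         * Coordinates are 16.16 fixed point: e.g. src_x = (10 << 16) | 0x8000
         * encodes 10.5 pixels. The debug message below prints the fraction in
         * millionths; ((f & 0xffff) * 15625) >> 10 equals f * 1000000 / 65536
         * exactly, since 1000000 / 65536 == 15625 / 1024.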
*/ if (src_w > fb_width || src_x > fb_width - src_w || src_h > fb_height || src_y > fb_height - src_h) { DRM_DEBUG_KMS("Invalid source coordinates " "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n", src_w >> 16, ((src_w & 0xffff) * 15625) >> 10, src_h >> 16, ((src_h & 0xffff) * 15625) >> 10, src_x >> 16, ((src_x & 0xffff) * 15625) >> 10, src_y >> 16, ((src_y & 0xffff) * 15625) >> 10); return -ENOSPC; } return 0; } /* * setplane_internal - setplane handler for internal callers * * Note that we assume an extra reference has already been taken on fb. If the * update fails, this reference will be dropped before return; if it succeeds, * the previous framebuffer (if any) will be unreferenced instead. * * src_{x,y,w,h} are provided in 16.16 fixed point format */ static int __setplane_internal(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, int32_t crtc_x, int32_t crtc_y, uint32_t crtc_w, uint32_t crtc_h, /* src_{x,y,w,h} values are 16.16 fixed point */ uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h) { int ret = 0; /* No fb means shut it down */ if (!fb) { plane->old_fb = plane->fb; ret = plane->funcs->disable_plane(plane); if (!ret) { plane->crtc = NULL; plane->fb = NULL; } else { plane->old_fb = NULL; } goto out; } /* Check whether this plane is usable on this CRTC */ if (!(plane->possible_crtcs & drm_crtc_mask(crtc))) { DRM_DEBUG_KMS("Invalid crtc for plane\n"); ret = -EINVAL; goto out; } /* Check whether this plane supports the fb pixel format. */ ret = drm_plane_check_pixel_format(plane, fb->pixel_format); if (ret) { DRM_DEBUG_KMS("Invalid pixel format %s\n", drm_get_format_name(fb->pixel_format)); goto out; } /* Give drivers some help against integer overflows */ if (crtc_w > INT_MAX || crtc_x > INT_MAX - (int32_t) crtc_w || crtc_h > INT_MAX || crtc_y > INT_MAX - (int32_t) crtc_h) { DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n", crtc_w, crtc_h, crtc_x, crtc_y); ret = -ERANGE; goto out; } ret = check_src_coords(src_x, src_y, src_w, src_h, fb); if (ret) goto out; plane->old_fb = plane->fb; ret = plane->funcs->update_plane(plane, crtc, fb, crtc_x, crtc_y, crtc_w, crtc_h, src_x, src_y, src_w, src_h); if (!ret) { plane->crtc = crtc; plane->fb = fb; fb = NULL; } else { plane->old_fb = NULL; } out: if (fb) drm_framebuffer_unreference(fb); if (plane->old_fb) drm_framebuffer_unreference(plane->old_fb); plane->old_fb = NULL; return ret; } static int setplane_internal(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, int32_t crtc_x, int32_t crtc_y, uint32_t crtc_w, uint32_t crtc_h, /* src_{x,y,w,h} values are 16.16 fixed point */ uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h) { int ret; drm_modeset_lock_all(plane->dev); ret = __setplane_internal(plane, crtc, fb, crtc_x, crtc_y, crtc_w, crtc_h, src_x, src_y, src_w, src_h); drm_modeset_unlock_all(plane->dev); return ret; } /** * drm_mode_setplane - configure a plane's configuration * @dev: DRM device * @data: ioctl data* * @file_priv: DRM file info * * Set plane configuration, including placement, fb, scaling, and other factors. * Or pass a NULL fb to disable (planes may be disabled without providing a * valid crtc). * * Returns: * Zero on success, negative errno on failure. 
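 *
 * A hedged userspace-style sketch of disabling a plane (fields not set
 * are zero; fb_id == 0 requests the disable path, so no crtc is needed):
 *
 *      struct drm_mode_set_plane req = { .plane_id = plane_id };
 *
 *      drmIoctl(fd, DRM_IOCTL_MODE_SETPLANE, &req);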
*/ int drm_mode_setplane(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_set_plane *plane_req = data; struct drm_plane *plane; struct drm_crtc *crtc = NULL; struct drm_framebuffer *fb = NULL; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; /* * First, find the plane, crtc, and fb objects. If not available, * we don't bother to call the driver. */ plane = drm_plane_find(dev, plane_req->plane_id); if (!plane) { DRM_DEBUG_KMS("Unknown plane ID %d\n", plane_req->plane_id); return -ENOENT; } if (plane_req->fb_id) { fb = drm_framebuffer_lookup(dev, plane_req->fb_id); if (!fb) { DRM_DEBUG_KMS("Unknown framebuffer ID %d\n", plane_req->fb_id); return -ENOENT; } crtc = drm_crtc_find(dev, plane_req->crtc_id); if (!crtc) { DRM_DEBUG_KMS("Unknown crtc ID %d\n", plane_req->crtc_id); return -ENOENT; } } /* * setplane_internal will take care of deref'ing either the old or new * framebuffer depending on success. */ return setplane_internal(plane, crtc, fb, plane_req->crtc_x, plane_req->crtc_y, plane_req->crtc_w, plane_req->crtc_h, plane_req->src_x, plane_req->src_y, plane_req->src_w, plane_req->src_h); } /** * drm_mode_set_config_internal - helper to call ->set_config * @set: modeset config to set * * This is a little helper to wrap internal calls to the ->set_config driver * interface. The only thing it adds is correct refcounting dance. * * Returns: * Zero on success, negative errno on failure. */ int drm_mode_set_config_internal(struct drm_mode_set *set) { struct drm_crtc *crtc = set->crtc; struct drm_framebuffer *fb; struct drm_crtc *tmp; int ret; /* * NOTE: ->set_config can also disable other crtcs (if we steal all * connectors from it), hence we need to refcount the fbs across all * crtcs. Atomic modeset will have saner semantics ... */ drm_for_each_crtc(tmp, crtc->dev) tmp->primary->old_fb = tmp->primary->fb; fb = set->fb; ret = crtc->funcs->set_config(set); if (ret == 0) { crtc->primary->crtc = crtc; crtc->primary->fb = fb; } drm_for_each_crtc(tmp, crtc->dev) { if (tmp->primary->fb) drm_framebuffer_reference(tmp->primary->fb); if (tmp->primary->old_fb) drm_framebuffer_unreference(tmp->primary->old_fb); tmp->primary->old_fb = NULL; } return ret; } EXPORT_SYMBOL(drm_mode_set_config_internal); /** * drm_crtc_get_hv_timing - Fetches hdisplay/vdisplay for given mode * @mode: mode to query * @hdisplay: hdisplay value to fill in * @vdisplay: vdisplay value to fill in * * The vdisplay value will be doubled if the specified mode is a stereo mode of * the appropriate layout. 
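 *
 * For example, assuming CEA-861 frame-packing timings, a 1920x1080 stereo
 * mode yields a vdisplay of 2205 here (both eyes plus the intervening
 * blanking), while plain 2D modes are reported unchanged.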
*/ void drm_crtc_get_hv_timing(const struct drm_display_mode *mode, int *hdisplay, int *vdisplay) { struct drm_display_mode adjusted; drm_mode_copy(&adjusted, mode); drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE_ONLY); *hdisplay = adjusted.crtc_hdisplay; *vdisplay = adjusted.crtc_vdisplay; } EXPORT_SYMBOL(drm_crtc_get_hv_timing); /** * drm_crtc_check_viewport - Checks that a framebuffer is big enough for the * CRTC viewport * @crtc: CRTC that framebuffer will be displayed on * @x: x panning * @y: y panning * @mode: mode that framebuffer will be displayed under * @fb: framebuffer to check size of */ int drm_crtc_check_viewport(const struct drm_crtc *crtc, int x, int y, const struct drm_display_mode *mode, const struct drm_framebuffer *fb) { int hdisplay, vdisplay; drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay); if (crtc->state && crtc->primary->state->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) swap(hdisplay, vdisplay); return check_src_coords(x << 16, y << 16, hdisplay << 16, vdisplay << 16, fb); } EXPORT_SYMBOL(drm_crtc_check_viewport); /** * drm_mode_setcrtc - set CRTC configuration * @dev: drm device for the ioctl * @data: data pointer for the ioctl * @file_priv: drm file for the ioctl call * * Build a new CRTC configuration based on user request. * * Called by the user via ioctl. * * Returns: * Zero on success, negative errno on failure. */ int drm_mode_setcrtc(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_config *config = &dev->mode_config; struct drm_mode_crtc *crtc_req = data; struct drm_crtc *crtc; struct drm_connector **connector_set = NULL, *connector; struct drm_framebuffer *fb = NULL; struct drm_display_mode *mode = NULL; struct drm_mode_set set; uint32_t __user *set_connectors_ptr; int ret; int i; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; /* * Universal plane src offsets are only 16.16, prevent havoc for * drivers using universal plane code internally. */ if (crtc_req->x & 0xffff0000 || crtc_req->y & 0xffff0000) return -ERANGE; drm_modeset_lock_all(dev); crtc = drm_crtc_find(dev, crtc_req->crtc_id); if (!crtc) { DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id); ret = -ENOENT; goto out; } DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); if (crtc_req->mode_valid) { /* If we have a mode we need a framebuffer. */ /* If we pass -1, set the mode with the currently bound fb */ if (crtc_req->fb_id == -1) { if (!crtc->primary->fb) { DRM_DEBUG_KMS("CRTC doesn't have current FB\n"); ret = -EINVAL; goto out; } fb = crtc->primary->fb; /* Make refcounting symmetric with the lookup path. */ drm_framebuffer_reference(fb); } else { fb = drm_framebuffer_lookup(dev, crtc_req->fb_id); if (!fb) { DRM_DEBUG_KMS("Unknown FB ID%d\n", crtc_req->fb_id); ret = -ENOENT; goto out; } } mode = drm_mode_create(dev); if (!mode) { ret = -ENOMEM; goto out; } ret = drm_mode_convert_umode(mode, &crtc_req->mode); if (ret) { DRM_DEBUG_KMS("Invalid mode\n"); goto out; } drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); /* * Check whether the primary plane supports the fb pixel format. * Drivers not implementing the universal planes API use a * default formats list provided by the DRM core which doesn't * match real hardware capabilities. Skip the check in that * case. 
*/ if (!crtc->primary->format_default) { ret = drm_plane_check_pixel_format(crtc->primary, fb->pixel_format); if (ret) { DRM_DEBUG_KMS("Invalid pixel format %s\n", drm_get_format_name(fb->pixel_format)); goto out; } } ret = drm_crtc_check_viewport(crtc, crtc_req->x, crtc_req->y, mode, fb); if (ret) goto out; } if (crtc_req->count_connectors == 0 && mode) { DRM_DEBUG_KMS("Count connectors is 0 but mode set\n"); ret = -EINVAL; goto out; } if (crtc_req->count_connectors > 0 && (!mode || !fb)) { DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n", crtc_req->count_connectors); ret = -EINVAL; goto out; } if (crtc_req->count_connectors > 0) { u32 out_id; /* Avoid unbounded kernel memory allocation */ if (crtc_req->count_connectors > config->num_connector) { ret = -EINVAL; goto out; } connector_set = kmalloc_array(crtc_req->count_connectors, sizeof(struct drm_connector *), GFP_KERNEL); if (!connector_set) { ret = -ENOMEM; goto out; } for (i = 0; i < crtc_req->count_connectors; i++) { set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr; if (get_user(out_id, &set_connectors_ptr[i])) { ret = -EFAULT; goto out; } connector = drm_connector_find(dev, out_id); if (!connector) { DRM_DEBUG_KMS("Connector id %d unknown\n", out_id); ret = -ENOENT; goto out; } DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); connector_set[i] = connector; } } set.crtc = crtc; set.x = crtc_req->x; set.y = crtc_req->y; set.mode = mode; set.connectors = connector_set; set.num_connectors = crtc_req->count_connectors; set.fb = fb; ret = drm_mode_set_config_internal(&set); out: if (fb) drm_framebuffer_unreference(fb); kfree(connector_set); drm_mode_destroy(dev, mode); drm_modeset_unlock_all(dev); return ret; } /** * drm_mode_cursor_universal - translate legacy cursor ioctl call into a * universal plane handler call * @crtc: crtc to update cursor for * @req: data pointer for the ioctl * @file_priv: drm file for the ioctl call * * Legacy cursor ioctl's work directly with driver buffer handles. To * translate legacy ioctl calls into universal plane handler calls, we need to * wrap the native buffer handle in a drm_framebuffer. * * Note that we assume any handle passed to the legacy ioctls was a 32-bit ARGB * buffer with a pitch of 4*width; the universal plane interface should be used * directly in cases where the hardware can support other buffer settings and * userspace wants to make use of these capabilities. * * Returns: * Zero on success, negative errno on failure. */ static int drm_mode_cursor_universal(struct drm_crtc *crtc, struct drm_mode_cursor2 *req, struct drm_file *file_priv) { struct drm_device *dev = crtc->dev; struct drm_framebuffer *fb = NULL; struct drm_mode_fb_cmd2 fbreq = { .width = req->width, .height = req->height, .pixel_format = DRM_FORMAT_ARGB8888, .pitches = { req->width * 4 }, .handles = { req->handle }, }; int32_t crtc_x, crtc_y; uint32_t crtc_w = 0, crtc_h = 0; uint32_t src_w = 0, src_h = 0; int ret = 0; BUG_ON(!crtc->cursor); WARN_ON(crtc->cursor->crtc != crtc && crtc->cursor->crtc != NULL); /* * Obtain fb we'll be using (either new or existing) and take an extra * reference to it if fb != null. setplane will take care of dropping * the reference if the plane update fails. 
*/ if (req->flags & DRM_MODE_CURSOR_BO) { if (req->handle) { fb = internal_framebuffer_create(dev, &fbreq, file_priv); if (IS_ERR(fb)) { DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); return PTR_ERR(fb); } } else { fb = NULL; } } else { fb = crtc->cursor->fb; if (fb) drm_framebuffer_reference(fb); } if (req->flags & DRM_MODE_CURSOR_MOVE) { crtc_x = req->x; crtc_y = req->y; } else { crtc_x = crtc->cursor_x; crtc_y = crtc->cursor_y; } if (fb) { crtc_w = fb->width; crtc_h = fb->height; src_w = fb->width << 16; src_h = fb->height << 16; } /* * setplane_internal will take care of deref'ing either the old or new * framebuffer depending on success. */ ret = __setplane_internal(crtc->cursor, crtc, fb, crtc_x, crtc_y, crtc_w, crtc_h, 0, 0, src_w, src_h); /* Update successful; save new cursor position, if necessary */ if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) { crtc->cursor_x = req->x; crtc->cursor_y = req->y; } return ret; } static int drm_mode_cursor_common(struct drm_device *dev, struct drm_mode_cursor2 *req, struct drm_file *file_priv) { struct drm_crtc *crtc; int ret = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags)) return -EINVAL; crtc = drm_crtc_find(dev, req->crtc_id); if (!crtc) { DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id); return -ENOENT; } /* * If this crtc has a universal cursor plane, call that plane's update * handler rather than using legacy cursor handlers. */ drm_modeset_lock_crtc(crtc, crtc->cursor); if (crtc->cursor) { ret = drm_mode_cursor_universal(crtc, req, file_priv); goto out; } if (req->flags & DRM_MODE_CURSOR_BO) { if (!crtc->funcs->cursor_set && !crtc->funcs->cursor_set2) { ret = -ENXIO; goto out; } /* Turns off the cursor if handle is 0 */ if (crtc->funcs->cursor_set2) ret = crtc->funcs->cursor_set2(crtc, file_priv, req->handle, req->width, req->height, req->hot_x, req->hot_y); else ret = crtc->funcs->cursor_set(crtc, file_priv, req->handle, req->width, req->height); } if (req->flags & DRM_MODE_CURSOR_MOVE) { if (crtc->funcs->cursor_move) { ret = crtc->funcs->cursor_move(crtc, req->x, req->y); } else { ret = -EFAULT; goto out; } } out: drm_modeset_unlock_crtc(crtc); return ret; } /** * drm_mode_cursor_ioctl - set CRTC's cursor configuration * @dev: drm device for the ioctl * @data: data pointer for the ioctl * @file_priv: drm file for the ioctl call * * Set the cursor configuration based on user request. * * Called by the user via ioctl. * * Returns: * Zero on success, negative errno on failure. */ int drm_mode_cursor_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_cursor *req = data; struct drm_mode_cursor2 new_req; memcpy(&new_req, req, sizeof(struct drm_mode_cursor)); new_req.hot_x = new_req.hot_y = 0; return drm_mode_cursor_common(dev, &new_req, file_priv); } /** * drm_mode_cursor2_ioctl - set CRTC's cursor configuration * @dev: drm device for the ioctl * @data: data pointer for the ioctl * @file_priv: drm file for the ioctl call * * Set the cursor configuration based on user request. This implements the 2nd * version of the cursor ioctl, which allows userspace to additionally specify * the hotspot of the pointer. * * Called by the user via ioctl. * * Returns: * Zero on success, negative errno on failure. 
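 *
 * The hotspot (hot_x, hot_y) is the offset of the logical pointer position
 * within the cursor image: e.g. a 64x64 crosshair image centred on the
 * pointer would pass hot_x = hot_y = 32.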
*/ int drm_mode_cursor2_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_cursor2 *req = data; return drm_mode_cursor_common(dev, req, file_priv); } /** * drm_mode_legacy_fb_format - compute drm fourcc code from legacy description * @bpp: bits per pixels * @depth: bit depth per pixel * * Computes a drm fourcc pixel format code for the given @bpp/@depth values. * Useful in fbdev emulation code, since that deals in those values. */ uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth) { uint32_t fmt; switch (bpp) { case 8: fmt = DRM_FORMAT_C8; break; case 16: if (depth == 15) fmt = DRM_FORMAT_XRGB1555; else fmt = DRM_FORMAT_RGB565; break; case 24: fmt = DRM_FORMAT_RGB888; break; case 32: if (depth == 24) fmt = DRM_FORMAT_XRGB8888; else if (depth == 30) fmt = DRM_FORMAT_XRGB2101010; else fmt = DRM_FORMAT_ARGB8888; break; default: DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n"); fmt = DRM_FORMAT_XRGB8888; break; } return fmt; } EXPORT_SYMBOL(drm_mode_legacy_fb_format); /** * drm_mode_addfb - add an FB to the graphics configuration * @dev: drm device for the ioctl * @data: data pointer for the ioctl * @file_priv: drm file for the ioctl call * * Add a new FB to the specified CRTC, given a user request. This is the * original addfb ioctl which only supported RGB formats. * * Called by the user via ioctl. * * Returns: * Zero on success, negative errno on failure. */ int drm_mode_addfb(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_fb_cmd *or = data; struct drm_mode_fb_cmd2 r = {}; int ret; /* convert to new format and call new ioctl */ r.fb_id = or->fb_id; r.width = or->width; r.height = or->height; r.pitches[0] = or->pitch; r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth); r.handles[0] = or->handle; ret = drm_mode_addfb2(dev, &r, file_priv); if (ret) return ret; or->fb_id = r.fb_id; return 0; } static int format_check(const struct drm_mode_fb_cmd2 *r) { uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN; switch (format) { case DRM_FORMAT_C8: case DRM_FORMAT_RGB332: case DRM_FORMAT_BGR233: case DRM_FORMAT_XRGB4444: case DRM_FORMAT_XBGR4444: case DRM_FORMAT_RGBX4444: case DRM_FORMAT_BGRX4444: case DRM_FORMAT_ARGB4444: case DRM_FORMAT_ABGR4444: case DRM_FORMAT_RGBA4444: case DRM_FORMAT_BGRA4444: case DRM_FORMAT_XRGB1555: case DRM_FORMAT_XBGR1555: case DRM_FORMAT_RGBX5551: case DRM_FORMAT_BGRX5551: case DRM_FORMAT_ARGB1555: case DRM_FORMAT_ABGR1555: case DRM_FORMAT_RGBA5551: case DRM_FORMAT_BGRA5551: case DRM_FORMAT_RGB565: case DRM_FORMAT_BGR565: case DRM_FORMAT_RGB888: case DRM_FORMAT_BGR888: case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: case DRM_FORMAT_RGBX8888: case DRM_FORMAT_BGRX8888: case DRM_FORMAT_ARGB8888: case DRM_FORMAT_ABGR8888: case DRM_FORMAT_RGBA8888: case DRM_FORMAT_BGRA8888: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: case DRM_FORMAT_RGBX1010102: case DRM_FORMAT_BGRX1010102: case DRM_FORMAT_ARGB2101010: case DRM_FORMAT_ABGR2101010: case DRM_FORMAT_RGBA1010102: case DRM_FORMAT_BGRA1010102: case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: case DRM_FORMAT_VYUY: case DRM_FORMAT_AYUV: case DRM_FORMAT_NV12: case DRM_FORMAT_NV21: case DRM_FORMAT_NV16: case DRM_FORMAT_NV61: case DRM_FORMAT_NV24: case DRM_FORMAT_NV42: case DRM_FORMAT_YUV410: case DRM_FORMAT_YVU410: case DRM_FORMAT_YUV411: case DRM_FORMAT_YVU411: case DRM_FORMAT_YUV420: case DRM_FORMAT_YVU420: case DRM_FORMAT_YUV422: case DRM_FORMAT_YVU422: case DRM_FORMAT_YUV444: case 
DRM_FORMAT_YVU444: return 0; default: DRM_DEBUG_KMS("invalid pixel format %s\n", drm_get_format_name(r->pixel_format)); return -EINVAL; } } static int framebuffer_check(const struct drm_mode_fb_cmd2 *r) { int ret, hsub, vsub, num_planes, i; ret = format_check(r); if (ret) { DRM_DEBUG_KMS("bad framebuffer format %s\n", drm_get_format_name(r->pixel_format)); return ret; } hsub = drm_format_horz_chroma_subsampling(r->pixel_format); vsub = drm_format_vert_chroma_subsampling(r->pixel_format); num_planes = drm_format_num_planes(r->pixel_format); if (r->width == 0 || r->width % hsub) { DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width); return -EINVAL; } if (r->height == 0 || r->height % vsub) { DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height); return -EINVAL; } for (i = 0; i < num_planes; i++) { unsigned int width = r->width / (i != 0 ? hsub : 1); unsigned int height = r->height / (i != 0 ? vsub : 1); unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i); if (!r->handles[i]) { DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i); return -EINVAL; } if ((uint64_t) width * cpp > UINT_MAX) return -ERANGE; if ((uint64_t) height * r->pitches[i] + r->offsets[i] > UINT_MAX) return -ERANGE; if (r->pitches[i] < width * cpp) { DRM_DEBUG_KMS("bad pitch %u for plane %d\n", r->pitches[i], i); return -EINVAL; } if (r->modifier[i] && !(r->flags & DRM_MODE_FB_MODIFIERS)) { DRM_DEBUG_KMS("bad fb modifier %llu for plane %d\n", r->modifier[i], i); return -EINVAL; } /* modifier specific checks: */ switch (r->modifier[i]) { case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE: /* NOTE: the pitch restriction may be lifted later if it turns * out that no hw has this restriction: */ if (r->pixel_format != DRM_FORMAT_NV12 || width % 128 || height % 32 || r->pitches[i] % 128) { DRM_DEBUG_KMS("bad modifier data for plane %d\n", i); return -EINVAL; } break; default: break; } } for (i = num_planes; i < 4; i++) { if (r->modifier[i]) { DRM_DEBUG_KMS("non-zero modifier for unused plane %d\n", i); return -EINVAL; } /* Pre-FB_MODIFIERS userspace didn't clear the structs properly. 
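                 * Tolerate whatever stale values such clients left in the
                 * unused slots instead of failing the whole request.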
*/ if (!(r->flags & DRM_MODE_FB_MODIFIERS)) continue; if (r->handles[i]) { DRM_DEBUG_KMS("buffer object handle for unused plane %d\n", i); return -EINVAL; } if (r->pitches[i]) { DRM_DEBUG_KMS("non-zero pitch for unused plane %d\n", i); return -EINVAL; } if (r->offsets[i]) { DRM_DEBUG_KMS("non-zero offset for unused plane %d\n", i); return -EINVAL; } } return 0; } static struct drm_framebuffer * internal_framebuffer_create(struct drm_device *dev, const struct drm_mode_fb_cmd2 *r, struct drm_file *file_priv) { struct drm_mode_config *config = &dev->mode_config; struct drm_framebuffer *fb; int ret; if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) { DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags); return ERR_PTR(-EINVAL); } if ((config->min_width > r->width) || (r->width > config->max_width)) { DRM_DEBUG_KMS("bad framebuffer width %d, should be >= %d && <= %d\n", r->width, config->min_width, config->max_width); return ERR_PTR(-EINVAL); } if ((config->min_height > r->height) || (r->height > config->max_height)) { DRM_DEBUG_KMS("bad framebuffer height %d, should be >= %d && <= %d\n", r->height, config->min_height, config->max_height); return ERR_PTR(-EINVAL); } if (r->flags & DRM_MODE_FB_MODIFIERS && !dev->mode_config.allow_fb_modifiers) { DRM_DEBUG_KMS("driver does not support fb modifiers\n"); return ERR_PTR(-EINVAL); } ret = framebuffer_check(r); if (ret) return ERR_PTR(ret); fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); if (IS_ERR(fb)) { DRM_DEBUG_KMS("could not create framebuffer\n"); return fb; } return fb; } /** * drm_mode_addfb2 - add an FB to the graphics configuration * @dev: drm device for the ioctl * @data: data pointer for the ioctl * @file_priv: drm file for the ioctl call * * Add a new FB to the specified CRTC, given a user request with format. This is * the 2nd version of the addfb ioctl, which supports multi-planar framebuffers * and uses fourcc codes as pixel format specifiers. * * Called by the user via ioctl. * * Returns: * Zero on success, negative errno on failure. */ int drm_mode_addfb2(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_fb_cmd2 *r = data; struct drm_framebuffer *fb; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; fb = internal_framebuffer_create(dev, r, file_priv); if (IS_ERR(fb)) return PTR_ERR(fb); /* Transfer ownership to the filp for reaping on close */ DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); mutex_lock(&file_priv->fbs_lock); r->fb_id = fb->base.id; list_add(&fb->filp_head, &file_priv->fbs); mutex_unlock(&file_priv->fbs_lock); return 0; } /** * drm_mode_rmfb - remove an FB from the configuration * @dev: drm device for the ioctl * @data: data pointer for the ioctl * @file_priv: drm file for the ioctl call * * Remove the FB specified by the user. * * Called by the user via ioctl. * * Returns: * Zero on success, negative errno on failure. 
*/ int drm_mode_rmfb(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_framebuffer *fb = NULL; struct drm_framebuffer *fbl = NULL; uint32_t *id = data; int found = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; mutex_lock(&file_priv->fbs_lock); mutex_lock(&dev->mode_config.fb_lock); fb = __drm_framebuffer_lookup(dev, *id); if (!fb) goto fail_lookup; list_for_each_entry(fbl, &file_priv->fbs, filp_head) if (fb == fbl) found = 1; if (!found) goto fail_lookup; list_del_init(&fb->filp_head); mutex_unlock(&dev->mode_config.fb_lock); mutex_unlock(&file_priv->fbs_lock); drm_framebuffer_unreference(fb); return 0; fail_lookup: mutex_unlock(&dev->mode_config.fb_lock); mutex_unlock(&file_priv->fbs_lock); return -ENOENT; } /** * drm_mode_getfb - get FB info * @dev: drm device for the ioctl * @data: data pointer for the ioctl * @file_priv: drm file for the ioctl call * * Lookup the FB given its ID and return info about it. * * Called by the user via ioctl. * * Returns: * Zero on success, negative errno on failure. */ int drm_mode_getfb(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_fb_cmd *r = data; struct drm_framebuffer *fb; int ret; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; fb = drm_framebuffer_lookup(dev, r->fb_id); if (!fb) return -ENOENT; r->height = fb->height; r->width = fb->width; r->depth = fb->depth; r->bpp = fb->bits_per_pixel; r->pitch = fb->pitches[0]; if (fb->funcs->create_handle) { if (file_priv->is_master || capable(CAP_SYS_ADMIN) || drm_is_control_client(file_priv)) { ret = fb->funcs->create_handle(fb, file_priv, &r->handle); } else { /* GET_FB() is an unprivileged ioctl so we must not * return a buffer-handle to non-master processes! For * backwards-compatibility reasons, we cannot make * GET_FB() privileged, so just return an invalid handle * for non-masters. */ r->handle = 0; ret = 0; } } else { ret = -ENODEV; } drm_framebuffer_unreference(fb); return ret; } /** * drm_mode_dirtyfb_ioctl - flush frontbuffer rendering on an FB * @dev: drm device for the ioctl * @data: data pointer for the ioctl * @file_priv: drm file for the ioctl call * * Lookup the FB and flush out the damaged area supplied by userspace as a clip * rectangle list. Generic userspace which does frontbuffer rendering must call * this ioctl to flush out the changes on manual-update display outputs, e.g. * usb display-link, mipi manual update panels or edp panel self refresh modes. * * Modesetting drivers which always update the frontbuffer do not need to * implement the corresponding ->dirty framebuffer callback. * * Called by the user via ioctl. * * Returns: * Zero on success, negative errno on failure. 
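 *
 * A hedged sketch of the clip layout (one full-damage rectangle for a
 * 64x64 framebuffer; drm_clip_rect coordinates are conventionally x1/y1
 * inclusive, x2/y2 exclusive):
 *
 *      struct drm_clip_rect clip = { .x1 = 0, .y1 = 0, .x2 = 64, .y2 = 64 };
 *
 * With DRM_MODE_FB_DIRTY_ANNOTATE_COPY set, the rectangles must come in
 * src/dst pairs, which is why an odd num_clips is rejected below.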
*/ int drm_mode_dirtyfb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_clip_rect __user *clips_ptr; struct drm_clip_rect *clips = NULL; struct drm_mode_fb_dirty_cmd *r = data; struct drm_framebuffer *fb; unsigned flags; int num_clips; int ret; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; fb = drm_framebuffer_lookup(dev, r->fb_id); if (!fb) return -ENOENT; num_clips = r->num_clips; clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr; if (!num_clips != !clips_ptr) { ret = -EINVAL; goto out_err1; } flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags; /* If userspace annotates copy, clips must come in pairs */ if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) { ret = -EINVAL; goto out_err1; } if (num_clips && clips_ptr) { if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) { ret = -EINVAL; goto out_err1; } clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL); if (!clips) { ret = -ENOMEM; goto out_err1; } ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips)); if (ret) { ret = -EFAULT; goto out_err2; } } if (fb->funcs->dirty) { ret = fb->funcs->dirty(fb, file_priv, flags, r->color, clips, num_clips); } else { ret = -ENOSYS; } out_err2: kfree(clips); out_err1: drm_framebuffer_unreference(fb); return ret; } /** * drm_fb_release - remove and free the FBs on this file * @priv: drm file for the ioctl * * Destroy all the FBs associated with @filp. * * Called by the user via ioctl. * * Returns: * Zero on success, negative errno on failure. */ void drm_fb_release(struct drm_file *priv) { struct drm_framebuffer *fb, *tfb; /* * When the file gets released that means no one else can access the fb * list any more, so no need to grab fpriv->fbs_lock. And we need to * avoid upsetting lockdep since the universal cursor code adds a * framebuffer while holding mutex locks. * * Note that a real deadlock between fpriv->fbs_lock and the modeset * locks is impossible here since no one else but this function can get * at it any more. */ list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) { list_del_init(&fb->filp_head); /* This drops the fpriv->fbs reference. */ drm_framebuffer_unreference(fb); } } /** * drm_property_create - create a new property type * @dev: drm device * @flags: flags specifying the property type * @name: name of the property * @num_values: number of pre-defined values * * This creates a new generic drm property which can then be attached to a drm * object with drm_object_attach_property. The returned property object must be * freed with drm_property_destroy. * * Note that the DRM core keeps a per-device list of properties and that, if * drm_mode_config_cleanup() is called, it will destroy all properties created * by the driver. * * Returns: * A pointer to the newly created property on success, NULL on failure. 
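 *
 * A minimal driver-side sketch (the "zpos" name and 0..3 range are
 * illustrative only, not an API defined here):
 *
 *      prop = drm_property_create_range(dev, 0, "zpos", 0, 3);
 *      if (prop)
 *              drm_object_attach_property(&plane->base, prop, 0);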
*/ struct drm_property *drm_property_create(struct drm_device *dev, int flags, const char *name, int num_values) { struct drm_property *property = NULL; int ret; property = kzalloc(sizeof(struct drm_property), GFP_KERNEL); if (!property) return NULL; property->dev = dev; if (num_values) { property->values = kcalloc(num_values, sizeof(uint64_t), GFP_KERNEL); if (!property->values) goto fail; } ret = drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY); if (ret) goto fail; property->flags = flags; property->num_values = num_values; INIT_LIST_HEAD(&property->enum_list); if (name) { strncpy(property->name, name, DRM_PROP_NAME_LEN); property->name[DRM_PROP_NAME_LEN-1] = '\0'; } list_add_tail(&property->head, &dev->mode_config.property_list); WARN_ON(!drm_property_type_valid(property)); return property; fail: kfree(property->values); kfree(property); return NULL; } EXPORT_SYMBOL(drm_property_create); /** * drm_property_create_enum - create a new enumeration property type * @dev: drm device * @flags: flags specifying the property type * @name: name of the property * @props: enumeration lists with property values * @num_values: number of pre-defined values * * This creates a new generic drm property which can then be attached to a drm * object with drm_object_attach_property. The returned property object must be * freed with drm_property_destroy. * * Userspace is only allowed to set one of the predefined values for enumeration * properties. * * Returns: * A pointer to the newly created property on success, NULL on failure. */ struct drm_property *drm_property_create_enum(struct drm_device *dev, int flags, const char *name, const struct drm_prop_enum_list *props, int num_values) { struct drm_property *property; int i, ret; flags |= DRM_MODE_PROP_ENUM; property = drm_property_create(dev, flags, name, num_values); if (!property) return NULL; for (i = 0; i < num_values; i++) { ret = drm_property_add_enum(property, i, props[i].type, props[i].name); if (ret) { drm_property_destroy(dev, property); return NULL; } } return property; } EXPORT_SYMBOL(drm_property_create_enum); /** * drm_property_create_bitmask - create a new bitmask property type * @dev: drm device * @flags: flags specifying the property type * @name: name of the property * @props: enumeration lists with property bitflags * @num_props: size of the @props array * @supported_bits: bitmask of all supported enumeration values * * This creates a new bitmask drm property which can then be attached to a drm * object with drm_object_attach_property. The returned property object must be * freed with drm_property_destroy. * * Compared to plain enumeration properties userspace is allowed to set any * or'ed together combination of the predefined property bitflag values * * Returns: * A pointer to the newly created property on success, NULL on failure. 
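 *
 * Illustrative example (names and bit positions are hypothetical): with
 * props = { {0, "flip-x"}, {2, "flip-y"} } and supported_bits =
 * BIT(0) | BIT(2), userspace may set the property to 0, BIT(0), BIT(2)
 * or BIT(0) | BIT(2).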
/**
 * drm_property_create_bitmask - create a new bitmask property type
 * @dev: drm device
 * @flags: flags specifying the property type
 * @name: name of the property
 * @props: enumeration lists with property bitflags
 * @num_props: size of the @props array
 * @supported_bits: bitmask of all supported enumeration values
 *
 * This creates a new bitmask drm property which can then be attached to a drm
 * object with drm_object_attach_property. The returned property object must be
 * freed with drm_property_destroy.
 *
 * Compared to plain enumeration properties userspace is allowed to set any
 * or'ed together combination of the predefined property bitflag values
 *
 * Returns:
 * A pointer to the newly created property on success, NULL on failure.
 */
struct drm_property *drm_property_create_bitmask(struct drm_device *dev,
						 int flags, const char *name,
						 const struct drm_prop_enum_list *props,
						 int num_props,
						 uint64_t supported_bits)
{
	struct drm_property *property;
	int i, ret, index = 0;
	int num_values = hweight64(supported_bits);

	flags |= DRM_MODE_PROP_BITMASK;

	property = drm_property_create(dev, flags, name, num_values);
	if (!property)
		return NULL;
	for (i = 0; i < num_props; i++) {
		if (!(supported_bits & (1ULL << props[i].type)))
			continue;

		if (WARN_ON(index >= num_values)) {
			drm_property_destroy(dev, property);
			return NULL;
		}

		ret = drm_property_add_enum(property, index++,
					    props[i].type,
					    props[i].name);
		if (ret) {
			drm_property_destroy(dev, property);
			return NULL;
		}
	}

	return property;
}
EXPORT_SYMBOL(drm_property_create_bitmask);

static struct drm_property *property_create_range(struct drm_device *dev,
						  int flags, const char *name,
						  uint64_t min, uint64_t max)
{
	struct drm_property *property;

	property = drm_property_create(dev, flags, name, 2);
	if (!property)
		return NULL;

	property->values[0] = min;
	property->values[1] = max;

	return property;
}

/**
 * drm_property_create_range - create a new unsigned ranged property type
 * @dev: drm device
 * @flags: flags specifying the property type
 * @name: name of the property
 * @min: minimum value of the property
 * @max: maximum value of the property
 *
 * This creates a new generic drm property which can then be attached to a drm
 * object with drm_object_attach_property. The returned property object must be
 * freed with drm_property_destroy.
 *
 * Userspace is allowed to set any unsigned integer value in the (min, max)
 * range inclusive.
 *
 * Returns:
 * A pointer to the newly created property on success, NULL on failure.
 */
struct drm_property *drm_property_create_range(struct drm_device *dev, int flags,
					       const char *name,
					       uint64_t min, uint64_t max)
{
	return property_create_range(dev, DRM_MODE_PROP_RANGE | flags,
				     name, min, max);
}
EXPORT_SYMBOL(drm_property_create_range);

/**
 * drm_property_create_signed_range - create a new signed ranged property type
 * @dev: drm device
 * @flags: flags specifying the property type
 * @name: name of the property
 * @min: minimum value of the property
 * @max: maximum value of the property
 *
 * This creates a new generic drm property which can then be attached to a drm
 * object with drm_object_attach_property. The returned property object must be
 * freed with drm_property_destroy.
 *
 * Userspace is allowed to set any signed integer value in the (min, max)
 * range inclusive.
 *
 * Returns:
 * A pointer to the newly created property on success, NULL on failure.
 */
struct drm_property *drm_property_create_signed_range(struct drm_device *dev,
						      int flags, const char *name,
						      int64_t min, int64_t max)
{
	return property_create_range(dev, DRM_MODE_PROP_SIGNED_RANGE | flags,
				     name, I642U64(min), I642U64(max));
}
EXPORT_SYMBOL(drm_property_create_signed_range);
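
/*
 * Illustrative sketch (not part of this file): a ranged property built on the
 * helpers above. The "demo brightness" name and its 0..255 limits are
 * assumptions for the example.
 */
#if 0
static struct drm_property *demo_create_brightness_prop(struct drm_device *dev)
{
	/* Userspace may set any value in 0..255 inclusive. */
	return drm_property_create_range(dev, 0, "demo brightness", 0, 255);
}
#endif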
/**
 * drm_property_create_object - create a new object property type
 * @dev: drm device
 * @flags: flags specifying the property type
 * @name: name of the property
 * @type: object type from DRM_MODE_OBJECT_* defines
 *
 * This creates a new generic drm property which can then be attached to a drm
 * object with drm_object_attach_property. The returned property object must be
 * freed with drm_property_destroy.
 *
 * Userspace is only allowed to set this to any property value of the given
 * @type. Only useful for atomic properties, which is enforced.
 *
 * Returns:
 * A pointer to the newly created property on success, NULL on failure.
 */
struct drm_property *drm_property_create_object(struct drm_device *dev,
						int flags, const char *name,
						uint32_t type)
{
	struct drm_property *property;

	flags |= DRM_MODE_PROP_OBJECT;

	if (WARN_ON(!(flags & DRM_MODE_PROP_ATOMIC)))
		return NULL;

	property = drm_property_create(dev, flags, name, 1);
	if (!property)
		return NULL;

	property->values[0] = type;

	return property;
}
EXPORT_SYMBOL(drm_property_create_object);

/**
 * drm_property_create_bool - create a new boolean property type
 * @dev: drm device
 * @flags: flags specifying the property type
 * @name: name of the property
 *
 * This creates a new generic drm property which can then be attached to a drm
 * object with drm_object_attach_property. The returned property object must be
 * freed with drm_property_destroy.
 *
 * This is implemented as a ranged property with only {0, 1} as valid values.
 *
 * Returns:
 * A pointer to the newly created property on success, NULL on failure.
 */
struct drm_property *drm_property_create_bool(struct drm_device *dev, int flags,
					      const char *name)
{
	return drm_property_create_range(dev, flags, name, 0, 1);
}
EXPORT_SYMBOL(drm_property_create_bool);

/**
 * drm_property_add_enum - add a possible value to an enumeration property
 * @property: enumeration property to change
 * @index: index of the new enumeration
 * @value: value of the new enumeration
 * @name: symbolic name of the new enumeration
 *
 * This functions adds enumerations to a property.
 *
 * It's use is deprecated, drivers should use one of the more specific helpers
 * to directly create the property with all enumerations already attached.
 *
 * Returns:
 * Zero on success, error code on failure.
 */
int drm_property_add_enum(struct drm_property *property, int index,
			  uint64_t value, const char *name)
{
	struct drm_property_enum *prop_enum;

	if (!(drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
			drm_property_type_is(property, DRM_MODE_PROP_BITMASK)))
		return -EINVAL;

	/*
	 * Bitmask enum properties have the additional constraint of values
	 * from 0 to 63
	 */
	if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK) &&
			(value > 63))
		return -EINVAL;

	if (!list_empty(&property->enum_list)) {
		list_for_each_entry(prop_enum, &property->enum_list, head) {
			if (prop_enum->value == value) {
				strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
				prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
				return 0;
			}
		}
	}

	prop_enum = kzalloc(sizeof(struct drm_property_enum), GFP_KERNEL);
	if (!prop_enum)
		return -ENOMEM;

	strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
	prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
	prop_enum->value = value;

	property->values[index] = value;
	list_add_tail(&prop_enum->head, &property->enum_list);
	return 0;
}
EXPORT_SYMBOL(drm_property_add_enum);
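
/*
 * Illustrative sketch (not part of this file): a boolean property created
 * with drm_property_create_bool() above and attached to a connector. The
 * "demo dithering" name is an assumption for the example.
 */
#if 0
static void demo_attach_bool_prop(struct drm_device *dev,
				  struct drm_connector *connector)
{
	struct drm_property *prop;

	/* Internally a {0, 1} range property, see drm_property_create_bool(). */
	prop = drm_property_create_bool(dev, 0, "demo dithering");
	if (!prop)
		return;

	drm_object_attach_property(&connector->base, prop, 0);
}
#endif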
/**
 * drm_property_destroy - destroy a drm property
 * @dev: drm device
 * @property: property to destroy
 *
 * This function frees a property including any attached resources like
 * enumeration values.
 */
void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
{
	struct drm_property_enum *prop_enum, *pt;

	list_for_each_entry_safe(prop_enum, pt, &property->enum_list, head) {
		list_del(&prop_enum->head);
		kfree(prop_enum);
	}

	if (property->num_values)
		kfree(property->values);
	drm_mode_object_put(dev, &property->base);
	list_del(&property->head);
	kfree(property);
}
EXPORT_SYMBOL(drm_property_destroy);

/**
 * drm_object_attach_property - attach a property to a modeset object
 * @obj: drm modeset object
 * @property: property to attach
 * @init_val: initial value of the property
 *
 * This attaches the given property to the modeset object with the given initial
 * value. Currently this function cannot fail since the properties are stored in
 * a statically sized array.
 */
void drm_object_attach_property(struct drm_mode_object *obj,
				struct drm_property *property,
				uint64_t init_val)
{
	int count = obj->properties->count;

	if (count == DRM_OBJECT_MAX_PROPERTY) {
		WARN(1, "Failed to attach object property (type: 0x%x). Please "
			"increase DRM_OBJECT_MAX_PROPERTY by 1 for each time "
			"you see this message on the same object type.\n",
			obj->type);
		return;
	}

	obj->properties->properties[count] = property;
	obj->properties->values[count] = init_val;
	obj->properties->count++;
	if (property->flags & DRM_MODE_PROP_ATOMIC)
		obj->properties->atomic_count++;
}
EXPORT_SYMBOL(drm_object_attach_property);

/**
 * drm_object_property_set_value - set the value of a property
 * @obj: drm mode object to set property value for
 * @property: property to set
 * @val: value the property should be set to
 *
 * This functions sets a given property on a given object. This function only
 * changes the software state of the property, it does not call into the
 * driver's ->set_property callback.
 *
 * Returns:
 * Zero on success, error code on failure.
 */
int drm_object_property_set_value(struct drm_mode_object *obj,
				  struct drm_property *property, uint64_t val)
{
	int i;

	for (i = 0; i < obj->properties->count; i++) {
		if (obj->properties->properties[i] == property) {
			obj->properties->values[i] = val;
			return 0;
		}
	}

	return -EINVAL;
}
EXPORT_SYMBOL(drm_object_property_set_value);
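
/*
 * Illustrative sketch (not part of this file): reading a property's software
 * state back with drm_object_property_get_value(). As the kernel-doc below
 * notes, the value may be out of sync with the hardware.
 */
#if 0
static void demo_dump_prop(struct drm_mode_object *obj,
			   struct drm_property *prop)
{
	uint64_t val;

	if (drm_object_property_get_value(obj, prop, &val) == 0)
		DRM_DEBUG_KMS("property \"%s\" = %llu\n", prop->name,
			      (unsigned long long)val);
}
#endif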
/**
 * drm_object_property_get_value - retrieve the value of a property
 * @obj: drm mode object to get property value from
 * @property: property to retrieve
 * @val: storage for the property value
 *
 * This function retrieves the software state of the given property for the
 * given object. Since there is no driver callback to retrieve the current
 * property value this might be out of sync with the hardware, depending upon
 * the driver and property.
 *
 * Returns:
 * Zero on success, error code on failure.
 */
int drm_object_property_get_value(struct drm_mode_object *obj,
				  struct drm_property *property, uint64_t *val)
{
	int i;

	/* read-only properties bypass atomic mechanism and still store
	 * their value in obj->properties->values[].. mostly to avoid
	 * having to deal w/ EDID and similar props in atomic paths:
	 */
	if (drm_core_check_feature(property->dev, DRIVER_ATOMIC) &&
			!(property->flags & DRM_MODE_PROP_IMMUTABLE))
		return drm_atomic_get_property(obj, property, val);

	for (i = 0; i < obj->properties->count; i++) {
		if (obj->properties->properties[i] == property) {
			*val = obj->properties->values[i];
			return 0;
		}
	}

	return -EINVAL;
}
EXPORT_SYMBOL(drm_object_property_get_value);

/**
 * drm_mode_getproperty_ioctl - get the property metadata
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * This function retrieves the metadata for a given property, like the different
 * possible values for an enum property or the limits for a range property.
 *
 * Blob properties are special
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int drm_mode_getproperty_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file_priv)
{
	struct drm_mode_get_property *out_resp = data;
	struct drm_property *property;
	int enum_count = 0;
	int value_count = 0;
	int ret = 0, i;
	int copied;
	struct drm_property_enum *prop_enum;
	struct drm_mode_property_enum __user *enum_ptr;
	uint64_t __user *values_ptr;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	drm_modeset_lock_all(dev);
	property = drm_property_find(dev, out_resp->prop_id);
	if (!property) {
		ret = -ENOENT;
		goto done;
	}

	if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
			drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
		list_for_each_entry(prop_enum, &property->enum_list, head)
			enum_count++;
	}

	value_count = property->num_values;

	strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
	out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
	out_resp->flags = property->flags;

	if ((out_resp->count_values >= value_count) && value_count) {
		values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
		for (i = 0; i < value_count; i++) {
			if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
				ret = -EFAULT;
				goto done;
			}
		}
	}
	out_resp->count_values = value_count;

	if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
			drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
		if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
			copied = 0;
			enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
			list_for_each_entry(prop_enum, &property->enum_list, head) {

				if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
					ret = -EFAULT;
					goto done;
				}

				if (copy_to_user(&enum_ptr[copied].name,
						 &prop_enum->name, DRM_PROP_NAME_LEN)) {
					ret = -EFAULT;
					goto done;
				}
				copied++;
			}
		}
		out_resp->count_enum_blobs = enum_count;
	}

	/*
	 * NOTE: The idea seems to have been to use this to read all the blob
	 * property values. But nothing ever added them to the corresponding
	 * list, userspace always used the special-purpose get_blob ioctl to
	 * read the value for a blob property. It also doesn't make a lot of
	 * sense to return values here when everything else is just metadata for
	 * the property itself.
	 */
	if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
		out_resp->count_enum_blobs = 0;
done:
	drm_modeset_unlock_all(dev);
	return ret;
}
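
/*
 * Illustrative sketch (not part of this file): the userspace side of the
 * getproperty ioctl above, via libdrm's drmModeGetProperty() wrapper.
 */
#if 0
#include <stdio.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static void demo_print_enum_names(int fd, uint32_t prop_id)
{
	drmModePropertyPtr prop = drmModeGetProperty(fd, prop_id);
	int i;

	if (!prop)
		return;

	/* For enum/bitmask properties the names come back in prop->enums. */
	for (i = 0; i < prop->count_enums; i++)
		printf("%s\n", prop->enums[i].name);

	drmModeFreeProperty(prop);
}
#endif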
/**
 * drm_property_create_blob - Create new blob property
 *
 * Creates a new blob property for a specified DRM device, optionally
 * copying data.
 *
 * @dev: DRM device to create property for
 * @length: Length to allocate for blob data
 * @data: If specified, copies data into blob
 *
 * Returns:
 * New blob property with a single reference on success, or an ERR_PTR
 * value on failure.
 */
struct drm_property_blob *
drm_property_create_blob(struct drm_device *dev, size_t length,
			 const void *data)
{
	struct drm_property_blob *blob;
	int ret;

	if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
		return ERR_PTR(-EINVAL);

	blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
	if (!blob)
		return ERR_PTR(-ENOMEM);

	/* This must be explicitly initialised, so we can safely call list_del
	 * on it in the removal handler, even if it isn't in a file list. */
	INIT_LIST_HEAD(&blob->head_file);
	blob->length = length;
	blob->dev = dev;

	if (data)
		memcpy(blob->data, data, length);

	mutex_lock(&dev->mode_config.blob_lock);

	ret = drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
	if (ret) {
		kfree(blob);
		mutex_unlock(&dev->mode_config.blob_lock);
		return ERR_PTR(-EINVAL);
	}

	kref_init(&blob->refcount);

	list_add_tail(&blob->head_global,
		      &dev->mode_config.property_blob_list);

	mutex_unlock(&dev->mode_config.blob_lock);

	return blob;
}
EXPORT_SYMBOL(drm_property_create_blob);

/**
 * drm_property_free_blob - Blob property destructor
 *
 * Internal free function for blob properties; must not be used directly.
 *
 * @kref: Reference
 */
static void drm_property_free_blob(struct kref *kref)
{
	struct drm_property_blob *blob =
		container_of(kref, struct drm_property_blob, refcount);

	WARN_ON(!mutex_is_locked(&blob->dev->mode_config.blob_lock));

	list_del(&blob->head_global);
	list_del(&blob->head_file);
	drm_mode_object_put(blob->dev, &blob->base);

	kfree(blob);
}

/**
 * drm_property_unreference_blob - Unreference a blob property
 *
 * Drop a reference on a blob property. May free the object.
 *
 * @blob: Pointer to blob property
 */
void drm_property_unreference_blob(struct drm_property_blob *blob)
{
	struct drm_device *dev;

	if (!blob)
		return;

	dev = blob->dev;

	DRM_DEBUG("%p: blob ID: %d (%d)\n", blob, blob->base.id, atomic_read(&blob->refcount.refcount));

	if (kref_put_mutex(&blob->refcount, drm_property_free_blob,
			   &dev->mode_config.blob_lock))
		mutex_unlock(&dev->mode_config.blob_lock);
	else
		might_lock(&dev->mode_config.blob_lock);
}
EXPORT_SYMBOL(drm_property_unreference_blob);

/**
 * drm_property_unreference_blob_locked - Unreference a blob property with blob_lock held
 *
 * Drop a reference on a blob property. May free the object. This must be
 * called with blob_lock held.
 *
 * @blob: Pointer to blob property
 */
static void drm_property_unreference_blob_locked(struct drm_property_blob *blob)
{
	if (!blob)
		return;

	DRM_DEBUG("%p: blob ID: %d (%d)\n", blob, blob->base.id, atomic_read(&blob->refcount.refcount));

	kref_put(&blob->refcount, drm_property_free_blob);
}

/**
 * drm_property_destroy_user_blobs - destroy all blobs created by this client
 * @dev: DRM device
 * @file_priv: destroy all blobs owned by this file handle
 */
void drm_property_destroy_user_blobs(struct drm_device *dev,
				     struct drm_file *file_priv)
{
	struct drm_property_blob *blob, *bt;

	mutex_lock(&dev->mode_config.blob_lock);

	list_for_each_entry_safe(blob, bt, &file_priv->blobs, head_file) {
		list_del_init(&blob->head_file);
		drm_property_unreference_blob_locked(blob);
	}

	mutex_unlock(&dev->mode_config.blob_lock);
}
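
/*
 * Illustrative sketch (not part of this file): creating a blob property and
 * dropping the single reference that drm_property_create_blob() returns.
 */
#if 0
static void demo_blob_roundtrip(struct drm_device *dev)
{
	const char payload[] = "demo-data";
	struct drm_property_blob *blob;

	blob = drm_property_create_blob(dev, sizeof(payload), payload);
	if (IS_ERR(blob))
		return;

	/* ... publish blob->base.id somewhere ... */

	/* Drops the reference; the blob is freed once the last ref is gone. */
	drm_property_unreference_blob(blob);
}
#endif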
/**
 * drm_property_reference_blob - Take a reference on an existing property
 *
 * Take a new reference on an existing blob property.
 *
 * @blob: Pointer to blob property
 */
struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob)
{
	DRM_DEBUG("%p: blob ID: %d (%d)\n", blob, blob->base.id, atomic_read(&blob->refcount.refcount));
	kref_get(&blob->refcount);
	return blob;
}
EXPORT_SYMBOL(drm_property_reference_blob);

/*
 * Like drm_property_lookup_blob, but does not return an additional reference.
 * Must be called with blob_lock held.
 */
static struct drm_property_blob *__drm_property_lookup_blob(struct drm_device *dev,
							    uint32_t id)
{
	struct drm_mode_object *obj = NULL;
	struct drm_property_blob *blob;

	WARN_ON(!mutex_is_locked(&dev->mode_config.blob_lock));

	mutex_lock(&dev->mode_config.idr_mutex);
	obj = idr_find(&dev->mode_config.crtc_idr, id);
	if (!obj || (obj->type != DRM_MODE_OBJECT_BLOB) || (obj->id != id))
		blob = NULL;
	else
		blob = obj_to_blob(obj);
	mutex_unlock(&dev->mode_config.idr_mutex);

	return blob;
}

/**
 * drm_property_lookup_blob - look up a blob property and take a reference
 * @dev: drm device
 * @id: id of the blob property
 *
 * If successful, this takes an additional reference to the blob property.
 * callers need to make sure to eventually unreference the returned property
 * again, using @drm_property_unreference_blob.
 */
struct drm_property_blob *drm_property_lookup_blob(struct drm_device *dev,
						   uint32_t id)
{
	struct drm_property_blob *blob;

	mutex_lock(&dev->mode_config.blob_lock);
	blob = __drm_property_lookup_blob(dev, id);
	if (blob) {
		if (!kref_get_unless_zero(&blob->refcount))
			blob = NULL;
	}
	mutex_unlock(&dev->mode_config.blob_lock);

	return blob;
}
EXPORT_SYMBOL(drm_property_lookup_blob);
/**
 * drm_property_replace_global_blob - atomically replace existing blob property
 * @dev: drm device
 * @replace: location of blob property pointer to be replaced
 * @length: length of data for new blob, or 0 for no data
 * @data: content for new blob, or NULL for no data
 * @obj_holds_id: optional object for property holding blob ID
 * @prop_holds_id: optional property holding blob ID
 * @return 0 on success or error on failure
 *
 * This function will atomically replace a global property in the blob list,
 * optionally updating a property which holds the ID of that property. It is
 * guaranteed to be atomic: no caller will be allowed to see intermediate
 * results, and either the entire operation will succeed and clean up the
 * previous property, or it will fail and the state will be unchanged.
 *
 * If length is 0 or data is NULL, no new blob will be created, and the holding
 * property, if specified, will be set to 0.
 *
 * Access to the replace pointer is assumed to be protected by the caller, e.g.
 * by holding the relevant modesetting object lock for its parent.
 *
 * For example, a drm_connector has a 'PATH' property, which contains the ID
 * of a blob property with the value of the MST path information. Calling this
 * function with replace pointing to the connector's path_blob_ptr, length and
 * data set for the new path information, obj_holds_id set to the connector's
 * base object, and prop_holds_id set to the path property name, will perform
 * a completely atomic update. The access to path_blob_ptr is protected by the
 * caller holding a lock on the connector.
 */
static int drm_property_replace_global_blob(struct drm_device *dev,
					    struct drm_property_blob **replace,
					    size_t length,
					    const void *data,
					    struct drm_mode_object *obj_holds_id,
					    struct drm_property *prop_holds_id)
{
	struct drm_property_blob *new_blob = NULL;
	struct drm_property_blob *old_blob = NULL;
	int ret;

	WARN_ON(replace == NULL);

	old_blob = *replace;

	if (length && data) {
		new_blob = drm_property_create_blob(dev, length, data);
		if (IS_ERR(new_blob))
			return PTR_ERR(new_blob);
	}

	/* This does not need to be synchronised with blob_lock, as the
	 * get_properties ioctl locks all modesetting objects, and
	 * obj_holds_id must be locked before calling here, so we cannot
	 * have its value out of sync with the list membership modified
	 * below under blob_lock.
	 */
	if (obj_holds_id) {
		ret = drm_object_property_set_value(obj_holds_id,
						    prop_holds_id,
						    new_blob ?
							new_blob->base.id : 0);
		if (ret != 0)
			goto err_created;
	}

	drm_property_unreference_blob(old_blob);
	*replace = new_blob;

	return 0;

err_created:
	drm_property_unreference_blob(new_blob);
	return ret;
}

/**
 * drm_mode_getblob_ioctl - get the contents of a blob property value
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * This function retrieves the contents of a blob property. The value stored in
 * an object's blob property is just a normal modeset object id.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int drm_mode_getblob_ioctl(struct drm_device *dev,
			   void *data, struct drm_file *file_priv)
{
	struct drm_mode_get_blob *out_resp = data;
	struct drm_property_blob *blob;
	int ret = 0;
	void __user *blob_ptr;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	drm_modeset_lock_all(dev);
	mutex_lock(&dev->mode_config.blob_lock);
	blob = __drm_property_lookup_blob(dev, out_resp->blob_id);
	if (!blob) {
		ret = -ENOENT;
		goto done;
	}

	if (out_resp->length == blob->length) {
		blob_ptr = (void __user *)(unsigned long)out_resp->data;
		if (copy_to_user(blob_ptr, blob->data, blob->length)) {
			ret = -EFAULT;
			goto done;
		}
	}
	out_resp->length = blob->length;

done:
	mutex_unlock(&dev->mode_config.blob_lock);
	drm_modeset_unlock_all(dev);
	return ret;
}
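
/*
 * Illustrative sketch (not part of this file): the userspace side of the
 * createblob/destroyblob ioctls below, via the libdrm wrappers.
 */
#if 0
#include <xf86drm.h>
#include <xf86drmMode.h>

static int demo_blob_userspace(int fd, const void *data, size_t size)
{
	uint32_t blob_id;
	int ret;

	ret = drmModeCreatePropertyBlob(fd, data, size, &blob_id);
	if (ret)
		return ret;

	/* ... hand blob_id to an atomic commit, etc. ... */

	return drmModeDestroyPropertyBlob(fd, blob_id);
}
#endif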
/**
 * drm_mode_createblob_ioctl - create a new blob property
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * This function creates a new blob property with user-defined values. In order
 * to give us sensible validation and checking when creating, rather than at
 * every potential use, we also require a type to be provided upfront.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int drm_mode_createblob_ioctl(struct drm_device *dev,
			      void *data, struct drm_file *file_priv)
{
	struct drm_mode_create_blob *out_resp = data;
	struct drm_property_blob *blob;
	void __user *blob_ptr;
	int ret = 0;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	blob = drm_property_create_blob(dev, out_resp->length, NULL);
	if (IS_ERR(blob))
		return PTR_ERR(blob);

	blob_ptr = (void __user *)(unsigned long)out_resp->data;
	if (copy_from_user(blob->data, blob_ptr, out_resp->length)) {
		ret = -EFAULT;
		goto out_blob;
	}

	/* Dropping the lock between create_blob and our access here is safe
	 * as only the same file_priv can remove the blob; at this point, it is
	 * not associated with any file_priv.
	 */
	mutex_lock(&dev->mode_config.blob_lock);
	out_resp->blob_id = blob->base.id;
	list_add_tail(&blob->head_file, &file_priv->blobs);
	mutex_unlock(&dev->mode_config.blob_lock);

	return 0;

out_blob:
	drm_property_unreference_blob(blob);
	return ret;
}

/**
 * drm_mode_destroyblob_ioctl - destroy a user blob property
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * Destroy an existing user-defined blob property.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int drm_mode_destroyblob_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file_priv)
{
	struct drm_mode_destroy_blob *out_resp = data;
	struct drm_property_blob *blob = NULL, *bt;
	bool found = false;
	int ret = 0;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	mutex_lock(&dev->mode_config.blob_lock);
	blob = __drm_property_lookup_blob(dev, out_resp->blob_id);
	if (!blob) {
		ret = -ENOENT;
		goto err;
	}

	/* Ensure the property was actually created by this user. */
	list_for_each_entry(bt, &file_priv->blobs, head_file) {
		if (bt == blob) {
			found = true;
			break;
		}
	}

	if (!found) {
		ret = -EPERM;
		goto err;
	}

	/* We must drop head_file here, because we may not be the last
	 * reference on the blob. */
	list_del_init(&blob->head_file);
	drm_property_unreference_blob_locked(blob);
	mutex_unlock(&dev->mode_config.blob_lock);

	return 0;

err:
	mutex_unlock(&dev->mode_config.blob_lock);
	return ret;
}

/**
 * drm_mode_connector_set_path_property - set path property on connector
 * @connector: connector to set property on.
 * @path: path to use for property; must not be NULL.
 *
 * This creates a property to expose to userspace to specify a
 * connector path. This is mainly used for DisplayPort MST where
 * connectors have a topology and we want to allow userspace to give
 * them more meaningful names.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int drm_mode_connector_set_path_property(struct drm_connector *connector,
					 const char *path)
{
	struct drm_device *dev = connector->dev;
	int ret;

	ret = drm_property_replace_global_blob(dev,
					       &connector->path_blob_ptr,
					       strlen(path) + 1,
					       path,
					       &connector->base,
					       dev->mode_config.path_property);
	return ret;
}
EXPORT_SYMBOL(drm_mode_connector_set_path_property);
/**
 * drm_mode_connector_set_tile_property - set tile property on connector
 * @connector: connector to set property on.
 *
 * This looks up the tile information for a connector, and creates a
 * property for userspace to parse if it exists. The property is of
 * the form of 8 integers using ':' as a separator.
 *
 * Returns:
 * Zero on success, errno on failure.
 */
int drm_mode_connector_set_tile_property(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	char tile[256];
	int ret;

	if (!connector->has_tile) {
		ret  = drm_property_replace_global_blob(dev,
							&connector->tile_blob_ptr,
							0,
							NULL,
							&connector->base,
							dev->mode_config.tile_property);
		return ret;
	}

	snprintf(tile, 256, "%d:%d:%d:%d:%d:%d:%d:%d",
		 connector->tile_group->id, connector->tile_is_single_monitor,
		 connector->num_h_tile, connector->num_v_tile,
		 connector->tile_h_loc, connector->tile_v_loc,
		 connector->tile_h_size, connector->tile_v_size);

	ret = drm_property_replace_global_blob(dev,
					       &connector->tile_blob_ptr,
					       strlen(tile) + 1,
					       tile,
					       &connector->base,
					       dev->mode_config.tile_property);
	return ret;
}
EXPORT_SYMBOL(drm_mode_connector_set_tile_property);

/**
 * drm_mode_connector_update_edid_property - update the edid property of a connector
 * @connector: drm connector
 * @edid: new value of the edid property
 *
 * This function creates a new blob modeset object and assigns its id to the
 * connector's edid property.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int drm_mode_connector_update_edid_property(struct drm_connector *connector,
					    const struct edid *edid)
{
	struct drm_device *dev = connector->dev;
	size_t size = 0;
	int ret;

	/* ignore requests to set edid when overridden */
	if (connector->override_edid)
		return 0;

	if (edid)
		size = EDID_LENGTH * (1 + edid->extensions);

	ret = drm_property_replace_global_blob(dev,
					       &connector->edid_blob_ptr,
					       size,
					       edid,
					       &connector->base,
					       dev->mode_config.edid_property);
	return ret;
}
EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
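
/*
 * Illustrative sketch (not part of this file): the usual pattern in a
 * connector's ->get_modes() hook, pairing the EDID property update above
 * with drm_add_edid_modes(). demo_read_edid() is a hypothetical stand-in
 * for the driver's own DDC transfer.
 */
#if 0
static int demo_connector_get_modes(struct drm_connector *connector)
{
	struct edid *edid = demo_read_edid(connector);	/* hypothetical */
	int count = 0;

	/* Publish (or clear) the EDID blob before parsing modes from it. */
	drm_mode_connector_update_edid_property(connector, edid);
	if (edid) {
		count = drm_add_edid_modes(connector, edid);
		kfree(edid);
	}
	return count;
}
#endif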
/* Some properties could refer to dynamic refcnt'd objects, or things that
 * need special locking to handle lifetime issues (ie. to ensure the prop
 * value doesn't become invalid part way through the property update due to
 * race). The value returned by reference via 'obj' should be passed back
 * to drm_property_change_valid_put() after the property is set (and the
 * object to which the property is attached has a chance to take its own
 * reference).
 */
bool drm_property_change_valid_get(struct drm_property *property,
				   uint64_t value, struct drm_mode_object **ref)
{
	int i;

	if (property->flags & DRM_MODE_PROP_IMMUTABLE)
		return false;

	*ref = NULL;

	if (drm_property_type_is(property, DRM_MODE_PROP_RANGE)) {
		if (value < property->values[0] || value > property->values[1])
			return false;
		return true;
	} else if (drm_property_type_is(property, DRM_MODE_PROP_SIGNED_RANGE)) {
		int64_t svalue = U642I64(value);

		if (svalue < U642I64(property->values[0]) ||
				svalue > U642I64(property->values[1]))
			return false;
		return true;
	} else if (drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
		uint64_t valid_mask = 0;

		for (i = 0; i < property->num_values; i++)
			valid_mask |= (1ULL << property->values[i]);
		return !(value & ~valid_mask);
	} else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
		struct drm_property_blob *blob;

		if (value == 0)
			return true;

		blob = drm_property_lookup_blob(property->dev, value);
		if (blob) {
			*ref = &blob->base;
			return true;
		} else {
			return false;
		}
	} else if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
		/* a zero value for an object property translates to null: */
		if (value == 0)
			return true;

		/* handle refcnt'd objects specially: */
		if (property->values[0] == DRM_MODE_OBJECT_FB) {
			struct drm_framebuffer *fb;
			fb = drm_framebuffer_lookup(property->dev, value);
			if (fb) {
				*ref = &fb->base;
				return true;
			} else {
				return false;
			}
		} else {
			return _object_find(property->dev, value,
					    property->values[0]) != NULL;
		}
	}

	for (i = 0; i < property->num_values; i++)
		if (property->values[i] == value)
			return true;
	return false;
}

void drm_property_change_valid_put(struct drm_property *property,
				   struct drm_mode_object *ref)
{
	if (!ref)
		return;

	if (drm_property_type_is(property, DRM_MODE_PROP_OBJECT)) {
		if (property->values[0] == DRM_MODE_OBJECT_FB)
			drm_framebuffer_unreference(obj_to_fb(ref));
	} else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
		drm_property_unreference_blob(obj_to_blob(ref));
}
/**
 * drm_mode_connector_property_set_ioctl - set the current value of a connector property
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * This function sets the current value for a connector's property. It also
 * calls into a driver's ->set_property callback to update the hardware state.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
					  void *data, struct drm_file *file_priv)
{
	struct drm_mode_connector_set_property *conn_set_prop = data;
	struct drm_mode_obj_set_property obj_set_prop = {
		.value = conn_set_prop->value,
		.prop_id = conn_set_prop->prop_id,
		.obj_id = conn_set_prop->connector_id,
		.obj_type = DRM_MODE_OBJECT_CONNECTOR
	};

	/* It does all the locking and checking we need */
	return drm_mode_obj_set_property_ioctl(dev, &obj_set_prop, file_priv);
}

static int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
					   struct drm_property *property,
					   uint64_t value)
{
	int ret = -EINVAL;
	struct drm_connector *connector = obj_to_connector(obj);

	/* Do DPMS ourselves */
	if (property == connector->dev->mode_config.dpms_property) {
		ret = (*connector->funcs->dpms)(connector, (int)value);
	} else if (connector->funcs->set_property)
		ret = connector->funcs->set_property(connector, property, value);

	/* store the property value if successful */
	if (!ret)
		drm_object_property_set_value(&connector->base, property, value);
	return ret;
}

static int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
				      struct drm_property *property,
				      uint64_t value)
{
	int ret = -EINVAL;
	struct drm_crtc *crtc = obj_to_crtc(obj);

	if (crtc->funcs->set_property)
		ret = crtc->funcs->set_property(crtc, property, value);
	if (!ret)
		drm_object_property_set_value(obj, property, value);

	return ret;
}

/**
 * drm_mode_plane_set_obj_prop - set the value of a property
 * @plane: drm plane object to set property value for
 * @property: property to set
 * @value: value the property should be set to
 *
 * This functions sets a given property on a given plane object. This function
 * calls the driver's ->set_property callback and changes the software state of
 * the property if the callback succeeds.
 *
 * Returns:
 * Zero on success, error code on failure.
 */
int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
				struct drm_property *property,
				uint64_t value)
{
	int ret = -EINVAL;
	struct drm_mode_object *obj = &plane->base;

	if (plane->funcs->set_property)
		ret = plane->funcs->set_property(plane, property, value);
	if (!ret)
		drm_object_property_set_value(obj, property, value);

	return ret;
}
EXPORT_SYMBOL(drm_mode_plane_set_obj_prop);

/**
 * drm_mode_obj_get_properties_ioctl - get the current value of a object's property
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * This function retrieves the current value for an object's property. Compared
 * to the connector specific ioctl this one is extended to also work on crtc and
 * plane objects.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	struct drm_mode_obj_get_properties *arg = data;
	struct drm_mode_object *obj;
	int ret = 0;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	drm_modeset_lock_all(dev);

	obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
	if (!obj) {
		ret = -ENOENT;
		goto out;
	}
	if (!obj->properties) {
		ret = -EINVAL;
		goto out;
	}

	ret = get_properties(obj, file_priv->atomic,
			(uint32_t __user *)(unsigned long)(arg->props_ptr),
			(uint64_t __user *)(unsigned long)(arg->prop_values_ptr),
			&arg->count_props);

out:
	drm_modeset_unlock_all(dev);
	return ret;
}
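
/*
 * Illustrative sketch (not part of this file): userspace enumerating an
 * object's properties through the ioctl above, via libdrm.
 */
#if 0
#include <stdio.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static void demo_list_plane_props(int fd, uint32_t plane_id)
{
	drmModeObjectPropertiesPtr props;
	uint32_t i;

	props = drmModeObjectGetProperties(fd, plane_id,
					   DRM_MODE_OBJECT_PLANE);
	if (!props)
		return;

	for (i = 0; i < props->count_props; i++)
		printf("prop %u = %llu\n", props->props[i],
		       (unsigned long long)props->prop_values[i]);

	drmModeFreeObjectProperties(props);
}
#endif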
/**
 * drm_mode_obj_set_property_ioctl - set the current value of an object's property
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * This function sets the current value for an object's property. It also calls
 * into a driver's ->set_property callback to update the hardware state.
 * Compared to the connector specific ioctl this one is extended to also work on
 * crtc and plane objects.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	struct drm_mode_obj_set_property *arg = data;
	struct drm_mode_object *arg_obj;
	struct drm_mode_object *prop_obj;
	struct drm_property *property;
	int i, ret = -EINVAL;
	struct drm_mode_object *ref;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	drm_modeset_lock_all(dev);

	arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
	if (!arg_obj) {
		ret = -ENOENT;
		goto out;
	}
	if (!arg_obj->properties)
		goto out;

	for (i = 0; i < arg_obj->properties->count; i++)
		if (arg_obj->properties->properties[i]->base.id == arg->prop_id)
			break;

	if (i == arg_obj->properties->count)
		goto out;

	prop_obj = drm_mode_object_find(dev, arg->prop_id,
					DRM_MODE_OBJECT_PROPERTY);
	if (!prop_obj) {
		ret = -ENOENT;
		goto out;
	}
	property = obj_to_property(prop_obj);

	if (!drm_property_change_valid_get(property, arg->value, &ref))
		goto out;

	switch (arg_obj->type) {
	case DRM_MODE_OBJECT_CONNECTOR:
		ret = drm_mode_connector_set_obj_prop(arg_obj, property,
						      arg->value);
		break;
	case DRM_MODE_OBJECT_CRTC:
		ret = drm_mode_crtc_set_obj_prop(arg_obj, property, arg->value);
		break;
	case DRM_MODE_OBJECT_PLANE:
		ret = drm_mode_plane_set_obj_prop(obj_to_plane(arg_obj),
						  property, arg->value);
		break;
	}

	drm_property_change_valid_put(property, ref);

out:
	drm_modeset_unlock_all(dev);
	return ret;
}

/**
 * drm_mode_connector_attach_encoder - attach a connector to an encoder
 * @connector: connector to attach
 * @encoder: encoder to attach @connector to
 *
 * This function links up a connector to an encoder. Note that the routing
 * restrictions between encoders and crtcs are exposed to userspace through the
 * possible_clones and possible_crtcs bitmasks.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int drm_mode_connector_attach_encoder(struct drm_connector *connector,
				      struct drm_encoder *encoder)
{
	int i;

	/*
	 * In the past, drivers have attempted to model the static association
	 * of connector to encoder in simple connector/encoder devices using a
	 * direct assignment of connector->encoder = encoder. This connection
	 * is a logical one and the responsibility of the core, so drivers are
	 * expected not to mess with this.
	 *
	 * Note that the error return should've been enough here, but a large
	 * majority of drivers ignores the return value, so add in a big WARN
	 * to get people's attention.
	 */
	if (WARN_ON(connector->encoder))
		return -EINVAL;

	for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
		if (connector->encoder_ids[i] == 0) {
			connector->encoder_ids[i] = encoder->base.id;
			return 0;
		}
	}
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
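
/*
 * Illustrative sketch (not part of this file): typical driver-load wiring for
 * a fixed connector/encoder pair, ending with the attach call above. The
 * possible_crtcs mask value is an assumption for the example.
 */
#if 0
static int demo_output_init(struct drm_device *dev,
			    struct drm_connector *connector,
			    struct drm_encoder *encoder)
{
	/* This encoder can only be routed to the first CRTC. */
	encoder->possible_crtcs = 0x1;

	/* Record the link; userspace sees it via the connector's encoder_ids. */
	return drm_mode_connector_attach_encoder(connector, encoder);
}
#endif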
/**
 * drm_mode_crtc_set_gamma_size - set the gamma table size
 * @crtc: CRTC to set the gamma table size for
 * @gamma_size: size of the gamma table
 *
 * Drivers which support gamma tables should set this to the supported gamma
 * table size when initializing the CRTC. Currently the drm core only supports a
 * fixed gamma table size.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
				 int gamma_size)
{
	crtc->gamma_size = gamma_size;

	crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3,
				    GFP_KERNEL);
	if (!crtc->gamma_store) {
		crtc->gamma_size = 0;
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);

/**
 * drm_mode_gamma_set_ioctl - set the gamma table
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * Set the gamma table of a CRTC to the one passed in by the user. Userspace can
 * inquire the required gamma table size through drm_mode_gamma_get_ioctl.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int drm_mode_gamma_set_ioctl(struct drm_device *dev,
			     void *data, struct drm_file *file_priv)
{
	struct drm_mode_crtc_lut *crtc_lut = data;
	struct drm_crtc *crtc;
	void *r_base, *g_base, *b_base;
	int size;
	int ret = 0;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	drm_modeset_lock_all(dev);
	crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
	if (!crtc) {
		ret = -ENOENT;
		goto out;
	}

	if (crtc->funcs->gamma_set == NULL) {
		ret = -ENOSYS;
		goto out;
	}

	/* memcpy into gamma store */
	if (crtc_lut->gamma_size != crtc->gamma_size) {
		ret = -EINVAL;
		goto out;
	}

	size = crtc_lut->gamma_size * (sizeof(uint16_t));
	r_base = crtc->gamma_store;
	if (copy_from_user(r_base, (void __user *)(unsigned long)crtc_lut->red, size)) {
		ret = -EFAULT;
		goto out;
	}

	g_base = r_base + size;
	if (copy_from_user(g_base, (void __user *)(unsigned long)crtc_lut->green, size)) {
		ret = -EFAULT;
		goto out;
	}

	b_base = g_base + size;
	if (copy_from_user(b_base, (void __user *)(unsigned long)crtc_lut->blue, size)) {
		ret = -EFAULT;
		goto out;
	}

	crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);

out:
	drm_modeset_unlock_all(dev);
	return ret;

}

/**
 * drm_mode_gamma_get_ioctl - get the gamma table
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * Copy the current gamma table into the storage provided. This also provides
 * the gamma table size the driver expects, which can be used to size the
 * allocated storage.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int drm_mode_gamma_get_ioctl(struct drm_device *dev,
			     void *data, struct drm_file *file_priv)
{
	struct drm_mode_crtc_lut *crtc_lut = data;
	struct drm_crtc *crtc;
	void *r_base, *g_base, *b_base;
	int size;
	int ret = 0;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	drm_modeset_lock_all(dev);
	crtc = drm_crtc_find(dev, crtc_lut->crtc_id);
	if (!crtc) {
		ret = -ENOENT;
		goto out;
	}

	/* memcpy into gamma store */
	if (crtc_lut->gamma_size != crtc->gamma_size) {
		ret = -EINVAL;
		goto out;
	}

	size = crtc_lut->gamma_size * (sizeof(uint16_t));
	r_base = crtc->gamma_store;
	if (copy_to_user((void __user *)(unsigned long)crtc_lut->red, r_base, size)) {
		ret = -EFAULT;
		goto out;
	}

	g_base = r_base + size;
	if (copy_to_user((void __user *)(unsigned long)crtc_lut->green, g_base, size)) {
		ret = -EFAULT;
		goto out;
	}

	b_base = g_base + size;
	if (copy_to_user((void __user *)(unsigned long)crtc_lut->blue, b_base, size)) {
		ret = -EFAULT;
		goto out;
	}
out:
	drm_modeset_unlock_all(dev);
	return ret;
}
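
/*
 * Illustrative sketch (not part of this file): a CRTC init path reserving a
 * 256-entry gamma table with drm_mode_crtc_set_gamma_size() above, matching
 * what the gamma ioctls expect from userspace.
 */
#if 0
static int demo_crtc_init_gamma(struct drm_crtc *crtc)
{
	/* Allocates gamma_store (3 * 256 * u16) and sets crtc->gamma_size. */
	return drm_mode_crtc_set_gamma_size(crtc, 256);
}
#endif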
/**
 * drm_mode_page_flip_ioctl - schedule an asynchronous fb update
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * This schedules an asynchronous update on a given CRTC, called page flip.
 * Optionally a drm event is generated to signal the completion of the event.
 * Generic drivers cannot assume that a pageflip with changed framebuffer
 * properties (including driver specific metadata like tiling layout) will work,
 * but some drivers support e.g. pixel format changes through the pageflip
 * ioctl.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int drm_mode_page_flip_ioctl(struct drm_device *dev,
			     void *data, struct drm_file *file_priv)
{
	struct drm_mode_crtc_page_flip *page_flip = data;
	struct drm_crtc *crtc;
	struct drm_framebuffer *fb = NULL;
	struct drm_pending_vblank_event *e = NULL;
	unsigned long flags;
	int ret = -EINVAL;

	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
	    page_flip->reserved != 0)
		return -EINVAL;

	if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip)
		return -EINVAL;

	crtc = drm_crtc_find(dev, page_flip->crtc_id);
	if (!crtc)
		return -ENOENT;

	drm_modeset_lock_crtc(crtc, crtc->primary);
	if (crtc->primary->fb == NULL) {
		/* The framebuffer is currently unbound, presumably
		 * due to a hotplug event, that userspace has not
		 * yet discovered.
		 */
		ret = -EBUSY;
		goto out;
	}

	if (crtc->funcs->page_flip == NULL)
		goto out;

	fb = drm_framebuffer_lookup(dev, page_flip->fb_id);
	if (!fb) {
		ret = -ENOENT;
		goto out;
	}

	if (crtc->state) {
		const struct drm_plane_state *state = crtc->primary->state;

		ret = check_src_coords(state->src_x, state->src_y,
				       state->src_w, state->src_h, fb);
	} else {
		ret = drm_crtc_check_viewport(crtc, crtc->x, crtc->y, &crtc->mode, fb);
	}
	if (ret)
		goto out;

	if (crtc->primary->fb->pixel_format != fb->pixel_format) {
		DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n");
		ret = -EINVAL;
		goto out;
	}

	if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
		ret = -ENOMEM;
		spin_lock_irqsave(&dev->event_lock, flags);
		if (file_priv->event_space < sizeof(e->event)) {
			spin_unlock_irqrestore(&dev->event_lock, flags);
			goto out;
		}
		file_priv->event_space -= sizeof(e->event);
		spin_unlock_irqrestore(&dev->event_lock, flags);

		e = kzalloc(sizeof(*e), GFP_KERNEL);
		if (e == NULL) {
			spin_lock_irqsave(&dev->event_lock, flags);
			file_priv->event_space += sizeof(e->event);
			spin_unlock_irqrestore(&dev->event_lock, flags);
			goto out;
		}

		e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
		e->event.base.length = sizeof(e->event);
		e->event.user_data = page_flip->user_data;
		e->base.event = &e->event.base;
		e->base.file_priv = file_priv;
		e->base.destroy =
			(void (*) (struct drm_pending_event *)) kfree;
	}

	crtc->primary->old_fb = crtc->primary->fb;
	ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
	if (ret) {
		if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
			spin_lock_irqsave(&dev->event_lock, flags);
			file_priv->event_space += sizeof(e->event);
			spin_unlock_irqrestore(&dev->event_lock, flags);
			kfree(e);
		}
		/* Keep the old fb, don't unref it. */
		crtc->primary->old_fb = NULL;
	} else {
		crtc->primary->fb = fb;
		/* Unref only the old framebuffer. */
		fb = NULL;
	}

out:
	if (fb)
		drm_framebuffer_unreference(fb);
	if (crtc->primary->old_fb)
		drm_framebuffer_unreference(crtc->primary->old_fb);
	crtc->primary->old_fb = NULL;
	drm_modeset_unlock_crtc(crtc);

	return ret;
}
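
/*
 * Illustrative sketch (not part of this file): userspace requesting a flip
 * with an event through the ioctl above, then reaping the completion via
 * drmHandleEvent(). Error handling and the usual poll() on the fd before
 * reading events are trimmed for brevity.
 */
#if 0
#include <xf86drm.h>
#include <xf86drmMode.h>

static void demo_flip_handler(int fd, unsigned int frame,
			      unsigned int sec, unsigned int usec,
			      void *user_data)
{
	/* Called once the new framebuffer has been latched. */
}

static int demo_flip(int fd, uint32_t crtc_id, uint32_t fb_id)
{
	drmEventContext evctx = {
		.version = DRM_EVENT_CONTEXT_VERSION,
		.page_flip_handler = demo_flip_handler,
	};
	int ret;

	ret = drmModePageFlip(fd, crtc_id, fb_id,
			      DRM_MODE_PAGE_FLIP_EVENT, NULL);
	if (ret)
		return ret;

	/* Reads and dispatches the DRM_EVENT_FLIP_COMPLETE event. */
	return drmHandleEvent(fd, &evctx);
}
#endif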
/**
 * drm_mode_config_reset - call ->reset callbacks
 * @dev: drm device
 *
 * This functions calls all the crtc's, encoder's and connector's ->reset
 * callback. Drivers can use this in e.g. their driver load or resume code to
 * reset hardware and software state.
 */
void drm_mode_config_reset(struct drm_device *dev)
{
	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_encoder *encoder;
	struct drm_connector *connector;

	drm_for_each_plane(plane, dev)
		if (plane->funcs->reset)
			plane->funcs->reset(plane);

	drm_for_each_crtc(crtc, dev)
		if (crtc->funcs->reset)
			crtc->funcs->reset(crtc);

	drm_for_each_encoder(encoder, dev)
		if (encoder->funcs->reset)
			encoder->funcs->reset(encoder);

	mutex_lock(&dev->mode_config.mutex);
	drm_for_each_connector(connector, dev)
		if (connector->funcs->reset)
			connector->funcs->reset(connector);
	mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_mode_config_reset);

/**
 * drm_mode_create_dumb_ioctl - create a dumb backing storage buffer
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * This creates a new dumb buffer in the driver's backing storage manager (GEM,
 * TTM or something else entirely) and returns the resulting buffer handle. This
 * handle can then be wrapped up into a framebuffer modeset object.
 *
 * Note that userspace is not allowed to use such objects for render
 * acceleration - drivers must create their own private ioctls for such a use
 * case.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int drm_mode_create_dumb_ioctl(struct drm_device *dev,
			       void *data, struct drm_file *file_priv)
{
	struct drm_mode_create_dumb *args = data;
	u32 cpp, stride, size;

	if (!dev->driver->dumb_create)
		return -ENOSYS;
	if (!args->width || !args->height || !args->bpp)
		return -EINVAL;

	/* overflow checks for 32bit size calculations */
	/* NOTE: DIV_ROUND_UP() can overflow */
	cpp = DIV_ROUND_UP(args->bpp, 8);
	if (!cpp || cpp > 0xffffffffU / args->width)
		return -EINVAL;
	stride = cpp * args->width;
	if (args->height > 0xffffffffU / stride)
		return -EINVAL;

	/* test for wrap-around */
	size = args->height * stride;
	if (PAGE_ALIGN(size) == 0)
		return -EINVAL;

	/*
	 * handle, pitch and size are output parameters. Zero them out to
	 * prevent drivers from accidentally using uninitialized data. Since
	 * not all existing userspace is clearing these fields properly we
	 * cannot reject IOCTL with garbage in them.
	 */
	args->handle = 0;
	args->pitch = 0;
	args->size = 0;

	return dev->driver->dumb_create(file_priv, dev, args);
}

/**
 * drm_mode_mmap_dumb_ioctl - create an mmap offset for a dumb backing storage buffer
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * Allocate an offset in the drm device node's address space to be able to
 * memory map a dumb buffer.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
			     void *data, struct drm_file *file_priv)
{
	struct drm_mode_map_dumb *args = data;

	/* call driver ioctl to get mmap offset */
	if (!dev->driver->dumb_map_offset)
		return -ENOSYS;

	return dev->driver->dumb_map_offset(file_priv, dev, args->handle, &args->offset);
}
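
/*
 * Illustrative sketch (not part of this file): userspace creating and mapping
 * a dumb buffer through the two ioctls above, using the raw uapi structs.
 * A real caller should also check mmap() for MAP_FAILED.
 */
#if 0
#include <sys/mman.h>
#include <xf86drm.h>
#include <drm/drm_mode.h>

static void *demo_map_dumb(int fd, uint32_t w, uint32_t h, uint32_t *handle)
{
	struct drm_mode_create_dumb create = {
		.width = w, .height = h, .bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };

	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return NULL;

	map.handle = create.handle;
	if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return NULL;

	*handle = create.handle;
	/* The fake offset from the driver selects this BO in the mmap space. */
	return mmap(NULL, create.size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, map.offset);
}
#endif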
/**
 * drm_mode_destroy_dumb_ioctl - destroy a dumb backing storage buffer
 * @dev: DRM device
 * @data: ioctl data
 * @file_priv: DRM file info
 *
 * This destroys the userspace handle for the given dumb backing storage buffer.
 * Since buffer objects must be reference counted in the kernel a buffer object
 * won't be immediately freed if a framebuffer modeset object still uses it.
 *
 * Called by the user via ioctl.
 *
 * Returns:
 * Zero on success, negative errno on failure.
 */
int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
				void *data, struct drm_file *file_priv)
{
	struct drm_mode_destroy_dumb *args = data;

	if (!dev->driver->dumb_destroy)
		return -ENOSYS;

	return dev->driver->dumb_destroy(file_priv, dev, args->handle);
}

/**
 * drm_fb_get_bpp_depth - get the bpp/depth values for format
 * @format: pixel format (DRM_FORMAT_*)
 * @depth: storage for the depth value
 * @bpp: storage for the bpp value
 *
 * This only supports RGB formats here for compat with code that doesn't use
 * pixel formats directly yet.
 */
void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
			  int *bpp)
{
	switch (format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB332:
	case DRM_FORMAT_BGR233:
		*depth = 8;
		*bpp = 8;
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_XBGR1555:
	case DRM_FORMAT_RGBX5551:
	case DRM_FORMAT_BGRX5551:
	case DRM_FORMAT_ARGB1555:
	case DRM_FORMAT_ABGR1555:
	case DRM_FORMAT_RGBA5551:
	case DRM_FORMAT_BGRA5551:
		*depth = 15;
		*bpp = 16;
		break;
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_BGR565:
		*depth = 16;
		*bpp = 16;
		break;
	case DRM_FORMAT_RGB888:
	case DRM_FORMAT_BGR888:
		*depth = 24;
		*bpp = 24;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_RGBX8888:
	case DRM_FORMAT_BGRX8888:
		*depth = 24;
		*bpp = 32;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_RGBX1010102:
	case DRM_FORMAT_BGRX1010102:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_ABGR2101010:
	case DRM_FORMAT_RGBA1010102:
	case DRM_FORMAT_BGRA1010102:
		*depth = 30;
		*bpp = 32;
		break;
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_RGBA8888:
	case DRM_FORMAT_BGRA8888:
		*depth = 32;
		*bpp = 32;
		break;
	default:
		DRM_DEBUG_KMS("unsupported pixel format %s\n",
			      drm_get_format_name(format));
		*depth = 0;
		*bpp = 0;
		break;
	}
}
EXPORT_SYMBOL(drm_fb_get_bpp_depth);

/**
 * drm_format_num_planes - get the number of planes for format
 * @format: pixel format (DRM_FORMAT_*)
 *
 * Returns:
 * The number of planes used by the specified pixel format.
 */
int drm_format_num_planes(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_YUV410:
	case DRM_FORMAT_YVU410:
	case DRM_FORMAT_YUV411:
	case DRM_FORMAT_YVU411:
	case DRM_FORMAT_YUV420:
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YUV422:
	case DRM_FORMAT_YVU422:
	case DRM_FORMAT_YUV444:
	case DRM_FORMAT_YVU444:
		return 3;
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
	case DRM_FORMAT_NV16:
	case DRM_FORMAT_NV61:
	case DRM_FORMAT_NV24:
	case DRM_FORMAT_NV42:
		return 2;
	default:
		return 1;
	}
}
EXPORT_SYMBOL(drm_format_num_planes);
/**
 * drm_format_plane_cpp - determine the bytes per pixel value
 * @format: pixel format (DRM_FORMAT_*)
 * @plane: plane index
 *
 * Returns:
 * The bytes per pixel value for the specified plane.
 */
int drm_format_plane_cpp(uint32_t format, int plane)
{
	unsigned int depth;
	int bpp;

	if (plane >= drm_format_num_planes(format))
		return 0;

	switch (format) {
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
		return 2;
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
	case DRM_FORMAT_NV16:
	case DRM_FORMAT_NV61:
	case DRM_FORMAT_NV24:
	case DRM_FORMAT_NV42:
		return plane ? 2 : 1;
	case DRM_FORMAT_YUV410:
	case DRM_FORMAT_YVU410:
	case DRM_FORMAT_YUV411:
	case DRM_FORMAT_YVU411:
	case DRM_FORMAT_YUV420:
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_YUV422:
	case DRM_FORMAT_YVU422:
	case DRM_FORMAT_YUV444:
	case DRM_FORMAT_YVU444:
		return 1;
	default:
		drm_fb_get_bpp_depth(format, &depth, &bpp);
		return bpp >> 3;
	}
}
EXPORT_SYMBOL(drm_format_plane_cpp);

/**
 * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
 * @format: pixel format (DRM_FORMAT_*)
 *
 * Returns:
 * The horizontal chroma subsampling factor for the
 * specified pixel format.
 */
int drm_format_horz_chroma_subsampling(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_YUV411:
	case DRM_FORMAT_YVU411:
	case DRM_FORMAT_YUV410:
	case DRM_FORMAT_YVU410:
		return 4;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
	case DRM_FORMAT_NV16:
	case DRM_FORMAT_NV61:
	case DRM_FORMAT_YUV422:
	case DRM_FORMAT_YVU422:
	case DRM_FORMAT_YUV420:
	case DRM_FORMAT_YVU420:
		return 2;
	default:
		return 1;
	}
}
EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);

/**
 * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
 * @format: pixel format (DRM_FORMAT_*)
 *
 * Returns:
 * The vertical chroma subsampling factor for the
 * specified pixel format.
 */
int drm_format_vert_chroma_subsampling(uint32_t format)
{
	switch (format) {
	case DRM_FORMAT_YUV410:
	case DRM_FORMAT_YVU410:
		return 4;
	case DRM_FORMAT_YUV420:
	case DRM_FORMAT_YVU420:
	case DRM_FORMAT_NV12:
	case DRM_FORMAT_NV21:
		return 2;
	default:
		return 1;
	}
}
EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);

/**
 * drm_rotation_simplify() - Try to simplify the rotation
 * @rotation: Rotation to be simplified
 * @supported_rotations: Supported rotations
 *
 * Attempt to simplify the rotation to a form that is supported.
 * E.g. if the hardware supports everything except DRM_REFLECT_X
 * one could call this function like this:
 *
 * drm_rotation_simplify(rotation, BIT(DRM_ROTATE_0) |
 *                       BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_180) |
 *                       BIT(DRM_ROTATE_270) | BIT(DRM_REFLECT_Y));
 *
 * to eliminate the DRM_REFLECT_X flag. Depending on what kind of
 * transforms the hardware supports, this function may not
 * be able to produce a supported transform, so the caller should
 * check the result afterwards.
 */
unsigned int drm_rotation_simplify(unsigned int rotation,
				   unsigned int supported_rotations)
{
	if (rotation & ~supported_rotations) {
		rotation ^= BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y);
		rotation = (rotation & DRM_REFLECT_MASK) |
			   BIT((ffs(rotation & DRM_ROTATE_MASK) + 1) % 4);
	}

	return rotation;
}
EXPORT_SYMBOL(drm_rotation_simplify);
/**
 * drm_mode_config_init - initialize DRM mode_configuration structure
 * @dev: DRM device
 *
 * Initialize @dev's mode_config structure, used for tracking the graphics
 * configuration of @dev.
 *
 * Since this initializes the modeset locks, no locking is possible. Which is no
 * problem, since this should happen single threaded at init time. It is the
 * driver's problem to ensure this guarantee.
 *
 */
void drm_mode_config_init(struct drm_device *dev)
{
	mutex_init(&dev->mode_config.mutex);
	drm_modeset_lock_init(&dev->mode_config.connection_mutex);
	mutex_init(&dev->mode_config.idr_mutex);
	mutex_init(&dev->mode_config.fb_lock);
	mutex_init(&dev->mode_config.blob_lock);
	INIT_LIST_HEAD(&dev->mode_config.fb_list);
	INIT_LIST_HEAD(&dev->mode_config.crtc_list);
	INIT_LIST_HEAD(&dev->mode_config.connector_list);
	INIT_LIST_HEAD(&dev->mode_config.encoder_list);
	INIT_LIST_HEAD(&dev->mode_config.property_list);
	INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
	INIT_LIST_HEAD(&dev->mode_config.plane_list);
	idr_init(&dev->mode_config.crtc_idr);
	idr_init(&dev->mode_config.tile_idr);

	drm_modeset_lock_all(dev);
	drm_mode_create_standard_properties(dev);
	drm_modeset_unlock_all(dev);

	/* Just to be sure */
	dev->mode_config.num_fb = 0;
	dev->mode_config.num_connector = 0;
	dev->mode_config.num_crtc = 0;
	dev->mode_config.num_encoder = 0;
	dev->mode_config.num_overlay_plane = 0;
	dev->mode_config.num_total_plane = 0;
}
EXPORT_SYMBOL(drm_mode_config_init);
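
/*
 * Illustrative sketch (not part of this file): the usual bracketing of
 * drm_mode_config_init() at driver load with drm_mode_config_cleanup()
 * (below) at unload, after filling in the driver's limits. The size limits
 * used here are assumptions for the example.
 */
#if 0
static int demo_modeset_init(struct drm_device *dev)
{
	drm_mode_config_init(dev);

	/* Driver-specific limits. */
	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 4096;
	dev->mode_config.max_height = 4096;

	/* ... create crtcs/planes/encoders/connectors ... */
	return 0;
}

static void demo_modeset_fini(struct drm_device *dev)
{
	/* Tears down all objects created above, see the kernel-doc below. */
	drm_mode_config_cleanup(dev);
}
#endif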
/**
 * drm_mode_config_cleanup - free up DRM mode_config info
 * @dev: DRM device
 *
 * Free up all the connectors and CRTCs associated with this DRM device, then
 * free up the framebuffers and associated buffer objects.
 *
 * Note that since this /should/ happen single-threaded at driver/device
 * teardown time, no locking is required. It's the driver's job to ensure that
 * this guarantee actually holds true.
 *
 * FIXME: cleanup any dangling user buffer objects too
 */
void drm_mode_config_cleanup(struct drm_device *dev)
{
	struct drm_connector *connector, *ot;
	struct drm_crtc *crtc, *ct;
	struct drm_encoder *encoder, *enct;
	struct drm_framebuffer *fb, *fbt;
	struct drm_property *property, *pt;
	struct drm_property_blob *blob, *bt;
	struct drm_plane *plane, *plt;

	list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
				 head) {
		encoder->funcs->destroy(encoder);
	}

	list_for_each_entry_safe(connector, ot,
				 &dev->mode_config.connector_list, head) {
		connector->funcs->destroy(connector);
	}

	list_for_each_entry_safe(property, pt, &dev->mode_config.property_list,
				 head) {
		drm_property_destroy(dev, property);
	}

	list_for_each_entry_safe(blob, bt, &dev->mode_config.property_blob_list,
				 head_global) {
		drm_property_unreference_blob(blob);
	}

	/*
	 * Single-threaded teardown context, so it's not required to grab the
	 * fb_lock to protect against concurrent fb_list access. Contrary, it
	 * would actually deadlock with the drm_framebuffer_cleanup function.
	 *
	 * Also, if there are any framebuffers left, that's a driver leak now,
	 * so politely WARN about this.
	 */
	WARN_ON(!list_empty(&dev->mode_config.fb_list));
	list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
		drm_framebuffer_free(&fb->refcount);
	}

	list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
				 head) {
		plane->funcs->destroy(plane);
	}

	list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
		crtc->funcs->destroy(crtc);
	}

	idr_destroy(&dev->mode_config.tile_idr);
	idr_destroy(&dev->mode_config.crtc_idr);
	drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
}
EXPORT_SYMBOL(drm_mode_config_cleanup);

struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
						       unsigned int supported_rotations)
{
	static const struct drm_prop_enum_list props[] = {
		{ DRM_ROTATE_0,   "rotate-0" },
		{ DRM_ROTATE_90,  "rotate-90" },
		{ DRM_ROTATE_180, "rotate-180" },
		{ DRM_ROTATE_270, "rotate-270" },
		{ DRM_REFLECT_X,  "reflect-x" },
		{ DRM_REFLECT_Y,  "reflect-y" },
	};

	return drm_property_create_bitmask(dev, 0, "rotation",
					   props, ARRAY_SIZE(props),
					   supported_rotations);
}
EXPORT_SYMBOL(drm_mode_create_rotation_property);

/**
 * DOC: Tile group
 *
 * Tile groups are used to represent tiled monitors with a unique
 * integer identifier. Tiled monitors using DisplayID v1.3 have
 * a unique 8-byte handle, we store this in a tile group, so we
 * have a common identifier for all tiles in a monitor group.
 */
static void drm_tile_group_free(struct kref *kref)
{
	struct drm_tile_group *tg = container_of(kref, struct drm_tile_group, refcount);
	struct drm_device *dev = tg->dev;
	mutex_lock(&dev->mode_config.idr_mutex);
	idr_remove(&dev->mode_config.tile_idr, tg->id);
	mutex_unlock(&dev->mode_config.idr_mutex);
	kfree(tg);
}

/**
 * drm_mode_put_tile_group - drop a reference to a tile group.
 * @dev: DRM device
 * @tg: tile group to drop reference to.
 *
 * drop reference to tile group and free if 0.
 */
void drm_mode_put_tile_group(struct drm_device *dev,
			     struct drm_tile_group *tg)
{
	kref_put(&tg->refcount, drm_tile_group_free);
}

/**
 * drm_mode_get_tile_group - get a reference to an existing tile group
 * @dev: DRM device
 * @topology: 8-bytes unique per monitor.
 *
 * Use the unique bytes to get a reference to an existing tile group.
 *
 * RETURNS:
 * tile group or NULL if not found.
 */
struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
					       char topology[8])
{
	struct drm_tile_group *tg;
	int id;
	mutex_lock(&dev->mode_config.idr_mutex);
	idr_for_each_entry(&dev->mode_config.tile_idr, tg, id) {
		if (!memcmp(tg->group_data, topology, 8)) {
			if (!kref_get_unless_zero(&tg->refcount))
				tg = NULL;
			mutex_unlock(&dev->mode_config.idr_mutex);
			return tg;
		}
	}
	mutex_unlock(&dev->mode_config.idr_mutex);
	return NULL;
}
EXPORT_SYMBOL(drm_mode_get_tile_group);

/**
 * drm_mode_create_tile_group - create a tile group from a displayid description
 * @dev: DRM device
 * @topology: 8-bytes unique per monitor.
 *
 * Create a tile group for the unique monitor, and get a unique
 * identifier for the tile group.
 *
 * RETURNS:
 * new tile group or error.
 */
struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
						  char topology[8])
{
	struct drm_tile_group *tg;
	int ret;

	tg = kzalloc(sizeof(*tg), GFP_KERNEL);
	if (!tg)
		return ERR_PTR(-ENOMEM);

	kref_init(&tg->refcount);
	memcpy(tg->group_data, topology, 8);
	tg->dev = dev;

	mutex_lock(&dev->mode_config.idr_mutex);
	ret = idr_alloc(&dev->mode_config.tile_idr, tg, 1, 0, GFP_KERNEL);
	if (ret >= 0) {
		tg->id = ret;
	} else {
		kfree(tg);
		tg = ERR_PTR(ret);
	}

	mutex_unlock(&dev->mode_config.idr_mutex);
	return tg;
}
EXPORT_SYMBOL(drm_mode_create_tile_group);
omor1/linux-430
drivers/gpu/drm/drm_crtc.c
C
gpl-2.0
164,914
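The tile-group code above combines an idr for ID-to-pointer lookup with a kref for lifetime: lookups run under idr_mutex and take a reference with kref_get_unless_zero(), so a racing final kref_put() can never hand a dying object back to the caller. A minimal self-contained sketch of that pattern, assuming kernel context; obj, obj_get and obj_put are illustrative names, not DRM API:

#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct obj {
	struct kref refcount;
	int id;
};

static DEFINE_IDR(obj_idr);
static DEFINE_MUTEX(obj_lock);

static void obj_release(struct kref *kref)
{
	struct obj *o = container_of(kref, struct obj, refcount);

	/* Remove the idr slot under the same lock the lookup takes. */
	mutex_lock(&obj_lock);
	idr_remove(&obj_idr, o->id);
	mutex_unlock(&obj_lock);
	kfree(o);
}

static void obj_put(struct obj *o)
{
	/* Never call this with obj_lock held: obj_release() takes it. */
	kref_put(&o->refcount, obj_release);
}

static struct obj *obj_get(int id)
{
	struct obj *o;

	mutex_lock(&obj_lock);
	o = idr_find(&obj_idr, id);
	/* Refcount already zero means a release is in flight: treat as a miss. */
	if (o && !kref_get_unless_zero(&o->refcount))
		o = NULL;
	mutex_unlock(&obj_lock);
	return o;
}

drm_mode_get_tile_group() walks the idr with idr_for_each_entry() and memcmp() instead of idr_find() because its key is the 8-byte topology handle rather than the idr ID, but the take-a-reference-under-the-lock step is the same.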
/* inode.c: /proc/openprom handling routines * * Copyright (C) 1996-1999 Jakub Jelinek (jakub@redhat.com) * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be) */ #include <linux/module.h> #include <linux/types.h> #include <linux/string.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/seq_file.h> #include <linux/magic.h> #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/prom.h> #include <asm/uaccess.h> static DEFINE_MUTEX(op_mutex); #define OPENPROM_ROOT_INO 0 enum op_inode_type { op_inode_node, op_inode_prop, }; union op_inode_data { struct device_node *node; struct property *prop; }; struct op_inode_info { struct inode vfs_inode; enum op_inode_type type; union op_inode_data u; }; static struct inode *openprom_iget(struct super_block *sb, ino_t ino); static inline struct op_inode_info *OP_I(struct inode *inode) { return container_of(inode, struct op_inode_info, vfs_inode); } static int is_string(unsigned char *p, int len) { int i; for (i = 0; i < len; i++) { unsigned char val = p[i]; if ((i && !val) || (val >= ' ' && val <= '~')) continue; return 0; } return 1; } static int property_show(struct seq_file *f, void *v) { struct property *prop = f->private; void *pval; int len; len = prop->length; pval = prop->value; if (is_string(pval, len)) { while (len > 0) { int n = strlen(pval); seq_printf(f, "%s", (char *) pval); /* Skip over the NULL byte too. */ pval += n + 1; len -= n + 1; if (len > 0) seq_printf(f, " + "); } } else { if (len & 3) { while (len) { len--; if (len) seq_printf(f, "%02x.", *(unsigned char *) pval); else seq_printf(f, "%02x", *(unsigned char *) pval); pval++; } } else { while (len >= 4) { len -= 4; if (len) seq_printf(f, "%08x.", *(unsigned int *) pval); else seq_printf(f, "%08x", *(unsigned int *) pval); pval += 4; } } } seq_printf(f, "\n"); return 0; } static void *property_start(struct seq_file *f, loff_t *pos) { if (*pos == 0) return pos; return NULL; } static void *property_next(struct seq_file *f, void *v, loff_t *pos) { (*pos)++; return NULL; } static void property_stop(struct seq_file *f, void *v) { /* Nothing to do */ } static const struct seq_operations property_op = { .start = property_start, .next = property_next, .stop = property_stop, .show = property_show }; static int property_open(struct inode *inode, struct file *file) { struct op_inode_info *oi = OP_I(inode); int ret; BUG_ON(oi->type != op_inode_prop); ret = seq_open(file, &property_op); if (!ret) { struct seq_file *m = file->private_data; m->private = oi->u.prop; } return ret; } static const struct file_operations openpromfs_prop_ops = { .open = property_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int openpromfs_readdir(struct file *, void *, filldir_t); static const struct file_operations openprom_operations = { .read = generic_read_dir, .readdir = openpromfs_readdir, .llseek = generic_file_llseek, }; static struct dentry *openpromfs_lookup(struct inode *, struct dentry *, struct nameidata *); static const struct inode_operations openprom_inode_operations = { .lookup = openpromfs_lookup, }; static struct dentry *openpromfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct op_inode_info *ent_oi, *oi = OP_I(dir); struct device_node *dp, *child; struct property *prop; enum op_inode_type ent_type; union op_inode_data ent_data; const char *name; struct inode *inode; unsigned int ino; int len; BUG_ON(oi->type != op_inode_node); dp = oi->u.node; name = dentry->d_name.name; len = 
dentry->d_name.len; mutex_lock(&op_mutex); child = dp->child; while (child) { int n = strlen(child->path_component_name); if (len == n && !strncmp(child->path_component_name, name, len)) { ent_type = op_inode_node; ent_data.node = child; ino = child->unique_id; goto found; } child = child->sibling; } prop = dp->properties; while (prop) { int n = strlen(prop->name); if (len == n && !strncmp(prop->name, name, len)) { ent_type = op_inode_prop; ent_data.prop = prop; ino = prop->unique_id; goto found; } prop = prop->next; } mutex_unlock(&op_mutex); return ERR_PTR(-ENOENT); found: inode = openprom_iget(dir->i_sb, ino); mutex_unlock(&op_mutex); if (IS_ERR(inode)) return ERR_CAST(inode); ent_oi = OP_I(inode); ent_oi->type = ent_type; ent_oi->u = ent_data; switch (ent_type) { case op_inode_node: inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO; inode->i_op = &openprom_inode_operations; inode->i_fop = &openprom_operations; inode->i_nlink = 2; break; case op_inode_prop: if (!strcmp(dp->name, "options") && (len == 17) && !strncmp (name, "security-password", 17)) inode->i_mode = S_IFREG | S_IRUSR | S_IWUSR; else inode->i_mode = S_IFREG | S_IRUGO; inode->i_fop = &openpromfs_prop_ops; inode->i_nlink = 1; inode->i_size = ent_oi->u.prop->length; break; } inode->i_gid = 0; inode->i_uid = 0; d_add(dentry, inode); return NULL; } static int openpromfs_readdir(struct file * filp, void * dirent, filldir_t filldir) { struct inode *inode = filp->f_path.dentry->d_inode; struct op_inode_info *oi = OP_I(inode); struct device_node *dp = oi->u.node; struct device_node *child; struct property *prop; unsigned int ino; int i; mutex_lock(&op_mutex); ino = inode->i_ino; i = filp->f_pos; switch (i) { case 0: if (filldir(dirent, ".", 1, i, ino, DT_DIR) < 0) goto out; i++; filp->f_pos++; /* fall thru */ case 1: if (filldir(dirent, "..", 2, i, (dp->parent == NULL ? OPENPROM_ROOT_INO : dp->parent->unique_id), DT_DIR) < 0) goto out; i++; filp->f_pos++; /* fall thru */ default: i -= 2; /* First, the children nodes as directories. */ child = dp->child; while (i && child) { child = child->sibling; i--; } while (child) { if (filldir(dirent, child->path_component_name, strlen(child->path_component_name), filp->f_pos, child->unique_id, DT_DIR) < 0) goto out; filp->f_pos++; child = child->sibling; } /* Next, the properties as files. 
*/ prop = dp->properties; while (i && prop) { prop = prop->next; i--; } while (prop) { if (filldir(dirent, prop->name, strlen(prop->name), filp->f_pos, prop->unique_id, DT_REG) < 0) goto out; filp->f_pos++; prop = prop->next; } } out: mutex_unlock(&op_mutex); return 0; } static struct kmem_cache *op_inode_cachep; static struct inode *openprom_alloc_inode(struct super_block *sb) { struct op_inode_info *oi; oi = kmem_cache_alloc(op_inode_cachep, GFP_KERNEL); if (!oi) return NULL; return &oi->vfs_inode; } static void openprom_destroy_inode(struct inode *inode) { kmem_cache_free(op_inode_cachep, OP_I(inode)); } static struct inode *openprom_iget(struct super_block *sb, ino_t ino) { struct inode *inode; inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (inode->i_state & I_NEW) { inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; if (inode->i_ino == OPENPROM_ROOT_INO) { inode->i_op = &openprom_inode_operations; inode->i_fop = &openprom_operations; inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO; } unlock_new_inode(inode); } return inode; } static int openprom_remount(struct super_block *sb, int *flags, char *data) { *flags |= MS_NOATIME; return 0; } static const struct super_operations openprom_sops = { .alloc_inode = openprom_alloc_inode, .destroy_inode = openprom_destroy_inode, .statfs = simple_statfs, .remount_fs = openprom_remount, }; static int openprom_fill_super(struct super_block *s, void *data, int silent) { struct inode *root_inode; struct op_inode_info *oi; int ret; s->s_flags |= MS_NOATIME; s->s_blocksize = 1024; s->s_blocksize_bits = 10; s->s_magic = OPENPROM_SUPER_MAGIC; s->s_op = &openprom_sops; s->s_time_gran = 1; root_inode = openprom_iget(s, OPENPROM_ROOT_INO); if (IS_ERR(root_inode)) { ret = PTR_ERR(root_inode); goto out_no_root; } oi = OP_I(root_inode); oi->type = op_inode_node; oi->u.node = of_find_node_by_path("/"); s->s_root = d_alloc_root(root_inode); if (!s->s_root) goto out_no_root_dentry; return 0; out_no_root_dentry: iput(root_inode); ret = -ENOMEM; out_no_root: printk("openprom_fill_super: get root inode failed\n"); return ret; } static int openprom_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, struct vfsmount *mnt) { return get_sb_single(fs_type, flags, data, openprom_fill_super, mnt); } static struct file_system_type openprom_fs_type = { .owner = THIS_MODULE, .name = "openpromfs", .get_sb = openprom_get_sb, .kill_sb = kill_anon_super, }; static void op_inode_init_once(void *data) { struct op_inode_info *oi = (struct op_inode_info *) data; inode_init_once(&oi->vfs_inode); } static int __init init_openprom_fs(void) { int err; op_inode_cachep = kmem_cache_create("op_inode_cache", sizeof(struct op_inode_info), 0, (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD), op_inode_init_once); if (!op_inode_cachep) return -ENOMEM; err = register_filesystem(&openprom_fs_type); if (err) kmem_cache_destroy(op_inode_cachep); return err; } static void __exit exit_openprom_fs(void) { unregister_filesystem(&openprom_fs_type); kmem_cache_destroy(op_inode_cachep); } module_init(init_openprom_fs) module_exit(exit_openprom_fs) MODULE_LICENSE("GPL");
rockly703/linux-2.6.28-tiny6410
fs/openpromfs/inode.c
C
gpl-2.0
9,746
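openpromfs prints each property through a seq_file that yields exactly one record: property_start() hands back a token only on the first call (*pos == 0) and property_next() always returns NULL, so ->show() runs once and emits the whole value. A stripped-down sketch of that one-shot iterator shape, assuming kernel context; the one_shot_* names are illustrative:

#include <linux/seq_file.h>

/* The opener stashes its data in m->private, as property_open() does. */
static void *one_shot_start(struct seq_file *m, loff_t *pos)
{
	return *pos == 0 ? pos : NULL;	/* non-NULL token on first pass only */
}

static void *one_shot_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;	/* advance so a restarted read sees EOF */
	return NULL;	/* there is never a second record */
}

static void one_shot_stop(struct seq_file *m, void *v)
{
	/* Nothing to release. */
}

static int one_shot_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", (char *)m->private);
	return 0;
}

static const struct seq_operations one_shot_ops = {
	.start	= one_shot_start,
	.next	= one_shot_next,
	.stop	= one_shot_stop,
	.show	= one_shot_show,
};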
/* sunbmac.c: Driver for Sparc BigMAC 100baseT ethernet adapters. * * Copyright (C) 1997, 1998, 1999, 2003, 2008 David S. Miller (davem@davemloft.net) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/crc32.h> #include <linux/errno.h> #include <linux/ethtool.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/bitops.h> #include <linux/dma-mapping.h> #include <linux/of.h> #include <linux/of_device.h> #include <asm/auxio.h> #include <asm/byteorder.h> #include <asm/dma.h> #include <asm/idprom.h> #include <asm/io.h> #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/pgtable.h> #include <asm/system.h> #include "sunbmac.h" #define DRV_NAME "sunbmac" #define DRV_VERSION "2.1" #define DRV_RELDATE "August 26, 2008" #define DRV_AUTHOR "David S. Miller (davem@davemloft.net)" static char version[] = DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; MODULE_VERSION(DRV_VERSION); MODULE_AUTHOR(DRV_AUTHOR); MODULE_DESCRIPTION("Sun BigMAC 100baseT ethernet driver"); MODULE_LICENSE("GPL"); #undef DEBUG_PROBE #undef DEBUG_TX #undef DEBUG_IRQ #ifdef DEBUG_PROBE #define DP(x) printk x #else #define DP(x) #endif #ifdef DEBUG_TX #define DTX(x) printk x #else #define DTX(x) #endif #ifdef DEBUG_IRQ #define DIRQ(x) printk x #else #define DIRQ(x) #endif #define DEFAULT_JAMSIZE 4 /* Toe jam */ #define QEC_RESET_TRIES 200 static int qec_global_reset(void __iomem *gregs) { int tries = QEC_RESET_TRIES; sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL); while (--tries) { if (sbus_readl(gregs + GLOB_CTRL) & GLOB_CTRL_RESET) { udelay(20); continue; } break; } if (tries) return 0; printk(KERN_ERR "BigMAC: Cannot reset the QEC.\n"); return -1; } static void qec_init(struct bigmac *bp) { struct of_device *qec_op = bp->qec_op; void __iomem *gregs = bp->gregs; u8 bsizes = bp->bigmac_bursts; u32 regval; /* 64byte bursts do not work at the moment, do * not even try to enable them. -DaveM */ if (bsizes & DMA_BURST32) regval = GLOB_CTRL_B32; else regval = GLOB_CTRL_B16; sbus_writel(regval | GLOB_CTRL_BMODE, gregs + GLOB_CTRL); sbus_writel(GLOB_PSIZE_2048, gregs + GLOB_PSIZE); /* All of memsize is given to bigmac. */ sbus_writel(resource_size(&qec_op->resource[1]), gregs + GLOB_MSIZE); /* Half to the transmitter, half to the receiver. */ sbus_writel(resource_size(&qec_op->resource[1]) >> 1, gregs + GLOB_TSIZE); sbus_writel(resource_size(&qec_op->resource[1]) >> 1, gregs + GLOB_RSIZE); } #define TX_RESET_TRIES 32 #define RX_RESET_TRIES 32 static void bigmac_tx_reset(void __iomem *bregs) { int tries = TX_RESET_TRIES; sbus_writel(0, bregs + BMAC_TXCFG); /* The fifo threshold bit is read-only and does * not clear. 
-DaveM */ while ((sbus_readl(bregs + BMAC_TXCFG) & ~(BIGMAC_TXCFG_FIFO)) != 0 && --tries != 0) udelay(20); if (!tries) { printk(KERN_ERR "BIGMAC: Transmitter will not reset.\n"); printk(KERN_ERR "BIGMAC: tx_cfg is %08x\n", sbus_readl(bregs + BMAC_TXCFG)); } } static void bigmac_rx_reset(void __iomem *bregs) { int tries = RX_RESET_TRIES; sbus_writel(0, bregs + BMAC_RXCFG); while (sbus_readl(bregs + BMAC_RXCFG) && --tries) udelay(20); if (!tries) { printk(KERN_ERR "BIGMAC: Receiver will not reset.\n"); printk(KERN_ERR "BIGMAC: rx_cfg is %08x\n", sbus_readl(bregs + BMAC_RXCFG)); } } /* Reset the transmitter and receiver. */ static void bigmac_stop(struct bigmac *bp) { bigmac_tx_reset(bp->bregs); bigmac_rx_reset(bp->bregs); } static void bigmac_get_counters(struct bigmac *bp, void __iomem *bregs) { struct net_device_stats *stats = &bp->enet_stats; stats->rx_crc_errors += sbus_readl(bregs + BMAC_RCRCECTR); sbus_writel(0, bregs + BMAC_RCRCECTR); stats->rx_frame_errors += sbus_readl(bregs + BMAC_UNALECTR); sbus_writel(0, bregs + BMAC_UNALECTR); stats->rx_length_errors += sbus_readl(bregs + BMAC_GLECTR); sbus_writel(0, bregs + BMAC_GLECTR); stats->tx_aborted_errors += sbus_readl(bregs + BMAC_EXCTR); stats->collisions += (sbus_readl(bregs + BMAC_EXCTR) + sbus_readl(bregs + BMAC_LTCTR)); sbus_writel(0, bregs + BMAC_EXCTR); sbus_writel(0, bregs + BMAC_LTCTR); } static void bigmac_clean_rings(struct bigmac *bp) { int i; for (i = 0; i < RX_RING_SIZE; i++) { if (bp->rx_skbs[i] != NULL) { dev_kfree_skb_any(bp->rx_skbs[i]); bp->rx_skbs[i] = NULL; } } for (i = 0; i < TX_RING_SIZE; i++) { if (bp->tx_skbs[i] != NULL) { dev_kfree_skb_any(bp->tx_skbs[i]); bp->tx_skbs[i] = NULL; } } } static void bigmac_init_rings(struct bigmac *bp, int from_irq) { struct bmac_init_block *bb = bp->bmac_block; struct net_device *dev = bp->dev; int i; gfp_t gfp_flags = GFP_KERNEL; if (from_irq || in_interrupt()) gfp_flags = GFP_ATOMIC; bp->rx_new = bp->rx_old = bp->tx_new = bp->tx_old = 0; /* Free any skippy bufs left around in the rings. */ bigmac_clean_rings(bp); /* Now get new skbufs for the receive ring. */ for (i = 0; i < RX_RING_SIZE; i++) { struct sk_buff *skb; skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, gfp_flags); if (!skb) continue; bp->rx_skbs[i] = skb; skb->dev = dev; /* Because we reserve afterwards. 
*/ skb_put(skb, ETH_FRAME_LEN); skb_reserve(skb, 34); bb->be_rxd[i].rx_addr = dma_map_single(&bp->bigmac_op->dev, skb->data, RX_BUF_ALLOC_SIZE - 34, DMA_FROM_DEVICE); bb->be_rxd[i].rx_flags = (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH)); } for (i = 0; i < TX_RING_SIZE; i++) bb->be_txd[i].tx_flags = bb->be_txd[i].tx_addr = 0; } #define MGMT_CLKON (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB|MGMT_PAL_DCLOCK) #define MGMT_CLKOFF (MGMT_PAL_INT_MDIO|MGMT_PAL_EXT_MDIO|MGMT_PAL_OENAB) static void idle_transceiver(void __iomem *tregs) { int i = 20; while (i--) { sbus_writel(MGMT_CLKOFF, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); sbus_writel(MGMT_CLKON, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); } } static void write_tcvr_bit(struct bigmac *bp, void __iomem *tregs, int bit) { if (bp->tcvr_type == internal) { bit = (bit & 1) << 3; sbus_writel(bit | (MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO), tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); sbus_writel(bit | MGMT_PAL_OENAB | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); } else if (bp->tcvr_type == external) { bit = (bit & 1) << 2; sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); sbus_writel(bit | MGMT_PAL_INT_MDIO | MGMT_PAL_OENAB | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); } else { printk(KERN_ERR "write_tcvr_bit: No transceiver type known!\n"); } } static int read_tcvr_bit(struct bigmac *bp, void __iomem *tregs) { int retval = 0; if (bp->tcvr_type == internal) { sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3; } else if (bp->tcvr_type == external) { sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2; } else { printk(KERN_ERR "read_tcvr_bit: No transceiver type known!\n"); } return retval; } static int read_tcvr_bit2(struct bigmac *bp, void __iomem *tregs) { int retval = 0; if (bp->tcvr_type == internal) { sbus_writel(MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_INT_MDIO) >> 3; sbus_writel(MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); } else if (bp->tcvr_type == external) { sbus_writel(MGMT_PAL_INT_MDIO, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); retval = (sbus_readl(tregs + TCVR_MPAL) & MGMT_PAL_EXT_MDIO) >> 2; sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); } else { printk(KERN_ERR "read_tcvr_bit2: No transceiver type known!\n"); } return retval; } static void put_tcvr_byte(struct bigmac *bp, void __iomem *tregs, unsigned int byte) { int shift = 4; do { write_tcvr_bit(bp, tregs, ((byte >> shift) & 1)); shift -= 1; } while (shift >= 0); } static void bigmac_tcvr_write(struct bigmac *bp, void __iomem *tregs, int reg, unsigned short val) { int shift; reg &= 0xff; val &= 0xffff; switch(bp->tcvr_type) { case internal: case external: break; default: printk(KERN_ERR "bigmac_tcvr_write: Whoops, no known transceiver type.\n"); return; }; idle_transceiver(tregs); write_tcvr_bit(bp, tregs, 0); write_tcvr_bit(bp, tregs, 1); write_tcvr_bit(bp, tregs, 
0); write_tcvr_bit(bp, tregs, 1); put_tcvr_byte(bp, tregs, ((bp->tcvr_type == internal) ? BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL)); put_tcvr_byte(bp, tregs, reg); write_tcvr_bit(bp, tregs, 1); write_tcvr_bit(bp, tregs, 0); shift = 15; do { write_tcvr_bit(bp, tregs, (val >> shift) & 1); shift -= 1; } while (shift >= 0); } static unsigned short bigmac_tcvr_read(struct bigmac *bp, void __iomem *tregs, int reg) { unsigned short retval = 0; reg &= 0xff; switch(bp->tcvr_type) { case internal: case external: break; default: printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n"); return 0xffff; }; idle_transceiver(tregs); write_tcvr_bit(bp, tregs, 0); write_tcvr_bit(bp, tregs, 1); write_tcvr_bit(bp, tregs, 1); write_tcvr_bit(bp, tregs, 0); put_tcvr_byte(bp, tregs, ((bp->tcvr_type == internal) ? BIGMAC_PHY_INTERNAL : BIGMAC_PHY_EXTERNAL)); put_tcvr_byte(bp, tregs, reg); if (bp->tcvr_type == external) { int shift = 15; (void) read_tcvr_bit2(bp, tregs); (void) read_tcvr_bit2(bp, tregs); do { int tmp; tmp = read_tcvr_bit2(bp, tregs); retval |= ((tmp & 1) << shift); shift -= 1; } while (shift >= 0); (void) read_tcvr_bit2(bp, tregs); (void) read_tcvr_bit2(bp, tregs); (void) read_tcvr_bit2(bp, tregs); } else { int shift = 15; (void) read_tcvr_bit(bp, tregs); (void) read_tcvr_bit(bp, tregs); do { int tmp; tmp = read_tcvr_bit(bp, tregs); retval |= ((tmp & 1) << shift); shift -= 1; } while (shift >= 0); (void) read_tcvr_bit(bp, tregs); (void) read_tcvr_bit(bp, tregs); (void) read_tcvr_bit(bp, tregs); } return retval; } static void bigmac_tcvr_init(struct bigmac *bp) { void __iomem *tregs = bp->tregs; u32 mpal; idle_transceiver(tregs); sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO | MGMT_PAL_DCLOCK, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); /* Only the bit for the present transceiver (internal or * external) will stick, set them both and see what stays. */ sbus_writel(MGMT_PAL_INT_MDIO | MGMT_PAL_EXT_MDIO, tregs + TCVR_MPAL); sbus_readl(tregs + TCVR_MPAL); udelay(20); mpal = sbus_readl(tregs + TCVR_MPAL); if (mpal & MGMT_PAL_EXT_MDIO) { bp->tcvr_type = external; sbus_writel(~(TCVR_PAL_EXTLBACK | TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE), tregs + TCVR_TPAL); sbus_readl(tregs + TCVR_TPAL); } else if (mpal & MGMT_PAL_INT_MDIO) { bp->tcvr_type = internal; sbus_writel(~(TCVR_PAL_SERIAL | TCVR_PAL_EXTLBACK | TCVR_PAL_MSENSE | TCVR_PAL_LTENABLE), tregs + TCVR_TPAL); sbus_readl(tregs + TCVR_TPAL); } else { printk(KERN_ERR "BIGMAC: AIEEE, neither internal nor " "external MDIO available!\n"); printk(KERN_ERR "BIGMAC: mgmt_pal[%08x] tcvr_pal[%08x]\n", sbus_readl(tregs + TCVR_MPAL), sbus_readl(tregs + TCVR_TPAL)); } } static int bigmac_init_hw(struct bigmac *, int); static int try_next_permutation(struct bigmac *bp, void __iomem *tregs) { if (bp->sw_bmcr & BMCR_SPEED100) { int timeout; /* Reset the PHY. */ bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK); bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr); bp->sw_bmcr = (BMCR_RESET); bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr); timeout = 64; while (--timeout) { bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR); if ((bp->sw_bmcr & BMCR_RESET) == 0) break; udelay(20); } if (timeout == 0) printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name); bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR); /* Now we try 10baseT. */ bp->sw_bmcr &= ~(BMCR_SPEED100); bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr); return 0; } /* We've tried them all. 
*/ return -1; } static void bigmac_timer(unsigned long data) { struct bigmac *bp = (struct bigmac *) data; void __iomem *tregs = bp->tregs; int restart_timer = 0; bp->timer_ticks++; if (bp->timer_state == ltrywait) { bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMSR); bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR); if (bp->sw_bmsr & BMSR_LSTATUS) { printk(KERN_INFO "%s: Link is now up at %s.\n", bp->dev->name, (bp->sw_bmcr & BMCR_SPEED100) ? "100baseT" : "10baseT"); bp->timer_state = asleep; restart_timer = 0; } else { if (bp->timer_ticks >= 4) { int ret; ret = try_next_permutation(bp, tregs); if (ret == -1) { printk(KERN_ERR "%s: Link down, cable problem?\n", bp->dev->name); ret = bigmac_init_hw(bp, 0); if (ret) { printk(KERN_ERR "%s: Error, cannot re-init the " "BigMAC.\n", bp->dev->name); } return; } bp->timer_ticks = 0; restart_timer = 1; } else { restart_timer = 1; } } } else { /* Can't happens.... */ printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n", bp->dev->name); restart_timer = 0; bp->timer_ticks = 0; bp->timer_state = asleep; /* foo on you */ } if (restart_timer != 0) { bp->bigmac_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */ add_timer(&bp->bigmac_timer); } } /* Well, really we just force the chip into 100baseT then * 10baseT, each time checking for a link status. */ static void bigmac_begin_auto_negotiation(struct bigmac *bp) { void __iomem *tregs = bp->tregs; int timeout; /* Grab new software copies of PHY registers. */ bp->sw_bmsr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMSR); bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR); /* Reset the PHY. */ bp->sw_bmcr = (BMCR_ISOLATE | BMCR_PDOWN | BMCR_LOOPBACK); bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr); bp->sw_bmcr = (BMCR_RESET); bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr); timeout = 64; while (--timeout) { bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR); if ((bp->sw_bmcr & BMCR_RESET) == 0) break; udelay(20); } if (timeout == 0) printk(KERN_ERR "%s: PHY reset failed.\n", bp->dev->name); bp->sw_bmcr = bigmac_tcvr_read(bp, tregs, BIGMAC_BMCR); /* First we try 100baseT. */ bp->sw_bmcr |= BMCR_SPEED100; bigmac_tcvr_write(bp, tregs, BIGMAC_BMCR, bp->sw_bmcr); bp->timer_state = ltrywait; bp->timer_ticks = 0; bp->bigmac_timer.expires = jiffies + (12 * HZ) / 10; bp->bigmac_timer.data = (unsigned long) bp; bp->bigmac_timer.function = &bigmac_timer; add_timer(&bp->bigmac_timer); } static int bigmac_init_hw(struct bigmac *bp, int from_irq) { void __iomem *gregs = bp->gregs; void __iomem *cregs = bp->creg; void __iomem *bregs = bp->bregs; unsigned char *e = &bp->dev->dev_addr[0]; /* Latch current counters into statistics. */ bigmac_get_counters(bp, bregs); /* Reset QEC. */ qec_global_reset(gregs); /* Init QEC. */ qec_init(bp); /* Alloc and reset the tx/rx descriptor chains. */ bigmac_init_rings(bp, from_irq); /* Initialize the PHY. */ bigmac_tcvr_init(bp); /* Stop transmitter and receiver. */ bigmac_stop(bp); /* Set hardware ethernet address. */ sbus_writel(((e[4] << 8) | e[5]), bregs + BMAC_MACADDR2); sbus_writel(((e[2] << 8) | e[3]), bregs + BMAC_MACADDR1); sbus_writel(((e[0] << 8) | e[1]), bregs + BMAC_MACADDR0); /* Clear the hash table until mc upload occurs. */ sbus_writel(0, bregs + BMAC_HTABLE3); sbus_writel(0, bregs + BMAC_HTABLE2); sbus_writel(0, bregs + BMAC_HTABLE1); sbus_writel(0, bregs + BMAC_HTABLE0); /* Enable Big Mac hash table filter. 
*/ sbus_writel(BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_FIFO, bregs + BMAC_RXCFG); udelay(20); /* Ok, configure the Big Mac transmitter. */ sbus_writel(BIGMAC_TXCFG_FIFO, bregs + BMAC_TXCFG); /* The HME docs recommend to use the 10LSB of our MAC here. */ sbus_writel(((e[5] | e[4] << 8) & 0x3ff), bregs + BMAC_RSEED); /* Enable the output drivers no matter what. */ sbus_writel(BIGMAC_XCFG_ODENABLE | BIGMAC_XCFG_RESV, bregs + BMAC_XIFCFG); /* Tell the QEC where the ring descriptors are. */ sbus_writel(bp->bblock_dvma + bib_offset(be_rxd, 0), cregs + CREG_RXDS); sbus_writel(bp->bblock_dvma + bib_offset(be_txd, 0), cregs + CREG_TXDS); /* Setup the FIFO pointers into QEC local memory. */ sbus_writel(0, cregs + CREG_RXRBUFPTR); sbus_writel(0, cregs + CREG_RXWBUFPTR); sbus_writel(sbus_readl(gregs + GLOB_RSIZE), cregs + CREG_TXRBUFPTR); sbus_writel(sbus_readl(gregs + GLOB_RSIZE), cregs + CREG_TXWBUFPTR); /* Tell bigmac what interrupts we don't want to hear about. */ sbus_writel(BIGMAC_IMASK_GOTFRAME | BIGMAC_IMASK_SENTFRAME, bregs + BMAC_IMASK); /* Enable the various other irq's. */ sbus_writel(0, cregs + CREG_RIMASK); sbus_writel(0, cregs + CREG_TIMASK); sbus_writel(0, cregs + CREG_QMASK); sbus_writel(0, cregs + CREG_BMASK); /* Set jam size to a reasonable default. */ sbus_writel(DEFAULT_JAMSIZE, bregs + BMAC_JSIZE); /* Clear collision counter. */ sbus_writel(0, cregs + CREG_CCNT); /* Enable transmitter and receiver. */ sbus_writel(sbus_readl(bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE, bregs + BMAC_TXCFG); sbus_writel(sbus_readl(bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE, bregs + BMAC_RXCFG); /* Ok, start detecting link speed/duplex. */ bigmac_begin_auto_negotiation(bp); /* Success. */ return 0; } /* Error interrupts get sent here. */ static void bigmac_is_medium_rare(struct bigmac *bp, u32 qec_status, u32 bmac_status) { printk(KERN_ERR "bigmac_is_medium_rare: "); if (qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) { if (qec_status & GLOB_STAT_ER) printk("QEC_ERROR, "); if (qec_status & GLOB_STAT_BM) printk("QEC_BMAC_ERROR, "); } if (bmac_status & CREG_STAT_ERRORS) { if (bmac_status & CREG_STAT_BERROR) printk("BMAC_ERROR, "); if (bmac_status & CREG_STAT_TXDERROR) printk("TXD_ERROR, "); if (bmac_status & CREG_STAT_TXLERR) printk("TX_LATE_ERROR, "); if (bmac_status & CREG_STAT_TXPERR) printk("TX_PARITY_ERROR, "); if (bmac_status & CREG_STAT_TXSERR) printk("TX_SBUS_ERROR, "); if (bmac_status & CREG_STAT_RXDROP) printk("RX_DROP_ERROR, "); if (bmac_status & CREG_STAT_RXSMALL) printk("RX_SMALL_ERROR, "); if (bmac_status & CREG_STAT_RXLERR) printk("RX_LATE_ERROR, "); if (bmac_status & CREG_STAT_RXPERR) printk("RX_PARITY_ERROR, "); if (bmac_status & CREG_STAT_RXSERR) printk("RX_SBUS_ERROR, "); } printk(" RESET\n"); bigmac_init_hw(bp, 1); } /* BigMAC transmit complete service routines. 
*/ static void bigmac_tx(struct bigmac *bp) { struct be_txd *txbase = &bp->bmac_block->be_txd[0]; struct net_device *dev = bp->dev; int elem; spin_lock(&bp->lock); elem = bp->tx_old; DTX(("bigmac_tx: tx_old[%d] ", elem)); while (elem != bp->tx_new) { struct sk_buff *skb; struct be_txd *this = &txbase[elem]; DTX(("this(%p) [flags(%08x)addr(%08x)]", this, this->tx_flags, this->tx_addr)); if (this->tx_flags & TXD_OWN) break; skb = bp->tx_skbs[elem]; bp->enet_stats.tx_packets++; bp->enet_stats.tx_bytes += skb->len; dma_unmap_single(&bp->bigmac_op->dev, this->tx_addr, skb->len, DMA_TO_DEVICE); DTX(("skb(%p) ", skb)); bp->tx_skbs[elem] = NULL; dev_kfree_skb_irq(skb); elem = NEXT_TX(elem); } DTX((" DONE, tx_old=%d\n", elem)); bp->tx_old = elem; if (netif_queue_stopped(dev) && TX_BUFFS_AVAIL(bp) > 0) netif_wake_queue(bp->dev); spin_unlock(&bp->lock); } /* BigMAC receive complete service routines. */ static void bigmac_rx(struct bigmac *bp) { struct be_rxd *rxbase = &bp->bmac_block->be_rxd[0]; struct be_rxd *this; int elem = bp->rx_new, drops = 0; u32 flags; this = &rxbase[elem]; while (!((flags = this->rx_flags) & RXD_OWN)) { struct sk_buff *skb; int len = (flags & RXD_LENGTH); /* FCS not included */ /* Check for errors. */ if (len < ETH_ZLEN) { bp->enet_stats.rx_errors++; bp->enet_stats.rx_length_errors++; drop_it: /* Return it to the BigMAC. */ bp->enet_stats.rx_dropped++; this->rx_flags = (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH)); goto next; } skb = bp->rx_skbs[elem]; if (len > RX_COPY_THRESHOLD) { struct sk_buff *new_skb; /* Now refill the entry, if we can. */ new_skb = big_mac_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); if (new_skb == NULL) { drops++; goto drop_it; } dma_unmap_single(&bp->bigmac_op->dev, this->rx_addr, RX_BUF_ALLOC_SIZE - 34, DMA_FROM_DEVICE); bp->rx_skbs[elem] = new_skb; new_skb->dev = bp->dev; skb_put(new_skb, ETH_FRAME_LEN); skb_reserve(new_skb, 34); this->rx_addr = dma_map_single(&bp->bigmac_op->dev, new_skb->data, RX_BUF_ALLOC_SIZE - 34, DMA_FROM_DEVICE); this->rx_flags = (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH)); /* Trim the original skb for the netif. */ skb_trim(skb, len); } else { struct sk_buff *copy_skb = dev_alloc_skb(len + 2); if (copy_skb == NULL) { drops++; goto drop_it; } skb_reserve(copy_skb, 2); skb_put(copy_skb, len); dma_sync_single_for_cpu(&bp->bigmac_op->dev, this->rx_addr, len, DMA_FROM_DEVICE); skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len); dma_sync_single_for_device(&bp->bigmac_op->dev, this->rx_addr, len, DMA_FROM_DEVICE); /* Reuse original ring buffer. */ this->rx_flags = (RXD_OWN | ((RX_BUF_ALLOC_SIZE - 34) & RXD_LENGTH)); skb = copy_skb; } /* No checksums done by the BigMAC ;-( */ skb->protocol = eth_type_trans(skb, bp->dev); netif_rx(skb); bp->dev->last_rx = jiffies; bp->enet_stats.rx_packets++; bp->enet_stats.rx_bytes += len; next: elem = NEXT_RX(elem); this = &rxbase[elem]; } bp->rx_new = elem; if (drops) printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", bp->dev->name); } static irqreturn_t bigmac_interrupt(int irq, void *dev_id) { struct bigmac *bp = (struct bigmac *) dev_id; u32 qec_status, bmac_status; DIRQ(("bigmac_interrupt: ")); /* Latch status registers now. 
*/ bmac_status = sbus_readl(bp->creg + CREG_STAT); qec_status = sbus_readl(bp->gregs + GLOB_STAT); DIRQ(("qec_status=%08x bmac_status=%08x\n", qec_status, bmac_status)); if ((qec_status & (GLOB_STAT_ER | GLOB_STAT_BM)) || (bmac_status & CREG_STAT_ERRORS)) bigmac_is_medium_rare(bp, qec_status, bmac_status); if (bmac_status & CREG_STAT_TXIRQ) bigmac_tx(bp); if (bmac_status & CREG_STAT_RXIRQ) bigmac_rx(bp); return IRQ_HANDLED; } static int bigmac_open(struct net_device *dev) { struct bigmac *bp = (struct bigmac *) dev->priv; int ret; ret = request_irq(dev->irq, &bigmac_interrupt, IRQF_SHARED, dev->name, bp); if (ret) { printk(KERN_ERR "BIGMAC: Can't order irq %d to go.\n", dev->irq); return ret; } init_timer(&bp->bigmac_timer); ret = bigmac_init_hw(bp, 0); if (ret) free_irq(dev->irq, bp); return ret; } static int bigmac_close(struct net_device *dev) { struct bigmac *bp = (struct bigmac *) dev->priv; del_timer(&bp->bigmac_timer); bp->timer_state = asleep; bp->timer_ticks = 0; bigmac_stop(bp); bigmac_clean_rings(bp); free_irq(dev->irq, bp); return 0; } static void bigmac_tx_timeout(struct net_device *dev) { struct bigmac *bp = (struct bigmac *) dev->priv; bigmac_init_hw(bp, 0); netif_wake_queue(dev); } /* Put a packet on the wire. */ static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bigmac *bp = (struct bigmac *) dev->priv; int len, entry; u32 mapping; len = skb->len; mapping = dma_map_single(&bp->bigmac_op->dev, skb->data, len, DMA_TO_DEVICE); /* Avoid a race... */ spin_lock_irq(&bp->lock); entry = bp->tx_new; DTX(("bigmac_start_xmit: len(%d) entry(%d)\n", len, entry)); bp->bmac_block->be_txd[entry].tx_flags = TXD_UPDATE; bp->tx_skbs[entry] = skb; bp->bmac_block->be_txd[entry].tx_addr = mapping; bp->bmac_block->be_txd[entry].tx_flags = (TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH)); bp->tx_new = NEXT_TX(entry); if (TX_BUFFS_AVAIL(bp) <= 0) netif_stop_queue(dev); spin_unlock_irq(&bp->lock); /* Get it going. */ sbus_writel(CREG_CTRL_TWAKEUP, bp->creg + CREG_CTRL); dev->trans_start = jiffies; return 0; } static struct net_device_stats *bigmac_get_stats(struct net_device *dev) { struct bigmac *bp = (struct bigmac *) dev->priv; bigmac_get_counters(bp, bp->bregs); return &bp->enet_stats; } static void bigmac_set_multicast(struct net_device *dev) { struct bigmac *bp = (struct bigmac *) dev->priv; void __iomem *bregs = bp->bregs; struct dev_mc_list *dmi = dev->mc_list; char *addrs; int i; u32 tmp, crc; /* Disable the receiver. The bit self-clears when * the operation is complete. 
*/ tmp = sbus_readl(bregs + BMAC_RXCFG); tmp &= ~(BIGMAC_RXCFG_ENABLE); sbus_writel(tmp, bregs + BMAC_RXCFG); while ((sbus_readl(bregs + BMAC_RXCFG) & BIGMAC_RXCFG_ENABLE) != 0) udelay(20); if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) { sbus_writel(0xffff, bregs + BMAC_HTABLE0); sbus_writel(0xffff, bregs + BMAC_HTABLE1); sbus_writel(0xffff, bregs + BMAC_HTABLE2); sbus_writel(0xffff, bregs + BMAC_HTABLE3); } else if (dev->flags & IFF_PROMISC) { tmp = sbus_readl(bregs + BMAC_RXCFG); tmp |= BIGMAC_RXCFG_PMISC; sbus_writel(tmp, bregs + BMAC_RXCFG); } else { u16 hash_table[4]; for (i = 0; i < 4; i++) hash_table[i] = 0; for (i = 0; i < dev->mc_count; i++) { addrs = dmi->dmi_addr; dmi = dmi->next; if (!(*addrs & 1)) continue; crc = ether_crc_le(6, addrs); crc >>= 26; hash_table[crc >> 4] |= 1 << (crc & 0xf); } sbus_writel(hash_table[0], bregs + BMAC_HTABLE0); sbus_writel(hash_table[1], bregs + BMAC_HTABLE1); sbus_writel(hash_table[2], bregs + BMAC_HTABLE2); sbus_writel(hash_table[3], bregs + BMAC_HTABLE3); } /* Re-enable the receiver. */ tmp = sbus_readl(bregs + BMAC_RXCFG); tmp |= BIGMAC_RXCFG_ENABLE; sbus_writel(tmp, bregs + BMAC_RXCFG); } /* Ethtool support... */ static void bigmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strcpy(info->driver, "sunbmac"); strcpy(info->version, "2.0"); } static u32 bigmac_get_link(struct net_device *dev) { struct bigmac *bp = dev->priv; spin_lock_irq(&bp->lock); bp->sw_bmsr = bigmac_tcvr_read(bp, bp->tregs, BIGMAC_BMSR); spin_unlock_irq(&bp->lock); return (bp->sw_bmsr & BMSR_LSTATUS); } static const struct ethtool_ops bigmac_ethtool_ops = { .get_drvinfo = bigmac_get_drvinfo, .get_link = bigmac_get_link, }; static int __devinit bigmac_ether_init(struct of_device *op, struct of_device *qec_op) { static int version_printed; struct net_device *dev; u8 bsizes, bsizes_more; DECLARE_MAC_BUF(mac); struct bigmac *bp; int i; /* Get a new device struct for this interface. */ dev = alloc_etherdev(sizeof(struct bigmac)); if (!dev) return -ENOMEM; if (version_printed++ == 0) printk(KERN_INFO "%s", version); for (i = 0; i < 6; i++) dev->dev_addr[i] = idprom->id_ethaddr[i]; /* Setup softc, with backpointers to QEC and BigMAC SBUS device structs. */ bp = netdev_priv(dev); bp->qec_op = qec_op; bp->bigmac_op = op; SET_NETDEV_DEV(dev, &op->dev); spin_lock_init(&bp->lock); /* Map in QEC global control registers. */ bp->gregs = of_ioremap(&qec_op->resource[0], 0, GLOB_REG_SIZE, "BigMAC QEC GLobal Regs"); if (!bp->gregs) { printk(KERN_ERR "BIGMAC: Cannot map QEC global registers.\n"); goto fail_and_cleanup; } /* Make sure QEC is in BigMAC mode. */ if ((sbus_readl(bp->gregs + GLOB_CTRL) & 0xf0000000) != GLOB_CTRL_BMODE) { printk(KERN_ERR "BigMAC: AIEEE, QEC is not in BigMAC mode!\n"); goto fail_and_cleanup; } /* Reset the QEC. */ if (qec_global_reset(bp->gregs)) goto fail_and_cleanup; /* Get supported SBUS burst sizes. */ bsizes = of_getintprop_default(qec_op->node, "burst-sizes", 0xff); bsizes_more = of_getintprop_default(qec_op->node, "burst-sizes", 0xff); bsizes &= 0xff; if (bsizes_more != 0xff) bsizes &= bsizes_more; if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 || (bsizes & DMA_BURST32) == 0) bsizes = (DMA_BURST32 - 1); bp->bigmac_bursts = bsizes; /* Perform QEC initialization. */ qec_init(bp); /* Map in the BigMAC channel registers. 
*/ bp->creg = of_ioremap(&op->resource[0], 0, CREG_REG_SIZE, "BigMAC QEC Channel Regs"); if (!bp->creg) { printk(KERN_ERR "BIGMAC: Cannot map QEC channel registers.\n"); goto fail_and_cleanup; } /* Map in the BigMAC control registers. */ bp->bregs = of_ioremap(&op->resource[1], 0, BMAC_REG_SIZE, "BigMAC Primary Regs"); if (!bp->bregs) { printk(KERN_ERR "BIGMAC: Cannot map BigMAC primary registers.\n"); goto fail_and_cleanup; } /* Map in the BigMAC transceiver registers, this is how you poke at * the BigMAC's PHY. */ bp->tregs = of_ioremap(&op->resource[2], 0, TCVR_REG_SIZE, "BigMAC Transceiver Regs"); if (!bp->tregs) { printk(KERN_ERR "BIGMAC: Cannot map BigMAC transceiver registers.\n"); goto fail_and_cleanup; } /* Stop the BigMAC. */ bigmac_stop(bp); /* Allocate transmit/receive descriptor DVMA block. */ bp->bmac_block = dma_alloc_coherent(&bp->bigmac_op->dev, PAGE_SIZE, &bp->bblock_dvma, GFP_ATOMIC); if (bp->bmac_block == NULL || bp->bblock_dvma == 0) { printk(KERN_ERR "BIGMAC: Cannot allocate consistent DMA.\n"); goto fail_and_cleanup; } /* Get the board revision of this BigMAC. */ bp->board_rev = of_getintprop_default(bp->bigmac_op->node, "board-version", 1); /* Init auto-negotiation timer state. */ init_timer(&bp->bigmac_timer); bp->timer_state = asleep; bp->timer_ticks = 0; /* Backlink to generic net device struct. */ bp->dev = dev; /* Set links to our BigMAC open and close routines. */ dev->open = &bigmac_open; dev->stop = &bigmac_close; dev->hard_start_xmit = &bigmac_start_xmit; dev->ethtool_ops = &bigmac_ethtool_ops; /* Set links to BigMAC statistic and multi-cast loading code. */ dev->get_stats = &bigmac_get_stats; dev->set_multicast_list = &bigmac_set_multicast; dev->tx_timeout = &bigmac_tx_timeout; dev->watchdog_timeo = 5*HZ; /* Finish net device registration. */ dev->irq = bp->bigmac_op->irqs[0]; dev->dma = 0; if (register_netdev(dev)) { printk(KERN_ERR "BIGMAC: Cannot register device.\n"); goto fail_and_cleanup; } dev_set_drvdata(&bp->bigmac_op->dev, bp); printk(KERN_INFO "%s: BigMAC 100baseT Ethernet %s\n", dev->name, print_mac(mac, dev->dev_addr)); return 0; fail_and_cleanup: /* Something went wrong, undo whatever we did so far. */ /* Free register mappings if any. */ if (bp->gregs) of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE); if (bp->creg) of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE); if (bp->bregs) of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE); if (bp->tregs) of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE); if (bp->bmac_block) dma_free_coherent(&bp->bigmac_op->dev, PAGE_SIZE, bp->bmac_block, bp->bblock_dvma); /* This also frees the co-located 'dev->priv' */ free_netdev(dev); return -ENODEV; } /* QEC can be the parent of either QuadEthernet or a BigMAC. We want * the latter. 
*/ static int __devinit bigmac_sbus_probe(struct of_device *op, const struct of_device_id *match) { struct device *parent = op->dev.parent; struct of_device *qec_op; qec_op = to_of_device(parent); return bigmac_ether_init(op, qec_op); } static int __devexit bigmac_sbus_remove(struct of_device *op) { struct bigmac *bp = dev_get_drvdata(&op->dev); struct device *parent = op->dev.parent; struct net_device *net_dev = bp->dev; struct of_device *qec_op; qec_op = to_of_device(parent); unregister_netdev(net_dev); of_iounmap(&qec_op->resource[0], bp->gregs, GLOB_REG_SIZE); of_iounmap(&op->resource[0], bp->creg, CREG_REG_SIZE); of_iounmap(&op->resource[1], bp->bregs, BMAC_REG_SIZE); of_iounmap(&op->resource[2], bp->tregs, TCVR_REG_SIZE); dma_free_coherent(&op->dev, PAGE_SIZE, bp->bmac_block, bp->bblock_dvma); free_netdev(net_dev); dev_set_drvdata(&op->dev, NULL); return 0; } static const struct of_device_id bigmac_sbus_match[] = { { .name = "be", }, {}, }; MODULE_DEVICE_TABLE(of, bigmac_sbus_match); static struct of_platform_driver bigmac_sbus_driver = { .name = "sunbmac", .match_table = bigmac_sbus_match, .probe = bigmac_sbus_probe, .remove = __devexit_p(bigmac_sbus_remove), }; static int __init bigmac_init(void) { return of_register_driver(&bigmac_sbus_driver, &of_bus_type); } static void __exit bigmac_exit(void) { of_unregister_driver(&bigmac_sbus_driver); } module_init(bigmac_init); module_exit(bigmac_exit);
ya-mouse/openwrt-linux-aspeed
drivers/net/sunbmac.c
C
gpl-2.0
34,041
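bigmac_set_multicast() above condenses every multicast address into one of 64 filter bits: ether_crc_le() runs over the six address bytes, the top six bits of the CRC (crc >> 26) select the bit, and of those six bits the upper two pick one of the four 16-bit HTABLE registers while the lower four pick the bit inside it. A userspace sketch of the same computation, assuming a bitwise CRC-32/LE equivalent to the kernel's ether_crc_le(); the HTABLE naming comes from the driver:

#include <stdint.h>
#include <stdio.h>

/* Bitwise little-endian CRC-32 (polynomial 0xedb88320), initial value ~0,
 * no final inversion - the same value the kernel's ether_crc_le() yields. */
static uint32_t ether_crc_le(int len, const unsigned char *p)
{
	uint32_t crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= p[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

int main(void)
{
	/* 01:00:5e:00:00:01 - the IPv4 all-hosts multicast group. */
	const unsigned char addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t crc = ether_crc_le(6, addr) >> 26;	/* keep the top 6 bits */

	/* Mirrors hash_table[crc >> 4] |= 1 << (crc & 0xf) in the driver. */
	printf("HTABLE%u bit %u\n", (unsigned)(crc >> 4), (unsigned)(crc & 0xf));
	return 0;
}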
<?php /************************************************************************************* * freeswitch.php * -------- * Author: James Rose (james.gs@stubbornroses.com) * Copyright: (c) 2006 Christian Lescuyer http://xtian.goelette.info * Release Version: 1.0.8.12 * Date Started: 2011/11/18 * * FreeSWITCH language file for GeSHi. * * This file is based on robots.php * * 2011/11/18 (1.0.0) * - First Release * ************************************************************************************* * * This file is part of GeSHi. * * GeSHi is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * GeSHi is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GeSHi; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * ************************************************************************************/ $language_data = array ( 'LANG_NAME' => 'FreeSWITCH', 'COMMENT_SINGLE' => array(1 => '#'), 'COMMENT_MULTI' => array(), 'COMMENT_REGEXP' => array(1 => "/^Comment:.*?$/m"), 'CASE_KEYWORDS' => GESHI_CAPS_NO_CHANGE, 'QUOTEMARKS' => array(), 'ESCAPE_CHAR' => '', 'KEYWORDS' => array( // 1 => array( // 'Disallow', 'Request-rate', 'Robot-version', // 'Sitemap', 'User-agent', 'Visit-time' // ) ), 'SYMBOLS' => array( // ':' ), 'CASE_SENSITIVE' => array( GESHI_COMMENTS => false ), //order is important. regexes will overwrite most things.... 
'STYLES' => array( 'KEYWORDS' => array( // 1 => 'color: #FF0000; font-weight: bold;',//red ), 'COMMENTS' => array( 1 => 'color: #808080; font-style: italic;', ), 'ESCAPE_CHAR' => array( 0 => 'color: #000099; font-weight: bold;' ), 'BRACKETS' => array( // 0 => 'color: #66cc66;' ), 'STRINGS' => array( // 0 => 'color: #ff0000;' ), 'NUMBERS' => array( // 0 => 'color: #cc66cc;' ), 'METHODS' => array( ), 'SYMBOLS' => array( // 0 => 'color: #66cc66;' ), 'REGEXPS' => array( 0 => 'color: firebrick; font-weight: bold;', 1 => 'color: cornflowerblue; font-weight: bold;', 2 => 'color: goldenrod; font-weight: bold;', 3 => 'color: green; font-weight: bold;', 4 => 'color: dimgrey; font-style: italic;', 5 => 'color: green; font-weight: bold;', 6 => 'color: firebrick; font-weight: bold;', 7 => 'color: indigo; font-style: italic;' ), 'SCRIPT' => array( ) ), 'URLS' => array( // 1 => 'http://www.robotstxt.org/wc/norobots.html' ), 'OOLANG' => false, 'OBJECT_SPLITTERS' => array( ), 'REGEXPS' => array( 0 => array( GESHI_SEARCH => '(^.*ERROR.*)', GESHI_REPLACE => '\\1', GESHI_MODIFIERS => 'im', GESHI_BEFORE => '', GESHI_AFTER => '' ), 1 => array( GESHI_SEARCH => '(^.*NOTICE.*)', GESHI_REPLACE => '\\1', GESHI_MODIFIERS => 'im', GESHI_BEFORE => '', GESHI_AFTER => '' ), 2 => array( GESHI_SEARCH => '(^.*DEBUG.*)', GESHI_REPLACE => '\\1', GESHI_MODIFIERS => 'm', GESHI_BEFORE => '', GESHI_AFTER => '' ), 3 => array( GESHI_SEARCH => '(^.*INFO.*|.*info\(.*|^Channel.*|^Caller.*|^variable.*)', GESHI_REPLACE => '\\1', GESHI_MODIFIERS => 'm', GESHI_BEFORE => '', GESHI_AFTER => '' ), 4 => array( GESHI_SEARCH => '(^Dialplan.*)', GESHI_REPLACE => '\\1', GESHI_MODIFIERS => 'im', GESHI_BEFORE => '', GESHI_AFTER => '' ), 5 => array( GESHI_SEARCH => '(Regex\ \(PASS\))', GESHI_REPLACE => '\\1', GESHI_MODIFIERS => '', GESHI_BEFORE => '', GESHI_AFTER => '' ), 6 => array( GESHI_SEARCH => '(Regex\ \(FAIL\))', GESHI_REPLACE => '\\1', GESHI_MODIFIERS => '', GESHI_BEFORE => '', GESHI_AFTER => '' ), 7 => array( GESHI_SEARCH => '(\d{7,15})', GESHI_REPLACE => '\\1', GESHI_MODIFIERS => '', GESHI_BEFORE => '', GESHI_AFTER => '' ) ), 'STRICT_MODE_APPLIES' => GESHI_NEVER, 'SCRIPT_DELIMITERS' => array( ), 'HIGHLIGHT_STRICT_BLOCK' => array( ) );
brandonphuong/mediawiki
extensions/SyntaxHighlight_GeSHi/geshi/geshi/freeswitch.php
PHP
gpl-2.0
5,348
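A language file like the one above is selected by its file-name key when a GeSHi instance is constructed, and highlighting a FreeSWITCH console log then goes through parse_code(). A minimal usage sketch; the include path and the two log lines are placeholders:

<?php
require_once 'geshi.php'; // path into the GeSHi distribution

$log = "2011-11-18 12:00:00 [NOTICE] switch_channel.c:1050 New Channel sofia/internal/1000\n"
     . "2011-11-18 12:00:01 [ERROR] mod_sofia.c:2000 Cannot create channel\n";

// 'freeswitch' resolves to geshi/freeswitch.php, the language file above.
$geshi = new GeSHi($log, 'freeswitch');
echo $geshi->parse_code();
?>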
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ /* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 1.1/GPL 2.0/LGPL 2.1 * * The contents of this file are subject to the Mozilla Public License Version * 1.1 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is the Netscape Portable Runtime (NSPR). * * The Initial Developer of the Original Code is * Netscape Communications Corporation. * Portions created by the Initial Developer are Copyright (C) 1998-2000 * the Initial Developer. All Rights Reserved. * * Contributor(s): * * Alternatively, the contents of this file may be used under the terms of * either the GNU General Public License Version 2 or later (the "GPL"), or * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), * in which case the provisions of the GPL or the LGPL are applicable instead * of those above. If you wish to allow use of your version of this file only * under the terms of either the GPL or the LGPL, and not to allow others to * use your version of this file under the terms of the MPL, indicate your * decision by deleting the provisions above and replace them with the notice * and other provisions required by the GPL or the LGPL. If you do not delete * the provisions above, a recipient may use your version of this file under * the terms of any one of the MPL, the GPL or the LGPL. * * ***** END LICENSE BLOCK ***** */ /*********************************************************************** ** ** Name: op_2long.c ** ** Description: Test Program to verify the PR_NAME_TOO_LONG_ERROR ** ** Modification History: ** 03-June-97 AGarcia- Initial version ***********************************************************************/ /*********************************************************************** ** Includes ***********************************************************************/ /* Used to get the command line option */ #include "prinit.h" #include "prmem.h" #include "prio.h" #include "prerror.h" #include <stdio.h> #include "plerror.h" #include "plgetopt.h" #ifdef XP_MAC #include "prlog.h" #define printf PR_LogPrint #else #endif static PRFileDesc *t1; PRIntn error_code; /* * should exceed any system's maximum file name length * Note: was set at 4096. This is legal on some unix (Linux 2.1+) platforms. * */ #define TOO_LONG 5000 int main(int argc, char **argv) { char nameTooLong[TOO_LONG]; int i; /* Generate a really long pathname */ for (i = 0; i < TOO_LONG - 1; i++) { if (i % 10 == 0) { nameTooLong[i] = '/'; } else { nameTooLong[i] = 'a'; } } nameTooLong[TOO_LONG - 1] = 0; #ifdef XP_MAC SetupMacPrintfLog("pr_open_re.log"); #endif PR_STDIO_INIT(); t1 = PR_Open(nameTooLong, PR_RDWR, 0666); if (t1 == NULL) { if (PR_GetError() == PR_NAME_TOO_LONG_ERROR) { PL_PrintError("error code is"); printf ("PASS\n"); return 0; } else { PL_PrintError("error code is"); printf ("FAIL\n"); return 1; } } else { printf ("Test passed\n"); return 0; } }
ryenus/vbox
src/libs/xpcom18a4/nsprpub/pr/tests/op_2long.c
C
gpl-2.0
3,443
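The test above asserts that PR_Open() maps an oversized pathname to PR_NAME_TOO_LONG_ERROR. Written directly against POSIX, the same probe checks open() for ENAMETOOLONG; a standalone sketch under that assumption, reusing the 5000-byte TOO_LONG shape from the test:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define TOO_LONG 5000	/* longer than any common PATH_MAX */

int main(void)
{
	char name[TOO_LONG];
	int i, fd;

	/* Same shape as the NSPR test: a '/' every tenth character. */
	for (i = 0; i < TOO_LONG - 1; i++)
		name[i] = (i % 10 == 0) ? '/' : 'a';
	name[TOO_LONG - 1] = '\0';

	fd = open(name, O_RDWR);
	if (fd < 0 && errno == ENAMETOOLONG) {
		printf("PASS: %s\n", strerror(errno));
		return 0;
	}
	if (fd >= 0)
		close(fd);
	printf("FAIL: fd=%d errno=%d\n", fd, errno);
	return 1;
}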
//compatible with prototype if(typeof Prototype != 'undefined' && (typeof $ != 'undefined')) { $prototype = $; } // Tooltip Object var Tooltip = Class.create(); Tooltip.prototype = { initialize: function(el, options) { this.el = $prototype(el); this.initialized = false; this.setOptions(options); // Event handlers this.showEvent = this.show.bindAsEventListener(this); this.hideEvent = this.hide.bindAsEventListener(this); this.updateEvent = this.update.bindAsEventListener(this); Event.observe(this.el, "mouseover", this.showEvent ); Event.observe(this.el, "mouseout", this.hideEvent ); // Removing title from DOM element to avoid showing it this.content = this.el.title; this.el.title = ""; // If descendant elements has 'alt' attribute defined, clear it this.el.descendants().each(function(el){ if(Element.readAttribute(el, 'alt')) el.alt = ""; }); }, setOptions: function(options) { this.options = { backgroundColor: '#999', // Default background color borderColor: '#666', // Default border color textColor: '', // Default text color (use CSS value) textShadowColor: '', // Default text shadow color (use CSS value) maxWidth: 250, // Default tooltip width align: "left", // Default align delay: 250, // Default delay before tooltip appears in ms mouseFollow: true, // Tooltips follows the mouse moving opacity: .75, // Default tooltips opacity appearDuration: .25, // Default appear duration in sec hideDuration: .25 // Default disappear duration in sec }; Object.extend(this.options, options || {}); }, show: function(e) { this.xCord = Event.pointerX(e); this.yCord = Event.pointerY(e); if(!this.initialized) this.timeout = window.setTimeout(this.appear.bind(this), this.options.delay); }, hide: function(e) { if(this.initialized) { //this.appearingFX.cancel(); if(this.options.mouseFollow) Event.stopObserving(this.el, "mousemove", this.updateEvent); this.tooltip.hide(); //new Effect.Fade(this.tooltip, {duration: this.options.hideDuration, afterFinish: function() { Element.remove(this.tooltip) }.bind(this) }); } this._clearTimeout(this.timeout); this.initialized = false; }, update: function(e){ this.xCord = Event.pointerX(e); this.yCord = Event.pointerY(e); this.setup(); }, appear: function() { // Building tooltip container /* this.tooltip = Builder.node("div", {className: "tooltip", style: "display: none;" }, [ Builder.node("div", {className:"xtop"}, [ Builder.node("div", {className:"xb1", style:"background-color:" + this.options.borderColor + ";"}), Builder.node("div", {className:"xb2", style: "background-color:" + this.options.backgroundColor + "; border-color:" + this.options.borderColor + ";"}), Builder.node("div", {className:"xb3", style: "background-color:" + this.options.backgroundColor + "; border-color:" + this.options.borderColor + ";"}), Builder.node("div", {className:"xb4", style: "background-color:" + this.options.backgroundColor + "; border-color:" + this.options.borderColor + ";"}) ]), Builder.node("div", {className: "xboxcontent", style: "background-color:" + this.options.backgroundColor + "; border-color:" + this.options.borderColor + ((this.options.textColor != '') ? "; color:" + this.options.textColor : "") + ((this.options.textShadowColor != '') ? 
"; text-shadow:2px 2px 0" + this.options.textShadowColor + ";" : "")}, this.content), Builder.node("div", {className:"xbottom"}, [ Builder.node("div", {className:"xb4", style: "background-color:" + this.options.backgroundColor + "; border-color:" + this.options.borderColor + ";"}), Builder.node("div", {className:"xb3", style: "background-color:" + this.options.backgroundColor + "; border-color:" + this.options.borderColor + ";"}), Builder.node("div", {className:"xb2", style: "background-color:" + this.options.backgroundColor + "; border-color:" + this.options.borderColor + ";"}), Builder.node("div", {className:"xb1", style:"background-color:" + this.options.borderColor + ";"}) ]), ]); */ var tooltipString = ''+ '<div class="tooltip" style="display: none;">' + '<div class="xtop">' + '<div class="xb1" style="background-color: rgb(204, 153, 102);"></div>' + '<div class="xb2" style="border-color: rgb(204, 153, 102); background-color: rgb(255, 204, 153);"></div>' + '<div class="xb3" style="border-color: rgb(204, 153, 102); background-color: rgb(255, 204, 153);"></div>' + '<div class="xb4" style="border-color: rgb(204, 153, 102); background-color: rgb(255, 204, 153);"></div>' + '</div>' + '<div class="xboxcontent" style="border-color: rgb(204, 153, 102); background-color: rgb(255, 204, 153); color: rgb(0, 0, 0); text-shadow: 2px 2px 0pt rgb(255, 255, 255);">'+ this.content + '</div>' + '<div class="xbottom">' + '<div class="xb4" style="border-color: rgb(204, 153, 102); background-color: rgb(255, 204, 153);"></div>' + '<div class="xb3" style="border-color: rgb(204, 153, 102); background-color: rgb(255, 204, 153);"></div>' + '<div class="xb2" style="border-color: rgb(204, 153, 102); background-color: rgb(255, 204, 153);"></div>' + '<div class="xb1" style="background-color: rgb(204, 153, 102);"></div>' + '</div>' + '</div>' new Insertion.Before(document.body.childNodes[0], tooltipString) this.tooltip = document.body.childNodes[0]; //document.body.insertBefore(this.tooltip, document.body.childNodes[0]); Element.extend(this.tooltip); // IE needs element to be manually extended this.options.width = this.tooltip.getWidth(); this.tooltip.style.width = this.options.width + 'px'; // IE7 needs width to be defined this.setup(); if(this.options.mouseFollow) Event.observe(this.el, "mousemove", this.updateEvent); this.initialized = true; this.tooltip.show(); //this.appearingFX = new Effect.Appear(this.tooltip, {duration: this.options.appearDuration, to: this.options.opacity }); }, setup: function(){ // If content width is more then allowed max width, set width to max if(this.options.width > this.options.maxWidth) { this.options.width = this.options.maxWidth; this.tooltip.style.width = this.options.width + 'px'; } // Tooltip doesn't fit the current document dimensions if(this.xCord + this.options.width >= Element.getWidth(document.body)) { this.options.align = "right"; this.xCord = this.xCord - this.options.width + 20; } this.tooltip.style.left = this.xCord - 7 + "px"; this.tooltip.style.top = this.yCord + 12 + "px"; }, stop: function () { this.hide(); Event.stopObserving(this.el, "mouseover", this.showEvent); Event.stopObserving(this.el, "mouseout", this.hideEvent); Event.stopObserving(this.el, "mousemove", this.updateEvent); }, _clearTimeout: function(timer) { clearTimeout(timer); clearInterval(timer); return null; } };
diwayou/diamond-all
diamond-server/src/main/webapp/js/tooltips.js
JavaScript
gpl-2.0
6,949
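Instantiating the class above follows the usual Prototype.js idiom: one Tooltip per element, constructed once the DOM is ready, with any of the setOptions() defaults passed as overrides. A small usage sketch; the element id and the color values are placeholders:

// Assumes Prototype.js and the Tooltip class above are loaded, and that
// the target element carries the title attribute the tooltip will show.
document.observe('dom:loaded', function() {
    var tip = new Tooltip('save-button', {
        backgroundColor: '#ffc',
        borderColor: '#cc9',
        maxWidth: 300,   // cap tooltip width in pixels
        delay: 500       // wait half a second before appearing
    });

    // tip.stop() detaches every listener when the tooltip is retired.
});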
<?php /** * Slider Revolution * * @package Essential_Grid * @author ThemePunch <info@themepunch.com> * @link http://www.revolution.themepunch.com/ * @copyright 2015 ThemePunch */ /** * @package RevSliderExtension * @author ThemePunch <info@themepunch.com> */ if( !defined( 'ABSPATH') ) exit(); class RevSliderExtension { public function __construct() { $this->init_essential_grid_extensions(); } /*************************** * Setup part for Revslider inclusion into Essential Grid ***************************/ /** * Do all initializations for RevSlider integration */ public function init_essential_grid_extensions(){ if(!class_exists('Essential_Grid')) return false; //only add if Essential Grid is installed add_filter('essgrid_set_ajax_source_order', array($this, 'add_slider_to_eg_ajax')); add_filter('essgrid_handle_ajax_content', array($this, 'set_slider_values_to_eg_ajax'), 10, 4); add_action('essgrid_add_meta_options', array($this, 'add_eg_additional_meta_field')); add_action('essgrid_save_meta_options', array($this, 'save_eg_additional_meta_field'), 10, 2); //only do on frontend add_action('admin_head', array($this, 'add_eg_additional_inline_javascript')); add_action('wp_head', array($this, 'add_eg_additional_inline_javascript')); } /** * Add Slider to the List of choosable media */ public function add_slider_to_eg_ajax($media){ $media['revslider'] = array('name' => __('Slider Revolution', REVSLIDER_TEXTDOMAIN), 'type' => 'ccw'); return $media; } /** * Add Slider to the List of choosable media */ public function set_slider_values_to_eg_ajax($handle, $media_sources, $post, $grid_id){ if($handle !== 'revslider') return false; $slider_source = ''; $values = get_post_custom($post['ID']); if(isset($values['eg_sources_revslider'])){ if(isset($values['eg_sources_revslider'][0])) $slider_source = (isset($values['eg_sources_revslider'][0])) ? $values['eg_sources_revslider'][0] : ''; else $slider_source = (isset($values['eg_sources_revslider'])) ? $values['eg_sources_revslider'] : ''; } if($slider_source === ''){ return false; }else{ return ' data-ajaxtype="'.$handle.'" data-ajaxsource="'.$slider_source.'"'; } } /** * Adds custom meta field into the essential grid meta box for post/pages */ public function add_eg_additional_meta_field($values){ $sld = new RevSlider(); $sliders = $sld->getArrSliders(); $shortcodes = array(); if(!empty($sliders)){ $first = true; foreach($sliders as $slider){ $name = $slider->getParam('shortcode','false'); if($name != 'false'){ $shortcodes[$slider->getID()] = $name; $first = false; } } } $selected_slider = (isset($values['eg_sources_revslider'])) ? 
$values['eg_sources_revslider'] : ''; if($selected_slider == '') $selected_slider[0] = ''; ?> <p> <strong style="font-size:14px"><?php _e('Choose Revolution Slider', REVSLIDER_TEXTDOMAIN); ?></strong> </p> <p> <select name="eg_sources_revslider" id="eg_sources_revslider"> <option value=""<?php selected($selected_slider[0], ''); ?>><?php _e('--- Choose Slider ---', REVSLIDER_TEXTDOMAIN); ?></option> <?php if(!empty($shortcodes)){ foreach($shortcodes as $id => $name){ ?> <option value="<?php echo $id; ?>"<?php selected($selected_slider[0], $id); ?>><?php echo $name; ?></option> <?php } } ?> </select> </p> <?php } /** * Adds custom meta field into the essential grid meta box for post/pages */ public function save_eg_additional_meta_field($metas, $post_id){ if(isset($metas['eg_sources_revslider'])) update_post_meta($post_id, 'eg_sources_revslider', $metas['eg_sources_revslider']); } /** * Adds needed javascript to the DOM */ public function add_eg_additional_inline_javascript(){ ?> <script type="text/javascript"> jQuery(document).ready(function() { // CUSTOM AJAX CONTENT LOADING FUNCTION var ajaxRevslider = function(obj) { // obj.type : Post Type // obj.id : ID of Content to Load // obj.aspectratio : The Aspect Ratio of the Container / Media // obj.selector : The Container Selector where the Content of Ajax will be injected. It is done via the Essential Grid on Return of Content var content = ""; data = {}; data.action = 'revslider_ajax_call_front'; data.client_action = 'get_slider_html'; data.token = '<?php echo wp_create_nonce("RevSlider_Front"); ?>'; data.type = obj.type; data.id = obj.id; data.aspectratio = obj.aspectratio; // SYNC AJAX REQUEST jQuery.ajax({ type:"post", url:"<?php echo admin_url('admin-ajax.php'); ?>", dataType: 'json', data:data, async:false, success: function(ret, textStatus, XMLHttpRequest) { if(ret.success == true) content = ret.data; }, error: function(e) { console.log(e); } }); // FIRST RETURN THE CONTENT WHEN IT IS LOADED !! return content; }; // CUSTOM AJAX FUNCTION TO REMOVE THE SLIDER var ajaxRemoveRevslider = function(obj) { return jQuery(obj.selector+" .rev_slider").revkill(); }; // EXTEND THE AJAX CONTENT LOADING TYPES WITH TYPE AND FUNCTION var extendessential = setInterval(function() { if (jQuery.fn.tpessential != undefined) { clearInterval(extendessential); if(typeof(jQuery.fn.tpessential.defaults) !== 'undefined') { jQuery.fn.tpessential.defaults.ajaxTypes.push({type:"revslider",func:ajaxRevslider,killfunc:ajaxRemoveRevslider,openAnimationSpeed:0.3}); // type: Name of the Post to load via Ajax into the Essential Grid Ajax Container // func: the Function Name which is Called once the Item with the Post Type has been clicked // killfunc: function to kill in case the Ajax Window going to be removed (before Remove function ! // openAnimationSpeed: how quick the Ajax Content window should be animated (default is 0.3) } } },30); }); </script> <?php } } ?>
abdullaniyas/cleansweep
wp-content/plugins/revslider/includes/extension.class.php
PHP
gpl-2.0
6,224
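The RevSliderExtension record above wires Slider Revolution into Essential Grid entirely through WordPress hooks: one filter registers the source type, another resolves stored post meta into data attributes. A minimal sketch of registering a comparable custom source, assuming only the essgrid_* hook names and the eg_sources_* meta-key convention shown above; the 'myslider' handle, the meta key, and the callbacks are hypothetical:

<?php
// Register a hypothetical 'myslider' source with Essential Grid.
add_filter('essgrid_set_ajax_source_order', function ($media) {
    $media['myslider'] = array('name' => 'My Slider', 'type' => 'ccw');
    return $media;
});

// Resolve the stored meta into the data attributes Essential Grid expects.
add_filter('essgrid_handle_ajax_content', function ($handle, $media_sources, $post, $grid_id) {
    if ($handle !== 'myslider') {
        return false; // not our source; let other handlers run
    }
    $source = get_post_meta($post['ID'], 'eg_sources_myslider', true);
    if ($source === '') {
        return false;
    }
    return ' data-ajaxtype="' . $handle . '" data-ajaxsource="' . esc_attr($source) . '"';
}, 10, 4);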
/* * net/sched/pedit.c Generic packet editor * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Jamal Hadi Salim (2002-4) */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/module.h> #include <linux/init.h> #include <net/netlink.h> #include <net/pkt_sched.h> #include <linux/tc_act/tc_pedit.h> #include <net/tc_act/tc_pedit.h> #define PEDIT_TAB_MASK 15 static struct tcf_common *tcf_pedit_ht[PEDIT_TAB_MASK + 1]; static u32 pedit_idx_gen; static DEFINE_RWLOCK(pedit_lock); static struct tcf_hashinfo pedit_hash_info = { .htab = tcf_pedit_ht, .hmask = PEDIT_TAB_MASK, .lock = &pedit_lock, }; static int tcf_pedit_init(struct rtattr *rta, struct rtattr *est, struct tc_action *a, int ovr, int bind) { struct rtattr *tb[TCA_PEDIT_MAX]; struct tc_pedit *parm; int ret = 0; struct tcf_pedit *p; struct tcf_common *pc; struct tc_pedit_key *keys = NULL; int ksize; if (rta == NULL || rtattr_parse_nested(tb, TCA_PEDIT_MAX, rta) < 0) return -EINVAL; if (tb[TCA_PEDIT_PARMS - 1] == NULL || RTA_PAYLOAD(tb[TCA_PEDIT_PARMS-1]) < sizeof(*parm)) return -EINVAL; parm = RTA_DATA(tb[TCA_PEDIT_PARMS-1]); ksize = parm->nkeys * sizeof(struct tc_pedit_key); if (RTA_PAYLOAD(tb[TCA_PEDIT_PARMS-1]) < sizeof(*parm) + ksize) return -EINVAL; pc = tcf_hash_check(parm->index, a, bind, &pedit_hash_info); if (!pc) { if (!parm->nkeys) return -EINVAL; pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind, &pedit_idx_gen, &pedit_hash_info); if (unlikely(!pc)) return -ENOMEM; p = to_pedit(pc); keys = kmalloc(ksize, GFP_KERNEL); if (keys == NULL) { kfree(pc); return -ENOMEM; } ret = ACT_P_CREATED; } else { p = to_pedit(pc); if (!ovr) { tcf_hash_release(pc, bind, &pedit_hash_info); return -EEXIST; } if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) { keys = kmalloc(ksize, GFP_KERNEL); if (keys == NULL) return -ENOMEM; } } spin_lock_bh(&p->tcf_lock); p->tcfp_flags = parm->flags; p->tcf_action = parm->action; if (keys) { kfree(p->tcfp_keys); p->tcfp_keys = keys; p->tcfp_nkeys = parm->nkeys; } memcpy(p->tcfp_keys, parm->keys, ksize); spin_unlock_bh(&p->tcf_lock); if (ret == ACT_P_CREATED) tcf_hash_insert(pc, &pedit_hash_info); return ret; } static int tcf_pedit_cleanup(struct tc_action *a, int bind) { struct tcf_pedit *p = a->priv; if (p) { struct tc_pedit_key *keys = p->tcfp_keys; if (tcf_hash_release(&p->common, bind, &pedit_hash_info)) { kfree(keys); return 1; } } return 0; } static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) { struct tcf_pedit *p = a->priv; int i, munged = 0; u8 *pptr; if (!(skb->tc_verd & TC_OK2MUNGE)) { /* should we set skb->cloned? 
*/ if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { return p->tcf_action; } } pptr = skb_network_header(skb); spin_lock(&p->tcf_lock); p->tcf_tm.lastuse = jiffies; if (p->tcfp_nkeys > 0) { struct tc_pedit_key *tkey = p->tcfp_keys; for (i = p->tcfp_nkeys; i > 0; i--, tkey++) { u32 *ptr; int offset = tkey->off; if (tkey->offmask) { if (skb->len > tkey->at) { char *j = pptr + tkey->at; offset += ((*j & tkey->offmask) >> tkey->shift); } else { goto bad; } } if (offset % 4) { printk("offset must be on 32 bit boundaries\n"); goto bad; } if (offset > 0 && offset > skb->len) { printk("offset %d cant exceed pkt length %d\n", offset, skb->len); goto bad; } ptr = (u32 *)(pptr+offset); /* just do it, baby */ *ptr = ((*ptr & tkey->mask) ^ tkey->val); munged++; } if (munged) skb->tc_verd = SET_TC_MUNGED(skb->tc_verd); goto done; } else { printk("pedit BUG: index %d\n", p->tcf_index); } bad: p->tcf_qstats.overlimits++; done: p->tcf_bstats.bytes += skb->len; p->tcf_bstats.packets++; spin_unlock(&p->tcf_lock); return p->tcf_action; } static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { unsigned char *b = skb_tail_pointer(skb); struct tcf_pedit *p = a->priv; struct tc_pedit *opt; struct tcf_t t; int s; s = sizeof(*opt) + p->tcfp_nkeys * sizeof(struct tc_pedit_key); /* netlink spinlocks held above us - must use ATOMIC */ opt = kzalloc(s, GFP_ATOMIC); if (unlikely(!opt)) return -ENOBUFS; memcpy(opt->keys, p->tcfp_keys, p->tcfp_nkeys * sizeof(struct tc_pedit_key)); opt->index = p->tcf_index; opt->nkeys = p->tcfp_nkeys; opt->flags = p->tcfp_flags; opt->action = p->tcf_action; opt->refcnt = p->tcf_refcnt - ref; opt->bindcnt = p->tcf_bindcnt - bind; RTA_PUT(skb, TCA_PEDIT_PARMS, s, opt); t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(p->tcf_tm.expires); RTA_PUT(skb, TCA_PEDIT_TM, sizeof(t), &t); kfree(opt); return skb->len; rtattr_failure: nlmsg_trim(skb, b); kfree(opt); return -1; } static struct tc_action_ops act_pedit_ops = { .kind = "pedit", .hinfo = &pedit_hash_info, .type = TCA_ACT_PEDIT, .capab = TCA_CAP_NONE, .owner = THIS_MODULE, .act = tcf_pedit, .dump = tcf_pedit_dump, .cleanup = tcf_pedit_cleanup, .lookup = tcf_hash_search, .init = tcf_pedit_init, .walk = tcf_generic_walker }; MODULE_AUTHOR("Jamal Hadi Salim(2002-4)"); MODULE_DESCRIPTION("Generic Packet Editor actions"); MODULE_LICENSE("GPL"); static int __init pedit_init_module(void) { return tcf_register_action(&act_pedit_ops); } static void __exit pedit_cleanup_module(void) { tcf_unregister_action(&act_pedit_ops); } module_init(pedit_init_module); module_exit(pedit_cleanup_module);
pengdonglin137/linux-2.6.24
net/sched/act_pedit.c
C
gpl-2.0
6,025
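The core of tcf_pedit() in the act_pedit.c record above is a single masked 32-bit edit per key: keep the bits selected by the mask, then XOR in the replacement value. The same arithmetic, in PHP purely for illustration and with hypothetical values:

<?php
// Equivalent of the kernel's: *ptr = ((*ptr & tkey->mask) ^ tkey->val);
$word = 0x0a000001;   // current 32-bit word read from the packet
$mask = 0xffffff00;   // bits of the original word to preserve
$val  = 0x00000064;   // bits to fold in via XOR
$edited = ($word & $mask) ^ $val;
printf("0x%08x\n", $edited);  // prints 0x0a000064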
<?php namespace Drupal\Core\Form; use Drupal\Component\Utility\NestedArray; use Drupal\Component\Utility\Unicode; use Drupal\Core\Access\CsrfTokenGenerator; use Drupal\Core\Render\Element; use Drupal\Core\StringTranslation\StringTranslationTrait; use Drupal\Core\StringTranslation\TranslationInterface; use Psr\Log\LoggerInterface; use Symfony\Component\HttpFoundation\RequestStack; /** * Provides validation of form submissions. */ class FormValidator implements FormValidatorInterface { use StringTranslationTrait; /** * The CSRF token generator to validate the form token. * * @var \Drupal\Core\Access\CsrfTokenGenerator */ protected $csrfToken; /** * The request stack. * * @var \Symfony\Component\HttpFoundation\RequestStack */ protected $requestStack; /** * A logger instance. * * @var \Psr\Log\LoggerInterface */ protected $logger; /** * The form error handler. * * @var \Drupal\Core\Form\FormErrorHandlerInterface */ protected $formErrorHandler; /** * Constructs a new FormValidator. * * @param \Symfony\Component\HttpFoundation\RequestStack $request_stack * The request stack. * @param \Drupal\Core\StringTranslation\TranslationInterface $string_translation * The string translation service. * @param \Drupal\Core\Access\CsrfTokenGenerator $csrf_token * The CSRF token generator. * @param \Psr\Log\LoggerInterface $logger * A logger instance. * @param \Drupal\Core\Form\FormErrorHandlerInterface $form_error_handler * The form error handler. */ public function __construct(RequestStack $request_stack, TranslationInterface $string_translation, CsrfTokenGenerator $csrf_token, LoggerInterface $logger, FormErrorHandlerInterface $form_error_handler) { $this->requestStack = $request_stack; $this->stringTranslation = $string_translation; $this->csrfToken = $csrf_token; $this->logger = $logger; $this->formErrorHandler = $form_error_handler; } /** * {@inheritdoc} */ public function executeValidateHandlers(&$form, FormStateInterface &$form_state) { // If there was a button pressed, use its handlers. $handlers = $form_state->getValidateHandlers(); // Otherwise, check for a form-level handler. if (!$handlers && isset($form['#validate'])) { $handlers = $form['#validate']; } foreach ($handlers as $callback) { call_user_func_array($form_state->prepareCallback($callback), array(&$form, &$form_state)); } } /** * {@inheritdoc} */ public function validateForm($form_id, &$form, FormStateInterface &$form_state) { // If this form is flagged to always validate, ensure that previous runs of // validation are ignored. if ($form_state->isValidationEnforced()) { $form_state->setValidationComplete(FALSE); } // If this form has completed validation, do not validate again. if ($form_state->isValidationComplete()) { return; } // If the session token was set by self::prepareForm(), ensure that it // matches the current user's session. This is duplicate to code in // FormBuilder::doBuildForm() but left to protect any custom form handling // code. if (isset($form['#token'])) { if (!$this->csrfToken->validate($form_state->getValue('form_token'), $form['#token']) || $form_state->hasInvalidToken()) { $this->setInvalidTokenError($form_state); // Stop here and don't run any further validation handlers, because they // could invoke non-safe operations which opens the door for CSRF // vulnerabilities. $this->finalizeValidation($form, $form_state, $form_id); return; } } // Recursively validate each form element. 
$this->doValidateForm($form, $form_state, $form_id); $this->finalizeValidation($form, $form_state, $form_id); $this->handleErrorsWithLimitedValidation($form, $form_state, $form_id); } /** * {@inheritdoc} */ public function setInvalidTokenError(FormStateInterface $form_state) { $url = $this->requestStack->getCurrentRequest()->getRequestUri(); // Setting this error will cause the form to fail validation. $form_state->setErrorByName('form_token', $this->t('The form has become outdated. Copy any unsaved work in the form below and then <a href=":link">reload this page</a>.', array(':link' => $url))); } /** * Handles validation errors for forms with limited validation. * * If validation errors are limited then remove any non validated form values, * so that only values that passed validation are left for submit callbacks. * * @param array $form * An associative array containing the structure of the form. * @param \Drupal\Core\Form\FormStateInterface $form_state * The current state of the form. * @param string $form_id * The unique string identifying the form. */ protected function handleErrorsWithLimitedValidation(&$form, FormStateInterface &$form_state, $form_id) { // If validation errors are limited then remove any non validated form values, // so that only values that passed validation are left for submit callbacks. $triggering_element = $form_state->getTriggeringElement(); if (isset($triggering_element['#limit_validation_errors']) && $triggering_element['#limit_validation_errors'] !== FALSE) { $values = array(); foreach ($triggering_element['#limit_validation_errors'] as $section) { // If the section exists within $form_state->getValues(), even if the // value is NULL, copy it to $values. $section_exists = NULL; $value = NestedArray::getValue($form_state->getValues(), $section, $section_exists); if ($section_exists) { NestedArray::setValue($values, $section, $value); } } // A button's #value does not require validation, so for convenience we // allow the value of the clicked button to be retained in its normal // $form_state->getValues() locations, even if these locations are not // included in #limit_validation_errors. if (!empty($triggering_element['#is_button'])) { $button_value = $triggering_element['#value']; // Like all input controls, the button value may be in the location // dictated by #parents. If it is, copy it to $values, but do not // override what may already be in $values. $parents = $triggering_element['#parents']; if (!NestedArray::keyExists($values, $parents) && NestedArray::getValue($form_state->getValues(), $parents) === $button_value) { NestedArray::setValue($values, $parents, $button_value); } // Additionally, self::doBuildForm() places the button value in // $form_state->getValue(BUTTON_NAME). If it's still there, after // validation handlers have run, copy it to $values, but do not override // what may already be in $values. $name = $triggering_element['#name']; if (!isset($values[$name]) && $form_state->getValue($name) === $button_value) { $values[$name] = $button_value; } } $form_state->setValues($values); } } /** * Finalizes validation. * * @param array $form * An associative array containing the structure of the form. * @param \Drupal\Core\Form\FormStateInterface $form_state * The current state of the form. * @param string $form_id * The unique string identifying the form. */ protected function finalizeValidation(&$form, FormStateInterface &$form_state, $form_id) { // Delegate handling of form errors to a service. 
$this->formErrorHandler->handleFormErrors($form, $form_state); // Mark this form as validated. $form_state->setValidationComplete(); } /** * Performs validation on form elements. * * First ensures required fields are completed, #maxlength is not exceeded, * and selected options were in the list of options given to the user. Then * calls user-defined validators. * * @param $elements * An associative array containing the structure of the form. * @param \Drupal\Core\Form\FormStateInterface $form_state * The current state of the form. The current user-submitted data is stored * in $form_state->getValues(), though form validation functions are passed * an explicit copy of the values for the sake of simplicity. Validation * handlers can also $form_state to pass information on to submit handlers. * For example: * $form_state->set('data_for_submission', $data); * This technique is useful when validation requires file parsing, * web service requests, or other expensive requests that should * not be repeated in the submission step. * @param $form_id * A unique string identifying the form for validation, submission, * theming, and hook_form_alter functions. */ protected function doValidateForm(&$elements, FormStateInterface &$form_state, $form_id = NULL) { // Recurse through all children. foreach (Element::children($elements) as $key) { if (isset($elements[$key]) && $elements[$key]) { $this->doValidateForm($elements[$key], $form_state); } } // Validate the current input. if (!isset($elements['#validated']) || !$elements['#validated']) { // The following errors are always shown. if (isset($elements['#needs_validation'])) { $this->performRequiredValidation($elements, $form_state); } // Set up the limited validation for errors. $form_state->setLimitValidationErrors($this->determineLimitValidationErrors($form_state)); // Make sure a value is passed when the field is required. if (isset($elements['#needs_validation']) && $elements['#required']) { // A simple call to empty() will not cut it here as some fields, like // checkboxes, can return a valid value of '0'. Instead, check the // length if it's a string, and the item count if it's an array. // An unchecked checkbox has a #value of integer 0, different than // string '0', which could be a valid value. $is_empty_multiple = (!count($elements['#value'])); $is_empty_string = (is_string($elements['#value']) && Unicode::strlen(trim($elements['#value'])) == 0); $is_empty_value = ($elements['#value'] === 0); if ($is_empty_multiple || $is_empty_string || $is_empty_value) { // Flag this element as #required_but_empty to allow #element_validate // handlers to set a custom required error message, but without having // to re-implement the complex logic to figure out whether the field // value is empty. $elements['#required_but_empty'] = TRUE; } } // Call user-defined form level validators. if (isset($form_id)) { $this->executeValidateHandlers($elements, $form_state); } // Call any element-specific validators. These must act on the element // #value data. elseif (isset($elements['#element_validate'])) { foreach ($elements['#element_validate'] as $callback) { $complete_form = &$form_state->getCompleteForm(); call_user_func_array($form_state->prepareCallback($callback), array(&$elements, &$form_state, &$complete_form)); } } // Ensure that a #required form error is thrown, regardless of whether // #element_validate handlers changed any properties. 
If $is_empty_value // is defined, then above #required validation code ran, so the other // variables are also known to be defined and we can test them again. if (isset($is_empty_value) && ($is_empty_multiple || $is_empty_string || $is_empty_value)) { if (isset($elements['#required_error'])) { $form_state->setError($elements, $elements['#required_error']); } // A #title is not mandatory for form elements, but without it we cannot // set a form error message. So when a visible title is undesirable, // form constructors are encouraged to set #title anyway, and then set // #title_display to 'invisible'. This improves accessibility. elseif (isset($elements['#title'])) { $form_state->setError($elements, $this->t('@name field is required.', array('@name' => $elements['#title']))); } else { $form_state->setError($elements); } } $elements['#validated'] = TRUE; } // Done validating this element, so turn off error suppression. // self::doValidateForm() turns it on again when starting on the next // element, if it's still appropriate to do so. $form_state->setLimitValidationErrors(NULL); } /** * Performs validation of elements that are not subject to limited validation. * * @param array $elements * An associative array containing the structure of the form. * @param \Drupal\Core\Form\FormStateInterface $form_state * The current state of the form. The current user-submitted data is stored * in $form_state->getValues(), though form validation functions are passed * an explicit copy of the values for the sake of simplicity. Validation * handlers can also $form_state to pass information on to submit handlers. * For example: * $form_state->set('data_for_submission', $data); * This technique is useful when validation requires file parsing, * web service requests, or other expensive requests that should * not be repeated in the submission step. */ protected function performRequiredValidation(&$elements, FormStateInterface &$form_state) { // Verify that the value is not longer than #maxlength. if (isset($elements['#maxlength']) && Unicode::strlen($elements['#value']) > $elements['#maxlength']) { $form_state->setError($elements, $this->t('@name cannot be longer than %max characters but is currently %length characters long.', array('@name' => empty($elements['#title']) ? $elements['#parents'][0] : $elements['#title'], '%max' => $elements['#maxlength'], '%length' => Unicode::strlen($elements['#value'])))); } if (isset($elements['#options']) && isset($elements['#value'])) { if ($elements['#type'] == 'select') { $options = OptGroup::flattenOptions($elements['#options']); } else { $options = $elements['#options']; } if (is_array($elements['#value'])) { $value = in_array($elements['#type'], array('checkboxes', 'tableselect')) ? array_keys($elements['#value']) : $elements['#value']; foreach ($value as $v) { if (!isset($options[$v])) { $form_state->setError($elements, $this->t('An illegal choice has been detected. Please contact the site administrator.')); $this->logger->error('Illegal choice %choice in %name element.', array('%choice' => $v, '%name' => empty($elements['#title']) ? $elements['#parents'][0] : $elements['#title'])); } } } // Non-multiple select fields always have a value in HTML. If the user // does not change the form, it will be the value of the first option. // Because of this, form validation for the field will almost always // pass, even if the user did not select anything. 
To work around this // browser behavior, required select fields without a #default_value // get an additional, first empty option. In case the submitted value // is identical to the empty option's value, we reset the element's // value to NULL to trigger the regular #required handling below. // @see \Drupal\Core\Render\Element\Select::processSelect() elseif ($elements['#type'] == 'select' && !$elements['#multiple'] && $elements['#required'] && !isset($elements['#default_value']) && $elements['#value'] === $elements['#empty_value']) { $elements['#value'] = NULL; $form_state->setValueForElement($elements, NULL); } elseif (!isset($options[$elements['#value']])) { $form_state->setError($elements, $this->t('An illegal choice has been detected. Please contact the site administrator.')); $this->logger->error('Illegal choice %choice in %name element.', array('%choice' => $elements['#value'], '%name' => empty($elements['#title']) ? $elements['#parents'][0] : $elements['#title'])); } } } /** * Determines if validation errors should be limited. * * @param \Drupal\Core\Form\FormStateInterface $form_state * The current state of the form. * * @return array|null */ protected function determineLimitValidationErrors(FormStateInterface &$form_state) { // While this element is being validated, it may be desired that some // calls to \Drupal\Core\Form\FormStateInterface::setErrorByName() be // suppressed and not result in a form error, so that a button that // implements low-risk functionality (such as "Previous" or "Add more") that // doesn't require all user input to be valid can still have its submit // handlers triggered. The triggering element's #limit_validation_errors // property contains the information for which errors are needed, and all // other errors are to be suppressed. The #limit_validation_errors property // is ignored if submit handlers will run, but the element doesn't have a // #submit property, because it's too large a security risk to have any // invalid user input when executing form-level submit handlers. $triggering_element = $form_state->getTriggeringElement(); if (isset($triggering_element['#limit_validation_errors']) && ($triggering_element['#limit_validation_errors'] !== FALSE) && !($form_state->isSubmitted() && !isset($triggering_element['#submit']))) { return $triggering_element['#limit_validation_errors']; } // If submit handlers won't run (due to the submission having been // triggered by an element whose #executes_submit_callback property isn't // TRUE), then it's safe to suppress all validation errors, and we do so // by default, which is particularly useful during an Ajax submission // triggered by a non-button. An element can override this default by // setting the #limit_validation_errors property. For button element // types, #limit_validation_errors defaults to FALSE, so that full // validation is their default behavior. elseif ($triggering_element && !isset($triggering_element['#limit_validation_errors']) && !$form_state->isSubmitted()) { return array(); } // As an extra security measure, explicitly turn off error suppression if // one of the above conditions wasn't met. Since this is also done at the // end of this function, doing it here is only to handle the rare edge // case where a validate handler invokes form processing of another form. else { return NULL; } } }
SeeyaSia/www
web/core/lib/Drupal/Core/Form/FormValidator.php
PHP
gpl-2.0
18,990
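Much of the machinery in the FormValidator record above exists to honor a triggering element's #limit_validation_errors property. A minimal sketch of the form side of that contract, not taken from the record; the button name, field name, and submit callback are hypothetical:

<?php
// An "Add more" style button: only the nested ['items'] values are
// validated, and handleErrorsWithLimitedValidation() above strips every
// other submitted value before the #submit handler runs. Because #submit
// is set, errors are limited rather than fully suppressed.
$form['actions']['add_more'] = array(
  '#type' => 'submit',
  '#value' => t('Add another item'),
  '#limit_validation_errors' => array(array('items')),
  '#submit' => array('::addMoreSubmit'),
);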
<?php

namespace Drupal\menu_test\Controller;

use Drupal\Core\Controller\ControllerBase;
use Drupal\Core\Routing\RouteMatchInterface;
use Drupal\Core\Theme\ThemeManagerInterface;
use Drupal\Core\Theme\ThemeNegotiatorInterface;
use Symfony\Component\DependencyInjection\ContainerInterface;

/**
 * Controller routines for menu_test routes.
 */
class MenuTestController extends ControllerBase {

  /**
   * The theme manager.
   *
   * @var \Drupal\Core\Theme\ThemeManagerInterface
   */
  protected $themeManager;

  /**
   * The theme negotiator.
   *
   * @var \Drupal\Core\Theme\ThemeNegotiatorInterface
   */
  protected $themeNegotiator;

  /**
   * The active route match.
   *
   * @var \Drupal\Core\Routing\RouteMatchInterface
   */
  protected $routeMatch;

  /**
   * Constructs the MenuTestController object.
   *
   * @param \Drupal\Core\Theme\ThemeManagerInterface $theme_manager
   *   The theme manager.
   * @param \Drupal\Core\Theme\ThemeNegotiatorInterface $theme_negotiator
   *   The theme negotiator.
   * @param \Drupal\Core\Routing\RouteMatchInterface $route_match
   *   The current route match.
   */
  public function __construct(ThemeManagerInterface $theme_manager, ThemeNegotiatorInterface $theme_negotiator, RouteMatchInterface $route_match) {
    $this->themeManager = $theme_manager;
    $this->themeNegotiator = $theme_negotiator;
    $this->routeMatch = $route_match;
  }

  /**
   * {@inheritdoc}
   */
  public static function create(ContainerInterface $container) {
    return new static(
      $container->get('theme.manager'),
      $container->get('theme.negotiator'),
      $container->get('current_route_match')
    );
  }

  /**
   * Some known placeholder content which can be used for testing.
   *
   * @return array
   *   A render array whose markup can be used for comparison.
   */
  public function menuTestCallback() {
    return ['#markup' => 'This is the menuTestCallback content.'];
  }

  /**
   * A title callback method for test routes.
   *
   * @param array $_title_arguments
   *   Optional array from the route defaults.
   * @param string $_title
   *   Optional _title string from the route defaults.
   *
   * @return string
   *   The route title.
   */
  public function titleCallback(array $_title_arguments = array(), $_title = '') {
    $_title_arguments += array('case_number' => '2', 'title' => $_title);
    return t($_title_arguments['title']) . ' - Case ' . $_title_arguments['case_number'];
  }

  /**
   * Page callback: Tests the theme negotiation functionality.
   *
   * @param bool $inherited
   *   TRUE when the requested page is intended to inherit the theme of its
   *   parent.
   *
   * @return array
   *   A render array describing the requested custom theme and the actual
   *   theme being used for the current page request.
   */
  public function themePage($inherited) {
    $theme_key = $this->themeManager->getActiveTheme()->getName();
    // Now we check what the theme negotiator service returns.
    $active_theme = $this->themeNegotiator
      ->determineActiveTheme($this->routeMatch);
    $output = "Active theme: $active_theme. Actual theme: $theme_key.";
    if ($inherited) {
      $output .= ' Theme negotiation inheritance is being tested.';
    }
    return ['#markup' => $output];
  }

  /**
   * A title callback for the XSS breadcrumb check.
   *
   * @return string
   *   A title string that deliberately contains markup.
   */
  public function breadcrumbTitleCallback() {
    return '<script>alert(123);</script>';
  }

}
PuddingNL/pudding_d8
core/modules/system/tests/modules/menu_test/src/Controller/MenuTestController.php
PHP
gpl-2.0
3,491
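titleCallback() in the controller above reads $_title_arguments and $_title straight from the route defaults. A minimal sketch of a route definition that feeds it, built with Symfony's Route object for illustration; the route path and the default values are hypothetical:

<?php
use Symfony\Component\Routing\Route;

// Drupal copies these defaults into the title callback's arguments:
// '_title_arguments' overrides 'case_number', '_title' supplies 'title'.
$route = new Route('/menu-test/title-case-4', [
  '_controller' => '\Drupal\menu_test\Controller\MenuTestController::menuTestCallback',
  '_title_callback' => '\Drupal\menu_test\Controller\MenuTestController::titleCallback',
  '_title' => 'Title from route default',
  '_title_arguments' => ['case_number' => '4'],
]);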
<?php namespace Drupal\Core\Entity\Query\Sql; use Drupal\Core\Database\Query\SelectInterface; use Drupal\Core\Entity\Query\ConditionAggregateBase; use Drupal\Core\Entity\Query\ConditionAggregateInterface; use Drupal\Core\Database\Query\Condition as SqlCondition; use Drupal\Core\Entity\Query\QueryBase; /** * Defines the aggregate condition for sql based storage. */ class ConditionAggregate extends ConditionAggregateBase { /** * {@inheritdoc} */ public function compile($conditionContainer) { // If this is not the top level condition group then the sql query is // added to the $conditionContainer object by this function itself. The // SQL query object is only necessary to pass to Query::addField() so it // can join tables as necessary. On the other hand, conditions need to be // added to the $conditionContainer object to keep grouping. $sql_query = ($conditionContainer instanceof SelectInterface) ? $conditionContainer : $conditionContainer->sqlQuery; $tables = new Tables($sql_query); foreach ($this->conditions as $condition) { if ($condition['field'] instanceof ConditionAggregateInterface) { $sql_condition = new SqlCondition($condition['field']->getConjunction()); // Add the SQL query to the object before calling this method again. $sql_condition->sqlQuery = $sql_query; $condition['field']->compile($sql_condition); $sql_query->condition($sql_condition); } else { $type = ((strtoupper($this->conjunction) == 'OR') || ($condition['operator'] == 'IS NULL')) ? 'LEFT' : 'INNER'; $field = $tables->addField($condition['field'], $type, $condition['langcode']); $condition_class = QueryBase::getClass($this->namespaces, 'Condition'); $condition_class::translateCondition($condition, $sql_query, $tables->isFieldCaseSensitive($condition['field'])); $function = $condition['function']; $placeholder = ':db_placeholder_' . $conditionContainer->nextPlaceholder(); $conditionContainer->having("$function($field) {$condition['operator']} $placeholder", array($placeholder => $condition['value'])); } } } /** * {@inheritdoc} */ public function exists($field, $function, $langcode = NULL) { return $this->condition($field, $function, NULL, 'IS NOT NULL', $langcode); } /** * {@inheritdoc} */ public function notExists($field, $function, $langcode = NULL) { return $this->condition($field, $function, NULL, 'IS NULL', $langcode); } }
ashishpagar/samvit
core/lib/Drupal/Core/Entity/Query/Sql/ConditionAggregate.php
PHP
gpl-2.0
2,546
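ConditionAggregate::compile() above turns aggregate conditions into HAVING clauses on the SQL query. A minimal sketch of the entity query API call that produces such a condition; the entity type, field names, and threshold are hypothetical:

<?php
// Count nodes per author and keep only authors with more than ten.
$query = \Drupal::entityQueryAggregate('node');
$result = $query
  ->groupBy('uid')
  ->aggregate('nid', 'COUNT')
  // Compiled by ConditionAggregate into:
  // HAVING COUNT(nid) > :db_placeholder_N
  ->conditionAggregate('nid', 'COUNT', 10, '>')
  ->execute();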
/* * wiiuse * * Written By: * Michael Laforest < para > * Email: < thepara (--AT--) g m a i l [--DOT--] com > * * Copyright 2006-2007 * * This file is part of wiiuse. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * $Header$ * */ /** * @file * @brief Handles device I/O for *nix. */ #ifndef WIN32 #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <bluetooth/bluetooth.h> #include <bluetooth/hci.h> #include <bluetooth/hci_lib.h> #include <bluetooth/l2cap.h> #include "definitions.h" #include "wiiuse_internal.h" #include "io.h" static int wiiuse_connect_single(struct wiimote_t* wm, char* address); /** * @brief Find a wiimote or wiimotes. * * @param wm An array of wiimote_t structures. * @param max_wiimotes The number of wiimote structures in \a wm. * @param timeout The number of seconds before the search times out. * * @return The number of wiimotes found. * * @see wiimote_connect() * * This function will only look for wiimote devices. \n * When a device is found the address in the structures will be set. \n * You can then call wiimote_connect() to connect to the found \n * devices. */ int wiiuse_find(struct wiimote_t** wm, int max_wiimotes, int timeout) { int device_id; int device_sock; int found_devices; int found_wiimotes; /* reset all wiimote bluetooth device addresses */ for (found_wiimotes = 0; found_wiimotes < max_wiimotes; ++found_wiimotes) wm[found_wiimotes]->bdaddr = *BDADDR_ANY; found_wiimotes = 0; /* get the id of the first bluetooth device. */ device_id = hci_get_route(NULL); if (device_id < 0) { perror("hci_get_route"); return 0; } /* create a socket to the device */ device_sock = hci_open_dev(device_id); if (device_sock < 0) { perror("hci_open_dev"); return 0; } inquiry_info scan_info_arr[128]; inquiry_info* scan_info = scan_info_arr; memset(&scan_info_arr, 0, sizeof(scan_info_arr)); /* scan for bluetooth devices for 'timeout' seconds */ found_devices = hci_inquiry(device_id, timeout, 128, NULL, &scan_info, IREQ_CACHE_FLUSH); if (found_devices < 0) { perror("hci_inquiry"); return 0; } WIIUSE_INFO("Found %i bluetooth device(s).", found_devices); int i = 0; /* display discovered devices */ for (; (i < found_devices) && (found_wiimotes < max_wiimotes); ++i) { if ((scan_info[i].dev_class[0] == WM_DEV_CLASS_0) && (scan_info[i].dev_class[1] == WM_DEV_CLASS_1) && (scan_info[i].dev_class[2] == WM_DEV_CLASS_2)) { /* found a device */ ba2str(&scan_info[i].bdaddr, wm[found_wiimotes]->bdaddr_str); WIIUSE_INFO("Found wiimote (%s) [id %i].", wm[found_wiimotes]->bdaddr_str, wm[found_wiimotes]->unid); wm[found_wiimotes]->bdaddr = scan_info[i].bdaddr; WIIMOTE_ENABLE_STATE(wm[found_wiimotes], WIIMOTE_STATE_DEV_FOUND); ++found_wiimotes; } } close(device_sock); return found_wiimotes; } /** * @brief Connect to a wiimote or wiimotes once an address is known. * * @param wm An array of wiimote_t structures. * @param wiimotes The number of wiimote structures in \a wm. 
 *
 * @return The number of wiimotes that successfully connected.
 *
 * @see wiiuse_find()
 * @see wiiuse_connect_single()
 * @see wiiuse_disconnect()
 *
 * Connect to a number of wiimotes when the address is already set
 * in the wiimote_t structures. These addresses are normally set
 * by the wiiuse_find() function, but can also be set manually.
 */
int wiiuse_connect(struct wiimote_t** wm, int wiimotes) {
	int connected = 0;
	int i = 0;

	for (; i < wiimotes; ++i) {
		if (!WIIMOTE_IS_SET(wm[i], WIIMOTE_STATE_DEV_FOUND))
			/* if the device address is not set, skip it */
			continue;

		if (wiiuse_connect_single(wm[i], NULL))
			++connected;
	}

	return connected;
}

/**
 * @brief Connect to a wiimote with a known address.
 *
 * @param wm Pointer to a wiimote_t structure.
 * @param address The address of the device to connect to.
 * If NULL, use the address in the struct set by wiiuse_find().
 *
 * @return 1 on success, 0 on failure
 */
static int wiiuse_connect_single(struct wiimote_t* wm, char* address) {
	struct sockaddr_l2 addr;
	memset(&addr, 0, sizeof (addr));

	if (!wm || WIIMOTE_IS_CONNECTED(wm))
		return 0;

	addr.l2_family = AF_BLUETOOTH;

	if (address)
		/* use provided address */
		str2ba(address, &addr.l2_bdaddr);
	else
		/* use address of device discovered */
		addr.l2_bdaddr = wm->bdaddr;

	/*
	 * OUTPUT CHANNEL
	 */
	wm->out_sock = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
	if (wm->out_sock == -1)
		return 0;

	addr.l2_psm = htobs(WM_OUTPUT_CHANNEL);

	/* connect to wiimote */
	if (connect(wm->out_sock, (struct sockaddr*)&addr, sizeof(addr)) < 0) {
		perror("connect() output sock");
		/* don't leak the output socket on a failed connect */
		close(wm->out_sock);
		wm->out_sock = -1;
		return 0;
	}

	/*
	 * INPUT CHANNEL
	 */
	wm->in_sock = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
	if (wm->in_sock == -1) {
		close(wm->out_sock);
		wm->out_sock = -1;
		return 0;
	}

	addr.l2_psm = htobs(WM_INPUT_CHANNEL);

	/* connect to wiimote */
	if (connect(wm->in_sock, (struct sockaddr*)&addr, sizeof(addr)) < 0) {
		perror("connect() interrupt sock");
		/* close both channels on failure */
		close(wm->in_sock);
		close(wm->out_sock);
		wm->in_sock = -1;
		wm->out_sock = -1;
		return 0;
	}

	WIIUSE_INFO("Connected to wiimote [id %i].", wm->unid);

	/* do the handshake */
	WIIMOTE_ENABLE_STATE(wm, WIIMOTE_STATE_CONNECTED);
	wiiuse_handshake(wm, NULL, 0);

	wiiuse_set_report_type(wm);

	return 1;
}

/**
 * @brief Disconnect a wiimote.
 *
 * @param wm Pointer to a wiimote_t structure.
 *
 * @see wiiuse_connect()
 *
 * Note that this will not free the wiimote structure.
 */
void wiiuse_disconnect(struct wiimote_t* wm) {
	/* there is nothing to tear down unless the wiimote is connected */
	if (!wm || !WIIMOTE_IS_CONNECTED(wm))
		return;

	close(wm->out_sock);
	close(wm->in_sock);

	wm->out_sock = -1;
	wm->in_sock = -1;
	wm->event = WIIUSE_NONE;

	WIIMOTE_DISABLE_STATE(wm, WIIMOTE_STATE_CONNECTED);
	WIIMOTE_DISABLE_STATE(wm, WIIMOTE_STATE_HANDSHAKE);
}

int wiiuse_io_read(struct wiimote_t* wm) {
	/* not used */
	return 0;
}

int wiiuse_io_write(struct wiimote_t* wm, byte* buf, int len) {
	return write(wm->out_sock, buf, len);
}

#endif /* ifndef WIN32 */
pokowaka/xbmc
tools/EventClients/Clients/WiiRemote/wiiuse_v0.12/src/io_nix.c
C
gpl-2.0
6,665
<?php namespace Drupal\early_rendering_controller_test; use Symfony\Component\EventDispatcher\EventSubscriberInterface; use Symfony\Component\HttpFoundation\Response; use Symfony\Component\HttpKernel\Event\GetResponseForControllerResultEvent; use Symfony\Component\HttpKernel\KernelEvents; /** * View subscriber for turning TestDomainObject objects into Response objects. */ class TestDomainObjectViewSubscriber implements EventSubscriberInterface { /** * Sets a response given a TestDomainObject instance. * * @param \Symfony\Component\HttpKernel\Event\GetResponseForControllerResultEvent $event * The event to process. */ public function onViewTestDomainObject(GetResponseForControllerResultEvent $event) { $result = $event->getControllerResult(); if ($result instanceof TestDomainObject) { if ($result instanceof AttachmentsTestDomainObject) { $event->setResponse(new AttachmentsTestResponse('AttachmentsTestDomainObject')); } elseif ($result instanceof CacheableTestDomainObject) { $event->setResponse(new CacheableTestResponse('CacheableTestDomainObject')); } else { $event->setResponse(new Response('TestDomainObject')); } } } /** * {@inheritdoc} */ static function getSubscribedEvents() { $events[KernelEvents::VIEW][] = ['onViewTestDomainObject']; return $events; } }
choicelildice/versus
docroot/core/modules/system/tests/modules/early_rendering_controller_test/src/TestDomainObjectViewSubscriber.php
PHP
gpl-2.0
1,401
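The subscriber above only fires when a controller returns a bare domain object instead of a render array or Response. A minimal sketch of such a controller, assuming TestDomainObject can be constructed without arguments; the class and method names are hypothetical:

<?php
use Drupal\early_rendering_controller_test\TestDomainObject;

class ExampleController {

  // Returns no Response of its own; onViewTestDomainObject() above
  // converts the object into one during KernelEvents::VIEW.
  public function domainObject() {
    return new TestDomainObject();
  }

}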
<?php

namespace Drupal\comment\Plugin\views\field;

use Drupal\user\Entity\User;
use Drupal\views\Plugin\views\field\FieldPluginBase;
use Drupal\views\ResultRow;

/**
 * Field handler to present the name of the last comment poster.
 *
 * @ingroup views_field_handlers
 *
 * @ViewsField("comment_ces_last_comment_name")
 */
class StatisticsLastCommentName extends FieldPluginBase {

  /**
   * {@inheritdoc}
   */
  public function query() {
    // last_comment_name only contains data if the user is anonymous. So we
    // have to join in a specially related user table.
    $this->ensureMyTable();
    // Join 'users_field_data' to this table via last_comment_uid.
    $definition = array(
      'table' => 'users_field_data',
      'field' => 'uid',
      'left_table' => 'comment_entity_statistics',
      'left_field' => 'last_comment_uid',
      'extra' => array(
        array(
          'field' => 'uid',
          'operator' => '!=',
          'value' => '0'
        )
      )
    );
    $join = \Drupal::service('plugin.manager.views.join')->createInstance('standard', $definition);

    // Use the 'ces_users' alias so this can work with the sort handler, below.
    $this->user_table = $this->query->ensureTable('ces_users', $this->relationship, $join);

    $this->field_alias = $this->query->addField(NULL, "COALESCE($this->user_table.name, $this->tableAlias.$this->field)", $this->tableAlias . '_' . $this->field);

    $this->user_field = $this->query->addField($this->user_table, 'name');
    $this->uid = $this->query->addField($this->tableAlias, 'last_comment_uid');
  }

  /**
   * {@inheritdoc}
   */
  protected function defineOptions() {
    $options = parent::defineOptions();

    $options['link_to_user'] = array('default' => TRUE);

    return $options;
  }

  /**
   * {@inheritdoc}
   */
  public function render(ResultRow $values) {
    if (!empty($this->options['link_to_user'])) {
      $account = User::create();
      $account->name = $this->getValue($values);
      $account->uid = $values->{$this->uid};
      $username = array(
        '#theme' => 'username',
        '#account' => $account,
      );
      return drupal_render($username);
    }
    else {
      return $this->sanitizeValue($this->getValue($values));
    }
  }

}
darrylri/vintagebmw
docroot/core/modules/comment/src/Plugin/views/field/StatisticsLastCommentName.php
PHP
gpl-2.0
2,233
<?php namespace Drupal\filter_test\Plugin\Filter; use Drupal\filter\FilterProcessResult; use Drupal\filter\Plugin\FilterBase; /** * Provides a test filter to use placeholders. * * @Filter( * id = "filter_test_placeholders", * title = @Translation("Testing filter"), * description = @Translation("Appends a placeholder to the content; associates #lazy_builder callback."), * type = Drupal\filter\Plugin\FilterInterface::TYPE_TRANSFORM_REVERSIBLE * ) */ class FilterTestPlaceholders extends FilterBase { /** * {@inheritdoc} */ public function process($text, $langcode) { $result = new FilterProcessResult($text); $placeholder = $result->createPlaceholder('\Drupal\filter_test\Plugin\Filter\FilterTestPlaceholders::renderDynamicThing', ['llama']); $result->setProcessedText($text . '<p>' . $placeholder . '</p>'); return $result; } /** * #lazy_builder callback; builds a render array containing the dynamic thing. * * @param string $thing * A "thing" string. * * @return array * A renderable array. */ public static function renderDynamicThing($thing) { return [ '#markup' => format_string('This is a dynamic @thing.', array('@thing' => $thing)), ]; } }
danielhanold/sandbox_d8
core/modules/filter/tests/filter_test/src/Plugin/Filter/FilterTestPlaceholders.php
PHP
gpl-2.0
1,250
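FilterProcessResult::createPlaceholder() used above boils down to registering a #lazy_builder render array that Drupal swaps in for the placeholder markup at render time, so the filtered text stays cacheable while the dynamic part is rebuilt per request. A minimal sketch of the generic render-array form of the same idea; the exact array shape is an assumption, not a quote from the API:

<?php
// Rendered late, after the surrounding filtered text has been cached;
// the callback and its arguments match the createPlaceholder() call above.
$build = [
  '#lazy_builder' => [
    '\Drupal\filter_test\Plugin\Filter\FilterTestPlaceholders::renderDynamicThing',
    ['llama'],
  ],
  '#create_placeholder' => TRUE,
];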
<?php namespace Drupal\Tests\user\Kernel\Field; use Drupal\Core\Entity\Display\EntityViewDisplayInterface; use Drupal\Core\Entity\FieldableEntityInterface; use Drupal\KernelTests\KernelTestBase; use Drupal\user\Entity\User; /** * Tests the user_name formatter. * * @group field */ class UserNameFormatterTest extends KernelTestBase { /** * Modules to enable. * * @var array */ public static $modules = ['field', 'user', 'system']; /** * @var string */ protected $entityType; /** * @var string */ protected $bundle; /** * @var string */ protected $fieldName; /** * {@inheritdoc} */ protected function setUp() { parent::setUp(); $this->installConfig(['field']); $this->installEntitySchema('user'); $this->installSchema('system', ['sequences']); $this->entityType = 'user'; $this->bundle = $this->entityType; $this->fieldName = 'name'; } /** * Renders fields of a given entity with a given display. * * @param \Drupal\Core\Entity\FieldableEntityInterface $entity * The entity object with attached fields to render. * @param \Drupal\Core\Entity\Display\EntityViewDisplayInterface $display * The display to render the fields in. * * @return string * The rendered entity fields. */ protected function renderEntityFields(FieldableEntityInterface $entity, EntityViewDisplayInterface $display) { $content = $display->build($entity); $content = $this->render($content); return $content; } /** * Tests the formatter output. */ public function testFormatter() { $user = User::create([ 'name' => 'test name', ]); $user->save(); $result = $user->{$this->fieldName}->view(['type' => 'user_name']); $this->assertEqual('username', $result[0]['#theme']); $this->assertEqual(spl_object_hash($user), spl_object_hash($result[0]['#account'])); $result = $user->{$this->fieldName}->view(['type' => 'user_name', 'settings' => ['link_to_entity' => FALSE]]); $this->assertEqual($user->getDisplayName(), $result[0]['#markup']); $user = User::getAnonymousUser(); $result = $user->{$this->fieldName}->view(['type' => 'user_name']); $this->assertEqual('username', $result[0]['#theme']); $this->assertEqual(spl_object_hash($user), spl_object_hash($result[0]['#account'])); $result = $user->{$this->fieldName}->view(['type' => 'user_name', 'settings' => ['link_to_entity' => FALSE]]); $this->assertEqual($user->getDisplayName(), $result[0]['#markup']); $this->assertEqual($this->config('user.settings')->get('anonymous'), $result[0]['#markup']); } }
dockerizedrupal/dockerizedrupal.com
core/modules/user/tests/src/Kernel/Field/UserNameFormatterTest.php
PHP
gpl-2.0
2,658
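The assertions above exercise the formatter through the field item list's view() method. A minimal sketch of the same call outside a test, with the link_to_entity setting spelled out explicitly; $user is assumed to be a loaded user entity:

<?php
// Builds a render array themed as 'username'; with link_to_entity
// enabled, the rendered output links to the account page.
$build = $user->get('name')->view([
  'type' => 'user_name',
  'settings' => ['link_to_entity' => TRUE],
]);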
/* * Copyright (c) 2005 Voltaire Inc. All rights reserved. * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved. * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved. * Copyright (c) 2005 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/mutex.h> #include <linux/inetdevice.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/module.h> #include <net/arp.h> #include <net/neighbour.h> #include <net/route.h> #include <net/netevent.h> #include <net/addrconf.h> #include <net/ip6_route.h> #include <rdma/ib_addr.h> #include <rdma/ib.h> MODULE_AUTHOR("Sean Hefty"); MODULE_DESCRIPTION("IB Address Translation"); MODULE_LICENSE("Dual BSD/GPL"); struct addr_req { struct list_head list; struct sockaddr_storage src_addr; struct sockaddr_storage dst_addr; struct rdma_dev_addr *addr; struct rdma_addr_client *client; void *context; void (*callback)(int status, struct sockaddr *src_addr, struct rdma_dev_addr *addr, void *context); unsigned long timeout; int status; }; static void process_req(struct work_struct *work); static DEFINE_MUTEX(lock); static LIST_HEAD(req_list); static DECLARE_DELAYED_WORK(work, process_req); static struct workqueue_struct *addr_wq; int rdma_addr_size(struct sockaddr *addr) { switch (addr->sa_family) { case AF_INET: return sizeof(struct sockaddr_in); case AF_INET6: return sizeof(struct sockaddr_in6); case AF_IB: return sizeof(struct sockaddr_ib); default: return 0; } } EXPORT_SYMBOL(rdma_addr_size); static struct rdma_addr_client self; void rdma_addr_register_client(struct rdma_addr_client *client) { atomic_set(&client->refcount, 1); init_completion(&client->comp); } EXPORT_SYMBOL(rdma_addr_register_client); static inline void put_client(struct rdma_addr_client *client) { if (atomic_dec_and_test(&client->refcount)) complete(&client->comp); } void rdma_addr_unregister_client(struct rdma_addr_client *client) { put_client(client); wait_for_completion(&client->comp); } EXPORT_SYMBOL(rdma_addr_unregister_client); int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev, const unsigned char *dst_dev_addr) { dev_addr->dev_type = dev->type; memcpy(dev_addr->src_dev_addr, dev->dev_addr, MAX_ADDR_LEN); memcpy(dev_addr->broadcast, dev->broadcast, 
MAX_ADDR_LEN); if (dst_dev_addr) memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN); dev_addr->bound_dev_if = dev->ifindex; return 0; } EXPORT_SYMBOL(rdma_copy_addr); int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr, u16 *vlan_id) { struct net_device *dev; int ret = -EADDRNOTAVAIL; if (dev_addr->bound_dev_if) { dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if); if (!dev) return -ENODEV; ret = rdma_copy_addr(dev_addr, dev, NULL); dev_put(dev); return ret; } switch (addr->sa_family) { case AF_INET: dev = ip_dev_find(&init_net, ((struct sockaddr_in *) addr)->sin_addr.s_addr); if (!dev) return ret; ret = rdma_copy_addr(dev_addr, dev, NULL); if (vlan_id) *vlan_id = rdma_vlan_dev_vlan_id(dev); dev_put(dev); break; #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: rcu_read_lock(); for_each_netdev_rcu(&init_net, dev) { if (ipv6_chk_addr(&init_net, &((struct sockaddr_in6 *) addr)->sin6_addr, dev, 1)) { ret = rdma_copy_addr(dev_addr, dev, NULL); if (vlan_id) *vlan_id = rdma_vlan_dev_vlan_id(dev); break; } } rcu_read_unlock(); break; #endif } return ret; } EXPORT_SYMBOL(rdma_translate_ip); static void set_timeout(unsigned long time) { unsigned long delay; delay = time - jiffies; if ((long)delay <= 0) delay = 1; mod_delayed_work(addr_wq, &work, delay); } static void queue_req(struct addr_req *req) { struct addr_req *temp_req; mutex_lock(&lock); list_for_each_entry_reverse(temp_req, &req_list, list) { if (time_after_eq(req->timeout, temp_req->timeout)) break; } list_add(&req->list, &temp_req->list); if (req_list.next == &req->list) set_timeout(req->timeout); mutex_unlock(&lock); } static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr, void *daddr) { struct neighbour *n; int ret; n = dst_neigh_lookup(dst, daddr); rcu_read_lock(); if (!n || !(n->nud_state & NUD_VALID)) { if (n) neigh_event_send(n, NULL); ret = -ENODATA; } else { ret = rdma_copy_addr(dev_addr, dst->dev, n->ha); } rcu_read_unlock(); if (n) neigh_release(n); return ret; } static int addr4_resolve(struct sockaddr_in *src_in, struct sockaddr_in *dst_in, struct rdma_dev_addr *addr) { __be32 src_ip = src_in->sin_addr.s_addr; __be32 dst_ip = dst_in->sin_addr.s_addr; struct rtable *rt; struct flowi4 fl4; int ret; memset(&fl4, 0, sizeof(fl4)); fl4.daddr = dst_ip; fl4.saddr = src_ip; fl4.flowi4_oif = addr->bound_dev_if; rt = ip_route_output_key(&init_net, &fl4); if (IS_ERR(rt)) { ret = PTR_ERR(rt); goto out; } src_in->sin_family = AF_INET; src_in->sin_addr.s_addr = fl4.saddr; if (rt->dst.dev->flags & IFF_LOOPBACK) { ret = rdma_translate_ip((struct sockaddr *)dst_in, addr, NULL); if (!ret) memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN); goto put; } /* If the device does ARP internally, return 'done' */ if (rt->dst.dev->flags & IFF_NOARP) { ret = rdma_copy_addr(addr, rt->dst.dev, NULL); goto put; } ret = dst_fetch_ha(&rt->dst, addr, &fl4.daddr); put: ip_rt_put(rt); out: return ret; } #if IS_ENABLED(CONFIG_IPV6) static int addr6_resolve(struct sockaddr_in6 *src_in, struct sockaddr_in6 *dst_in, struct rdma_dev_addr *addr) { struct flowi6 fl6; struct dst_entry *dst; int ret; memset(&fl6, 0, sizeof fl6); fl6.daddr = dst_in->sin6_addr; fl6.saddr = src_in->sin6_addr; fl6.flowi6_oif = addr->bound_dev_if; dst = ip6_route_output(&init_net, NULL, &fl6); if ((ret = dst->error)) goto put; if (ipv6_addr_any(&fl6.saddr)) { ret = ipv6_dev_get_saddr(&init_net, ip6_dst_idev(dst)->dev, &fl6.daddr, 0, &fl6.saddr); if (ret) goto put; src_in->sin6_family = AF_INET6; src_in->sin6_addr = fl6.saddr; } 
if (dst->dev->flags & IFF_LOOPBACK) { ret = rdma_translate_ip((struct sockaddr *)dst_in, addr, NULL); if (!ret) memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN); goto put; } /* If the device does ARP internally, return 'done' */ if (dst->dev->flags & IFF_NOARP) { ret = rdma_copy_addr(addr, dst->dev, NULL); goto put; } ret = dst_fetch_ha(dst, addr, &fl6.daddr); put: dst_release(dst); return ret; } #else static int addr6_resolve(struct sockaddr_in6 *src_in, struct sockaddr_in6 *dst_in, struct rdma_dev_addr *addr) { return -EADDRNOTAVAIL; } #endif static int addr_resolve(struct sockaddr *src_in, struct sockaddr *dst_in, struct rdma_dev_addr *addr) { if (src_in->sa_family == AF_INET) { return addr4_resolve((struct sockaddr_in *) src_in, (struct sockaddr_in *) dst_in, addr); } else return addr6_resolve((struct sockaddr_in6 *) src_in, (struct sockaddr_in6 *) dst_in, addr); } static void process_req(struct work_struct *work) { struct addr_req *req, *temp_req; struct sockaddr *src_in, *dst_in; struct list_head done_list; INIT_LIST_HEAD(&done_list); mutex_lock(&lock); list_for_each_entry_safe(req, temp_req, &req_list, list) { if (req->status == -ENODATA) { src_in = (struct sockaddr *) &req->src_addr; dst_in = (struct sockaddr *) &req->dst_addr; req->status = addr_resolve(src_in, dst_in, req->addr); if (req->status && time_after_eq(jiffies, req->timeout)) req->status = -ETIMEDOUT; else if (req->status == -ENODATA) continue; } list_move_tail(&req->list, &done_list); } if (!list_empty(&req_list)) { req = list_entry(req_list.next, struct addr_req, list); set_timeout(req->timeout); } mutex_unlock(&lock); list_for_each_entry_safe(req, temp_req, &done_list, list) { list_del(&req->list); req->callback(req->status, (struct sockaddr *) &req->src_addr, req->addr, req->context); put_client(req->client); kfree(req); } } int rdma_resolve_ip(struct rdma_addr_client *client, struct sockaddr *src_addr, struct sockaddr *dst_addr, struct rdma_dev_addr *addr, int timeout_ms, void (*callback)(int status, struct sockaddr *src_addr, struct rdma_dev_addr *addr, void *context), void *context) { struct sockaddr *src_in, *dst_in; struct addr_req *req; int ret = 0; req = kzalloc(sizeof *req, GFP_KERNEL); if (!req) return -ENOMEM; src_in = (struct sockaddr *) &req->src_addr; dst_in = (struct sockaddr *) &req->dst_addr; if (src_addr) { if (src_addr->sa_family != dst_addr->sa_family) { ret = -EINVAL; goto err; } memcpy(src_in, src_addr, rdma_addr_size(src_addr)); } else { src_in->sa_family = dst_addr->sa_family; } memcpy(dst_in, dst_addr, rdma_addr_size(dst_addr)); req->addr = addr; req->callback = callback; req->context = context; req->client = client; atomic_inc(&client->refcount); req->status = addr_resolve(src_in, dst_in, addr); switch (req->status) { case 0: req->timeout = jiffies; queue_req(req); break; case -ENODATA: req->timeout = msecs_to_jiffies(timeout_ms) + jiffies; queue_req(req); break; default: ret = req->status; atomic_dec(&client->refcount); goto err; } return ret; err: kfree(req); return ret; } EXPORT_SYMBOL(rdma_resolve_ip); void rdma_addr_cancel(struct rdma_dev_addr *addr) { struct addr_req *req, *temp_req; mutex_lock(&lock); list_for_each_entry_safe(req, temp_req, &req_list, list) { if (req->addr == addr) { req->status = -ECANCELED; req->timeout = jiffies; list_move(&req->list, &req_list); set_timeout(req->timeout); break; } } mutex_unlock(&lock); } EXPORT_SYMBOL(rdma_addr_cancel); struct resolve_cb_context { struct rdma_dev_addr *addr; struct completion comp; }; static void resolve_cb(int 
status, struct sockaddr *src_addr, struct rdma_dev_addr *addr, void *context) { memcpy(((struct resolve_cb_context *)context)->addr, addr, sizeof(struct rdma_dev_addr)); complete(&((struct resolve_cb_context *)context)->comp); } int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *dmac, u16 *vlan_id) { int ret = 0; struct rdma_dev_addr dev_addr; struct resolve_cb_context ctx; struct net_device *dev; union { struct sockaddr _sockaddr; struct sockaddr_in _sockaddr_in; struct sockaddr_in6 _sockaddr_in6; } sgid_addr, dgid_addr; ret = rdma_gid2ip(&sgid_addr._sockaddr, sgid); if (ret) return ret; ret = rdma_gid2ip(&dgid_addr._sockaddr, dgid); if (ret) return ret; memset(&dev_addr, 0, sizeof(dev_addr)); ctx.addr = &dev_addr; init_completion(&ctx.comp); ret = rdma_resolve_ip(&self, &sgid_addr._sockaddr, &dgid_addr._sockaddr, &dev_addr, 1000, resolve_cb, &ctx); if (ret) return ret; wait_for_completion(&ctx.comp); memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN); dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if); if (!dev) return -ENODEV; if (vlan_id) *vlan_id = rdma_vlan_dev_vlan_id(dev); dev_put(dev); return ret; } EXPORT_SYMBOL(rdma_addr_find_dmac_by_grh); int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id) { int ret = 0; struct rdma_dev_addr dev_addr; union { struct sockaddr _sockaddr; struct sockaddr_in _sockaddr_in; struct sockaddr_in6 _sockaddr_in6; } gid_addr; ret = rdma_gid2ip(&gid_addr._sockaddr, sgid); if (ret) return ret; memset(&dev_addr, 0, sizeof(dev_addr)); ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id); if (ret) return ret; memcpy(smac, dev_addr.src_dev_addr, ETH_ALEN); return ret; } EXPORT_SYMBOL(rdma_addr_find_smac_by_sgid); static int netevent_callback(struct notifier_block *self, unsigned long event, void *ctx) { if (event == NETEVENT_NEIGH_UPDATE) { struct neighbour *neigh = ctx; if (neigh->nud_state & NUD_VALID) { set_timeout(jiffies); } } return 0; } static struct notifier_block nb = { .notifier_call = netevent_callback }; static int __init addr_init(void) { addr_wq = create_singlethread_workqueue("ib_addr"); if (!addr_wq) return -ENOMEM; register_netevent_notifier(&nb); rdma_addr_register_client(&self); return 0; } static void __exit addr_cleanup(void) { rdma_addr_unregister_client(&self); unregister_netevent_notifier(&nb); destroy_workqueue(addr_wq); } module_init(addr_init); module_exit(addr_cleanup);
forwis/KVMGT-kernel
drivers/infiniband/core/addr.c
C
gpl-2.0
13,857
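A note on the record above: rdma_resolve_ip() is asynchronous, and the file's own rdma_addr_find_dmac_by_grh() shows the standard way to wait for the result with a completion. Below is a minimal caller sketch of the same pattern; all demo_* names are hypothetical and assume the client was registered with rdma_addr_register_client() at init time, as the file itself does with its static client.

/* Minimal sketch, assuming demo_client has been registered with
 * rdma_addr_register_client(). All demo_* names are illustrative;
 * rdma_resolve_ip() itself is the API shown in the record above. */
#include <linux/completion.h>
#include <linux/in.h>
#include <rdma/ib_addr.h>

static struct rdma_addr_client demo_client;

struct demo_ctx {
	struct completion comp;
	int status;
};

static void demo_resolve_cb(int status, struct sockaddr *src_addr,
			    struct rdma_dev_addr *addr, void *context)
{
	struct demo_ctx *ctx = context;

	ctx->status = status;	/* propagate resolver status to the waiter */
	complete(&ctx->comp);
}

/* Resolve an IPv4 destination into dev_addr, waiting up to 1 second. */
static int demo_resolve(struct sockaddr_in *dst, struct rdma_dev_addr *dev_addr)
{
	struct demo_ctx ctx;
	int ret;

	init_completion(&ctx.comp);
	ret = rdma_resolve_ip(&demo_client, NULL, (struct sockaddr *)dst,
			      dev_addr, 1000, demo_resolve_cb, &ctx);
	if (ret)
		return ret;	/* synchronous failure: callback never runs */
	wait_for_completion(&ctx.comp);
	return ctx.status;	/* 0, -ETIMEDOUT, -ECANCELED, ... */
}

Note that even when the address resolves immediately, the request is queued and the callback still fires from the workqueue, and rdma_addr_cancel() also ends with the callback firing (-ECANCELED), so the completion is signalled exactly once on every path where rdma_resolve_ip() returned 0.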
html{background:#f1f1f1}a{color:#0074a2}#media-upload a.del-link:hover,.subsubsub a.current:hover,.subsubsub a:hover,a:active,a:focus,a:hover,div.dashboard-widget-submit input:hover{color:#0099d5}input[type=checkbox]:checked:before{color:#59524c}input[type=radio]:checked:before{background:#59524c}.wp-core-ui input[type=reset]:active,.wp-core-ui input[type=reset]:hover{color:#0099d5}.wp-core-ui .button-primary{background:#c7a589;border-color:#b78a66;color:#fff;-webkit-box-shadow:inset 0 1px 0 #e0cdbd,0 1px 0 rgba(0,0,0,.15);box-shadow:inset 0 1px 0 #e0cdbd,0 1px 0 rgba(0,0,0,.15)}.wp-core-ui .button-primary:focus,.wp-core-ui .button-primary:hover{background:#bf9878;border-color:#ae7d55;color:#fff;-webkit-box-shadow:inset 0 1px 0 #d7bfac;box-shadow:inset 0 1px 0 #d7bfac}.wp-core-ui .button-primary:focus{-webkit-box-shadow:inset 0 1px 0 #d7bfac,0 0 0 1px #5b9dd9,0 0 2px 1px rgba(30,140,190,.8);box-shadow:inset 0 1px 0 #d7bfac,0 0 0 1px #5b9dd9,0 0 2px 1px rgba(30,140,190,.8)}.wp-core-ui .button-primary:active{background:#b78a66;border-color:#ae7d55;color:#fff;-webkit-box-shadow:inset 0 2px 5px -3px rgba(0,0,0,.5),0 0 0 1px #5b9dd9,0 0 2px 1px rgba(30,140,190,.8);box-shadow:inset 0 2px 5px -3px rgba(0,0,0,.5),0 0 0 1px #5b9dd9,0 0 2px 1px rgba(30,140,190,.8)}.wp-core-ui .button-primary.button-primary-disabled,.wp-core-ui .button-primary.disabled,.wp-core-ui .button-primary:disabled,.wp-core-ui .button-primary[disabled]{color:#d1cbc7!important;background:#ba906d!important;border-color:#ae7d55!important;text-shadow:none!important}.wp-core-ui .wp-ui-primary{color:#fff;background-color:#59524c}.wp-core-ui .wp-ui-text-primary{color:#59524c}.wp-core-ui .wp-ui-highlight{color:#fff;background-color:#c7a589}.wp-core-ui .wp-ui-text-highlight{color:#c7a589}.wp-core-ui .wp-ui-notification{color:#fff;background-color:#9ea476}.wp-core-ui .wp-ui-text-notification{color:#9ea476}.wp-core-ui .wp-ui-text-icon{color:#f3f2f1}#add-new-comment a:hover,.tablenav .tablenav-pages a:focus,.tablenav .tablenav-pages a:hover,.wrap .add-new-h2:hover{color:#fff;background-color:#59524c}.view-switch a.current:before{color:#59524c}.view-switch a:hover:before{color:#9ea476}.post-com-count:hover:after{border-top-color:#59524c}.post-com-count:hover span{color:#fff;background-color:#59524c}strong .post-com-count:after{border-top-color:#9ea476}strong .post-com-count span{background-color:#9ea476}#adminmenu,#adminmenuback,#adminmenuwrap{background:#59524c}#adminmenu a{color:#fff}#adminmenu div.wp-menu-image:before{color:#f3f2f1}#adminmenu a:hover,#adminmenu li.menu-top:hover,#adminmenu li.opensub>a.menu-top,#adminmenu li>a.menu-top:focus{color:#fff;background-color:#c7a589}#adminmenu li.menu-top:hover div.wp-menu-image:before,#adminmenu li.opensub>a.menu-top div.wp-menu-image:before{color:#fff}.about-wrap h2 .nav-tab-active,.nav-tab-active,.nav-tab-active:hover{background-color:#f1f1f1;border-bottom-color:#f1f1f1}#adminmenu .wp-has-current-submenu .wp-submenu,#adminmenu .wp-has-current-submenu.opensub .wp-submenu,#adminmenu .wp-submenu,#adminmenu a.wp-has-current-submenu:focus+.wp-submenu,.folded #adminmenu .wp-has-current-submenu .wp-submenu{background:#46403c}#adminmenu li.wp-has-submenu.wp-not-current-submenu.opensub:hover:after{border-right-color:#46403c}#adminmenu .wp-has-current-submenu .wp-submenu a,#adminmenu .wp-has-current-submenu.opensub .wp-submenu a,#adminmenu .wp-submenu .wp-submenu-head,#adminmenu .wp-submenu a,#adminmenu a.wp-has-current-submenu:focus+.wp-submenu a,.folded #adminmenu .wp-has-current-submenu 
.wp-submenu a{color:#cdcbc9}#adminmenu .wp-has-current-submenu .wp-submenu a:focus,#adminmenu .wp-has-current-submenu .wp-submenu a:hover,#adminmenu .wp-has-current-submenu.opensub .wp-submenu a:focus,#adminmenu .wp-has-current-submenu.opensub .wp-submenu a:hover,#adminmenu .wp-submenu a:focus,#adminmenu .wp-submenu a:hover,#adminmenu a.wp-has-current-submenu:focus+.wp-submenu a:focus,#adminmenu a.wp-has-current-submenu:focus+.wp-submenu a:hover,.folded #adminmenu .wp-has-current-submenu .wp-submenu a:focus,.folded #adminmenu .wp-has-current-submenu .wp-submenu a:hover{color:#c7a589}#adminmenu .wp-has-current-submenu.opensub .wp-submenu li.current a,#adminmenu .wp-submenu li.current a,#adminmenu a.wp-has-current-submenu:focus+.wp-submenu li.current a{color:#fff}#adminmenu .wp-has-current-submenu.opensub .wp-submenu li.current a:focus,#adminmenu .wp-has-current-submenu.opensub .wp-submenu li.current a:hover,#adminmenu .wp-submenu li.current a:focus,#adminmenu .wp-submenu li.current a:hover,#adminmenu a.wp-has-current-submenu:focus+.wp-submenu li.current a:focus,#adminmenu a.wp-has-current-submenu:focus+.wp-submenu li.current a:hover{color:#c7a589}ul#adminmenu a.wp-has-current-submenu:after,ul#adminmenu>li.current>a.current:after{border-right-color:#f1f1f1}#adminmenu li.current a.menu-top,#adminmenu li.wp-has-current-submenu .wp-submenu .wp-submenu-head,#adminmenu li.wp-has-current-submenu a.wp-has-current-submenu,.folded #adminmenu li.current.menu-top{color:#fff;background:#c7a589}#adminmenu li.wp-has-current-submenu div.wp-menu-image:before{color:#fff}#adminmenu .awaiting-mod,#adminmenu .update-plugins{color:#fff;background:#9ea476}#adminmenu li a.wp-has-current-submenu .update-plugins,#adminmenu li.current a .awaiting-mod,#adminmenu li.menu-top:hover>a .update-plugins,#adminmenu li:hover a .awaiting-mod{color:#fff;background:#46403c}#collapse-menu{color:#f3f2f1}#collapse-menu:hover{color:#fff}#collapse-button div:after{color:#f3f2f1}#collapse-menu:hover #collapse-button div:after{color:#fff}#wpadminbar{color:#fff;background:#59524c}#wpadminbar .ab-item,#wpadminbar a.ab-item,#wpadminbar>#wp-toolbar span.ab-label,#wpadminbar>#wp-toolbar span.noticon{color:#fff}#wpadminbar .ab-icon,#wpadminbar .ab-icon:before,#wpadminbar .ab-item:after,#wpadminbar .ab-item:before{color:#f3f2f1}#wpadminbar .ab-top-menu>li.hover>.ab-item,#wpadminbar .ab-top-menu>li.menupop.hover>.ab-item,#wpadminbar .ab-top-menu>li:hover>.ab-item,#wpadminbar .ab-top-menu>li>.ab-item:focus,#wpadminbar-nojs .ab-top-menu>li.menupop:hover>.ab-item,#wpadminbar.nojq .quicklinks .ab-top-menu>li>.ab-item:focus{color:#c7a589;background:#46403c}#wpadminbar>#wp-toolbar a:focus span.ab-label,#wpadminbar>#wp-toolbar li.hover span.ab-label,#wpadminbar>#wp-toolbar li:hover span.ab-label{color:#c7a589}#wpadminbar .menupop .ab-sub-wrapper{background:#46403c}#wpadminbar .quicklinks .menupop ul.ab-sub-secondary,#wpadminbar .quicklinks .menupop ul.ab-sub-secondary .ab-submenu{background:#656463}#wpadminbar .ab-submenu .ab-item,#wpadminbar .quicklinks .menupop ul li a,#wpadminbar .quicklinks .menupop.hover ul li a,#wpadminbar-nojs .quicklinks .menupop:hover ul li a{color:#cdcbc9}#wpadminbar .menupop .menupop>.ab-item:before,#wpadminbar .quicklinks li .blavatar{color:#f3f2f1}#wpadminbar .menupop .menupop>.ab-item:hover:before,#wpadminbar .quicklinks .menupop ul li a:focus,#wpadminbar .quicklinks .menupop ul li a:focus strong,#wpadminbar .quicklinks .menupop ul li a:hover,#wpadminbar .quicklinks .menupop ul li a:hover strong,#wpadminbar .quicklinks 
.menupop.hover ul li a:focus,#wpadminbar .quicklinks .menupop.hover ul li a:hover,#wpadminbar .quicklinks li a:hover .blavatar,#wpadminbar li .ab-item:focus:before,#wpadminbar li a:focus .ab-icon:before,#wpadminbar li.hover .ab-icon:before,#wpadminbar li.hover .ab-item:after,#wpadminbar li.hover .ab-item:before,#wpadminbar li:hover #adminbarsearch:before,#wpadminbar li:hover .ab-icon:before,#wpadminbar li:hover .ab-item:after,#wpadminbar li:hover .ab-item:before,#wpadminbar.nojs .quicklinks .menupop:hover ul li a:focus,#wpadminbar.nojs .quicklinks .menupop:hover ul li a:hover{color:#c7a589}#wpadminbar #adminbarsearch:before{color:#f3f2f1}#wpadminbar>#wp-toolbar>#wp-admin-bar-top-secondary>#wp-admin-bar-search #adminbarsearch input.adminbar-input:focus{color:#fff;background:#6c645c}#wpadminbar #adminbarsearch .adminbar-input::-webkit-input-placeholder{color:#fff;opacity:.7}#wpadminbar #adminbarsearch .adminbar-input:-moz-placeholder{color:#fff;opacity:.7}#wpadminbar #adminbarsearch .adminbar-input::-moz-placeholder{color:#fff;opacity:.7}#wpadminbar #adminbarsearch .adminbar-input:-ms-input-placeholder{color:#fff;opacity:.7}#wpadminbar .quicklinks li#wp-admin-bar-my-account.with-avatar>a img{border-color:#6c645c;background-color:#6c645c}#wpadminbar #wp-admin-bar-user-info .display-name{color:#fff}#wpadminbar #wp-admin-bar-user-info a:hover .display-name{color:#c7a589}#wpadminbar #wp-admin-bar-user-info .username{color:#cdcbc9}.wp-pointer .wp-pointer-content h3{background-color:#c7a589;border-color:#bf9878}.wp-pointer .wp-pointer-content h3:before{color:#c7a589}.wp-pointer.wp-pointer-top .wp-pointer-arrow,.wp-pointer.wp-pointer-undefined .wp-pointer-arrow{border-bottom-color:#c7a589}.media-item .bar,.media-progress-bar div{background-color:#c7a589}.details.attachment{-webkit-box-shadow:inset 0 0 0 3px #fff,inset 0 0 0 7px #c7a589;box-shadow:inset 0 0 0 3px #fff,inset 0 0 0 7px #c7a589}.attachment.details .check{background-color:#c7a589;-webkit-box-shadow:0 0 0 1px #fff,0 0 0 2px #c7a589;box-shadow:0 0 0 1px #fff,0 0 0 2px #c7a589}.media-selection .attachment.selection.details .thumbnail{-webkit-box-shadow:0 0 0 1px #fff,0 0 0 3px #c7a589;box-shadow:0 0 0 1px #fff,0 0 0 3px #c7a589}.theme-browser .theme.active .theme-name,.theme-browser .theme.add-new-theme:hover:after{background:#c7a589}.theme-browser .theme.add-new-theme:hover span:after{color:#c7a589}.theme-filter.current,.theme-section.current{border-bottom-color:#59524c}body.more-filters-opened .more-filters{color:#fff;background-color:#59524c}body.more-filters-opened .more-filters:before{color:#fff}body.more-filters-opened .more-filters:focus,body.more-filters-opened .more-filters:hover{background-color:#c7a589;color:#fff}body.more-filters-opened .more-filters:focus:before,body.more-filters-opened .more-filters:hover:before{color:#fff}.widgets-chooser li.widgets-chooser-selected{background-color:#c7a589;color:#fff}.widgets-chooser li.widgets-chooser-selected:before,.widgets-chooser li.widgets-chooser-selected:focus:before{color:#fff}#customize-theme-controls .widget-area-select .selected{background-color:#c7a589;color:#fff}.wp-slider .ui-slider-handle,.wp-slider .ui-slider-handle.focus,.wp-slider .ui-slider-handle.ui-state-hover{background:#c7a589;border-color:#b78a66;-webkit-box-shadow:inset 0 1px 0 #e0cdbd,0 1px 0 rgba(0,0,0,.15);box-shadow:inset 0 1px 0 #e0cdbd,0 1px 0 rgba(0,0,0,.15)}#sidemenu a.current{background:#f1f1f1;border-bottom-color:#f1f1f1}#plugin-information .action-button{background:#c7a589}div#wp-responsive-toggle 
a:before{color:#f3f2f1}.wp-responsive-open div#wp-responsive-toggle a{border-color:transparent;background:#c7a589}.star-rating .star{color:#c7a589}.wp-responsive-open #wpadminbar #wp-admin-bar-menu-toggle a{background:#46403c}
lhas/portfolio
wp-admin/css/colors/coffee/colors.min.css
CSS
gpl-2.0
10,873
/**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. * Copyright 2005-2010 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation, incorporated herein by reference. */ #include <linux/pci.h> #include <linux/tcp.h> #include <linux/ip.h> #include <linux/in.h> #include <linux/ipv6.h> #include <linux/slab.h> #include <net/ipv6.h> #include <linux/if_ether.h> #include <linux/highmem.h> #include "net_driver.h" #include "efx.h" #include "nic.h" #include "workarounds.h" /* * TX descriptor ring full threshold * * The tx_queue descriptor ring fill-level must fall below this value * before we restart the netif queue */ #define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u) static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, struct efx_tx_buffer *buffer) { if (buffer->unmap_len) { struct pci_dev *pci_dev = tx_queue->efx->pci_dev; dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len - buffer->unmap_len); if (buffer->unmap_single) pci_unmap_single(pci_dev, unmap_addr, buffer->unmap_len, PCI_DMA_TODEVICE); else pci_unmap_page(pci_dev, unmap_addr, buffer->unmap_len, PCI_DMA_TODEVICE); buffer->unmap_len = 0; buffer->unmap_single = false; } if (buffer->skb) { dev_kfree_skb_any((struct sk_buff *) buffer->skb); buffer->skb = NULL; netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, "TX queue %d transmission id %x complete\n", tx_queue->queue, tx_queue->read_count); } } /** * struct efx_tso_header - a DMA mapped buffer for packet headers * @next: Linked list of free ones. * The list is protected by the TX queue lock. * @unmap_len: Length to unmap for an oversize buffer, or 0. * @dma_addr: The DMA address of the header below. * * This controls the memory used for a TSO header. Use TSOH_BUFFER() * to find the packet header data. Use TSOH_SIZE() to calculate the * total size required for a given packet header length. TSO headers * in the free list are exactly %TSOH_STD_SIZE bytes in size. */ struct efx_tso_header { union { struct efx_tso_header *next; size_t unmap_len; }; dma_addr_t dma_addr; }; static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb); static void efx_fini_tso(struct efx_tx_queue *tx_queue); static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh); static void efx_tsoh_free(struct efx_tx_queue *tx_queue, struct efx_tx_buffer *buffer) { if (buffer->tsoh) { if (likely(!buffer->tsoh->unmap_len)) { buffer->tsoh->next = tx_queue->tso_headers_free; tx_queue->tso_headers_free = buffer->tsoh; } else { efx_tsoh_heap_free(tx_queue, buffer->tsoh); } buffer->tsoh = NULL; } } static inline unsigned efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr) { /* Depending on the NIC revision, we can use descriptor * lengths up to 8K or 8K-1. However, since PCI Express * devices must split read requests at 4K boundaries, there is * little benefit from using descriptors that cross those * boundaries and we keep things simple by not doing so. */ unsigned len = (~dma_addr & 0xfff) + 1; /* Work around hardware bug for unaligned buffers. 
*/ if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf)) len = min_t(unsigned, len, 512 - (dma_addr & 0xf)); return len; } /* * Add a socket buffer to a TX queue * * This maps all fragments of a socket buffer for DMA and adds them to * the TX queue. The queue's insert pointer will be incremented by * the number of fragments in the socket buffer. * * If any DMA mapping fails, any mapped fragments will be unmapped, * the queue's insert pointer will be restored to its original value. * * This function is split out from efx_hard_start_xmit to allow the * loopback test to direct packets via specific TX queues. * * Returns NETDEV_TX_OK or NETDEV_TX_BUSY * You must hold netif_tx_lock() to call this function. */ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) { struct efx_nic *efx = tx_queue->efx; struct pci_dev *pci_dev = efx->pci_dev; struct efx_tx_buffer *buffer; skb_frag_t *fragment; struct page *page; int page_offset; unsigned int len, unmap_len = 0, fill_level, insert_ptr; dma_addr_t dma_addr, unmap_addr = 0; unsigned int dma_len; bool unmap_single; int q_space, i = 0; netdev_tx_t rc = NETDEV_TX_OK; EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); if (skb_shinfo(skb)->gso_size) return efx_enqueue_skb_tso(tx_queue, skb); /* Get size of the initial fragment */ len = skb_headlen(skb); /* Pad if necessary */ if (EFX_WORKAROUND_15592(efx) && skb->len <= 32) { EFX_BUG_ON_PARANOID(skb->data_len); len = 32 + 1; if (skb_pad(skb, len - skb->len)) return NETDEV_TX_OK; } fill_level = tx_queue->insert_count - tx_queue->old_read_count; q_space = efx->txq_entries - 1 - fill_level; /* Map for DMA. Use pci_map_single rather than pci_map_page * since this is more efficient on machines with sparse * memory. */ unmap_single = true; dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE); /* Process all fragments */ while (1) { if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr))) goto pci_err; /* Store fields for marking in the per-fragment final * descriptor */ unmap_len = len; unmap_addr = dma_addr; /* Add to TX queue, splitting across DMA boundaries */ do { if (unlikely(q_space-- <= 0)) { /* It might be that completions have * happened since the xmit path last * checked. Update the xmit path's * copy of read_count. */ netif_tx_stop_queue(tx_queue->core_txq); /* This memory barrier protects the * change of queue state from the access * of read_count. 
*/ smp_mb(); tx_queue->old_read_count = ACCESS_ONCE(tx_queue->read_count); fill_level = (tx_queue->insert_count - tx_queue->old_read_count); q_space = efx->txq_entries - 1 - fill_level; if (unlikely(q_space-- <= 0)) { rc = NETDEV_TX_BUSY; goto unwind; } smp_mb(); if (likely(!efx->loopback_selftest)) netif_tx_start_queue( tx_queue->core_txq); } insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; buffer = &tx_queue->buffer[insert_ptr]; efx_tsoh_free(tx_queue, buffer); EFX_BUG_ON_PARANOID(buffer->tsoh); EFX_BUG_ON_PARANOID(buffer->skb); EFX_BUG_ON_PARANOID(buffer->len); EFX_BUG_ON_PARANOID(!buffer->continuation); EFX_BUG_ON_PARANOID(buffer->unmap_len); dma_len = efx_max_tx_len(efx, dma_addr); if (likely(dma_len >= len)) dma_len = len; /* Fill out per descriptor fields */ buffer->len = dma_len; buffer->dma_addr = dma_addr; len -= dma_len; dma_addr += dma_len; ++tx_queue->insert_count; } while (len); /* Transfer ownership of the unmapping to the final buffer */ buffer->unmap_single = unmap_single; buffer->unmap_len = unmap_len; unmap_len = 0; /* Get address and size of next fragment */ if (i >= skb_shinfo(skb)->nr_frags) break; fragment = &skb_shinfo(skb)->frags[i]; len = fragment->size; page = fragment->page; page_offset = fragment->page_offset; i++; /* Map for DMA */ unmap_single = false; dma_addr = pci_map_page(pci_dev, page, page_offset, len, PCI_DMA_TODEVICE); } /* Transfer ownership of the skb to the final buffer */ buffer->skb = skb; buffer->continuation = false; /* Pass off to hardware */ efx_nic_push_buffers(tx_queue); return NETDEV_TX_OK; pci_err: netif_err(efx, tx_err, efx->net_dev, " TX queue %d could not map skb with %d bytes %d " "fragments for DMA\n", tx_queue->queue, skb->len, skb_shinfo(skb)->nr_frags + 1); /* Mark the packet as transmitted, and free the SKB ourselves */ dev_kfree_skb_any(skb); unwind: /* Work backwards until we hit the original insert pointer value */ while (tx_queue->insert_count != tx_queue->write_count) { --tx_queue->insert_count; insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; buffer = &tx_queue->buffer[insert_ptr]; efx_dequeue_buffer(tx_queue, buffer); buffer->len = 0; } /* Free the fragment we were mid-way through pushing */ if (unmap_len) { if (unmap_single) pci_unmap_single(pci_dev, unmap_addr, unmap_len, PCI_DMA_TODEVICE); else pci_unmap_page(pci_dev, unmap_addr, unmap_len, PCI_DMA_TODEVICE); } return rc; } /* Remove packets from the TX queue * * This removes packets from the TX queue, up to and including the * specified index. */ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, unsigned int index) { struct efx_nic *efx = tx_queue->efx; unsigned int stop_index, read_ptr; stop_index = (index + 1) & tx_queue->ptr_mask; read_ptr = tx_queue->read_count & tx_queue->ptr_mask; while (read_ptr != stop_index) { struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; if (unlikely(buffer->len == 0)) { netif_err(efx, tx_err, efx->net_dev, "TX queue %d spurious TX completion id %x\n", tx_queue->queue, read_ptr); efx_schedule_reset(efx, RESET_TYPE_TX_SKIP); return; } efx_dequeue_buffer(tx_queue, buffer); buffer->continuation = true; buffer->len = 0; ++tx_queue->read_count; read_ptr = tx_queue->read_count & tx_queue->ptr_mask; } } /* Initiate a packet transmission. We use one channel per CPU * (sharing when we have more CPUs than channels). On Falcon, the TX * completion events will be directed back to the CPU that transmitted * the packet, which should be cache-efficient. * * Context: non-blocking. 
* Note that returning anything other than NETDEV_TX_OK will cause the * OS to free the skb. */ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_tx_queue *tx_queue; unsigned index, type; EFX_WARN_ON_PARANOID(!netif_device_present(net_dev)); index = skb_get_queue_mapping(skb); type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0; if (index >= efx->n_tx_channels) { index -= efx->n_tx_channels; type |= EFX_TXQ_TYPE_HIGHPRI; } tx_queue = efx_get_tx_queue(efx, index, type); return efx_enqueue_skb(tx_queue, skb); } void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) { struct efx_nic *efx = tx_queue->efx; /* Must be inverse of queue lookup in efx_hard_start_xmit() */ tx_queue->core_txq = netdev_get_tx_queue(efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES + ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? efx->n_tx_channels : 0)); } int efx_setup_tc(struct net_device *net_dev, u8 num_tc) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_channel *channel; struct efx_tx_queue *tx_queue; unsigned tc; int rc; if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC) return -EINVAL; if (num_tc == net_dev->num_tc) return 0; for (tc = 0; tc < num_tc; tc++) { net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels; net_dev->tc_to_txq[tc].count = efx->n_tx_channels; } if (num_tc > net_dev->num_tc) { /* Initialise high-priority queues as necessary */ efx_for_each_channel(channel, efx) { efx_for_each_possible_channel_tx_queue(tx_queue, channel) { if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI)) continue; if (!tx_queue->buffer) { rc = efx_probe_tx_queue(tx_queue); if (rc) return rc; } if (!tx_queue->initialised) efx_init_tx_queue(tx_queue); efx_init_tx_queue_core_txq(tx_queue); } } } else { /* Reduce number of classes before number of queues */ net_dev->num_tc = num_tc; } rc = netif_set_real_num_tx_queues(net_dev, max_t(int, num_tc, 1) * efx->n_tx_channels); if (rc) return rc; /* Do not destroy high-priority queues when they become * unused. We would have to flush them first, and it is * fairly difficult to flush a subset of TX queues. Leave * it to efx_fini_channels(). */ net_dev->num_tc = num_tc; return 0; } void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) { unsigned fill_level; struct efx_nic *efx = tx_queue->efx; EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask); efx_dequeue_buffers(tx_queue, index); /* See if we need to restart the netif queue. This barrier * separates the update of read_count from the test of the * queue state. 
*/ smp_mb(); if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) && likely(efx->port_enabled) && likely(netif_device_present(efx->net_dev))) { fill_level = tx_queue->insert_count - tx_queue->read_count; if (fill_level < EFX_TXQ_THRESHOLD(efx)) { EFX_BUG_ON_PARANOID(!efx_dev_registered(efx)); netif_tx_wake_queue(tx_queue->core_txq); } } /* Check whether the hardware queue is now empty */ if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count); if (tx_queue->read_count == tx_queue->old_write_count) { smp_mb(); tx_queue->empty_read_count = tx_queue->read_count | EFX_EMPTY_COUNT_VALID; } } } int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) { struct efx_nic *efx = tx_queue->efx; unsigned int entries; int i, rc; /* Create the smallest power-of-two aligned ring */ entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE); EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE); tx_queue->ptr_mask = entries - 1; netif_dbg(efx, probe, efx->net_dev, "creating TX queue %d size %#x mask %#x\n", tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask); /* Allocate software ring */ tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer), GFP_KERNEL); if (!tx_queue->buffer) return -ENOMEM; for (i = 0; i <= tx_queue->ptr_mask; ++i) tx_queue->buffer[i].continuation = true; /* Allocate hardware ring */ rc = efx_nic_probe_tx(tx_queue); if (rc) goto fail; return 0; fail: kfree(tx_queue->buffer); tx_queue->buffer = NULL; return rc; } void efx_init_tx_queue(struct efx_tx_queue *tx_queue) { netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, "initialising TX queue %d\n", tx_queue->queue); tx_queue->insert_count = 0; tx_queue->write_count = 0; tx_queue->old_write_count = 0; tx_queue->read_count = 0; tx_queue->old_read_count = 0; tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID; /* Set up TX descriptor ring */ efx_nic_init_tx(tx_queue); tx_queue->initialised = true; } void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) { struct efx_tx_buffer *buffer; if (!tx_queue->buffer) return; /* Free any buffers left in the ring */ while (tx_queue->read_count != tx_queue->write_count) { buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask]; efx_dequeue_buffer(tx_queue, buffer); buffer->continuation = true; buffer->len = 0; ++tx_queue->read_count; } } void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) { if (!tx_queue->initialised) return; netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, "shutting down TX queue %d\n", tx_queue->queue); tx_queue->initialised = false; /* Flush TX queue, remove descriptor ring */ efx_nic_fini_tx(tx_queue); efx_release_tx_buffers(tx_queue); /* Free up TSO header cache */ efx_fini_tso(tx_queue); } void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) { if (!tx_queue->buffer) return; netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, "destroying TX queue %d\n", tx_queue->queue); efx_nic_remove_tx(tx_queue); kfree(tx_queue->buffer); tx_queue->buffer = NULL; } /* Efx TCP segmentation acceleration. * * Why? Because by doing it here in the driver we can go significantly * faster than the GSO. * * Requires TX checksum offload support. */ /* Number of bytes inserted at the start of a TSO header buffer, * similar to NET_IP_ALIGN. 
*/ #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS #define TSOH_OFFSET 0 #else #define TSOH_OFFSET NET_IP_ALIGN #endif #define TSOH_BUFFER(tsoh) ((u8 *)(tsoh + 1) + TSOH_OFFSET) /* Total size of struct efx_tso_header, buffer and padding */ #define TSOH_SIZE(hdr_len) \ (sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len) /* Size of blocks on free list. Larger blocks must be allocated from * the heap. */ #define TSOH_STD_SIZE 128 #define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2)) #define ETH_HDR_LEN(skb) (skb_network_header(skb) - (skb)->data) #define SKB_TCP_OFF(skb) PTR_DIFF(tcp_hdr(skb), (skb)->data) #define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data) #define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data) /** * struct tso_state - TSO state for an SKB * @out_len: Remaining length in current segment * @seqnum: Current sequence number * @ipv4_id: Current IPv4 ID, host endian * @packet_space: Remaining space in current packet * @dma_addr: DMA address of current position * @in_len: Remaining length in current SKB fragment * @unmap_len: Length of SKB fragment * @unmap_addr: DMA address of SKB fragment * @unmap_single: DMA single vs page mapping flag * @protocol: Network protocol (after any VLAN header) * @header_len: Number of bytes of header * @full_packet_size: Number of bytes to put in each outgoing segment * * The state used during segmentation. It is put into this data structure * just to make it easy to pass into inline functions. */ struct tso_state { /* Output position */ unsigned out_len; unsigned seqnum; unsigned ipv4_id; unsigned packet_space; /* Input position */ dma_addr_t dma_addr; unsigned in_len; unsigned unmap_len; dma_addr_t unmap_addr; bool unmap_single; __be16 protocol; unsigned header_len; int full_packet_size; }; /* * Verify that our various assumptions about sk_buffs and the conditions * under which TSO will be attempted hold true. Return the protocol number. */ static __be16 efx_tso_check_protocol(struct sk_buff *skb) { __be16 protocol = skb->protocol; EFX_BUG_ON_PARANOID(((struct ethhdr *)skb->data)->h_proto != protocol); if (protocol == htons(ETH_P_8021Q)) { /* Find the encapsulated protocol; reset network header * and transport header based on that. */ struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; protocol = veh->h_vlan_encapsulated_proto; skb_set_network_header(skb, sizeof(*veh)); if (protocol == htons(ETH_P_IP)) skb_set_transport_header(skb, sizeof(*veh) + 4 * ip_hdr(skb)->ihl); else if (protocol == htons(ETH_P_IPV6)) skb_set_transport_header(skb, sizeof(*veh) + sizeof(struct ipv6hdr)); } if (protocol == htons(ETH_P_IP)) { EFX_BUG_ON_PARANOID(ip_hdr(skb)->protocol != IPPROTO_TCP); } else { EFX_BUG_ON_PARANOID(protocol != htons(ETH_P_IPV6)); EFX_BUG_ON_PARANOID(ipv6_hdr(skb)->nexthdr != NEXTHDR_TCP); } EFX_BUG_ON_PARANOID((PTR_DIFF(tcp_hdr(skb), skb->data) + (tcp_hdr(skb)->doff << 2u)) > skb_headlen(skb)); return protocol; } /* * Allocate a page worth of efx_tso_header structures, and string them * into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM. */ static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue) { struct pci_dev *pci_dev = tx_queue->efx->pci_dev; struct efx_tso_header *tsoh; dma_addr_t dma_addr; u8 *base_kva, *kva; base_kva = pci_alloc_consistent(pci_dev, PAGE_SIZE, &dma_addr); if (base_kva == NULL) { netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev, "Unable to allocate page for TSO headers\n"); return -ENOMEM; } /* pci_alloc_consistent() allocates pages. 
*/ EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u)); for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) { tsoh = (struct efx_tso_header *)kva; tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva); tsoh->next = tx_queue->tso_headers_free; tx_queue->tso_headers_free = tsoh; } return 0; } /* Free up a TSO header, and all others in the same page. */ static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh, struct pci_dev *pci_dev) { struct efx_tso_header **p; unsigned long base_kva; dma_addr_t base_dma; base_kva = (unsigned long)tsoh & PAGE_MASK; base_dma = tsoh->dma_addr & PAGE_MASK; p = &tx_queue->tso_headers_free; while (*p != NULL) { if (((unsigned long)*p & PAGE_MASK) == base_kva) *p = (*p)->next; else p = &(*p)->next; } pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma); } static struct efx_tso_header * efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len) { struct efx_tso_header *tsoh; tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA); if (unlikely(!tsoh)) return NULL; tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev, TSOH_BUFFER(tsoh), header_len, PCI_DMA_TODEVICE); if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev, tsoh->dma_addr))) { kfree(tsoh); return NULL; } tsoh->unmap_len = header_len; return tsoh; } static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh) { pci_unmap_single(tx_queue->efx->pci_dev, tsoh->dma_addr, tsoh->unmap_len, PCI_DMA_TODEVICE); kfree(tsoh); } /** * efx_tx_queue_insert - push descriptors onto the TX queue * @tx_queue: Efx TX queue * @dma_addr: DMA address of fragment * @len: Length of fragment * @final_buffer: The final buffer inserted into the queue * * Push descriptors onto the TX queue. Return 0 on success or 1 if * @tx_queue full. */ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, dma_addr_t dma_addr, unsigned len, struct efx_tx_buffer **final_buffer) { struct efx_tx_buffer *buffer; struct efx_nic *efx = tx_queue->efx; unsigned dma_len, fill_level, insert_ptr; int q_space; EFX_BUG_ON_PARANOID(len <= 0); fill_level = tx_queue->insert_count - tx_queue->old_read_count; /* -1 as there is no way to represent all descriptors used */ q_space = efx->txq_entries - 1 - fill_level; while (1) { if (unlikely(q_space-- <= 0)) { /* It might be that completions have happened * since the xmit path last checked. Update * the xmit path's copy of read_count. */ netif_tx_stop_queue(tx_queue->core_txq); /* This memory barrier protects the change of * queue state from the access of read_count. 
*/ smp_mb(); tx_queue->old_read_count = ACCESS_ONCE(tx_queue->read_count); fill_level = (tx_queue->insert_count - tx_queue->old_read_count); q_space = efx->txq_entries - 1 - fill_level; if (unlikely(q_space-- <= 0)) { *final_buffer = NULL; return 1; } smp_mb(); netif_tx_start_queue(tx_queue->core_txq); } insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; buffer = &tx_queue->buffer[insert_ptr]; ++tx_queue->insert_count; EFX_BUG_ON_PARANOID(tx_queue->insert_count - tx_queue->read_count >= efx->txq_entries); efx_tsoh_free(tx_queue, buffer); EFX_BUG_ON_PARANOID(buffer->len); EFX_BUG_ON_PARANOID(buffer->unmap_len); EFX_BUG_ON_PARANOID(buffer->skb); EFX_BUG_ON_PARANOID(!buffer->continuation); EFX_BUG_ON_PARANOID(buffer->tsoh); buffer->dma_addr = dma_addr; dma_len = efx_max_tx_len(efx, dma_addr); /* If there is enough space to send then do so */ if (dma_len >= len) break; buffer->len = dma_len; /* Don't set the other members */ dma_addr += dma_len; len -= dma_len; } EFX_BUG_ON_PARANOID(!len); buffer->len = len; *final_buffer = buffer; return 0; } /* * Put a TSO header into the TX queue. * * This is special-cased because we know that it is small enough to fit in * a single fragment, and we know it doesn't cross a page boundary. It * also allows us to not worry about end-of-packet etc. */ static void efx_tso_put_header(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh, unsigned len) { struct efx_tx_buffer *buffer; buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask]; efx_tsoh_free(tx_queue, buffer); EFX_BUG_ON_PARANOID(buffer->len); EFX_BUG_ON_PARANOID(buffer->unmap_len); EFX_BUG_ON_PARANOID(buffer->skb); EFX_BUG_ON_PARANOID(!buffer->continuation); EFX_BUG_ON_PARANOID(buffer->tsoh); buffer->len = len; buffer->dma_addr = tsoh->dma_addr; buffer->tsoh = tsoh; ++tx_queue->insert_count; } /* Remove descriptors put into a tx_queue. */ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) { struct efx_tx_buffer *buffer; dma_addr_t unmap_addr; /* Work backwards until we hit the original insert pointer value */ while (tx_queue->insert_count != tx_queue->write_count) { --tx_queue->insert_count; buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask]; efx_tsoh_free(tx_queue, buffer); EFX_BUG_ON_PARANOID(buffer->skb); if (buffer->unmap_len) { unmap_addr = (buffer->dma_addr + buffer->len - buffer->unmap_len); if (buffer->unmap_single) pci_unmap_single(tx_queue->efx->pci_dev, unmap_addr, buffer->unmap_len, PCI_DMA_TODEVICE); else pci_unmap_page(tx_queue->efx->pci_dev, unmap_addr, buffer->unmap_len, PCI_DMA_TODEVICE); buffer->unmap_len = 0; } buffer->len = 0; buffer->continuation = true; } } /* Parse the SKB header and initialise state. */ static void tso_start(struct tso_state *st, const struct sk_buff *skb) { /* All ethernet/IP/TCP headers combined size is TCP header size * plus offset of TCP header relative to start of packet. 
*/ st->header_len = ((tcp_hdr(skb)->doff << 2u) + PTR_DIFF(tcp_hdr(skb), skb->data)); st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size; if (st->protocol == htons(ETH_P_IP)) st->ipv4_id = ntohs(ip_hdr(skb)->id); else st->ipv4_id = 0; st->seqnum = ntohl(tcp_hdr(skb)->seq); EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg); EFX_BUG_ON_PARANOID(tcp_hdr(skb)->syn); EFX_BUG_ON_PARANOID(tcp_hdr(skb)->rst); st->packet_space = st->full_packet_size; st->out_len = skb->len - st->header_len; st->unmap_len = 0; st->unmap_single = false; } static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, skb_frag_t *frag) { st->unmap_addr = pci_map_page(efx->pci_dev, frag->page, frag->page_offset, frag->size, PCI_DMA_TODEVICE); if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) { st->unmap_single = false; st->unmap_len = frag->size; st->in_len = frag->size; st->dma_addr = st->unmap_addr; return 0; } return -ENOMEM; } static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx, const struct sk_buff *skb) { int hl = st->header_len; int len = skb_headlen(skb) - hl; st->unmap_addr = pci_map_single(efx->pci_dev, skb->data + hl, len, PCI_DMA_TODEVICE); if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) { st->unmap_single = true; st->unmap_len = len; st->in_len = len; st->dma_addr = st->unmap_addr; return 0; } return -ENOMEM; } /** * tso_fill_packet_with_fragment - form descriptors for the current fragment * @tx_queue: Efx TX queue * @skb: Socket buffer * @st: TSO state * * Form descriptors for the current fragment, until we reach the end * of fragment or end-of-packet. Return 0 on success, 1 if not enough * space in @tx_queue. */ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, const struct sk_buff *skb, struct tso_state *st) { struct efx_tx_buffer *buffer; int n, end_of_packet, rc; if (st->in_len == 0) return 0; if (st->packet_space == 0) return 0; EFX_BUG_ON_PARANOID(st->in_len <= 0); EFX_BUG_ON_PARANOID(st->packet_space <= 0); n = min(st->in_len, st->packet_space); st->packet_space -= n; st->out_len -= n; st->in_len -= n; rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer); if (likely(rc == 0)) { if (st->out_len == 0) /* Transfer ownership of the skb */ buffer->skb = skb; end_of_packet = st->out_len == 0 || st->packet_space == 0; buffer->continuation = !end_of_packet; if (st->in_len == 0) { /* Transfer ownership of the pci mapping */ buffer->unmap_len = st->unmap_len; buffer->unmap_single = st->unmap_single; st->unmap_len = 0; } } st->dma_addr += n; return rc; } /** * tso_start_new_packet - generate a new header and prepare for the new packet * @tx_queue: Efx TX queue * @skb: Socket buffer * @st: TSO state * * Generate a new header and prepare for the new packet. Return 0 on * success, or -1 if failed to alloc header. */ static int tso_start_new_packet(struct efx_tx_queue *tx_queue, const struct sk_buff *skb, struct tso_state *st) { struct efx_tso_header *tsoh; struct tcphdr *tsoh_th; unsigned ip_length; u8 *header; /* Allocate a DMA-mapped header buffer. 
*/ if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) { if (tx_queue->tso_headers_free == NULL) { if (efx_tsoh_block_alloc(tx_queue)) return -1; } EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free); tsoh = tx_queue->tso_headers_free; tx_queue->tso_headers_free = tsoh->next; tsoh->unmap_len = 0; } else { tx_queue->tso_long_headers++; tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len); if (unlikely(!tsoh)) return -1; } header = TSOH_BUFFER(tsoh); tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb)); /* Copy and update the headers. */ memcpy(header, skb->data, st->header_len); tsoh_th->seq = htonl(st->seqnum); st->seqnum += skb_shinfo(skb)->gso_size; if (st->out_len > skb_shinfo(skb)->gso_size) { /* This packet will not finish the TSO burst. */ ip_length = st->full_packet_size - ETH_HDR_LEN(skb); tsoh_th->fin = 0; tsoh_th->psh = 0; } else { /* This packet will be the last in the TSO burst. */ ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len; tsoh_th->fin = tcp_hdr(skb)->fin; tsoh_th->psh = tcp_hdr(skb)->psh; } if (st->protocol == htons(ETH_P_IP)) { struct iphdr *tsoh_iph = (struct iphdr *)(header + SKB_IPV4_OFF(skb)); tsoh_iph->tot_len = htons(ip_length); /* Linux leaves suitable gaps in the IP ID space for us to fill. */ tsoh_iph->id = htons(st->ipv4_id); st->ipv4_id++; } else { struct ipv6hdr *tsoh_iph = (struct ipv6hdr *)(header + SKB_IPV6_OFF(skb)); tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph)); } st->packet_space = skb_shinfo(skb)->gso_size; ++tx_queue->tso_packets; /* Form a descriptor for this header. */ efx_tso_put_header(tx_queue, tsoh, st->header_len); return 0; } /** * efx_enqueue_skb_tso - segment and transmit a TSO socket buffer * @tx_queue: Efx TX queue * @skb: Socket buffer * * Context: You must hold netif_tx_lock() to call this function. * * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if * @skb was not enqueued. In all cases @skb is consumed. Return * %NETDEV_TX_OK or %NETDEV_TX_BUSY. */ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb) { struct efx_nic *efx = tx_queue->efx; int frag_i, rc, rc2 = NETDEV_TX_OK; struct tso_state state; /* Find the packet protocol and sanity-check it */ state.protocol = efx_tso_check_protocol(skb); EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); tso_start(&state, skb); /* Assume that skb header area contains exactly the headers, and * all payload is in the frag list. */ if (skb_headlen(skb) == state.header_len) { /* Grab the first payload fragment. */ EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags < 1); frag_i = 0; rc = tso_get_fragment(&state, efx, skb_shinfo(skb)->frags + frag_i); if (rc) goto mem_err; } else { rc = tso_get_head_fragment(&state, efx, skb); if (rc) goto mem_err; frag_i = -1; } if (tso_start_new_packet(tx_queue, skb, &state) < 0) goto mem_err; while (1) { rc = tso_fill_packet_with_fragment(tx_queue, skb, &state); if (unlikely(rc)) { rc2 = NETDEV_TX_BUSY; goto unwind; } /* Move onto the next fragment? */ if (state.in_len == 0) { if (++frag_i >= skb_shinfo(skb)->nr_frags) /* End of payload reached. */ break; rc = tso_get_fragment(&state, efx, skb_shinfo(skb)->frags + frag_i); if (rc) goto mem_err; } /* Start at new packet? 
*/ if (state.packet_space == 0 && tso_start_new_packet(tx_queue, skb, &state) < 0) goto mem_err; } /* Pass off to hardware */ efx_nic_push_buffers(tx_queue); tx_queue->tso_bursts++; return NETDEV_TX_OK; mem_err: netif_err(efx, tx_err, efx->net_dev, "Out of memory for TSO headers, or PCI mapping error\n"); dev_kfree_skb_any(skb); unwind: /* Free the DMA mapping we were in the process of writing out */ if (state.unmap_len) { if (state.unmap_single) pci_unmap_single(efx->pci_dev, state.unmap_addr, state.unmap_len, PCI_DMA_TODEVICE); else pci_unmap_page(efx->pci_dev, state.unmap_addr, state.unmap_len, PCI_DMA_TODEVICE); } efx_enqueue_unwind(tx_queue); return rc2; } /* * Free up all TSO datastructures associated with tx_queue. This * routine should be called only once the tx_queue is both empty and * will no longer be used. */ static void efx_fini_tso(struct efx_tx_queue *tx_queue) { unsigned i; if (tx_queue->buffer) { for (i = 0; i <= tx_queue->ptr_mask; ++i) efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); } while (tx_queue->tso_headers_free != NULL) efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free, tx_queue->efx->pci_dev); }
brieuwers/N8000Kernel
drivers/net/sfc/tx.c
C
gpl-2.0
33,839
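For context on how the entry point in the record above is reached: efx_hard_start_xmit() has the standard ndo_start_xmit signature, so the driver's net_device_ops table (defined elsewhere, in efx.c) simply points at it. The sketch below only illustrates that contract; the demo_* names are hypothetical and assume the declarations from efx.h are in scope.

/* Illustrative wiring only; the authoritative net_device_ops table
 * lives in efx.c, not in tx.c. As the comments above say, the
 * ndo_start_xmit hook must return NETDEV_TX_OK or NETDEV_TX_BUSY;
 * anything else makes the stack free the skb itself. */
#include <linux/netdevice.h>
#include "efx.h"	/* declares efx_hard_start_xmit() */

static const struct net_device_ops demo_efx_netdev_ops = {
	.ndo_start_xmit	= efx_hard_start_xmit,
};

static void demo_efx_attach(struct net_device *net_dev)
{
	net_dev->netdev_ops = &demo_efx_netdev_ops;
}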
/* * Copyright 2008 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/cdev.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/types.h> #include <linux/cpufreq.h> #include <asm/delay.h> #include <asm/dpmc.h> #define DRIVER_NAME "bfin dpmc" #define dprintk(msg...) \ cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, DRIVER_NAME, msg) struct bfin_dpmc_platform_data *pdata; /** * bfin_set_vlev - Update VLEV field in VR_CTL Reg. * Avoid BYPASS sequence */ static void bfin_set_vlev(unsigned int vlev) { unsigned pll_lcnt; pll_lcnt = bfin_read_PLL_LOCKCNT(); bfin_write_PLL_LOCKCNT(1); bfin_write_VR_CTL((bfin_read_VR_CTL() & ~VLEV) | vlev); bfin_write_PLL_LOCKCNT(pll_lcnt); } /** * bfin_get_vlev - Get CPU-specific VLEV from platform device data */ static unsigned int bfin_get_vlev(unsigned int freq) { int i; if (!pdata) goto err_out; freq >>= 16; for (i = 0; i < pdata->tabsize; i++) if (freq <= (pdata->tuple_tab[i] & 0xFFFF)) return pdata->tuple_tab[i] >> 16; err_out: printk(KERN_WARNING "DPMC: No suitable CCLK VDDINT voltage pair found\n"); return VLEV_120; } #ifdef CONFIG_CPU_FREQ static int vreg_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) { struct cpufreq_freqs *freq = data; if (val == CPUFREQ_PRECHANGE && freq->old < freq->new) { bfin_set_vlev(bfin_get_vlev(freq->new)); udelay(pdata->vr_settling_time); /* Wait until voltage has settled */ } else if (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) bfin_set_vlev(bfin_get_vlev(freq->new)); return 0; } static struct notifier_block vreg_cpufreq_notifier_block = { .notifier_call = vreg_cpufreq_notifier }; #endif /* CONFIG_CPU_FREQ */ /** * bfin_dpmc_probe - * */ static int __devinit bfin_dpmc_probe(struct platform_device *pdev) { if (pdev->dev.platform_data) pdata = pdev->dev.platform_data; else return -EINVAL; return cpufreq_register_notifier(&vreg_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); } /** * bfin_dpmc_remove - */ static int __devexit bfin_dpmc_remove(struct platform_device *pdev) { pdata = NULL; return cpufreq_unregister_notifier(&vreg_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); } struct platform_driver bfin_dpmc_device_driver = { .probe = bfin_dpmc_probe, .remove = __devexit_p(bfin_dpmc_remove), .driver = { .name = DRIVER_NAME, } }; /** * bfin_dpmc_init - Init driver */ static int __init bfin_dpmc_init(void) { return platform_driver_register(&bfin_dpmc_device_driver); } module_init(bfin_dpmc_init); /** * bfin_dpmc_exit - break down driver */ static void __exit bfin_dpmc_exit(void) { platform_driver_unregister(&bfin_dpmc_device_driver); } module_exit(bfin_dpmc_exit); MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); MODULE_DESCRIPTION("cpu power management driver for Blackfin"); MODULE_LICENSE("GPL");
cgjones/samsung-android-kernel
arch/blackfin/mach-common/dpmc.c
C
gpl-2.0
3,024
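bfin_dpmc_probe() above refuses to bind without platform data, so a board file has to register a matching platform device. Below is a sketch of that registration with made-up voltage/frequency pairs; real boards take the table from the datasheet. VRPAIR() from <asm/dpmc.h> packs VLEV into the high 16 bits and the frequency, pre-shifted right by 16, into the low 16 bits, which is exactly what bfin_get_vlev() decodes.

/* Board-file sketch; the demo_* names and table values are
 * illustrative. VRPAIR() and the VLEV_* constants come from
 * <asm/dpmc.h>. */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <asm/dpmc.h>

static const unsigned int demo_cclk_vlev_tab[] = {
	VRPAIR(VLEV_085, 250000000),	/* up to 250 MHz at 0.85 V */
	VRPAIR(VLEV_100, 400000000),	/* up to 400 MHz at 1.00 V */
	VRPAIR(VLEV_120, 600000000),	/* up to 600 MHz at 1.20 V */
};

static struct bfin_dpmc_platform_data demo_dpmc_data = {
	.tuple_tab	= demo_cclk_vlev_tab,
	.tabsize	= ARRAY_SIZE(demo_cclk_vlev_tab),
	.vr_settling_time = 25,	/* us, consumed by the udelay() above */
};

static struct platform_device demo_dpmc_device = {
	.name	= "bfin dpmc",	/* must match DRIVER_NAME */
	.dev	= {
		.platform_data = &demo_dpmc_data,
	},
};

A board's init code would then call platform_device_register(&demo_dpmc_device) so that the platform bus can bind it to bfin_dpmc_device_driver by name.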
<?php $lang['db_invalid_connection_str'] = 'Unable to determine the database settings based on the connection string you submitted.'; $lang['db_unable_to_connect'] = 'Unable to connect to your database server using the provided settings.'; $lang['db_unable_to_select'] = 'Unable to select the specified database: %s'; $lang['db_unable_to_create'] = 'Unable to create the specified database: %s'; $lang['db_invalid_query'] = 'The query you submitted is not valid.'; $lang['db_must_set_table'] = 'You must set the database table to be used with your query.'; $lang['db_must_use_set'] = 'You must use the "set" method to update an entry.'; $lang['db_must_use_index'] = 'You must specify an index to match on for batch updates.'; $lang['db_batch_missing_index'] = 'One or more rows submitted for batch updating is missing the specified index.'; $lang['db_must_use_where'] = 'Updates are not allowed unless they contain a "where" clause.'; $lang['db_del_must_use_where'] = 'Deletes are not allowed unless they contain a "where" or "like" clause.'; $lang['db_field_param_missing'] = 'To fetch fields requires the name of the table as a parameter.'; $lang['db_unsupported_function'] = 'This feature is not available for the database you are using.'; $lang['db_transaction_failure'] = 'Transaction failure: Rollback performed.'; $lang['db_unable_to_drop'] = 'Unable to drop the specified database.'; $lang['db_unsuported_feature'] = 'Unsupported feature of the database platform you are using.'; $lang['db_unsuported_compression'] = 'The file compression format you chose is not supported by your server.'; $lang['db_filepath_error'] = 'Unable to write data to the file path you have submitted.'; $lang['db_invalid_cache_path'] = 'The cache path you submitted is not valid or writable.'; $lang['db_table_name_required'] = 'A table name is required for that operation.'; $lang['db_column_name_required'] = 'A column name is required for that operation.'; $lang['db_column_definition_required'] = 'A column definition is required for that operation.'; $lang['db_unable_to_set_charset'] = 'Unable to set client connection character set: %s'; $lang['db_error_heading'] = 'A Database Error Occurred'; /* End of file db_lang.php */ /* Location: ./system/language/english/db_lang.php */
wh04mi/halogy
halogy/system/language/english/db_lang.php
PHP
gpl-2.0
2,273
#include <linux/kdebug.h> #include <linux/kprobes.h> #include <linux/module.h> #include <linux/notifier.h> #include <linux/rcupdate.h> #include <linux/vmalloc.h> #include <linux/reboot.h> /* * Notifier list for kernel code which wants to be called * at shutdown. This is used to stop any idling DMA operations * and the like. */ BLOCKING_NOTIFIER_HEAD(reboot_notifier_list); /* * Notifier chain core routines. The exported routines below * are layered on top of these, with appropriate locking added. */ static int notifier_chain_register(struct notifier_block **nl, struct notifier_block *n) { while ((*nl) != NULL) { if (n->priority > (*nl)->priority) break; nl = &((*nl)->next); } n->next = *nl; rcu_assign_pointer(*nl, n); return 0; } static int notifier_chain_cond_register(struct notifier_block **nl, struct notifier_block *n) { while ((*nl) != NULL) { if ((*nl) == n) return 0; if (n->priority > (*nl)->priority) break; nl = &((*nl)->next); } n->next = *nl; rcu_assign_pointer(*nl, n); return 0; } static int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n) { while ((*nl) != NULL) { if ((*nl) == n) { rcu_assign_pointer(*nl, n->next); return 0; } nl = &((*nl)->next); } return -ENOENT; } /** * notifier_call_chain - Informs the registered notifiers about an event. * @nl: Pointer to head of the blocking notifier chain * @val: Value passed unmodified to notifier function * @v: Pointer passed unmodified to notifier function * @nr_to_call: Number of notifier functions to be called. Don't care * value of this parameter is -1. * @nr_calls: Records the number of notifications sent. Don't care * value of this field is NULL. * @returns: notifier_call_chain returns the value returned by the * last notifier function called. */ static int __kprobes notifier_call_chain(struct notifier_block **nl, unsigned long val, void *v, int nr_to_call, int *nr_calls) { int ret = NOTIFY_DONE; struct notifier_block *nb, *next_nb; nb = rcu_dereference_raw(*nl); while (nb && nr_to_call) { next_nb = rcu_dereference_raw(nb->next); #ifdef CONFIG_DEBUG_NOTIFIERS if (unlikely(!func_ptr_is_kernel_text(nb->notifier_call))) { WARN(1, "Invalid notifier called!"); nb = next_nb; continue; } #endif ret = nb->notifier_call(nb, val, v); if (nr_calls) (*nr_calls)++; if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK) break; nb = next_nb; nr_to_call--; } return ret; } /* * Atomic notifier chain routines. Registration and unregistration * use a spinlock, and call_chain is synchronized by RCU (no locks). */ /** * atomic_notifier_chain_register - Add notifier to an atomic notifier chain * @nh: Pointer to head of the atomic notifier chain * @n: New entry in notifier chain * * Adds a notifier to an atomic notifier chain. * * Currently always returns zero. */ int atomic_notifier_chain_register(struct atomic_notifier_head *nh, struct notifier_block *n) { unsigned long flags; int ret; spin_lock_irqsave(&nh->lock, flags); ret = notifier_chain_register(&nh->head, n); spin_unlock_irqrestore(&nh->lock, flags); return ret; } EXPORT_SYMBOL_GPL(atomic_notifier_chain_register); /** * atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain * @nh: Pointer to head of the atomic notifier chain * @n: Entry to remove from notifier chain * * Removes a notifier from an atomic notifier chain. * * Returns zero on success or %-ENOENT on failure. 
*/ int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh, struct notifier_block *n) { unsigned long flags; int ret; spin_lock_irqsave(&nh->lock, flags); ret = notifier_chain_unregister(&nh->head, n); spin_unlock_irqrestore(&nh->lock, flags); synchronize_rcu(); return ret; } EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister); /** * __atomic_notifier_call_chain - Call functions in an atomic notifier chain * @nh: Pointer to head of the atomic notifier chain * @val: Value passed unmodified to notifier function * @v: Pointer passed unmodified to notifier function * @nr_to_call: See the comment for notifier_call_chain. * @nr_calls: See the comment for notifier_call_chain. * * Calls each function in a notifier chain in turn. The functions * run in an atomic context, so they must not block. * This routine uses RCU to synchronize with changes to the chain. * * If the return value of the notifier can be and'ed * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain() * will return immediately, with the return value of * the notifier function which halted execution. * Otherwise the return value is the return value * of the last notifier function called. */ int __kprobes __atomic_notifier_call_chain(struct atomic_notifier_head *nh, unsigned long val, void *v, int nr_to_call, int *nr_calls) { int ret; rcu_read_lock(); ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain); int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh, unsigned long val, void *v) { return __atomic_notifier_call_chain(nh, val, v, -1, NULL); } EXPORT_SYMBOL_GPL(atomic_notifier_call_chain); /* * Blocking notifier chain routines. All access to the chain is * synchronized by an rwsem. */ /** * blocking_notifier_chain_register - Add notifier to a blocking notifier chain * @nh: Pointer to head of the blocking notifier chain * @n: New entry in notifier chain * * Adds a notifier to a blocking notifier chain. * Must be called in process context. * * Currently always returns zero. */ int blocking_notifier_chain_register(struct blocking_notifier_head *nh, struct notifier_block *n) { int ret; /* * This code gets used during boot-up, when task switching is * not yet working and interrupts must remain disabled. At * such times we must not call down_write(). */ if (unlikely(system_state == SYSTEM_BOOTING)) return notifier_chain_register(&nh->head, n); down_write(&nh->rwsem); ret = notifier_chain_register(&nh->head, n); up_write(&nh->rwsem); return ret; } EXPORT_SYMBOL_GPL(blocking_notifier_chain_register); /** * blocking_notifier_chain_cond_register - Cond add notifier to a blocking notifier chain * @nh: Pointer to head of the blocking notifier chain * @n: New entry in notifier chain * * Adds a notifier to a blocking notifier chain, only if not already * present in the chain. * Must be called in process context. * * Currently always returns zero. */ int blocking_notifier_chain_cond_register(struct blocking_notifier_head *nh, struct notifier_block *n) { int ret; down_write(&nh->rwsem); ret = notifier_chain_cond_register(&nh->head, n); up_write(&nh->rwsem); return ret; } EXPORT_SYMBOL_GPL(blocking_notifier_chain_cond_register); /** * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain * @nh: Pointer to head of the blocking notifier chain * @n: Entry to remove from notifier chain * * Removes a notifier from a blocking notifier chain. * Must be called from process context. 
* * Returns zero on success or %-ENOENT on failure. */ int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh, struct notifier_block *n) { int ret; /* * This code gets used during boot-up, when task switching is * not yet working and interrupts must remain disabled. At * such times we must not call down_write(). */ if (unlikely(system_state == SYSTEM_BOOTING)) return notifier_chain_unregister(&nh->head, n); down_write(&nh->rwsem); ret = notifier_chain_unregister(&nh->head, n); up_write(&nh->rwsem); return ret; } EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister); /** * __blocking_notifier_call_chain - Call functions in a blocking notifier chain * @nh: Pointer to head of the blocking notifier chain * @val: Value passed unmodified to notifier function * @v: Pointer passed unmodified to notifier function * @nr_to_call: See comment for notifier_call_chain. * @nr_calls: See comment for notifier_call_chain. * * Calls each function in a notifier chain in turn. The functions * run in a process context, so they are allowed to block. * * If the return value of the notifier can be and'ed * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain() * will return immediately, with the return value of * the notifier function which halted execution. * Otherwise the return value is the return value * of the last notifier function called. */ int __blocking_notifier_call_chain(struct blocking_notifier_head *nh, unsigned long val, void *v, int nr_to_call, int *nr_calls) { int ret = NOTIFY_DONE; /* * We check the head outside the lock, but if this access is * racy then it does not matter what the result of the test * is, we re-check the list after having taken the lock anyway: */ if (rcu_dereference_raw(nh->head)) { down_read(&nh->rwsem); ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); up_read(&nh->rwsem); } return ret; } EXPORT_SYMBOL_GPL(__blocking_notifier_call_chain); int blocking_notifier_call_chain(struct blocking_notifier_head *nh, unsigned long val, void *v) { return __blocking_notifier_call_chain(nh, val, v, -1, NULL); } EXPORT_SYMBOL_GPL(blocking_notifier_call_chain); /* * Raw notifier chain routines. There is no protection; * the caller must provide it. Use at your own risk! */ /** * raw_notifier_chain_register - Add notifier to a raw notifier chain * @nh: Pointer to head of the raw notifier chain * @n: New entry in notifier chain * * Adds a notifier to a raw notifier chain. * All locking must be provided by the caller. * * Currently always returns zero. */ int raw_notifier_chain_register(struct raw_notifier_head *nh, struct notifier_block *n) { return notifier_chain_register(&nh->head, n); } EXPORT_SYMBOL_GPL(raw_notifier_chain_register); /** * raw_notifier_chain_unregister - Remove notifier from a raw notifier chain * @nh: Pointer to head of the raw notifier chain * @n: Entry to remove from notifier chain * * Removes a notifier from a raw notifier chain. * All locking must be provided by the caller. * * Returns zero on success or %-ENOENT on failure. */ int raw_notifier_chain_unregister(struct raw_notifier_head *nh, struct notifier_block *n) { return notifier_chain_unregister(&nh->head, n); } EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister); /** * __raw_notifier_call_chain - Call functions in a raw notifier chain * @nh: Pointer to head of the raw notifier chain * @val: Value passed unmodified to notifier function * @v: Pointer passed unmodified to notifier function * @nr_to_call: See comment for notifier_call_chain. 
* @nr_calls: See comment for notifier_call_chain * * Calls each function in a notifier chain in turn. The functions * run in an undefined context. * All locking must be provided by the caller. * * If the return value of the notifier can be and'ed * with %NOTIFY_STOP_MASK then raw_notifier_call_chain() * will return immediately, with the return value of * the notifier function which halted execution. * Otherwise the return value is the return value * of the last notifier function called. */ int __raw_notifier_call_chain(struct raw_notifier_head *nh, unsigned long val, void *v, int nr_to_call, int *nr_calls) { return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); } EXPORT_SYMBOL_GPL(__raw_notifier_call_chain); int raw_notifier_call_chain(struct raw_notifier_head *nh, unsigned long val, void *v) { return __raw_notifier_call_chain(nh, val, v, -1, NULL); } EXPORT_SYMBOL_GPL(raw_notifier_call_chain); /* * SRCU notifier chain routines. Registration and unregistration * use a mutex, and call_chain is synchronized by SRCU (no locks). */ /** * srcu_notifier_chain_register - Add notifier to an SRCU notifier chain * @nh: Pointer to head of the SRCU notifier chain * @n: New entry in notifier chain * * Adds a notifier to an SRCU notifier chain. * Must be called in process context. * * Currently always returns zero. */ int srcu_notifier_chain_register(struct srcu_notifier_head *nh, struct notifier_block *n) { int ret; /* * This code gets used during boot-up, when task switching is * not yet working and interrupts must remain disabled. At * such times we must not call mutex_lock(). */ if (unlikely(system_state == SYSTEM_BOOTING)) return notifier_chain_register(&nh->head, n); mutex_lock(&nh->mutex); ret = notifier_chain_register(&nh->head, n); mutex_unlock(&nh->mutex); return ret; } EXPORT_SYMBOL_GPL(srcu_notifier_chain_register); /** * srcu_notifier_chain_unregister - Remove notifier from an SRCU notifier chain * @nh: Pointer to head of the SRCU notifier chain * @n: Entry to remove from notifier chain * * Removes a notifier from an SRCU notifier chain. * Must be called from process context. * * Returns zero on success or %-ENOENT on failure. */ int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh, struct notifier_block *n) { int ret; /* * This code gets used during boot-up, when task switching is * not yet working and interrupts must remain disabled. At * such times we must not call mutex_lock(). */ if (unlikely(system_state == SYSTEM_BOOTING)) return notifier_chain_unregister(&nh->head, n); mutex_lock(&nh->mutex); ret = notifier_chain_unregister(&nh->head, n); mutex_unlock(&nh->mutex); synchronize_srcu(&nh->srcu); return ret; } EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister); /** * __srcu_notifier_call_chain - Call functions in an SRCU notifier chain * @nh: Pointer to head of the SRCU notifier chain * @val: Value passed unmodified to notifier function * @v: Pointer passed unmodified to notifier function * @nr_to_call: See comment for notifier_call_chain. * @nr_calls: See comment for notifier_call_chain * * Calls each function in a notifier chain in turn. The functions * run in a process context, so they are allowed to block. * * If the return value of the notifier can be and'ed * with %NOTIFY_STOP_MASK then srcu_notifier_call_chain() * will return immediately, with the return value of * the notifier function which halted execution. * Otherwise the return value is the return value * of the last notifier function called. 
*/ int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, unsigned long val, void *v, int nr_to_call, int *nr_calls) { int ret; int idx; idx = srcu_read_lock(&nh->srcu); ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls); srcu_read_unlock(&nh->srcu, idx); return ret; } EXPORT_SYMBOL_GPL(__srcu_notifier_call_chain); int srcu_notifier_call_chain(struct srcu_notifier_head *nh, unsigned long val, void *v) { return __srcu_notifier_call_chain(nh, val, v, -1, NULL); } EXPORT_SYMBOL_GPL(srcu_notifier_call_chain); /** * srcu_init_notifier_head - Initialize an SRCU notifier head * @nh: Pointer to head of the srcu notifier chain * * Unlike other sorts of notifier heads, SRCU notifier heads require * dynamic initialization. Be sure to call this routine before * calling any of the other SRCU notifier routines for this head. * * If an SRCU notifier head is deallocated, it must first be cleaned * up by calling srcu_cleanup_notifier_head(). Otherwise the head's * per-cpu data (used by the SRCU mechanism) will leak. */ void srcu_init_notifier_head(struct srcu_notifier_head *nh) { mutex_init(&nh->mutex); if (init_srcu_struct(&nh->srcu) < 0) BUG(); nh->head = NULL; } EXPORT_SYMBOL_GPL(srcu_init_notifier_head); /** * register_reboot_notifier - Register function to be called at reboot time * @nb: Info about notifier function to be called * * Registers a function with the list of functions * to be called at reboot time. * * Currently always returns zero, as blocking_notifier_chain_register() * always returns zero. */ int register_reboot_notifier(struct notifier_block *nb) { return blocking_notifier_chain_register(&reboot_notifier_list, nb); } EXPORT_SYMBOL(register_reboot_notifier); /** * unregister_reboot_notifier - Unregister previously registered reboot notifier * @nb: Hook to be unregistered * * Unregisters a previously registered reboot * notifier function. * * Returns zero on success, or %-ENOENT on failure. */ int unregister_reboot_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&reboot_notifier_list, nb); } EXPORT_SYMBOL(unregister_reboot_notifier); static ATOMIC_NOTIFIER_HEAD(die_chain); int notrace __kprobes notify_die(enum die_val val, const char *str, struct pt_regs *regs, long err, int trap, int sig) { struct die_args args = { .regs = regs, .str = str, .err = err, .trapnr = trap, .signr = sig, }; return atomic_notifier_call_chain(&die_chain, val, &args); } int register_die_notifier(struct notifier_block *nb) { vmalloc_sync_all(); return atomic_notifier_chain_register(&die_chain, nb); } EXPORT_SYMBOL_GPL(register_die_notifier); int unregister_die_notifier(struct notifier_block *nb) { return atomic_notifier_chain_unregister(&die_chain, nb); } EXPORT_SYMBOL_GPL(unregister_die_notifier);
hallovveen31/HELLRAZOR
kernel/notifier.c
C
gpl-2.0
17,340
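The four chain flavours implemented above (atomic, blocking, raw, SRCU) share one registration and call shape and differ only in how readers are synchronized against updates. A minimal sketch of the blocking variant follows; the chain head, handler name, and event code MY_EVENT are hypothetical, while the head initializer and the register/call entry points are the ones defined in kernel/notifier.c above.

#include <linux/init.h>
#include <linux/notifier.h>

#define MY_EVENT 1				/* hypothetical event code */

static BLOCKING_NOTIFIER_HEAD(my_chain);	/* static heads need no runtime init */

static int my_event_handler(struct notifier_block *nb,
			    unsigned long event, void *data)
{
	if (event != MY_EVENT)
		return NOTIFY_DONE;	/* not interested; keep walking the chain */
	/* May sleep here: blocking chains run handlers in process context. */
	return NOTIFY_OK;
}

static struct notifier_block my_nb = {
	.notifier_call = my_event_handler,
};

static int __init my_setup(void)
{
	/* Currently always returns zero, as noted in the kernel-doc above. */
	return blocking_notifier_chain_register(&my_chain, &my_nb);
}

/* From process context: walk the chain; stops early on NOTIFY_STOP_MASK. */
static void my_notify(void *payload)
{
	blocking_notifier_call_chain(&my_chain, MY_EVENT, payload);
}

Atomic chains trade the permission to sleep for RCU read-side protection, and SRCU heads additionally require srcu_init_notifier_head() at run time, as the comment above stresses.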
/* pci_sabre.c: Sabre specific PCI controller support. * * Copyright (C) 1997, 1998, 1999, 2007 David S. Miller (davem@davemloft.net) * Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be) * Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com) */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/of_device.h> #include <asm/apb.h> #include <asm/iommu.h> #include <asm/irq.h> #include <asm/prom.h> #include <asm/upa.h> #include "pci_impl.h" #include "iommu_common.h" #include "psycho_common.h" #define DRIVER_NAME "sabre" #define PFX DRIVER_NAME ": " /* SABRE PCI controller register offsets and definitions. */ #define SABRE_UE_AFSR 0x0030UL #define SABRE_UEAFSR_PDRD 0x4000000000000000UL /* Primary PCI DMA Read */ #define SABRE_UEAFSR_PDWR 0x2000000000000000UL /* Primary PCI DMA Write */ #define SABRE_UEAFSR_SDRD 0x0800000000000000UL /* Secondary PCI DMA Read */ #define SABRE_UEAFSR_SDWR 0x0400000000000000UL /* Secondary PCI DMA Write */ #define SABRE_UEAFSR_SDTE 0x0200000000000000UL /* Secondary DMA Translation Error */ #define SABRE_UEAFSR_PDTE 0x0100000000000000UL /* Primary DMA Translation Error */ #define SABRE_UEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask */ #define SABRE_UEAFSR_OFF 0x00000000e0000000UL /* Offset (AFAR bits [5:3] */ #define SABRE_UEAFSR_BLK 0x0000000000800000UL /* Was block operation */ #define SABRE_UECE_AFAR 0x0038UL #define SABRE_CE_AFSR 0x0040UL #define SABRE_CEAFSR_PDRD 0x4000000000000000UL /* Primary PCI DMA Read */ #define SABRE_CEAFSR_PDWR 0x2000000000000000UL /* Primary PCI DMA Write */ #define SABRE_CEAFSR_SDRD 0x0800000000000000UL /* Secondary PCI DMA Read */ #define SABRE_CEAFSR_SDWR 0x0400000000000000UL /* Secondary PCI DMA Write */ #define SABRE_CEAFSR_ESYND 0x00ff000000000000UL /* ECC Syndrome */ #define SABRE_CEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask */ #define SABRE_CEAFSR_OFF 0x00000000e0000000UL /* Offset */ #define SABRE_CEAFSR_BLK 0x0000000000800000UL /* Was block operation */ #define SABRE_UECE_AFAR_ALIAS 0x0048UL /* Aliases to 0x0038 */ #define SABRE_IOMMU_CONTROL 0x0200UL #define SABRE_IOMMUCTRL_ERRSTS 0x0000000006000000UL /* Error status bits */ #define SABRE_IOMMUCTRL_ERR 0x0000000001000000UL /* Error present in IOTLB */ #define SABRE_IOMMUCTRL_LCKEN 0x0000000000800000UL /* IOTLB lock enable */ #define SABRE_IOMMUCTRL_LCKPTR 0x0000000000780000UL /* IOTLB lock pointer */ #define SABRE_IOMMUCTRL_TSBSZ 0x0000000000070000UL /* TSB Size */ #define SABRE_IOMMU_TSBSZ_1K 0x0000000000000000 #define SABRE_IOMMU_TSBSZ_2K 0x0000000000010000 #define SABRE_IOMMU_TSBSZ_4K 0x0000000000020000 #define SABRE_IOMMU_TSBSZ_8K 0x0000000000030000 #define SABRE_IOMMU_TSBSZ_16K 0x0000000000040000 #define SABRE_IOMMU_TSBSZ_32K 0x0000000000050000 #define SABRE_IOMMU_TSBSZ_64K 0x0000000000060000 #define SABRE_IOMMU_TSBSZ_128K 0x0000000000070000 #define SABRE_IOMMUCTRL_TBWSZ 0x0000000000000004UL /* TSB assumed page size */ #define SABRE_IOMMUCTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */ #define SABRE_IOMMUCTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */ #define SABRE_IOMMU_TSBBASE 0x0208UL #define SABRE_IOMMU_FLUSH 0x0210UL #define SABRE_IMAP_A_SLOT0 0x0c00UL #define SABRE_IMAP_B_SLOT0 0x0c20UL #define SABRE_IMAP_SCSI 0x1000UL #define SABRE_IMAP_ETH 0x1008UL #define SABRE_IMAP_BPP 0x1010UL #define SABRE_IMAP_AU_REC 0x1018UL #define SABRE_IMAP_AU_PLAY 0x1020UL #define SABRE_IMAP_PFAIL 0x1028UL #define SABRE_IMAP_KMS 
0x1030UL #define SABRE_IMAP_FLPY 0x1038UL #define SABRE_IMAP_SHW 0x1040UL #define SABRE_IMAP_KBD 0x1048UL #define SABRE_IMAP_MS 0x1050UL #define SABRE_IMAP_SER 0x1058UL #define SABRE_IMAP_UE 0x1070UL #define SABRE_IMAP_CE 0x1078UL #define SABRE_IMAP_PCIERR 0x1080UL #define SABRE_IMAP_GFX 0x1098UL #define SABRE_IMAP_EUPA 0x10a0UL #define SABRE_ICLR_A_SLOT0 0x1400UL #define SABRE_ICLR_B_SLOT0 0x1480UL #define SABRE_ICLR_SCSI 0x1800UL #define SABRE_ICLR_ETH 0x1808UL #define SABRE_ICLR_BPP 0x1810UL #define SABRE_ICLR_AU_REC 0x1818UL #define SABRE_ICLR_AU_PLAY 0x1820UL #define SABRE_ICLR_PFAIL 0x1828UL #define SABRE_ICLR_KMS 0x1830UL #define SABRE_ICLR_FLPY 0x1838UL #define SABRE_ICLR_SHW 0x1840UL #define SABRE_ICLR_KBD 0x1848UL #define SABRE_ICLR_MS 0x1850UL #define SABRE_ICLR_SER 0x1858UL #define SABRE_ICLR_UE 0x1870UL #define SABRE_ICLR_CE 0x1878UL #define SABRE_ICLR_PCIERR 0x1880UL #define SABRE_WRSYNC 0x1c20UL #define SABRE_PCICTRL 0x2000UL #define SABRE_PCICTRL_MRLEN 0x0000001000000000UL /* Use MemoryReadLine for block loads/stores */ #define SABRE_PCICTRL_SERR 0x0000000400000000UL /* Set when SERR asserted on PCI bus */ #define SABRE_PCICTRL_ARBPARK 0x0000000000200000UL /* Bus Parking 0=Ultra-IIi 1=prev-bus-owner */ #define SABRE_PCICTRL_CPUPRIO 0x0000000000100000UL /* Ultra-IIi granted every other bus cycle */ #define SABRE_PCICTRL_ARBPRIO 0x00000000000f0000UL /* Slot which is granted every other bus cycle */ #define SABRE_PCICTRL_ERREN 0x0000000000000100UL /* PCI Error Interrupt Enable */ #define SABRE_PCICTRL_RTRYWE 0x0000000000000080UL /* DMA Flow Control 0=wait-if-possible 1=retry */ #define SABRE_PCICTRL_AEN 0x000000000000000fUL /* Slot PCI arbitration enables */ #define SABRE_PIOAFSR 0x2010UL #define SABRE_PIOAFSR_PMA 0x8000000000000000UL /* Primary Master Abort */ #define SABRE_PIOAFSR_PTA 0x4000000000000000UL /* Primary Target Abort */ #define SABRE_PIOAFSR_PRTRY 0x2000000000000000UL /* Primary Excessive Retries */ #define SABRE_PIOAFSR_PPERR 0x1000000000000000UL /* Primary Parity Error */ #define SABRE_PIOAFSR_SMA 0x0800000000000000UL /* Secondary Master Abort */ #define SABRE_PIOAFSR_STA 0x0400000000000000UL /* Secondary Target Abort */ #define SABRE_PIOAFSR_SRTRY 0x0200000000000000UL /* Secondary Excessive Retries */ #define SABRE_PIOAFSR_SPERR 0x0100000000000000UL /* Secondary Parity Error */ #define SABRE_PIOAFSR_BMSK 0x0000ffff00000000UL /* Byte Mask */ #define SABRE_PIOAFSR_BLK 0x0000000080000000UL /* Was Block Operation */ #define SABRE_PIOAFAR 0x2018UL #define SABRE_PCIDIAG 0x2020UL #define SABRE_PCIDIAG_DRTRY 0x0000000000000040UL /* Disable PIO Retry Limit */ #define SABRE_PCIDIAG_IPAPAR 0x0000000000000008UL /* Invert PIO Address Parity */ #define SABRE_PCIDIAG_IPDPAR 0x0000000000000004UL /* Invert PIO Data Parity */ #define SABRE_PCIDIAG_IDDPAR 0x0000000000000002UL /* Invert DMA Data Parity */ #define SABRE_PCIDIAG_ELPBK 0x0000000000000001UL /* Loopback Enable - not supported */ #define SABRE_PCITASR 0x2028UL #define SABRE_PCITASR_EF 0x0000000000000080UL /* Respond to 0xe0000000-0xffffffff */ #define SABRE_PCITASR_CD 0x0000000000000040UL /* Respond to 0xc0000000-0xdfffffff */ #define SABRE_PCITASR_AB 0x0000000000000020UL /* Respond to 0xa0000000-0xbfffffff */ #define SABRE_PCITASR_89 0x0000000000000010UL /* Respond to 0x80000000-0x9fffffff */ #define SABRE_PCITASR_67 0x0000000000000008UL /* Respond to 0x60000000-0x7fffffff */ #define SABRE_PCITASR_45 0x0000000000000004UL /* Respond to 0x40000000-0x5fffffff */ #define SABRE_PCITASR_23 0x0000000000000002UL /* Respond to 
0x20000000-0x3fffffff */ #define SABRE_PCITASR_01 0x0000000000000001UL /* Respond to 0x00000000-0x1fffffff */ #define SABRE_PIOBUF_DIAG 0x5000UL #define SABRE_DMABUF_DIAGLO 0x5100UL #define SABRE_DMABUF_DIAGHI 0x51c0UL #define SABRE_IMAP_GFX_ALIAS 0x6000UL /* Aliases to 0x1098 */ #define SABRE_IMAP_EUPA_ALIAS 0x8000UL /* Aliases to 0x10a0 */ #define SABRE_IOMMU_VADIAG 0xa400UL #define SABRE_IOMMU_TCDIAG 0xa408UL #define SABRE_IOMMU_TAG 0xa580UL #define SABRE_IOMMUTAG_ERRSTS 0x0000000001800000UL /* Error status bits */ #define SABRE_IOMMUTAG_ERR 0x0000000000400000UL /* Error present */ #define SABRE_IOMMUTAG_WRITE 0x0000000000200000UL /* Page is writable */ #define SABRE_IOMMUTAG_STREAM 0x0000000000100000UL /* Streamable bit - unused */ #define SABRE_IOMMUTAG_SIZE 0x0000000000080000UL /* 0=8k 1=16k */ #define SABRE_IOMMUTAG_VPN 0x000000000007ffffUL /* Virtual Page Number [31:13] */ #define SABRE_IOMMU_DATA 0xa600UL #define SABRE_IOMMUDATA_VALID 0x0000000040000000UL /* Valid */ #define SABRE_IOMMUDATA_USED 0x0000000020000000UL /* Used (for LRU algorithm) */ #define SABRE_IOMMUDATA_CACHE 0x0000000010000000UL /* Cacheable */ #define SABRE_IOMMUDATA_PPN 0x00000000001fffffUL /* Physical Page Number [33:13] */ #define SABRE_PCI_IRQSTATE 0xa800UL #define SABRE_OBIO_IRQSTATE 0xa808UL #define SABRE_FFBCFG 0xf000UL #define SABRE_FFBCFG_SPRQS 0x000000000f000000 /* Slave P_RQST queue size */ #define SABRE_FFBCFG_ONEREAD 0x0000000000004000 /* Slave supports one outstanding read */ #define SABRE_MCCTRL0 0xf010UL #define SABRE_MCCTRL0_RENAB 0x0000000080000000 /* Refresh Enable */ #define SABRE_MCCTRL0_EENAB 0x0000000010000000 /* Enable all ECC functions */ #define SABRE_MCCTRL0_11BIT 0x0000000000001000 /* Enable 11-bit column addressing */ #define SABRE_MCCTRL0_DPP 0x0000000000000f00 /* DIMM Pair Present Bits */ #define SABRE_MCCTRL0_RINTVL 0x00000000000000ff /* Refresh Interval */ #define SABRE_MCCTRL1 0xf018UL #define SABRE_MCCTRL1_AMDC 0x0000000038000000 /* Advance Memdata Clock */ #define SABRE_MCCTRL1_ARDC 0x0000000007000000 /* Advance DRAM Read Data Clock */ #define SABRE_MCCTRL1_CSR 0x0000000000e00000 /* CAS to RAS delay for CBR refresh */ #define SABRE_MCCTRL1_CASRW 0x00000000001c0000 /* CAS length for read/write */ #define SABRE_MCCTRL1_RCD 0x0000000000038000 /* RAS to CAS delay */ #define SABRE_MCCTRL1_CP 0x0000000000007000 /* CAS Precharge */ #define SABRE_MCCTRL1_RP 0x0000000000000e00 /* RAS Precharge */ #define SABRE_MCCTRL1_RAS 0x00000000000001c0 /* Length of RAS for refresh */ #define SABRE_MCCTRL1_CASRW2 0x0000000000000038 /* Must be same as CASRW */ #define SABRE_MCCTRL1_RSC 0x0000000000000007 /* RAS after CAS hold time */ #define SABRE_RESETCTRL 0xf020UL #define SABRE_CONFIGSPACE 0x001000000UL #define SABRE_IOSPACE 0x002000000UL #define SABRE_IOSPACE_SIZE 0x000ffffffUL #define SABRE_MEMSPACE 0x100000000UL #define SABRE_MEMSPACE_SIZE 0x07fffffffUL static int hummingbird_p; static struct pci_bus *sabre_root_bus; static irqreturn_t sabre_ue_intr(int irq, void *dev_id) { struct pci_pbm_info *pbm = dev_id; unsigned long afsr_reg = pbm->controller_regs + SABRE_UE_AFSR; unsigned long afar_reg = pbm->controller_regs + SABRE_UECE_AFAR; unsigned long afsr, afar, error_bits; int reported; /* Latch uncorrectable error status. */ afar = upa_readq(afar_reg); afsr = upa_readq(afsr_reg); /* Clear the primary/secondary error status bits. 
*/ error_bits = afsr & (SABRE_UEAFSR_PDRD | SABRE_UEAFSR_PDWR | SABRE_UEAFSR_SDRD | SABRE_UEAFSR_SDWR | SABRE_UEAFSR_SDTE | SABRE_UEAFSR_PDTE); if (!error_bits) return IRQ_NONE; upa_writeq(error_bits, afsr_reg); /* Log the error. */ printk("%s: Uncorrectable Error, primary error type[%s%s]\n", pbm->name, ((error_bits & SABRE_UEAFSR_PDRD) ? "DMA Read" : ((error_bits & SABRE_UEAFSR_PDWR) ? "DMA Write" : "???")), ((error_bits & SABRE_UEAFSR_PDTE) ? ":Translation Error" : "")); printk("%s: bytemask[%04lx] dword_offset[%lx] was_block(%d)\n", pbm->name, (afsr & SABRE_UEAFSR_BMSK) >> 32UL, (afsr & SABRE_UEAFSR_OFF) >> 29UL, ((afsr & SABRE_UEAFSR_BLK) ? 1 : 0)); printk("%s: UE AFAR [%016lx]\n", pbm->name, afar); printk("%s: UE Secondary errors [", pbm->name); reported = 0; if (afsr & SABRE_UEAFSR_SDRD) { reported++; printk("(DMA Read)"); } if (afsr & SABRE_UEAFSR_SDWR) { reported++; printk("(DMA Write)"); } if (afsr & SABRE_UEAFSR_SDTE) { reported++; printk("(Translation Error)"); } if (!reported) printk("(none)"); printk("]\n"); /* Interrogate IOMMU for error status. */ psycho_check_iommu_error(pbm, afsr, afar, UE_ERR); return IRQ_HANDLED; } static irqreturn_t sabre_ce_intr(int irq, void *dev_id) { struct pci_pbm_info *pbm = dev_id; unsigned long afsr_reg = pbm->controller_regs + SABRE_CE_AFSR; unsigned long afar_reg = pbm->controller_regs + SABRE_UECE_AFAR; unsigned long afsr, afar, error_bits; int reported; /* Latch error status. */ afar = upa_readq(afar_reg); afsr = upa_readq(afsr_reg); /* Clear primary/secondary error status bits. */ error_bits = afsr & (SABRE_CEAFSR_PDRD | SABRE_CEAFSR_PDWR | SABRE_CEAFSR_SDRD | SABRE_CEAFSR_SDWR); if (!error_bits) return IRQ_NONE; upa_writeq(error_bits, afsr_reg); /* Log the error. */ printk("%s: Correctable Error, primary error type[%s]\n", pbm->name, ((error_bits & SABRE_CEAFSR_PDRD) ? "DMA Read" : ((error_bits & SABRE_CEAFSR_PDWR) ? "DMA Write" : "???"))); /* XXX Use syndrome and afar to print out module string just like * XXX UDB CE trap handler does... -DaveM */ printk("%s: syndrome[%02lx] bytemask[%04lx] dword_offset[%lx] " "was_block(%d)\n", pbm->name, (afsr & SABRE_CEAFSR_ESYND) >> 48UL, (afsr & SABRE_CEAFSR_BMSK) >> 32UL, (afsr & SABRE_CEAFSR_OFF) >> 29UL, ((afsr & SABRE_CEAFSR_BLK) ? 1 : 0)); printk("%s: CE AFAR [%016lx]\n", pbm->name, afar); printk("%s: CE Secondary errors [", pbm->name); reported = 0; if (afsr & SABRE_CEAFSR_SDRD) { reported++; printk("(DMA Read)"); } if (afsr & SABRE_CEAFSR_SDWR) { reported++; printk("(DMA Write)"); } if (!reported) printk("(none)"); printk("]\n"); return IRQ_HANDLED; } static void sabre_register_error_handlers(struct pci_pbm_info *pbm) { struct device_node *dp = pbm->op->dev.of_node; struct platform_device *op; unsigned long base = pbm->controller_regs; u64 tmp; int err; if (pbm->chip_type == PBM_CHIP_TYPE_SABRE) dp = dp->parent; op = of_find_device_by_node(dp); if (!op) return; /* Sabre/Hummingbird IRQ property layout is: * 0: PCI ERR * 1: UE ERR * 2: CE ERR * 3: POWER FAIL */ if (op->archdata.num_irqs < 4) return; /* We clear the error bits in the appropriate AFSR before * registering the handler so that we don't get spurious * interrupts. 
*/ upa_writeq((SABRE_UEAFSR_PDRD | SABRE_UEAFSR_PDWR | SABRE_UEAFSR_SDRD | SABRE_UEAFSR_SDWR | SABRE_UEAFSR_SDTE | SABRE_UEAFSR_PDTE), base + SABRE_UE_AFSR); err = request_irq(op->archdata.irqs[1], sabre_ue_intr, 0, "SABRE_UE", pbm); if (err) printk(KERN_WARNING "%s: Couldn't register UE, err=%d.\n", pbm->name, err); upa_writeq((SABRE_CEAFSR_PDRD | SABRE_CEAFSR_PDWR | SABRE_CEAFSR_SDRD | SABRE_CEAFSR_SDWR), base + SABRE_CE_AFSR); err = request_irq(op->archdata.irqs[2], sabre_ce_intr, 0, "SABRE_CE", pbm); if (err) printk(KERN_WARNING "%s: Couldn't register CE, err=%d.\n", pbm->name, err); err = request_irq(op->archdata.irqs[0], psycho_pcierr_intr, 0, "SABRE_PCIERR", pbm); if (err) printk(KERN_WARNING "%s: Couldn't register PCIERR, err=%d.\n", pbm->name, err); tmp = upa_readq(base + SABRE_PCICTRL); tmp |= SABRE_PCICTRL_ERREN; upa_writeq(tmp, base + SABRE_PCICTRL); } static void apb_init(struct pci_bus *sabre_bus) { struct pci_dev *pdev; list_for_each_entry(pdev, &sabre_bus->devices, bus_list) { if (pdev->vendor == PCI_VENDOR_ID_SUN && pdev->device == PCI_DEVICE_ID_SUN_SIMBA) { u16 word16; pci_read_config_word(pdev, PCI_COMMAND, &word16); word16 |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO; pci_write_config_word(pdev, PCI_COMMAND, word16); /* Status register bits are "write 1 to clear". */ pci_write_config_word(pdev, PCI_STATUS, 0xffff); pci_write_config_word(pdev, PCI_SEC_STATUS, 0xffff); /* Use a primary/secondary latency timer value * of 64. */ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64); pci_write_config_byte(pdev, PCI_SEC_LATENCY_TIMER, 64); /* Enable reporting/forwarding of master aborts, * parity, and SERR. */ pci_write_config_byte(pdev, PCI_BRIDGE_CONTROL, (PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR | PCI_BRIDGE_CTL_MASTER_ABORT)); } } } static void sabre_scan_bus(struct pci_pbm_info *pbm, struct device *parent) { static int once; /* The APB bridge speaks to the Sabre host PCI bridge * at 66MHz, but the front side of APB runs at 33MHz * for both segments. * * Hummingbird systems do not use APB, so they run * at 66MHz. */ if (hummingbird_p) pbm->is_66mhz_capable = 1; else pbm->is_66mhz_capable = 0; /* This driver has not been verified to handle * multiple SABREs yet, so trap this. * * Also note that the SABRE host bridge is hardwired * to live at bus 0. */ if (once != 0) { printk(KERN_ERR PFX "Multiple controllers unsupported.\n"); return; } once++; pbm->pci_bus = pci_scan_one_pbm(pbm, parent); if (!pbm->pci_bus) return; sabre_root_bus = pbm->pci_bus; apb_init(pbm->pci_bus); sabre_register_error_handlers(pbm); } static void sabre_pbm_init(struct pci_pbm_info *pbm, struct platform_device *op) { psycho_pbm_init_common(pbm, op, "SABRE", PBM_CHIP_TYPE_SABRE); pbm->pci_afsr = pbm->controller_regs + SABRE_PIOAFSR; pbm->pci_afar = pbm->controller_regs + SABRE_PIOAFAR; pbm->pci_csr = pbm->controller_regs + SABRE_PCICTRL; sabre_scan_bus(pbm, &op->dev); } static const struct of_device_id sabre_match[]; static int sabre_probe(struct platform_device *op) { const struct of_device_id *match; const struct linux_prom64_registers *pr_regs; struct device_node *dp = op->dev.of_node; struct pci_pbm_info *pbm; u32 upa_portid, dma_mask; struct iommu *iommu; int tsbsize, err; const u32 *vdma; u64 clear_irq; match = of_match_device(sabre_match, &op->dev); hummingbird_p = match && (match->data != NULL); if (!hummingbird_p) { struct device_node *cpu_dp; /* Of course, Sun has to encode things a thousand * different ways, inconsistently. 
*/ for_each_node_by_type(cpu_dp, "cpu") { if (!strcmp(cpu_dp->name, "SUNW,UltraSPARC-IIe")) hummingbird_p = 1; } } err = -ENOMEM; pbm = kzalloc(sizeof(*pbm), GFP_KERNEL); if (!pbm) { printk(KERN_ERR PFX "Cannot allocate pci_pbm_info.\n"); goto out_err; } iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); if (!iommu) { printk(KERN_ERR PFX "Cannot allocate PBM iommu.\n"); goto out_free_controller; } pbm->iommu = iommu; upa_portid = of_getintprop_default(dp, "upa-portid", 0xff); pbm->portid = upa_portid; /* * Map in SABRE register set and report the presence of this SABRE. */ pr_regs = of_get_property(dp, "reg", NULL); err = -ENODEV; if (!pr_regs) { printk(KERN_ERR PFX "No reg property\n"); goto out_free_iommu; } /* * First REG in property is base of entire SABRE register space. */ pbm->controller_regs = pr_regs[0].phys_addr; /* Clear interrupts */ /* PCI first */ for (clear_irq = SABRE_ICLR_A_SLOT0; clear_irq < SABRE_ICLR_B_SLOT0 + 0x80; clear_irq += 8) upa_writeq(0x0UL, pbm->controller_regs + clear_irq); /* Then OBIO */ for (clear_irq = SABRE_ICLR_SCSI; clear_irq < SABRE_ICLR_SCSI + 0x80; clear_irq += 8) upa_writeq(0x0UL, pbm->controller_regs + clear_irq); /* Error interrupts are enabled later after the bus scan. */ upa_writeq((SABRE_PCICTRL_MRLEN | SABRE_PCICTRL_SERR | SABRE_PCICTRL_ARBPARK | SABRE_PCICTRL_AEN), pbm->controller_regs + SABRE_PCICTRL); /* Now map in PCI config space for entire SABRE. */ pbm->config_space = pbm->controller_regs + SABRE_CONFIGSPACE; vdma = of_get_property(dp, "virtual-dma", NULL); if (!vdma) { printk(KERN_ERR PFX "No virtual-dma property\n"); goto out_free_iommu; } dma_mask = vdma[0]; switch(vdma[1]) { case 0x20000000: dma_mask |= 0x1fffffff; tsbsize = 64; break; case 0x40000000: dma_mask |= 0x3fffffff; tsbsize = 128; break; case 0x80000000: dma_mask |= 0x7fffffff; tsbsize = 128; break; default: printk(KERN_ERR PFX "Strange virtual-dma size.\n"); goto out_free_iommu; } err = psycho_iommu_init(pbm, tsbsize, vdma[0], dma_mask, SABRE_WRSYNC); if (err) goto out_free_iommu; /* * Look for APB underneath. */ sabre_pbm_init(pbm, op); pbm->next = pci_pbm_root; pci_pbm_root = pbm; dev_set_drvdata(&op->dev, pbm); return 0; out_free_iommu: kfree(pbm->iommu); out_free_controller: kfree(pbm); out_err: return err; } static const struct of_device_id sabre_match[] = { { .name = "pci", .compatible = "pci108e,a001", .data = (void *) 1, }, { .name = "pci", .compatible = "pci108e,a000", }, {}, }; static struct platform_driver sabre_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, .of_match_table = sabre_match, }, .probe = sabre_probe, }; static int __init sabre_init(void) { return platform_driver_register(&sabre_driver); } subsys_initcall(sabre_init);
Fusion-Devices/android_kernel_moto_shamu_old
arch/sparc/kernel/pci_sabre.c
C
gpl-2.0
20,936
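Both interrupt handlers in pci_sabre.c follow the same latch/decode/clear discipline on the AFSR/AFAR register pair. The sketch below isolates that pattern under stated assumptions: the register offset and error mask are hypothetical stand-ins for the SABRE_*_AFSR definitions above (the AFAR read is elided), and the same driver headers are assumed so that upa_readq()/upa_writeq(), irqreturn_t, and struct pci_pbm_info are available.

#define EXAMPLE_AFSR		0x0030UL		/* hypothetical status offset */
#define EXAMPLE_ERR_BITS	0xff00000000000000UL	/* hypothetical error mask */

static irqreturn_t example_err_intr(int irq, void *dev_id)
{
	struct pci_pbm_info *pbm = dev_id;
	unsigned long afsr, error_bits;

	/* 1. Latch the status register. */
	afsr = upa_readq(pbm->controller_regs + EXAMPLE_AFSR);

	/* 2. Decode: keep only the primary/secondary error bits. */
	error_bits = afsr & EXAMPLE_ERR_BITS;
	if (!error_bits)
		return IRQ_NONE;	/* not ours; shared-IRQ etiquette */

	/* 3. Clear: AFSR error bits are write-one-to-clear, so writing
	 *    back exactly the bits we saw cannot lose an error that
	 *    latches in the meantime.
	 */
	upa_writeq(error_bits, pbm->controller_regs + EXAMPLE_AFSR);

	/* 4. Log the decoded bits, then tell the core the line is handled. */
	return IRQ_HANDLED;
}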
/** * wm831x-on.c - WM831X ON pin driver * * Copyright (C) 2009 Wolfson Microelectronics plc * * This file is subject to the terms and conditions of the GNU General * Public License. See the file "COPYING" in the main directory of this * archive for more details. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/input.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/workqueue.h> #include <linux/mfd/wm831x/core.h> struct wm831x_on { struct input_dev *dev; struct delayed_work work; struct wm831x *wm831x; }; /* * The chip gives us an interrupt when the ON pin is asserted but we * then need to poll to see when the pin is deasserted. */ static void wm831x_poll_on(struct work_struct *work) { struct wm831x_on *wm831x_on = container_of(work, struct wm831x_on, work.work); struct wm831x *wm831x = wm831x_on->wm831x; int poll, ret; ret = wm831x_reg_read(wm831x, WM831X_ON_PIN_CONTROL); if (ret >= 0) { poll = !(ret & WM831X_ON_PIN_STS); input_report_key(wm831x_on->dev, KEY_POWER, poll); input_sync(wm831x_on->dev); } else { dev_err(wm831x->dev, "Failed to read ON status: %d\n", ret); poll = 1; } if (poll) schedule_delayed_work(&wm831x_on->work, 100); } static irqreturn_t wm831x_on_irq(int irq, void *data) { struct wm831x_on *wm831x_on = data; schedule_delayed_work(&wm831x_on->work, 0); return IRQ_HANDLED; } static int __devinit wm831x_on_probe(struct platform_device *pdev) { struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); struct wm831x_on *wm831x_on; int irq = platform_get_irq(pdev, 0); int ret; wm831x_on = kzalloc(sizeof(struct wm831x_on), GFP_KERNEL); if (!wm831x_on) { dev_err(&pdev->dev, "Can't allocate data\n"); return -ENOMEM; } wm831x_on->wm831x = wm831x; INIT_DELAYED_WORK(&wm831x_on->work, wm831x_poll_on); wm831x_on->dev = input_allocate_device(); if (!wm831x_on->dev) { dev_err(&pdev->dev, "Can't allocate input dev\n"); ret = -ENOMEM; goto err; } wm831x_on->dev->evbit[0] = BIT_MASK(EV_KEY); wm831x_on->dev->keybit[BIT_WORD(KEY_POWER)] = BIT_MASK(KEY_POWER); wm831x_on->dev->name = "wm831x_on"; wm831x_on->dev->phys = "wm831x_on/input0"; wm831x_on->dev->dev.parent = &pdev->dev; ret = request_threaded_irq(irq, NULL, wm831x_on_irq, IRQF_TRIGGER_RISING, "wm831x_on", wm831x_on); if (ret < 0) { dev_err(&pdev->dev, "Unable to request IRQ: %d\n", ret); goto err_input_dev; } ret = input_register_device(wm831x_on->dev); if (ret) { dev_dbg(&pdev->dev, "Can't register input device: %d\n", ret); goto err_irq; } platform_set_drvdata(pdev, wm831x_on); return 0; err_irq: free_irq(irq, wm831x_on); err_input_dev: input_free_device(wm831x_on->dev); err: kfree(wm831x_on); return ret; } static int __devexit wm831x_on_remove(struct platform_device *pdev) { struct wm831x_on *wm831x_on = platform_get_drvdata(pdev); int irq = platform_get_irq(pdev, 0); free_irq(irq, wm831x_on); cancel_delayed_work_sync(&wm831x_on->work); input_unregister_device(wm831x_on->dev); kfree(wm831x_on); return 0; } static struct platform_driver wm831x_on_driver = { .probe 
= wm831x_on_probe, .remove = __devexit_p(wm831x_on_remove), .driver = { .name = "wm831x-on", .owner = THIS_MODULE, }, }; module_platform_driver(wm831x_on_driver); MODULE_ALIAS("platform:wm831x-on"); MODULE_DESCRIPTION("WM831x ON pin"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
TheDarkestObscrurity/mako
drivers/input/misc/wm831x-on.c
C
gpl-2.0
4,041
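One detail of the polling loop above is easy to miss: schedule_delayed_work() counts its delay in jiffies, so the literal 100 re-arms the poll after 100 ticks — one second at HZ=100 but only 100 ms at HZ=1000. A hypothetical variant pinned to wall-clock time, assuming the same delayed_work as in the driver:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

/* Hypothetical alternative to the bare "100" used in wm831x_poll_on(). */
static void requeue_on_poll(struct delayed_work *work)
{
	/* 100 ms between polls regardless of the kernel's HZ setting. */
	schedule_delayed_work(work, msecs_to_jiffies(100));
}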
/* * Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand. * Copyright (c) 2005 Ian McDonald <ian.mcdonald@jandi.co.nz> * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br> * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/module.h> #include "../../dccp.h" #include "tfrc.h" #define TFRC_CALC_X_ARRSIZE 500 #define TFRC_CALC_X_SPLIT 50000 /* 0.05 * 1000000, details below */ #define TFRC_SMALLEST_P (TFRC_CALC_X_SPLIT/TFRC_CALC_X_ARRSIZE) /* TFRC TCP Reno Throughput Equation Lookup Table for f(p) The following two-column lookup table implements a part of the TCP throughput equation from [RFC 3448, sec. 3.1]: s X_calc = -------------------------------------------------------------- R * sqrt(2*b*p/3) + (3 * t_RTO * sqrt(3*b*p/8) * (p + 32*p^3)) Where: X is the transmit rate in bytes/second s is the packet size in bytes R is the round trip time in seconds p is the loss event rate, between 0 and 1.0, of the number of loss events as a fraction of the number of packets transmitted t_RTO is the TCP retransmission timeout value in seconds b is the number of packets acknowledged by a single TCP ACK We can assume that b = 1 and t_RTO is 4 * R. The equation now becomes: s X_calc = ------------------------------------------------------- R * sqrt(p*2/3) + (12 * R * sqrt(p*3/8) * (p + 32*p^3)) which we can break down into: s X_calc = --------- R * f(p) where f(p) is given for 0 < p <= 1 by: f(p) = sqrt(2*p/3) + 12 * sqrt(3*p/8) * (p + 32*p^3) Since this is kernel code, floating-point arithmetic is avoided in favour of integer arithmetic. This means that nearly all fractional parameters are scaled by 1000000: * the parameters p and R * the return result f(p) The lookup table therefore actually tabulates the following function g(q): g(q) = 1000000 * f(q/1000000) Hence, when p <= 1, q must be less than or equal to 1000000. To achieve finer granularity for the practically more relevant case of small values of p (up to 5%), the second column is used; the first one ranges up to 100%. This split corresponds to the value of q = TFRC_CALC_X_SPLIT. At the same time this also determines the smallest resolution possible with this lookup table: TFRC_SMALLEST_P = TFRC_CALC_X_SPLIT / TFRC_CALC_X_ARRSIZE The entire table is generated by: for(i=0; i < TFRC_CALC_X_ARRSIZE; i++) { lookup[i][0] = g((i+1) * 1000000/TFRC_CALC_X_ARRSIZE); lookup[i][1] = g((i+1) * TFRC_CALC_X_SPLIT/TFRC_CALC_X_ARRSIZE); } With the given configuration, we have, with M = TFRC_CALC_X_ARRSIZE-1, lookup[0][0] = g(1000000/(M+1)) = 1000000 * f(0.2%) lookup[M][0] = g(1000000) = 1000000 * f(100%) lookup[0][1] = g(TFRC_SMALLEST_P) = 1000000 * f(0.01%) lookup[M][1] = g(TFRC_CALC_X_SPLIT) = 1000000 * f(5%) In summary, the two columns represent f(p) for the following ranges: * The first column is for 0.002 <= p <= 1.0 * The second column is for 0.0001 <= p <= 0.05 Where the columns overlap, the second (finer-grained) is given preference, i.e. the first column is used only for p >= 0.05. 
*/ static const u32 tfrc_calc_x_lookup[TFRC_CALC_X_ARRSIZE][2] = { { 37172, 8172 }, { 53499, 11567 }, { 66664, 14180 }, { 78298, 16388 }, { 89021, 18339 }, { 99147, 20108 }, { 108858, 21738 }, { 118273, 23260 }, { 127474, 24693 }, { 136520, 26052 }, { 145456, 27348 }, { 154316, 28589 }, { 163130, 29783 }, { 171919, 30935 }, { 180704, 32049 }, { 189502, 33130 }, { 198328, 34180 }, { 207194, 35202 }, { 216114, 36198 }, { 225097, 37172 }, { 234153, 38123 }, { 243294, 39055 }, { 252527, 39968 }, { 261861, 40864 }, { 271305, 41743 }, { 280866, 42607 }, { 290553, 43457 }, { 300372, 44293 }, { 310333, 45117 }, { 320441, 45929 }, { 330705, 46729 }, { 341131, 47518 }, { 351728, 48297 }, { 362501, 49066 }, { 373460, 49826 }, { 384609, 50577 }, { 395958, 51320 }, { 407513, 52054 }, { 419281, 52780 }, { 431270, 53499 }, { 443487, 54211 }, { 455940, 54916 }, { 468635, 55614 }, { 481581, 56306 }, { 494785, 56991 }, { 508254, 57671 }, { 521996, 58345 }, { 536019, 59014 }, { 550331, 59677 }, { 564939, 60335 }, { 579851, 60988 }, { 595075, 61636 }, { 610619, 62279 }, { 626491, 62918 }, { 642700, 63553 }, { 659253, 64183 }, { 676158, 64809 }, { 693424, 65431 }, { 711060, 66050 }, { 729073, 66664 }, { 747472, 67275 }, { 766266, 67882 }, { 785464, 68486 }, { 805073, 69087 }, { 825103, 69684 }, { 845562, 70278 }, { 866460, 70868 }, { 887805, 71456 }, { 909606, 72041 }, { 931873, 72623 }, { 954614, 73202 }, { 977839, 73778 }, { 1001557, 74352 }, { 1025777, 74923 }, { 1050508, 75492 }, { 1075761, 76058 }, { 1101544, 76621 }, { 1127867, 77183 }, { 1154739, 77741 }, { 1182172, 78298 }, { 1210173, 78852 }, { 1238753, 79405 }, { 1267922, 79955 }, { 1297689, 80503 }, { 1328066, 81049 }, { 1359060, 81593 }, { 1390684, 82135 }, { 1422947, 82675 }, { 1455859, 83213 }, { 1489430, 83750 }, { 1523671, 84284 }, { 1558593, 84817 }, { 1594205, 85348 }, { 1630518, 85878 }, { 1667543, 86406 }, { 1705290, 86932 }, { 1743770, 87457 }, { 1782994, 87980 }, { 1822973, 88501 }, { 1863717, 89021 }, { 1905237, 89540 }, { 1947545, 90057 }, { 1990650, 90573 }, { 2034566, 91087 }, { 2079301, 91600 }, { 2124869, 92111 }, { 2171279, 92622 }, { 2218543, 93131 }, { 2266673, 93639 }, { 2315680, 94145 }, { 2365575, 94650 }, { 2416371, 95154 }, { 2468077, 95657 }, { 2520707, 96159 }, { 2574271, 96660 }, { 2628782, 97159 }, { 2684250, 97658 }, { 2740689, 98155 }, { 2798110, 98651 }, { 2856524, 99147 }, { 2915944, 99641 }, { 2976382, 100134 }, { 3037850, 100626 }, { 3100360, 101117 }, { 3163924, 101608 }, { 3228554, 102097 }, { 3294263, 102586 }, { 3361063, 103073 }, { 3428966, 103560 }, { 3497984, 104045 }, { 3568131, 104530 }, { 3639419, 105014 }, { 3711860, 105498 }, { 3785467, 105980 }, { 3860253, 106462 }, { 3936229, 106942 }, { 4013410, 107422 }, { 4091808, 107902 }, { 4171435, 108380 }, { 4252306, 108858 }, { 4334431, 109335 }, { 4417825, 109811 }, { 4502501, 110287 }, { 4588472, 110762 }, { 4675750, 111236 }, { 4764349, 111709 }, { 4854283, 112182 }, { 4945564, 112654 }, { 5038206, 113126 }, { 5132223, 113597 }, { 5227627, 114067 }, { 5324432, 114537 }, { 5422652, 115006 }, { 5522299, 115474 }, { 5623389, 115942 }, { 5725934, 116409 }, { 5829948, 116876 }, { 5935446, 117342 }, { 6042439, 117808 }, { 6150943, 118273 }, { 6260972, 118738 }, { 6372538, 119202 }, { 6485657, 119665 }, { 6600342, 120128 }, { 6716607, 120591 }, { 6834467, 121053 }, { 6953935, 121514 }, { 7075025, 121976 }, { 7197752, 122436 }, { 7322131, 122896 }, { 7448175, 123356 }, { 7575898, 123815 }, { 7705316, 124274 }, { 7836442, 124733 }, { 7969291, 125191 }, { 8103877, 
125648 }, { 8240216, 126105 }, { 8378321, 126562 }, { 8518208, 127018 }, { 8659890, 127474 }, { 8803384, 127930 }, { 8948702, 128385 }, { 9095861, 128840 }, { 9244875, 129294 }, { 9395760, 129748 }, { 9548529, 130202 }, { 9703198, 130655 }, { 9859782, 131108 }, { 10018296, 131561 }, { 10178755, 132014 }, { 10341174, 132466 }, { 10505569, 132917 }, { 10671954, 133369 }, { 10840345, 133820 }, { 11010757, 134271 }, { 11183206, 134721 }, { 11357706, 135171 }, { 11534274, 135621 }, { 11712924, 136071 }, { 11893673, 136520 }, { 12076536, 136969 }, { 12261527, 137418 }, { 12448664, 137867 }, { 12637961, 138315 }, { 12829435, 138763 }, { 13023101, 139211 }, { 13218974, 139658 }, { 13417071, 140106 }, { 13617407, 140553 }, { 13819999, 140999 }, { 14024862, 141446 }, { 14232012, 141892 }, { 14441465, 142339 }, { 14653238, 142785 }, { 14867346, 143230 }, { 15083805, 143676 }, { 15302632, 144121 }, { 15523842, 144566 }, { 15747453, 145011 }, { 15973479, 145456 }, { 16201939, 145900 }, { 16432847, 146345 }, { 16666221, 146789 }, { 16902076, 147233 }, { 17140429, 147677 }, { 17381297, 148121 }, { 17624696, 148564 }, { 17870643, 149007 }, { 18119154, 149451 }, { 18370247, 149894 }, { 18623936, 150336 }, { 18880241, 150779 }, { 19139176, 151222 }, { 19400759, 151664 }, { 19665007, 152107 }, { 19931936, 152549 }, { 20201564, 152991 }, { 20473907, 153433 }, { 20748982, 153875 }, { 21026807, 154316 }, { 21307399, 154758 }, { 21590773, 155199 }, { 21876949, 155641 }, { 22165941, 156082 }, { 22457769, 156523 }, { 22752449, 156964 }, { 23049999, 157405 }, { 23350435, 157846 }, { 23653774, 158287 }, { 23960036, 158727 }, { 24269236, 159168 }, { 24581392, 159608 }, { 24896521, 160049 }, { 25214642, 160489 }, { 25535772, 160929 }, { 25859927, 161370 }, { 26187127, 161810 }, { 26517388, 162250 }, { 26850728, 162690 }, { 27187165, 163130 }, { 27526716, 163569 }, { 27869400, 164009 }, { 28215234, 164449 }, { 28564236, 164889 }, { 28916423, 165328 }, { 29271815, 165768 }, { 29630428, 166208 }, { 29992281, 166647 }, { 30357392, 167087 }, { 30725779, 167526 }, { 31097459, 167965 }, { 31472452, 168405 }, { 31850774, 168844 }, { 32232445, 169283 }, { 32617482, 169723 }, { 33005904, 170162 }, { 33397730, 170601 }, { 33792976, 171041 }, { 34191663, 171480 }, { 34593807, 171919 }, { 34999428, 172358 }, { 35408544, 172797 }, { 35821174, 173237 }, { 36237335, 173676 }, { 36657047, 174115 }, { 37080329, 174554 }, { 37507197, 174993 }, { 37937673, 175433 }, { 38371773, 175872 }, { 38809517, 176311 }, { 39250924, 176750 }, { 39696012, 177190 }, { 40144800, 177629 }, { 40597308, 178068 }, { 41053553, 178507 }, { 41513554, 178947 }, { 41977332, 179386 }, { 42444904, 179825 }, { 42916290, 180265 }, { 43391509, 180704 }, { 43870579, 181144 }, { 44353520, 181583 }, { 44840352, 182023 }, { 45331092, 182462 }, { 45825761, 182902 }, { 46324378, 183342 }, { 46826961, 183781 }, { 47333531, 184221 }, { 47844106, 184661 }, { 48358706, 185101 }, { 48877350, 185541 }, { 49400058, 185981 }, { 49926849, 186421 }, { 50457743, 186861 }, { 50992759, 187301 }, { 51531916, 187741 }, { 52075235, 188181 }, { 52622735, 188622 }, { 53174435, 189062 }, { 53730355, 189502 }, { 54290515, 189943 }, { 54854935, 190383 }, { 55423634, 190824 }, { 55996633, 191265 }, { 56573950, 191706 }, { 57155606, 192146 }, { 57741621, 192587 }, { 58332014, 193028 }, { 58926806, 193470 }, { 59526017, 193911 }, { 60129666, 194352 }, { 60737774, 194793 }, { 61350361, 195235 }, { 61967446, 195677 }, { 62589050, 196118 }, { 63215194, 196560 }, { 63845897, 197002 }, { 64481179, 
197444 }, { 65121061, 197886 }, { 65765563, 198328 }, { 66414705, 198770 }, { 67068508, 199213 }, { 67726992, 199655 }, { 68390177, 200098 }, { 69058085, 200540 }, { 69730735, 200983 }, { 70408147, 201426 }, { 71090343, 201869 }, { 71777343, 202312 }, { 72469168, 202755 }, { 73165837, 203199 }, { 73867373, 203642 }, { 74573795, 204086 }, { 75285124, 204529 }, { 76001380, 204973 }, { 76722586, 205417 }, { 77448761, 205861 }, { 78179926, 206306 }, { 78916102, 206750 }, { 79657310, 207194 }, { 80403571, 207639 }, { 81154906, 208084 }, { 81911335, 208529 }, { 82672880, 208974 }, { 83439562, 209419 }, { 84211402, 209864 }, { 84988421, 210309 }, { 85770640, 210755 }, { 86558080, 211201 }, { 87350762, 211647 }, { 88148708, 212093 }, { 88951938, 212539 }, { 89760475, 212985 }, { 90574339, 213432 }, { 91393551, 213878 }, { 92218133, 214325 }, { 93048107, 214772 }, { 93883493, 215219 }, { 94724314, 215666 }, { 95570590, 216114 }, { 96422343, 216561 }, { 97279594, 217009 }, { 98142366, 217457 }, { 99010679, 217905 }, { 99884556, 218353 }, { 100764018, 218801 }, { 101649086, 219250 }, { 102539782, 219698 }, { 103436128, 220147 }, { 104338146, 220596 }, { 105245857, 221046 }, { 106159284, 221495 }, { 107078448, 221945 }, { 108003370, 222394 }, { 108934074, 222844 }, { 109870580, 223294 }, { 110812910, 223745 }, { 111761087, 224195 }, { 112715133, 224646 }, { 113675069, 225097 }, { 114640918, 225548 }, { 115612702, 225999 }, { 116590442, 226450 }, { 117574162, 226902 }, { 118563882, 227353 }, { 119559626, 227805 }, { 120561415, 228258 }, { 121569272, 228710 }, { 122583219, 229162 }, { 123603278, 229615 }, { 124629471, 230068 }, { 125661822, 230521 }, { 126700352, 230974 }, { 127745083, 231428 }, { 128796039, 231882 }, { 129853241, 232336 }, { 130916713, 232790 }, { 131986475, 233244 }, { 133062553, 233699 }, { 134144966, 234153 }, { 135233739, 234608 }, { 136328894, 235064 }, { 137430453, 235519 }, { 138538440, 235975 }, { 139652876, 236430 }, { 140773786, 236886 }, { 141901190, 237343 }, { 143035113, 237799 }, { 144175576, 238256 }, { 145322604, 238713 }, { 146476218, 239170 }, { 147636442, 239627 }, { 148803298, 240085 }, { 149976809, 240542 }, { 151156999, 241000 }, { 152343890, 241459 }, { 153537506, 241917 }, { 154737869, 242376 }, { 155945002, 242835 }, { 157158929, 243294 }, { 158379673, 243753 }, { 159607257, 244213 }, { 160841704, 244673 }, { 162083037, 245133 }, { 163331279, 245593 }, { 164586455, 246054 }, { 165848586, 246514 }, { 167117696, 246975 }, { 168393810, 247437 }, { 169676949, 247898 }, { 170967138, 248360 }, { 172264399, 248822 }, { 173568757, 249284 }, { 174880235, 249747 }, { 176198856, 250209 }, { 177524643, 250672 }, { 178857621, 251136 }, { 180197813, 251599 }, { 181545242, 252063 }, { 182899933, 252527 }, { 184261908, 252991 }, { 185631191, 253456 }, { 187007807, 253920 }, { 188391778, 254385 }, { 189783129, 254851 }, { 191181884, 255316 }, { 192588065, 255782 }, { 194001698, 256248 }, { 195422805, 256714 }, { 196851411, 257181 }, { 198287540, 257648 }, { 199731215, 258115 }, { 201182461, 258582 }, { 202641302, 259050 }, { 204107760, 259518 }, { 205581862, 259986 }, { 207063630, 260454 }, { 208553088, 260923 }, { 210050262, 261392 }, { 211555174, 261861 }, { 213067849, 262331 }, { 214588312, 262800 }, { 216116586, 263270 }, { 217652696, 263741 }, { 219196666, 264211 }, { 220748520, 264682 }, { 222308282, 265153 }, { 223875978, 265625 }, { 225451630, 266097 }, { 227035265, 266569 }, { 228626905, 267041 }, { 230226576, 267514 }, { 231834302, 267986 }, { 233450107, 268460 }, { 
235074016, 268933 }, { 236706054, 269407 }, { 238346244, 269881 }, { 239994613, 270355 }, { 241651183, 270830 }, { 243315981, 271305 } }; /* return largest index i such that fval <= lookup[i][small] */ static inline u32 tfrc_binsearch(u32 fval, u8 small) { u32 try, low = 0, high = TFRC_CALC_X_ARRSIZE - 1; while (low < high) { try = (low + high) / 2; if (fval <= tfrc_calc_x_lookup[try][small]) high = try; else low = try + 1; } return high; } /** * tfrc_calc_x - Calculate the send rate as per section 3.1 of RFC3448 * @s: packet size in bytes * @R: RTT scaled by 1000000 (i.e., microseconds) * @p: loss ratio estimate scaled by 1000000 * Returns X_calc in bytes per second (not scaled). */ u32 tfrc_calc_x(u16 s, u32 R, u32 p) { u16 index; u32 f; u64 result; /* check against invalid parameters and divide-by-zero */ BUG_ON(p > 1000000); /* p must not exceed 100% */ BUG_ON(p == 0); /* f(0) = 0, divide by zero */ if (R == 0) { /* possible divide by zero */ DCCP_CRIT("WARNING: RTT is 0, returning maximum X_calc."); return ~0U; } if (p <= TFRC_CALC_X_SPLIT) { /* 0.0000 < p <= 0.05 */ if (p < TFRC_SMALLEST_P) { /* 0.0000 < p < 0.0001 */ DCCP_WARN("Value of p (%d) below resolution. " "Substituting %d\n", p, TFRC_SMALLEST_P); index = 0; } else /* 0.0001 <= p <= 0.05 */ index = p/TFRC_SMALLEST_P - 1; f = tfrc_calc_x_lookup[index][1]; } else { /* 0.05 < p <= 1.00 */ index = p/(1000000/TFRC_CALC_X_ARRSIZE) - 1; f = tfrc_calc_x_lookup[index][0]; } /* * Compute X = s/(R*f(p)) in bytes per second. * Since f(p) and R are both scaled by 1000000, we need to multiply by * 1000000^2. To avoid overflow, the result is computed in two stages. * This works under almost all reasonable operational conditions, for a * wide range of parameters. Yet, should some strange combination of * parameters result in overflow, the use of scaled_div32 will catch * this and return UINT_MAX - which is a logically adequate consequence. */ result = scaled_div(s, R); return scaled_div32(result, f); } /** * tfrc_calc_x_reverse_lookup - try to find p given f(p) * @fvalue: function value to match, scaled by 1000000 * Returns closest match for p, also scaled by 1000000 */ u32 tfrc_calc_x_reverse_lookup(u32 fvalue) { int index; if (fvalue == 0) /* f(p) = 0 whenever p = 0 */ return 0; /* Error cases. */ if (fvalue < tfrc_calc_x_lookup[0][1]) { DCCP_WARN("fvalue %u smaller than resolution\n", fvalue); return TFRC_SMALLEST_P; } if (fvalue > tfrc_calc_x_lookup[TFRC_CALC_X_ARRSIZE - 1][0]) { DCCP_WARN("fvalue %u exceeds bounds!\n", fvalue); return 1000000; } if (fvalue <= tfrc_calc_x_lookup[TFRC_CALC_X_ARRSIZE - 1][1]) { index = tfrc_binsearch(fvalue, 1); return (index + 1) * TFRC_CALC_X_SPLIT / TFRC_CALC_X_ARRSIZE; } /* else ... it must be in the coarse-grained column */ index = tfrc_binsearch(fvalue, 0); return (index + 1) * 1000000 / TFRC_CALC_X_ARRSIZE; } /** * tfrc_invert_loss_event_rate - Compute p so that 10^6 corresponds to 100% * When @loss_event_rate is large, there is a chance that p is truncated to 0. * To avoid re-entering slow-start in that case, we set p = TFRC_SMALLEST_P > 0. */ u32 tfrc_invert_loss_event_rate(u32 loss_event_rate) { if (loss_event_rate == UINT_MAX) /* see RFC 4342, 8.5 */ return 0; if (unlikely(loss_event_rate == 0)) /* map 1/0 into 100% */ return 1000000; return max_t(u32, scaled_div(1, loss_event_rate), TFRC_SMALLEST_P); }
AriesVE-DevCon-TEAM/samsung-kernel-msm7x30
net/dccp/ccids/lib/tfrc_equation.c
C
gpl-2.0
19,079
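A worked example (not from the source; the s and R values are illustrative) makes the scaling concrete. For a loss event rate of p = 1%:

\[
f(0.01) = \sqrt{\frac{2 \cdot 0.01}{3}} + 12\sqrt{\frac{3 \cdot 0.01}{8}}\left(0.01 + 32 \cdot 0.01^{3}\right) \approx 0.08165 + 0.73485 \cdot 0.010032 \approx 0.08902,
\]

so g(10000) = 10^6 * f(0.01) ≈ 89021 — the value tabulated in both columns (as lookup[4][0], since 5 * 0.2% = 1%, and as lookup[99][1], since 100 * 0.01% = 1%). With s = 1460 bytes and R = 100 ms, the throughput equation gives X_calc = s/(R * f(p)) = 1460/(0.1 * 0.08902) ≈ 1.64 * 10^5 bytes/s, about 164 kB/s; tfrc_calc_x() reaches the same quotient via scaled_div()/scaled_div32() so the kernel never touches floating point.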
#include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/netfilter.h> #include <linux/mutex.h> #include <net/sock.h> #include "nf_internals.h" /* Sockopts only registered and called from user context, so net locking would be overkill. Also, [gs]etsockopt calls may sleep. */ static DEFINE_MUTEX(nf_sockopt_mutex); static LIST_HEAD(nf_sockopts); /* Do exclusive ranges overlap? */ static inline int overlap(int min1, int max1, int min2, int max2) { return max1 > min2 && min1 < max2; } /* Functions to register sockopt ranges (exclusive). */ int nf_register_sockopt(struct nf_sockopt_ops *reg) { struct nf_sockopt_ops *ops; int ret = 0; if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0) return -EINTR; list_for_each_entry(ops, &nf_sockopts, list) { if (ops->pf == reg->pf && (overlap(ops->set_optmin, ops->set_optmax, reg->set_optmin, reg->set_optmax) || overlap(ops->get_optmin, ops->get_optmax, reg->get_optmin, reg->get_optmax))) { NFDEBUG("nf_sock overlap: %u-%u/%u-%u v %u-%u/%u-%u\n", ops->set_optmin, ops->set_optmax, ops->get_optmin, ops->get_optmax, reg->set_optmin, reg->set_optmax, reg->get_optmin, reg->get_optmax); ret = -EBUSY; goto out; } } list_add(&reg->list, &nf_sockopts); out: mutex_unlock(&nf_sockopt_mutex); return ret; } EXPORT_SYMBOL(nf_register_sockopt); void nf_unregister_sockopt(struct nf_sockopt_ops *reg) { mutex_lock(&nf_sockopt_mutex); list_del(&reg->list); mutex_unlock(&nf_sockopt_mutex); } EXPORT_SYMBOL(nf_unregister_sockopt); static struct nf_sockopt_ops *nf_sockopt_find(struct sock *sk, u_int8_t pf, int val, int get) { struct nf_sockopt_ops *ops; if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0) return ERR_PTR(-EINTR); list_for_each_entry(ops, &nf_sockopts, list) { if (ops->pf == pf) { if (!try_module_get(ops->owner)) goto out_nosup; if (get) { if (val >= ops->get_optmin && val < ops->get_optmax) goto out; } else { if (val >= ops->set_optmin && val < ops->set_optmax) goto out; } module_put(ops->owner); } } out_nosup: ops = ERR_PTR(-ENOPROTOOPT); out: mutex_unlock(&nf_sockopt_mutex); return ops; } /* Call get/setsockopt() */ static int nf_sockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt, int *len, int get) { struct nf_sockopt_ops *ops; int ret; ops = nf_sockopt_find(sk, pf, val, get); if (IS_ERR(ops)) return PTR_ERR(ops); if (get) ret = ops->get(sk, val, opt, len); else ret = ops->set(sk, val, opt, *len); module_put(ops->owner); return ret; } int nf_setsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt, unsigned int len) { return nf_sockopt(sk, pf, val, opt, &len, 0); } EXPORT_SYMBOL(nf_setsockopt); int nf_getsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt, int *len) { return nf_sockopt(sk, pf, val, opt, len, 1); } EXPORT_SYMBOL(nf_getsockopt); #ifdef CONFIG_COMPAT static int compat_nf_sockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt, int *len, int get) { struct nf_sockopt_ops *ops; int ret; ops = nf_sockopt_find(sk, pf, val, get); if (IS_ERR(ops)) return PTR_ERR(ops); if (get) { if (ops->compat_get) ret = ops->compat_get(sk, val, opt, len); else ret = ops->get(sk, val, opt, len); } else { if (ops->compat_set) ret = ops->compat_set(sk, val, opt, *len); else ret = ops->set(sk, val, opt, *len); } module_put(ops->owner); return ret; } int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt, unsigned int len) { return compat_nf_sockopt(sk, pf, val, opt, &len, 0); } EXPORT_SYMBOL(compat_nf_setsockopt); int 
compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt, int *len) { return compat_nf_sockopt(sk, pf, val, opt, len, 1); } EXPORT_SYMBOL(compat_nf_getsockopt); #endif
percy-g2/android_kernel_motorola_msm8610
net/netfilter/nf_sockopt.c
C
gpl-2.0
3,951
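As a usage sketch of the interface above: the protocol family is real, but the option numbers and both handlers are invented for illustration. Note that the ranges are exclusive at the top end, matching the val < ops->set_optmax checks in nf_sockopt_find().

#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/socket.h>

#define MY_SO_BASE	96	/* hypothetical option number */

static int my_set(struct sock *sk, int optval, void __user *user,
		  unsigned int len)
{
	return 0;	/* parse and apply the option here */
}

static int my_get(struct sock *sk, int optval, void __user *user, int *len)
{
	return 0;	/* copy the answer back to userspace here */
}

static struct nf_sockopt_ops my_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= MY_SO_BASE,
	.set_optmax	= MY_SO_BASE + 1,	/* exclusive upper bound */
	.set		= my_set,
	.get_optmin	= MY_SO_BASE,
	.get_optmax	= MY_SO_BASE + 1,
	.get		= my_get,
	.owner		= THIS_MODULE,
};

static int __init my_init(void)
{
	/* Returns -EBUSY if the range overlaps a registered one, per
	 * the overlap() check in nf_register_sockopt() above. */
	return nf_register_sockopt(&my_sockopts);
}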
/* Common hooks for IA64. Copyright (C) 1999-2013 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "diagnostic-core.h" #include "tm.h" #include "tm_p.h" #include "common/common-target.h" #include "common/common-target-def.h" #include "opts.h" #include "flags.h" #include "params.h" /* Implement overriding of the optimization options. */ static const struct default_options ia64_option_optimization_table[] = { { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 }, #ifdef SUBTARGET_OPTIMIZATION_OPTIONS SUBTARGET_OPTIMIZATION_OPTIONS, #endif { OPT_LEVELS_NONE, 0, NULL, 0 } }; /* Implement TARGET_HANDLE_OPTION. */ static bool ia64_handle_option (struct gcc_options *opts ATTRIBUTE_UNUSED, struct gcc_options *opts_set ATTRIBUTE_UNUSED, const struct cl_decoded_option *decoded, location_t loc) { size_t code = decoded->opt_index; const char *arg = decoded->arg; int value = decoded->value; switch (code) { case OPT_mtls_size_: if (value != 14 && value != 22 && value != 64) error_at (loc, "bad value %<%s%> for -mtls-size= switch", arg); return true; default: return true; } } /* Implement TARGET_EXCEPT_UNWIND_INFO. */ enum unwind_info_type ia64_except_unwind_info (struct gcc_options *opts) { /* Honor the --enable-sjlj-exceptions configure switch. */ #ifdef CONFIG_SJLJ_EXCEPTIONS if (CONFIG_SJLJ_EXCEPTIONS) return UI_SJLJ; #endif /* For simplicity elsewhere in this file, indicate that all unwind info is disabled if we're not emitting unwind tables. */ if (!opts->x_flag_exceptions && !opts->x_flag_unwind_tables) return UI_NONE; return UI_TARGET; } /* Implement TARGET_OPTION_DEFAULT_PARAMS. */ static void ia64_option_default_params (void) { /* Let the scheduler form additional regions. */ set_default_param_value (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS, 2); /* Set the default values for cache-related parameters. */ set_default_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6); set_default_param_value (PARAM_L1_CACHE_LINE_SIZE, 32); set_default_param_value (PARAM_SCHED_MEM_TRUE_DEP_COST, 4); } #undef TARGET_OPTION_OPTIMIZATION_TABLE #define TARGET_OPTION_OPTIMIZATION_TABLE ia64_option_optimization_table #undef TARGET_OPTION_DEFAULT_PARAMS #define TARGET_OPTION_DEFAULT_PARAMS ia64_option_default_params #undef TARGET_EXCEPT_UNWIND_INFO #define TARGET_EXCEPT_UNWIND_INFO ia64_except_unwind_info #undef TARGET_DEFAULT_TARGET_FLAGS #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT) #undef TARGET_HANDLE_OPTION #define TARGET_HANDLE_OPTION ia64_handle_option struct gcc_targetm_common targetm_common = TARGETM_COMMON_INITIALIZER;
EZchip/gcc
gcc/common/config/ia64/ia64-common.c
C
gpl-2.0
3,385
/* Copyright (C) 1999-2013 Free Software Foundation, Inc. NOTE: This source is derived from an old version taken from the GNU C Library (glibc). This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Under Section 7 of GPL version 3, you are granted additional permissions described in the GCC Runtime Library Exception, version 3.1, as published by the Free Software Foundation. You should have received a copy of the GNU General Public License and a copy of the GCC Runtime Library Exception along with this program; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see <http://www.gnu.org/licenses/>. */ #include <assert.h> #include <stdlib.h> #include "exit.h" #undef __cxa_atexit #define atomic_write_barrier() __asm__ ("eieio" ::: "memory") int attribute_hidden __internal_atexit (void (*func) (void *), void *arg, void *d, struct exit_function_list **listp) { struct exit_function *new = __new_exitfn (listp); if (new == NULL) return -1; #ifdef PTR_MANGLE PTR_MANGLE (func); #endif new->func.cxa.fn = (void (*) (void *, int)) func; new->func.cxa.arg = arg; new->func.cxa.dso_handle = d; atomic_write_barrier (); new->flavor = ef_cxa; return 0; } /* Register a function to be called by exit or when a shared library is unloaded. This function is only called from code generated by the C++ compiler. */ int __cxa_atexit (void (*func) (void *), void *arg, void *d) { return __internal_atexit (func, arg, d, &__exit_funcs); } INTDEF(__cxa_atexit) static struct exit_function_list initial; struct exit_function_list *__exit_funcs = &initial; uint64_t __new_exitfn_called; struct exit_function * __new_exitfn (struct exit_function_list **listp) { struct exit_function_list *p = NULL; struct exit_function_list *l; struct exit_function *r = NULL; size_t i = 0; for (l = *listp; l != NULL; p = l, l = l->next) { for (i = l->idx; i > 0; --i) if (l->fns[i - 1].flavor != ef_free) break; if (i > 0) break; /* This block is completely unused. */ l->idx = 0; } if (l == NULL || i == sizeof (l->fns) / sizeof (l->fns[0])) { /* The last entry in a block is used. Use the first entry in the previous block if it exists. Otherwise create a new one. */ if (p == NULL) { assert (l != NULL); p = (struct exit_function_list *) calloc (1, sizeof (struct exit_function_list)); if (p != NULL) { p->next = *listp; *listp = p; } } if (p != NULL) { r = &p->fns[0]; p->idx = 1; } } else { /* There is more room in the block. */ r = &l->fns[i]; l->idx = i + 1; } /* Mark entry as used, but we don't know the flavor now. */ if (r != NULL) { r->flavor = ef_us; ++__new_exitfn_called; } return r; }
Hellybean/SaberMod_ROM_Toolchain
libgcc/config/rs6000/cxa_atexit.c
C
gpl-2.0
3,257
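A minimal usage sketch for the entry point above: roughly the call a C++ compiler emits to register a file-scope object's destructor via __cxa_atexit. The widget, widget_dtor, and register_dtor names are illustrative assumptions; only __cxa_atexit and __dso_handle are real runtime symbols.

/* Illustrative sketch only; widget/widget_dtor/register_dtor are made up. */
extern void *__dso_handle;
extern int __cxa_atexit (void (*func) (void *), void *arg, void *d);

struct widget { int state; };
static struct widget obj = { 1 };

static void
widget_dtor (void *p)
{
  /* Runs at exit (or DSO unload), in reverse registration order.  */
  ((struct widget *) p)->state = 0;
}

static void
register_dtor (void)
{
  /* Queue widget_dtor (&obj), keyed to this DSO via __dso_handle.  */
  __cxa_atexit (widget_dtor, &obj, &__dso_handle);
}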
/*
 * Copyright (c) 2002, 2007, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

package com.sun.media.sound;

import javax.sound.midi.*;

/**
 * an optimized ShortMessage that does not need an array
 *
 * @author Florian Bomers
 */
class FastShortMessage extends ShortMessage {
    private int packedMsg;

    public FastShortMessage(int packedMsg) throws InvalidMidiDataException {
        this.packedMsg = packedMsg;
        getDataLength(packedMsg & 0xFF); // to check for validity
    }

    /** Creates a FastShortMessage from this ShortMessage */
    public FastShortMessage(ShortMessage msg) {
        this.packedMsg = msg.getStatus()
            | (msg.getData1() << 8)
            | (msg.getData2() << 16);
    }

    int getPackedMsg() {
        return packedMsg;
    }

    public byte[] getMessage() {
        int length = 0;
        try {
            // fix for bug 4851018: MidiMessage.getLength and .getData return wrong values
            // fix for bug 4890405: Reading MidiMessage byte array fails in 1.4.2
            length = getDataLength(packedMsg & 0xFF) + 1;
        } catch (InvalidMidiDataException imde) {
            // should never happen
        }
        byte[] returnedArray = new byte[length];
        if (length>0) {
            returnedArray[0] = (byte) (packedMsg & 0xFF);
            if (length>1) {
                returnedArray[1] = (byte) ((packedMsg & 0xFF00) >> 8);
                if (length>2) {
                    returnedArray[2] = (byte) ((packedMsg & 0xFF0000) >> 16);
                }
            }
        }
        return returnedArray;
    }

    public int getLength() {
        try {
            return getDataLength(packedMsg & 0xFF) + 1;
        } catch (InvalidMidiDataException imde) {
            // should never happen
        }
        return 0;
    }

    public void setMessage(int status) throws InvalidMidiDataException {
        // check for valid values
        int dataLength = getDataLength(status); // can throw InvalidMidiDataException
        if (dataLength != 0) {
            super.setMessage(status); // throws Exception
        }
        packedMsg = (packedMsg & 0xFFFF00) | (status & 0xFF);
    }

    public void setMessage(int status, int data1, int data2) throws InvalidMidiDataException {
        getDataLength(status); // can throw InvalidMidiDataException
        packedMsg = (status & 0xFF) | ((data1 & 0xFF) << 8) | ((data2 & 0xFF) << 16);
    }

    public void setMessage(int command, int channel, int data1, int data2) throws InvalidMidiDataException {
        getDataLength(command); // can throw InvalidMidiDataException
        packedMsg = (command & 0xF0) | (channel & 0x0F)
            | ((data1 & 0xFF) << 8)
            | ((data2 & 0xFF) << 16);
    }

    public int getChannel() {
        return packedMsg & 0x0F;
    }

    public int getCommand() {
        return packedMsg & 0xF0;
    }

    public int getData1() {
        return (packedMsg & 0xFF00) >> 8;
    }

    public int getData2() {
        return (packedMsg & 0xFF0000) >> 16;
    }

    public int getStatus() {
        return packedMsg & 0xFF;
    }

    /**
     * Creates a new object of the same class and with the same contents
     * as this object.
     * @return a clone of this instance.
     */
    public Object clone() {
        try {
            return new FastShortMessage(packedMsg);
        } catch (InvalidMidiDataException imde) {
            // should never happen
        }
        return null;
    }

} // class FastShortMessage
shchiu/openjdk
jdk/src/share/classes/com/sun/media/sound/FastShortMessage.java
Java
gpl-2.0
4,623
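For reference, a small standalone sketch of the pack/unpack scheme FastShortMessage relies on: status in the low byte, data1 and data2 in the next two bytes. The demo values and program itself are illustrative assumptions, not from the JDK source.

#include <stdio.h>

int main (void)
{
  /* Pack a MIDI short message the same way FastShortMessage does:
     status | data1 << 8 | data2 << 16.  */
  unsigned status = 0x90, data1 = 60, data2 = 127;  /* note-on, middle C */
  unsigned packed = (status & 0xFF)
                  | ((data1 & 0xFF) << 8)
                  | ((data2 & 0xFF) << 16);

  /* Unpack exactly as getStatus()/getData1()/getData2() do.  */
  printf ("status 0x%X\n", packed & 0xFF);           /* 0x90 */
  printf ("data1  %u\n", (packed & 0xFF00) >> 8);    /* 60 */
  printf ("data2  %u\n", (packed & 0xFF0000) >> 16); /* 127 */
  return 0;
}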
<?php

/**
 * Zend Framework
 *
 * LICENSE
 *
 * This source file is subject to the new BSD license that is bundled
 * with this package in the file LICENSE.txt.
 * It is also available through the world-wide-web at this URL:
 * http://framework.zend.com/license/new-bsd
 * If you did not receive a copy of the license and are unable to
 * obtain it through the world-wide-web, please send an email
 * to license@zend.com so we can send you a copy immediately.
 *
 * @category   Zend
 * @package    Zend_Service_WindowsAzure
 * @subpackage Storage
 * @copyright  Copyright (c) 2005-2010 Zend Technologies USA Inc. (http://www.zend.com)
 * @license    http://framework.zend.com/license/new-bsd New BSD License
 * @version    $Id: DynamicTableEntity.php 23167 2010-10-19 17:53:31Z mabe $
 */

/**
 * @see Zend_Service_WindowsAzure_Exception
 */
#require_once 'Zend/Service/WindowsAzure/Exception.php';

/**
 * @see Zend_Service_WindowsAzure_Storage_TableEntity
 */
#require_once 'Zend/Service/WindowsAzure/Storage/TableEntity.php';

/**
 * @category   Zend
 * @package    Zend_Service_WindowsAzure
 * @subpackage Storage
 * @copyright  Copyright (c) 2005-2010 Zend Technologies USA Inc. (http://www.zend.com)
 * @license    http://framework.zend.com/license/new-bsd New BSD License
 */
class Zend_Service_WindowsAzure_Storage_DynamicTableEntity extends Zend_Service_WindowsAzure_Storage_TableEntity
{
    /**
     * Dynamic properties
     *
     * @var array
     */
    protected $_dynamicProperties = array();

    /**
     * Magic overload for setting properties
     *
     * @param string $name  Name of the property
     * @param string $value Value to set
     */
    public function __set($name, $value)
    {
        $this->setAzureProperty($name, $value, null);
    }

    /**
     * Magic overload for getting properties
     *
     * @param string $name Name of the property
     */
    public function __get($name)
    {
        return $this->getAzureProperty($name);
    }

    /**
     * Set an Azure property
     *
     * @param string $name  Property name
     * @param mixed  $value Property value
     * @param string $type  Property type (Edm.xxxx)
     * @return Zend_Service_WindowsAzure_Storage_DynamicTableEntity
     */
    public function setAzureProperty($name, $value = '', $type = null)
    {
        if (strtolower($name) == 'partitionkey') {
            $this->setPartitionKey($value);
        } else if (strtolower($name) == 'rowkey') {
            $this->setRowKey($value);
        } else if (strtolower($name) == 'etag') {
            $this->setEtag($value);
        } else {
            if (!array_key_exists(strtolower($name), $this->_dynamicProperties)) {
                // Determine type?
                if ($type === null) {
                    $type = 'Edm.String';
                    if (is_int($value)) {
                        $type = 'Edm.Int32';
                    } else if (is_float($value)) {
                        $type = 'Edm.Double';
                    } else if (is_bool($value)) {
                        $type = 'Edm.Boolean';
                    }
                }

                // Set dynamic property
                $this->_dynamicProperties[strtolower($name)] = (object)array(
                    'Name'  => $name,
                    'Type'  => $type,
                    'Value' => $value,
                );
            }

            $this->_dynamicProperties[strtolower($name)]->Value = $value;
        }
        return $this;
    }

    /**
     * Set an Azure property type
     *
     * @param string $name Property name
     * @param string $type Property type (Edm.xxxx)
     * @return Zend_Service_WindowsAzure_Storage_DynamicTableEntity
     */
    public function setAzurePropertyType($name, $type = 'Edm.String')
    {
        if (!array_key_exists(strtolower($name), $this->_dynamicProperties)) {
            $this->setAzureProperty($name, '', $type);
        } else {
            $this->_dynamicProperties[strtolower($name)]->Type = $type;
        }
        return $this;
    }

    /**
     * Get an Azure property
     *
     * @param string $name Property name
     * @return mixed Property value
     */
    public function getAzureProperty($name)
    {
        if (strtolower($name) == 'partitionkey') {
            return $this->getPartitionKey();
        }
        if (strtolower($name) == 'rowkey') {
            return $this->getRowKey();
        }
        if (strtolower($name) == 'etag') {
            return $this->getEtag();
        }

        if (!array_key_exists(strtolower($name), $this->_dynamicProperties)) {
            $this->setAzureProperty($name);
        }

        return $this->_dynamicProperties[strtolower($name)]->Value;
    }

    /**
     * Get an Azure property type
     *
     * @param string $name Property name
     * @return string Property type (Edm.xxxx)
     */
    public function getAzurePropertyType($name)
    {
        if (!array_key_exists(strtolower($name), $this->_dynamicProperties)) {
            // Property does not exist yet; register it with the default type.
            $this->setAzureProperty($name, '', 'Edm.String');
        }

        return $this->_dynamicProperties[strtolower($name)]->Type;
    }

    /**
     * Get Azure values
     *
     * @return array
     */
    public function getAzureValues()
    {
        return array_merge(array_values($this->_dynamicProperties), parent::getAzureValues());
    }

    /**
     * Set Azure values
     *
     * @param array   $values
     * @param boolean $throwOnError Throw Zend_Service_WindowsAzure_Exception when a property is not specified in $values?
     * @throws Zend_Service_WindowsAzure_Exception
     */
    public function setAzureValues($values = array(), $throwOnError = false)
    {
        // Set parent values
        parent::setAzureValues($values, false);

        // Set current values
        foreach ($values as $key => $value) {
            $this->$key = $value;
        }
    }
}
MaxYaroshenko/3rdarea
lib/Zend/Service/WindowsAzure/Storage/DynamicTableEntity.php
PHP
gpl-2.0
6,188
/*
 * GStreamer
 * Copyright (C) 2009 Carl-Anton Ingmarsson <ca.ingmarsson@gmail.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "gstvdpbuffer.h"

static GObjectClass *gst_vdp_buffer_parent_class;

void
gst_vdp_buffer_set_buffer_pool (GstVdpBuffer * buffer,
    GstVdpBufferPool * bpool)
{
  g_return_if_fail (GST_IS_VDP_BUFFER (buffer));

  if (bpool) {
    g_return_if_fail (GST_IS_VDP_BUFFER_POOL (bpool));
    g_object_add_weak_pointer (G_OBJECT (bpool), (void **) &buffer->bpool);
  }

  buffer->bpool = bpool;
}

gboolean
gst_vdp_buffer_revive (GstVdpBuffer * buffer)
{
  if (buffer->bpool)
    return gst_vdp_buffer_pool_put_buffer (buffer->bpool, buffer);

  return FALSE;
}

static void
gst_vdp_buffer_init (GstVdpBuffer * buffer, gpointer g_class)
{
  buffer->bpool = NULL;
}

static void
gst_vdp_buffer_class_init (gpointer g_class, gpointer class_data)
{
  gst_vdp_buffer_parent_class = g_type_class_peek_parent (g_class);
}

GType
gst_vdp_buffer_get_type (void)
{
  static GType _gst_vdp_buffer_type;

  if (G_UNLIKELY (_gst_vdp_buffer_type == 0)) {
    static const GTypeInfo info = {
      sizeof (GstBufferClass),
      NULL,
      NULL,
      gst_vdp_buffer_class_init,
      NULL,
      NULL,
      sizeof (GstVdpBuffer),
      0,
      (GInstanceInitFunc) gst_vdp_buffer_init,
      NULL
    };

    _gst_vdp_buffer_type = g_type_register_static (GST_TYPE_BUFFER,
        "GstVdpBuffer", &info, 0);
  }
  return _gst_vdp_buffer_type;
}
Mistobaan/gst-plugins-bad
sys/vdpau/gstvdp/gstvdpbuffer.c
C
gpl-2.0
2,216
<?php

/**
 * @file
 * Contains \Drupal\hal\Tests\NormalizeTest.
 */

namespace Drupal\hal\Tests;

use Drupal\Core\Url;

/**
 * Tests that entities can be normalized in HAL.
 *
 * @group hal
 */
class NormalizeTest extends NormalizerTestBase {

  /**
   * {@inheritdoc}
   */
  protected function setUp() {
    parent::setUp();
    \Drupal::service('router.builder')->rebuild();
  }

  /**
   * Tests the normalize function.
   */
  public function testNormalize() {
    $target_entity_de = entity_create('entity_test', (array('langcode' => 'de', 'field_test_entity_reference' => NULL)));
    $target_entity_de->save();
    $target_entity_en = entity_create('entity_test', (array('langcode' => 'en', 'field_test_entity_reference' => NULL)));
    $target_entity_en->save();

    // Create a German entity.
    $values = array(
      'langcode' => 'de',
      'name' => $this->randomMachineName(),
      'field_test_text' => array(
        'value' => $this->randomMachineName(),
        'format' => 'full_html',
      ),
      'field_test_entity_reference' => array(
        'target_id' => $target_entity_de->id(),
      ),
    );
    // Array of translated values.
    $translation_values = array(
      'name' => $this->randomMachineName(),
      'field_test_entity_reference' => array(
        'target_id' => $target_entity_en->id(),
      )
    );

    $entity = entity_create('entity_test', $values);
    $entity->save();
    // Add an English value for name and entity reference properties.
    $entity->getTranslation('en')->set('name', array(0 => array('value' => $translation_values['name'])));
    $entity->getTranslation('en')->set('field_test_entity_reference', array(0 => $translation_values['field_test_entity_reference']));
    $entity->save();

    $type_uri = Url::fromUri('base:rest/type/entity_test/entity_test', array('absolute' => TRUE))->toString();
    $relation_uri = Url::fromUri('base:rest/relation/entity_test/entity_test/field_test_entity_reference', array('absolute' => TRUE))->toString();

    $expected_array = array(
      '_links' => array(
        'curies' => array(
          array(
            'href' => '/relations',
            'name' => 'site',
            'templated' => true,
          ),
        ),
        'self' => array(
          'href' => $this->getEntityUri($entity),
        ),
        'type' => array(
          'href' => $type_uri,
        ),
        $relation_uri => array(
          array(
            'href' => $this->getEntityUri($target_entity_de),
            'lang' => 'de',
          ),
          array(
            'href' => $this->getEntityUri($target_entity_en),
            'lang' => 'en',
          ),
        ),
      ),
      '_embedded' => array(
        $relation_uri => array(
          array(
            '_links' => array(
              'self' => array(
                'href' => $this->getEntityUri($target_entity_de),
              ),
              'type' => array(
                'href' => $type_uri,
              ),
            ),
            'uuid' => array(
              array(
                'value' => $target_entity_de->uuid(),
              ),
            ),
            'lang' => 'de',
          ),
          array(
            '_links' => array(
              'self' => array(
                'href' => $this->getEntityUri($target_entity_en),
              ),
              'type' => array(
                'href' => $type_uri,
              ),
            ),
            'uuid' => array(
              array(
                'value' => $target_entity_en->uuid(),
              ),
            ),
            'lang' => 'en',
          ),
        ),
      ),
      'uuid' => array(
        array(
          'value' => $entity->uuid(),
        ),
      ),
      'langcode' => array(
        array(
          'value' => 'de',
        ),
      ),
      'name' => array(
        array(
          'value' => $values['name'],
          'lang' => 'de',
        ),
        array(
          'value' => $translation_values['name'],
          'lang' => 'en',
        ),
      ),
      'field_test_text' => array(
        array(
          'value' => $values['field_test_text']['value'],
          'format' => $values['field_test_text']['format'],
        ),
      ),
    );

    $normalized = $this->serializer->normalize($entity, $this->format);
    $this->assertEqual($normalized['_links']['self'], $expected_array['_links']['self'], 'self link placed correctly.');
    // @todo Test curies.
    // @todo Test type.
    $this->assertFalse(isset($normalized['id']), 'Internal id is not exposed.');
    $this->assertEqual($normalized['uuid'], $expected_array['uuid'], 'Non-translatable fields is normalized.');
    $this->assertEqual($normalized['name'], $expected_array['name'], 'Translatable field with multiple language values is normalized.');
    $this->assertEqual($normalized['field_test_text'], $expected_array['field_test_text'], 'Field with properties is normalized.');
    $this->assertEqual($normalized['_embedded'][$relation_uri], $expected_array['_embedded'][$relation_uri], 'Entity reference field is normalized.');
    $this->assertEqual($normalized['_links'][$relation_uri], $expected_array['_links'][$relation_uri], 'Links are added for entity reference field.');
  }

  /**
   * Constructs the entity URI.
   *
   * @param $entity
   *   The entity.
   *
   * @return string
   *   The entity URI.
   */
  protected function getEntityUri($entity) {
    return $entity->url('canonical', array('absolute' => TRUE));
  }

}
geerlingguy/demo-drupal-8
core/modules/hal/src/Tests/NormalizeTest.php
PHP
gpl-2.0
5,512
<?php
/**
 * TbDataColumn class file.
 * @author Antonio Ramirez <ramirez.cobos@gmail.com>
 * @author Christoffer Niska <ChristofferNiska@gmail.com>
 * @copyright Copyright &copy; Christoffer Niska 2013-
 * @license http://www.opensource.org/licenses/bsd-license.php New BSD License
 * @package bootstrap.widgets
 */

Yii::import('bootstrap.helpers.TbHtml');
Yii::import('zii.widgets.grid.CDataColumn');

/**
 * Bootstrap grid data column.
 */
class TbDataColumn extends CDataColumn
{
    /**
     * @var array HTML options for filter input
     * @link {TbDataColumn::renderFilterCellContent()}
     */
    public $filterInputOptions;

    /**
     * Renders the header cell content.
     * This method will render a link that can trigger the sorting if the column is sortable.
     */
    protected function renderHeaderCellContent()
    {
        if ($this->grid->enableSorting && $this->sortable && $this->name !== null) {
            $sort = $this->grid->dataProvider->getSort();
            $label = isset($this->header) ? $this->header : $sort->resolveLabel($this->name);
            if ($sort->resolveAttribute($this->name) !== false) {
                $label .= '<span class="caret"></span>';
            }
            echo $sort->link($this->name, $label, array('class' => 'sort-link'));
        } else {
            if ($this->name !== null && $this->header === null) {
                if ($this->grid->dataProvider instanceof CActiveDataProvider) {
                    echo CHtml::encode($this->grid->dataProvider->model->getAttributeLabel($this->name));
                } else {
                    echo CHtml::encode($this->name);
                }
            } else {
                parent::renderHeaderCellContent();
            }
        }
    }

    /**
     * Renders the filter cell.
     */
    public function renderFilterCell()
    {
        echo CHtml::openTag('td', $this->filterHtmlOptions);
        echo '<div class="filter-container">';
        $this->renderFilterCellContent();
        echo '</div>';
        echo CHtml::closeTag('td');
    }

    /**
     * Renders the filter cell content. Here we can provide HTML options for actual filter input
     */
    protected function renderFilterCellContent()
    {
        if (is_string($this->filter)) {
            echo $this->filter;
        } else {
            if ($this->filter !== false && $this->grid->filter !== null && $this->name !== null
                && strpos($this->name, '.') === false
            ) {
                if ($this->filterInputOptions) {
                    $filterInputOptions = $this->filterInputOptions;
                    if (empty($filterInputOptions['id'])) {
                        $filterInputOptions['id'] = false;
                    }
                } else {
                    $filterInputOptions = array();
                }
                if (is_array($this->filter)) {
                    $filterInputOptions['prompt'] = '';
                    echo CHtml::activeDropDownList(
                        $this->grid->filter,
                        $this->name,
                        $this->filter,
                        $filterInputOptions
                    );
                } else {
                    if ($this->filter === null) {
                        echo CHtml::activeTextField($this->grid->filter, $this->name, $filterInputOptions);
                    }
                }
            } else {
                parent::renderFilterCellContent();
            }
        }
    }
}
daschatten/mcc
www/protected/extensions/yiistrap-master/widgets/TbDataColumn.php
PHP
gpl-2.0
3,565
/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Code Aurora Forum nor
 *       the names of its contributors may be used to endorse or promote
 *       products derived from this software without specific prior written
 *       permission.
 *
 * Alternatively, provided that this notice is retained in full, this software
 * may be relicensed by the recipient under the terms of the GNU General Public
 * License version 2 ("GPL") and only version 2, in which case the provisions of
 * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
 * software under the GPL, then the identification text in the MODULE_LICENSE
 * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
 * recipient changes the license terms to the GPL, subsequent recipients shall
 * not relicense under alternate licensing terms, including the BSD or dual
 * BSD/GPL terms. In addition, the following license statement immediately
 * below and between the words START and END shall also then apply when this
 * software is relicensed under the GPL:
 *
 * START
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU General Public License version 2 and only version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * END
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <mach/debug_audio_mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>
#include <mach/qdsp5v2/aux_pcm.h>
#include <mach/gpio.h>
#include "../proc_comm.h"
#include <linux/delay.h>

/*----------------------------------------------------------------------------
 * Preprocessor Definitions and Constants
 * -------------------------------------------------------------------------*/

/* define offset of registers here, may put them into platform data */
#define AUX_CODEC_CTL_OFFSET 0x00
#define PCM_PATH_CTL_OFFSET 0x04
#define AUX_CODEC_CTL_OUT_OFFSET 0x08

/* define some bit values in PCM_PATH_CTL register */
#define PCM_PATH_CTL__ADSP_CTL_EN_BMSK 0x8

/* mask and shift */
#define AUX_CODEC_CTL_ADSP_CODEC_CTL_EN_BMSK 0x800
#define AUX_CODEC_CTL_PCM_SYNC_LONG_BMSK 0x400
#define AUX_CODEC_CTL_PCM_SYNC_SHORT_BMSK 0x200
#define AUX_CODEC_CTL_I2S_SAMPLE_CLK_SRC_BMSK 0x80
#define AUX_CODEC_CTL_I2S_SAMPLE_CLK_MODE_BMSK 0x40
#define AUX_CODEC_CTL_I2S_RX_MODE_BMSK 0x20
#define AUX_CODEC_CTL_I2S_CLK_MODE_BMSK 0x10
#define AUX_CODEC_CTL_AUX_PCM_MODE_BMSK 0x0b
#define AUX_CODEC_CTL_AUX_CODEC_MODE_BMSK 0x02

/* AUX PCM MODE */
#define MASTER_PRIM_PCM_SHORT 0
#define MASTER_AUX_PCM_LONG 1
#define SLAVE_PRIM_PCM_SHORT 2

struct aux_pcm_state {
    void __iomem *aux_pcm_base; /* configure aux pcm through Scorpion */
    int dout;
    int din;
    int syncout;
    int clkin_a;
};

static struct aux_pcm_state the_aux_pcm_state;

static void __iomem *get_base_addr(struct aux_pcm_state *aux_pcm)
{
    return aux_pcm->aux_pcm_base;
}

/* Set who controls aux pcm: adsp or MSM */
void aux_codec_adsp_codec_ctl_en(bool msm_adsp_en)
{
    void __iomem *baddr = get_base_addr(&the_aux_pcm_state);
    uint32_t val;

    if (!IS_ERR(baddr)) {
        val = readl(baddr + AUX_CODEC_CTL_OFFSET);
        if (msm_adsp_en) { /* adsp */
            writel(
            ((val & ~AUX_CODEC_CTL_ADSP_CODEC_CTL_EN_BMSK) |
            AUX_CODEC_CTL__ADSP_CODEC_CTL_EN__ADSP_V),
            baddr + AUX_CODEC_CTL_OFFSET);
        } else { /* MSM */
            writel(
            ((val & ~AUX_CODEC_CTL_ADSP_CODEC_CTL_EN_BMSK) |
            AUX_CODEC_CTL__ADSP_CODEC_CTL_EN__MSM_V),
            baddr + AUX_CODEC_CTL_OFFSET);
        }
    }
}

/* Set who controls the aux pcm path: adsp or MSM */
void aux_codec_pcm_path_ctl_en(bool msm_adsp_en)
{
    void __iomem *baddr = get_base_addr(&the_aux_pcm_state);
    uint32_t val;

    if (!IS_ERR(baddr)) {
        val = readl(baddr + PCM_PATH_CTL_OFFSET);
        if (msm_adsp_en) { /* adsp */
            writel(
            ((val & ~PCM_PATH_CTL__ADSP_CTL_EN_BMSK) |
            PCM_PATH_CTL__ADSP_CTL_EN__ADSP_V),
            baddr + PCM_PATH_CTL_OFFSET);
        } else { /* MSM */
            writel(
            ((val & ~PCM_PATH_CTL__ADSP_CTL_EN_BMSK) |
            PCM_PATH_CTL__ADSP_CTL_EN__MSM_V),
            baddr + PCM_PATH_CTL_OFFSET);
        }
    }
    return;
}
EXPORT_SYMBOL(aux_codec_pcm_path_ctl_en);

int aux_pcm_gpios_request(void)
{
    int rc = 0;

    MM_INFO(" aux_pcm_gpios_request \n");
    rc = gpio_request(the_aux_pcm_state.dout, "AUX PCM DOUT");
    if (rc) {
        MM_ERR("GPIO request for AUX PCM DOUT failed\n");
        return rc;
    }

    rc = gpio_request(the_aux_pcm_state.din, "AUX PCM DIN");
    if (rc) {
        MM_ERR("GPIO request for AUX PCM DIN failed\n");
        gpio_free(the_aux_pcm_state.dout);
        return rc;
    }

    rc = gpio_request(the_aux_pcm_state.syncout, "AUX PCM SYNC OUT");
    if (rc) {
        MM_ERR("GPIO request for AUX PCM SYNC OUT failed\n");
        gpio_free(the_aux_pcm_state.dout);
        gpio_free(the_aux_pcm_state.din);
        return rc;
    }

    rc = gpio_request(the_aux_pcm_state.clkin_a, "AUX PCM CLKIN A");
    if (rc) {
        MM_ERR("GPIO request for AUX PCM CLKIN A failed\n");
        gpio_free(the_aux_pcm_state.dout);
        gpio_free(the_aux_pcm_state.din);
        gpio_free(the_aux_pcm_state.syncout);
        return rc;
    }

    return rc;
}
EXPORT_SYMBOL(aux_pcm_gpios_request);

void aux_pcm_gpios_free(void)
{
    MM_INFO(" aux_pcm_gpios_free \n");

    /*
     * Feed silence frames before close to prevent buzzing sound in BT at
     * call end. This fix is applicable only to Marimba BT.
     */
    gpio_tlmm_config(PCOM_GPIO_CFG(the_aux_pcm_state.dout, 0, GPIO_OUTPUT,
                     GPIO_NO_PULL, GPIO_2MA), GPIO_ENABLE);
    gpio_set_value(the_aux_pcm_state.dout, 0);
    msleep(20);
    gpio_tlmm_config(PCOM_GPIO_CFG(the_aux_pcm_state.dout, 1, GPIO_OUTPUT,
                     GPIO_NO_PULL, GPIO_2MA), GPIO_ENABLE);

    gpio_free(the_aux_pcm_state.dout);
    gpio_free(the_aux_pcm_state.din);
    gpio_free(the_aux_pcm_state.syncout);
    gpio_free(the_aux_pcm_state.clkin_a);
}
EXPORT_SYMBOL(aux_pcm_gpios_free);

static int get_aux_pcm_gpios(struct platform_device *pdev)
{
    int rc = 0;
    struct resource *res;

    /* Claim all of the GPIOs. */
    res = platform_get_resource_byname(pdev, IORESOURCE_IO, "aux_pcm_dout");
    if (!res) {
        MM_ERR("%s: failed to get gpio AUX PCM DOUT\n", __func__);
        return -ENODEV;
    }
    the_aux_pcm_state.dout = res->start;

    res = platform_get_resource_byname(pdev, IORESOURCE_IO, "aux_pcm_din");
    if (!res) {
        MM_ERR("%s: failed to get gpio AUX PCM DIN\n", __func__);
        return -ENODEV;
    }
    the_aux_pcm_state.din = res->start;

    res = platform_get_resource_byname(pdev, IORESOURCE_IO, "aux_pcm_syncout");
    if (!res) {
        MM_ERR("%s: failed to get gpio AUX PCM SYNC OUT\n", __func__);
        return -ENODEV;
    }
    the_aux_pcm_state.syncout = res->start;

    res = platform_get_resource_byname(pdev, IORESOURCE_IO, "aux_pcm_clkin_a");
    if (!res) {
        MM_ERR("%s: failed to get gpio AUX PCM CLKIN A\n", __func__);
        return -ENODEV;
    }
    the_aux_pcm_state.clkin_a = res->start;

    return rc;
}

static int aux_pcm_probe(struct platform_device *pdev)
{
    int rc = 0;
    struct resource *mem_src;

    MM_INFO("aux_pcm_probe \n");
    mem_src = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                           "aux_codec_reg_addr");
    if (!mem_src) {
        rc = -ENODEV;
        goto done;
    }
    the_aux_pcm_state.aux_pcm_base = ioremap(mem_src->start,
                                             (mem_src->end - mem_src->start) + 1);
    if (!the_aux_pcm_state.aux_pcm_base) {
        rc = -ENOMEM;
        goto done;
    }
    rc = get_aux_pcm_gpios(pdev);
    if (rc) {
        MM_ERR("GPIO configuration failed\n");
        rc = -ENODEV;
    }
done:
    return rc;
}

static int aux_pcm_remove(struct platform_device *pdev)
{
    iounmap(the_aux_pcm_state.aux_pcm_base);
    return 0;
}

static struct platform_driver aux_pcm_driver = {
    .probe = aux_pcm_probe,
    .remove = aux_pcm_remove,
    .driver = {
        .name = "msm_aux_pcm",
        .owner = THIS_MODULE,
    },
};

static int __init aux_pcm_init(void)
{
    return platform_driver_register(&aux_pcm_driver);
}

static void __exit aux_pcm_exit(void)
{
    platform_driver_unregister(&aux_pcm_driver);
}

module_init(aux_pcm_init);
module_exit(aux_pcm_exit);

MODULE_DESCRIPTION("MSM AUX PCM driver");
MODULE_LICENSE("Dual BSD/GPL");
Soaa-/-lightspeed-vision
arch/arm/mach-msm/qdsp5v2/aux_pcm.c
C
gpl-2.0
9,772
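The register writes in the file above all follow the same masked read-modify-write idiom: clear the field with its bitmask, then OR in the new field value. A standalone sketch of that idiom, where update_field and its parameters are stand-in names for illustration:

#include <stdint.h>

/* Masked field update, as done inline by aux_codec_pcm_path_ctl_en():
   clear the bits covered by mask, then set the new value within it. */
static inline uint32_t update_field(uint32_t reg, uint32_t mask, uint32_t value)
{
    return (reg & ~mask) | (value & mask);
}

/* Hypothetical usage against a memory-mapped register:
   writel(update_field(readl(addr), PCM_PATH_CTL__ADSP_CTL_EN_BMSK, bits), addr); */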
/****************************************************************************
 *   FileName    : tcc353x_linux_i2c.c
 *   Description : tcc353x i2c function for linux
 ****************************************************************************
 *
 *   TCC Version 1.0
 *   Copyright (c) Telechips Inc.
 *   All rights reserved

This source code contains confidential information of Telechips.
Any unauthorized use without a written permission of Telechips including not
limited to re-distribution in source or binary form is strictly prohibited.
This source code is provided "AS IS" and nothing contained in this source code
shall constitute any express or implied warranty of any kind, including without
limitation, any warranty of merchantability, fitness for a particular purpose
or non-infringement of any patent, copyright or other third party intellectual
property right. No warranty is made, express or implied, regarding the
information's accuracy, completeness, or performance.
In no event shall Telechips be liable for any claim, damages or other liability
arising from, out of or in connection with this source code or the use in the
source code.
This source code is provided subject to the terms of a Mutual Non-Disclosure
Agreement between Telechips and Company.
*
****************************************************************************/

#include <linux/module.h>
#include <linux/semaphore.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>

#include "tcc353x_common.h"
#include "tcpal_os.h"

#define MAX_I2C_BURST (1024*8)

struct i2c_client *TcpalI2cClient = NULL;

I32S Tcc353xI2cClose(I32S _moduleIndex);

static I32U gI2cHanleInit0 = 0;
static I32U gI2cHanleInit1 = 0;
static I32U gI2cHanleInited = 0;
static I08U gI2cChipAddr[4];
static I08U I2cBuffer[(MAX_I2C_BURST+4)+32] __cacheline_aligned;

extern struct i2c_client *TCC_GET_I2C_DRIVER(void);

static I32S Tcc353xI2cSetup(I32S _moduleIndex)
{
    if (_moduleIndex >= 2) {
        TcpalPrintErr((I08S *) "Not supported, moduleidx=%d\n",
                      _moduleIndex);
        return TCC353X_RETURN_FAIL;
    }

    TcpalI2cClient = TCC_GET_I2C_DRIVER();
    return TCC353X_RETURN_SUCCESS;
}

I32S Tcc353xI2cOpen(I32S _moduleIndex)
{
    I32S ret;
    ret = TCC353X_RETURN_FAIL;

    /* */
    if (_moduleIndex == 0) {
        if (gI2cHanleInit0 != 0 && gI2cHanleInit1 == 0)
            Tcc353xI2cClose(_moduleIndex);
    } else {
        if (gI2cHanleInit1 != 0 && gI2cHanleInit0 == 0)
            Tcc353xI2cClose(_moduleIndex);
    }

    /* */
    if (_moduleIndex == 0)
        gI2cHanleInit0 = 1;
    else
        gI2cHanleInit1 = 1;

    if (gI2cHanleInited != 0) {
        return TCC353X_RETURN_SUCCESS;
    }

    gI2cHanleInited = 1;
    TcpalMemset(&gI2cChipAddr[_moduleIndex], 0x00, 4);
    ret = Tcc353xI2cSetup(_moduleIndex);

    /* */
    return ret;
}

I32S Tcc353xI2cClose(I32S _moduleIndex)
{
    if (_moduleIndex == 0)
        gI2cHanleInit0 = 0;
    else
        gI2cHanleInit1 = 0;

    if (gI2cHanleInit0 == 0 && gI2cHanleInit1 == 0) {
        gI2cHanleInited = 0;
        TcpalPrintLog((I08S *)"TcpalI2cClient :0x%X\n",
                      (unsigned int)TcpalI2cClient);
    }

    return TCC353X_RETURN_SUCCESS;
}

I32S Tcc353xAdaptI2CWriteEx(I08U raddr, I08U* txdata, I32S length)
{
    I32S rc;
    I32S cMax, remain;
    I32S i;
    struct i2c_msg msg;

    TcpalMemset (&msg, 0x00, sizeof(struct i2c_msg));

    cMax = length / MAX_I2C_BURST;
    remain = length % MAX_I2C_BURST;

    msg.addr = TcpalI2cClient->addr;
    msg.flags = 0;
    msg.len = MAX_I2C_BURST+1;
    msg.buf = (unsigned char*)I2cBuffer;
    I2cBuffer[0] = (unsigned char) (raddr);

    for (i = 0; i < cMax; i++) {
        TcpalMemcpy(&I2cBuffer[1], &txdata[i * MAX_I2C_BURST],
                    MAX_I2C_BURST);
        msg.len = MAX_I2C_BURST+1;
        msg.buf = (unsigned char*)I2cBuffer;
        rc = i2c_transfer(TcpalI2cClient->adapter, &msg, 1);
        if(rc < 0) {
            TcpalPrintErr((I08S *)"fail rc = (%d) addr =(0x%X) data=0x%02x\n",
                          (int)rc, (unsigned int)TcpalI2cClient->addr,
                          (unsigned int)txdata[1]);
            return TCC353X_RETURN_FAIL;
        }
    }

    if (remain) {
        TcpalMemcpy(&I2cBuffer[1], &txdata[cMax * MAX_I2C_BURST],
                    remain);
        msg.len = remain+1;
        msg.buf = (unsigned char*)I2cBuffer;
        rc = i2c_transfer(TcpalI2cClient->adapter, &msg, 1);
        if(rc < 0) {
            TcpalPrintErr((I08S *)"fail rc = (%d) addr =(0x%X) data=0x%02x\n",
                          (int)rc, (unsigned int)TcpalI2cClient->addr,
                          (unsigned int)txdata[1]);
            return TCC353X_RETURN_FAIL;
        }
    }

    return TCC353X_RETURN_SUCCESS;
}

I32S Tcc353xAdaptI2CReadEx(I08U raddr, I08U *rxdata, I32S length)
{
    I32S rc;
    I32S cMax, remain;
    I32S i;
    struct i2c_msg msgs[2];

    TcpalMemset (&msgs[0], 0x00, sizeof(struct i2c_msg)*2);

    cMax = length / MAX_I2C_BURST;
    remain = length % MAX_I2C_BURST;

    msgs[0].addr = TcpalI2cClient->addr;
    msgs[0].flags = 0;
    msgs[0].len = 1;
    msgs[0].buf = (unsigned char*)&raddr;

    msgs[1].addr = TcpalI2cClient->addr;
    msgs[1].flags = I2C_M_RD;
    msgs[1].len = length;
    msgs[1].buf = (unsigned char*)rxdata;

    for (i = 0; i < cMax; i++) {
        msgs[1].len = MAX_I2C_BURST;
        msgs[1].buf = (unsigned char*)rxdata + i*MAX_I2C_BURST;
        rc = i2c_transfer(TcpalI2cClient->adapter, msgs, 2);
        if(rc < 0) {
            TcpalPrintErr((I08S *)"failed! rc =(%d),%x \n",
                          (int)rc, (unsigned int)TcpalI2cClient->addr);
            return TCC353X_RETURN_FAIL;
        }
    }

    if (remain) {
        msgs[1].len = remain;
        msgs[1].buf = (unsigned char*)rxdata + cMax*MAX_I2C_BURST;
        rc = i2c_transfer(TcpalI2cClient->adapter, msgs, 2);
        if(rc < 0) {
            TcpalPrintErr((I08S *)"failed! rc =(%d),%x \n",
                          (int)rc, (unsigned int)TcpalI2cClient->addr);
            return TCC353X_RETURN_FAIL;
        }
    }

    return TCC353X_RETURN_SUCCESS;
}
Plain-Devices/android_kernel_lge_msm8974
drivers/broadcast/oneseg/tcc3535/Tcc353xDriver/Linux_Adapt/tcc353x_linux_i2c.c
C
gpl-2.0
5,871
/* $Id: user.css,v 1.7 2007/06/21 04:38:41 unconed Exp $ */

#permissions td.module {
  font-weight: bold;
}
#permissions td.permission {
  padding-left: 1.5em; /* LTR */
}
#access-rules .access-type, #access-rules .rule-type {
  margin-right: 1em; /* LTR */
  float: left; /* LTR */
}
#access-rules .access-type .form-item, #access-rules .rule-type .form-item {
  margin-top: 0;
}
#access-rules .mask {
  clear: both;
}
#user-login-form {
  text-align: center;
}
#user-admin-filter ul {
  list-style-type: none;
  padding: 0;
  margin: 0;
  width: 100%;
}
#user-admin-buttons {
  float: left; /* LTR */
  margin-left: 0.5em; /* LTR */
  clear: right; /* LTR */
}
#user-admin-settings fieldset .description {
  font-size: 0.85em;
  padding-bottom: .5em;
}

/* Generated by user.module but used by profile.module: */
.profile {
  clear: both;
  margin: 1em 0;
}
.profile .picture {
  float: right; /* LTR */
  margin: 0 1em 1em 0; /* LTR */
}
.profile h3 {
  border-bottom: 1px solid #ccc;
}
.profile dl {
  margin: 0 0 1.5em 0;
}
.profile dt {
  margin: 0 0 0.2em 0;
  font-weight: bold;
}
.profile dd {
  margin: 0 0 1em 0;
}
drugdiller/instrument
sites/all/themes/zen/instrument/css/user.css
CSS
gpl-2.0
1,127
<?php

namespace Drupal\field\Tests;

use Drupal\field\Entity\FieldConfig;
use Drupal\simpletest\WebTestBase;
use Drupal\field\Entity\FieldStorageConfig;

/**
 * Tests the behavior of a field module after being disabled and re-enabled.
 *
 * @group field
 */
class reEnableModuleFieldTest extends WebTestBase {

  /**
   * Modules to enable.
   *
   * @var array
   */
  public static $modules = array(
    'field',
    'node',
    // We use telephone module instead of test_field because test_field is
    // hidden and does not display on the admin/modules page.
    'telephone'
  );

  protected function setUp() {
    parent::setUp();

    $this->drupalCreateContentType(array('type' => 'article'));
    $this->drupalLogin($this->drupalCreateUser(array(
      'create article content',
      'edit own article content',
    )));
  }

  /**
   * Test the behavior of a field module after being disabled and re-enabled.
   *
   * @see field_system_info_alter()
   */
  function testReEnabledField() {

    // Add a telephone field to the article content type.
    $field_storage = FieldStorageConfig::create(array(
      'field_name' => 'field_telephone',
      'entity_type' => 'node',
      'type' => 'telephone',
    ));
    $field_storage->save();
    FieldConfig::create([
      'field_storage' => $field_storage,
      'bundle' => 'article',
      'label' => 'Telephone Number',
    ])->save();

    entity_get_form_display('node', 'article', 'default')
      ->setComponent('field_telephone', array(
        'type' => 'telephone_default',
        'settings' => array(
          'placeholder' => '123-456-7890',
        ),
      ))
      ->save();

    entity_get_display('node', 'article', 'default')
      ->setComponent('field_telephone', array(
        'type' => 'telephone_link',
        'weight' => 1,
      ))
      ->save();

    // Display the article node form and verify the telephone widget is
    // present.
    $this->drupalGet('node/add/article');
    $this->assertFieldByName("field_telephone[0][value]", '', 'Widget found.');

    // Submit an article node with a telephone field so data exist for the
    // field.
    $edit = array(
      'title[0][value]' => $this->randomMachineName(),
      'field_telephone[0][value]' => "123456789",
    );
    $this->drupalPostForm(NULL, $edit, t('Save'));
    $this->assertRaw('<a href="tel:123456789">');

    // Test that the module can't be uninstalled from the UI while there is
    // data for its fields.
    $admin_user = $this->drupalCreateUser(array('access administration pages', 'administer modules'));
    $this->drupalLogin($admin_user);
    $this->drupalGet('admin/modules/uninstall');
    $this->assertText("The Telephone number field type is used in the following field: node.field_telephone");

    // Add another telephone field to a different entity type in order to test
    // the message for the case when multiple fields are blocking the
    // uninstallation of a module.
    $field_storage2 = entity_create('field_storage_config', array(
      'field_name' => 'field_telephone_2',
      'entity_type' => 'user',
      'type' => 'telephone',
    ));
    $field_storage2->save();
    FieldConfig::create([
      'field_storage' => $field_storage2,
      'bundle' => 'user',
      'label' => 'User Telephone Number',
    ])->save();

    $this->drupalGet('admin/modules/uninstall');
    $this->assertText("The Telephone number field type is used in the following fields: node.field_telephone, user.field_telephone_2");

    // Delete both fields.
    $field_storage->delete();
    $field_storage2->delete();

    $this->drupalGet('admin/modules/uninstall');
    $this->assertText('Fields pending deletion');
    $this->cronRun();
    $this->assertNoText("The Telephone number field type is used in the following field: node.field_telephone");
    $this->assertNoText('Fields pending deletion');
  }

}
danielpopdan/drupal-community-cluj
docroot/core/modules/field/src/Tests/reEnableModuleFieldTest.php
PHP
gpl-2.0
3,886
<?php

/**
 * @file
 * Generic transliteration data for the PhpTransliteration class.
 */

$base = [
  0x00 => 'ben', 'yuan', 'wen', 'ruo', 'fei', 'qing', 'yuan', 'ke', 'ji', 'she', 'yuan', 'se', 'lu', 'zi', 'du', 'qi',
  0x10 => 'jian', 'mian', 'pi', 'xi', 'yu', 'yuan', 'shen', 'shen', 'rou', 'huan', 'zhu', 'jian', 'nuan', 'yu', 'qiu', 'ting',
  0x20 => 'qu', 'du', 'fan', 'zha', 'bo', 'wo', 'wo', 'di', 'wei', 'wen', 'ru', 'xie', 'ce', 'wei', 'he', 'gang',
  0x30 => 'yan', 'hong', 'xuan', 'mi', 'ke', 'mao', 'ying', 'yan', 'you', 'hong', 'miao', 'sheng', 'mei', 'zai', 'hun', 'nai',
  0x40 => 'gui', 'chi', 'e', 'pai', 'mei', 'lian', 'qi', 'qi', 'mei', 'tian', 'cou', 'wei', 'can', 'tuan', 'mian', 'hui',
  0x50 => 'mo', 'xu', 'ji', 'pen', 'jian', 'jian', 'hu', 'feng', 'xiang', 'yi', 'yin', 'zhan', 'shi', 'jie', 'cheng', 'huang',
  0x60 => 'tan', 'yu', 'bi', 'min', 'shi', 'tu', 'sheng', 'yong', 'ju', 'dong', 'tuan', 'jiao', 'jiao', 'qiu', 'yan', 'tang',
  0x70 => 'long', 'huo', 'yuan', 'nan', 'ban', 'you', 'quan', 'zhuang', 'liang', 'chan', 'yan', 'chun', 'nie', 'zi', 'wan', 'shi',
  0x80 => 'man', 'ying', 'la', 'kui', 'feng', 'jian', 'xu', 'lou', 'wei', 'gai', 'xia', 'ying', 'po', 'jin', 'yan', 'tang',
  0x90 => 'yuan', 'suo', 'yuan', 'lian', 'yao', 'meng', 'zhun', 'cheng', 'ke', 'tai', 'ta', 'wa', 'liu', 'gou', 'sao', 'ming',
  0xA0 => 'zha', 'shi', 'yi', 'lun', 'ma', 'pu', 'wei', 'li', 'cai', 'wu', 'xi', 'wen', 'qiang', 'ze', 'shi', 'su',
  0xB0 => 'ai', 'qin', 'sou', 'yun', 'xiu', 'yin', 'rong', 'hun', 'su', 'suo', 'ni', 'ta', 'shi', 'ru', 'ai', 'pan',
  0xC0 => 'chu', 'chu', 'pang', 'weng', 'cang', 'mie', 'ge', 'dian', 'hao', 'huang', 'xi', 'zi', 'di', 'zhi', 'xing', 'fu',
  0xD0 => 'jie', 'hua', 'ge', 'zi', 'tao', 'teng', 'sui', 'bi', 'jiao', 'hui', 'gun', 'yin', 'gao', 'long', 'zhi', 'yan',
  0xE0 => 'she', 'man', 'ying', 'chun', 'lu', 'lan', 'luan', 'xiao', 'bin', 'tan', 'yu', 'xiu', 'hu', 'bi', 'biao', 'zhi',
  0xF0 => 'jiang', 'kou', 'shen', 'shang', 'di', 'mi', 'ao', 'lu', 'hu', 'hu', 'you', 'chan', 'fan', 'yong', 'gun', 'man',
];
trokhanenko/agrobirzha
core/lib/Drupal/Component/Transliteration/data/x6e.php
PHP
gpl-2.0
2,073
<?php

namespace Drupal\rdf\Tests;

use Drupal\file\Tests\FileFieldTestBase;
use Drupal\file\Entity\File;

/**
 * Tests the RDFa markup of filefields.
 *
 * @group rdf
 */
class FileFieldAttributesTest extends FileFieldTestBase {

  /**
   * Modules to enable.
   *
   * @var array
   */
  public static $modules = array('rdf', 'file');

  /**
   * The name of the file field used in the test.
   *
   * @var string
   */
  protected $fieldName;

  /**
   * The file object used in the test.
   *
   * @var \Drupal\file\FileInterface
   */
  protected $file;

  /**
   * The node object used in the test.
   *
   * @var \Drupal\node\NodeInterface
   */
  protected $node;

  protected function setUp() {
    parent::setUp();

    $node_storage = $this->container->get('entity.manager')->getStorage('node');
    $this->fieldName = strtolower($this->randomMachineName());

    $type_name = 'article';
    $this->createFileField($this->fieldName, 'node', $type_name);

    // Set the teaser display to show this field.
    entity_get_display('node', 'article', 'teaser')
      ->setComponent($this->fieldName, array('type' => 'file_default'))
      ->save();

    // Set the RDF mapping for the new field.
    $mapping = rdf_get_mapping('node', 'article');
    $mapping->setFieldMapping($this->fieldName, array('properties' => array('rdfs:seeAlso'), 'mapping_type' => 'rel'))->save();

    $test_file = $this->getTestFile('text');

    // Create a new node with the uploaded file.
    $nid = $this->uploadNodeFile($test_file, $this->fieldName, $type_name);

    $node_storage->resetCache(array($nid));
    $this->node = $node_storage->load($nid);
    $this->file = File::load($this->node->{$this->fieldName}->target_id);
  }

  /**
   * Tests if file fields in teasers have correct resources.
   *
   * Ensure that file fields have the correct resource as the object in RDFa
   * when displayed as a teaser.
   */
  function testNodeTeaser() {
    // Render the teaser.
    $node_render_array = entity_view_multiple(array($this->node), 'teaser');
    $html = \Drupal::service('renderer')->renderRoot($node_render_array);

    // Parses front page where the node is displayed in its teaser form.
    $parser = new \EasyRdf_Parser_Rdfa();
    $graph = new \EasyRdf_Graph();
    $base_uri = \Drupal::url('<front>', [], ['absolute' => TRUE]);
    $parser->parse($graph, $html, 'rdfa', $base_uri);

    $node_uri = $this->node->url('canonical', ['absolute' => TRUE]);
    $file_uri = file_create_url($this->file->getFileUri());

    // Node relation to attached file.
    $expected_value = array(
      'type' => 'uri',
      'value' => $file_uri,
    );
    $this->assertTrue($graph->hasProperty($node_uri, 'http://www.w3.org/2000/01/rdf-schema#seeAlso', $expected_value), 'Node to file relation found in RDF output (rdfs:seeAlso).');

    $this->drupalGet('node');
  }

}
thesushi/drUpjv
core/modules/rdf/src/Tests/FileFieldAttributesTest.php
PHP
gpl-2.0
2,864
/*
 * Copyright (C) 2011 Samsung Electronics Co., Ltd.
 *
 * Exynos4 camera interface GPIO configuration.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/gpio.h>
#include <plat/gpio-cfg.h>
#include <plat/camport.h>

int exynos4_fimc_setup_gpio(enum s5p_camport_id id)
{
    u32 gpio8, gpio5;
    u32 sfn;
    int ret;

    switch (id) {
    case S5P_CAMPORT_A:
        gpio8 = EXYNOS4210_GPJ0(0); /* PCLK, VSYNC, HREF, DATA[0:4] */
        gpio5 = EXYNOS4210_GPJ1(0); /* DATA[5:7], CLKOUT, FIELD */
        sfn = S3C_GPIO_SFN(2);
        break;

    case S5P_CAMPORT_B:
        gpio8 = EXYNOS4210_GPE0(0); /* DATA[0:7] */
        gpio5 = EXYNOS4210_GPE1(0); /* PCLK, VSYNC, HREF, CLKOUT, FIELD */
        sfn = S3C_GPIO_SFN(3);
        break;

    default:
        WARN(1, "Wrong camport id: %d\n", id);
        return -EINVAL;
    }

    ret = s3c_gpio_cfgall_range(gpio8, 8, sfn, S3C_GPIO_PULL_UP);
    if (ret)
        return ret;

    return s3c_gpio_cfgall_range(gpio5, 5, sfn, S3C_GPIO_PULL_UP);
}
atilag/android_kernel_samsung_smdk4412
arch/arm/mach-exynos/setup-fimc.c
C
gpl-2.0
1,068
/* Intel Ethernet Switch Host Interface Driver * Copyright(c) 2013 - 2014 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * The full GNU General Public License is included in this distribution in * the file called "COPYING". * * Contact Information: * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 */ #include "fm10k_vf.h" /** * fm10k_stop_hw_vf - Stop Tx/Rx units * @hw: pointer to hardware structure * **/ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw) { u8 *perm_addr = hw->mac.perm_addr; u32 bal = 0, bah = 0; s32 err; u16 i; /* we need to disable the queues before taking further steps */ err = fm10k_stop_hw_generic(hw); if (err) return err; /* If permenant address is set then we need to restore it */ if (is_valid_ether_addr(perm_addr)) { bal = (((u32)perm_addr[3]) << 24) | (((u32)perm_addr[4]) << 16) | (((u32)perm_addr[5]) << 8); bah = (((u32)0xFF) << 24) | (((u32)perm_addr[0]) << 16) | (((u32)perm_addr[1]) << 8) | ((u32)perm_addr[2]); } /* The queues have already been disabled so we just need to * update their base address registers */ for (i = 0; i < hw->mac.max_queues; i++) { fm10k_write_reg(hw, FM10K_TDBAL(i), bal); fm10k_write_reg(hw, FM10K_TDBAH(i), bah); fm10k_write_reg(hw, FM10K_RDBAL(i), bal); fm10k_write_reg(hw, FM10K_RDBAH(i), bah); } return 0; } /** * fm10k_reset_hw_vf - VF hardware reset * @hw: pointer to hardware structure * * This function should return the hardare to a state similar to the * one it is in after just being initialized. 
**/ static s32 fm10k_reset_hw_vf(struct fm10k_hw *hw) { s32 err; /* shut down queues we own and reset DMA configuration */ err = fm10k_stop_hw_vf(hw); if (err) return err; /* Inititate VF reset */ fm10k_write_reg(hw, FM10K_VFCTRL, FM10K_VFCTRL_RST); /* Flush write and allow 100us for reset to complete */ fm10k_write_flush(hw); udelay(FM10K_RESET_TIMEOUT); /* Clear reset bit and verify it was cleared */ fm10k_write_reg(hw, FM10K_VFCTRL, 0); if (fm10k_read_reg(hw, FM10K_VFCTRL) & FM10K_VFCTRL_RST) err = FM10K_ERR_RESET_FAILED; return err; } /** * fm10k_init_hw_vf - VF hardware initialization * @hw: pointer to hardware structure * **/ static s32 fm10k_init_hw_vf(struct fm10k_hw *hw) { u32 tqdloc, tqdloc0 = ~fm10k_read_reg(hw, FM10K_TQDLOC(0)); s32 err; u16 i; /* assume we always have at least 1 queue */ for (i = 1; tqdloc0 && (i < FM10K_MAX_QUEUES_POOL); i++) { /* verify the Descriptor cache offsets are increasing */ tqdloc = ~fm10k_read_reg(hw, FM10K_TQDLOC(i)); if (!tqdloc || (tqdloc == tqdloc0)) break; /* check to verify the PF doesn't own any of our queues */ if (!~fm10k_read_reg(hw, FM10K_TXQCTL(i)) || !~fm10k_read_reg(hw, FM10K_RXQCTL(i))) break; } /* shut down queues we own and reset DMA configuration */ err = fm10k_disable_queues_generic(hw, i); if (err) return err; /* record maximum queue count */ hw->mac.max_queues = i; return 0; } /** * fm10k_is_slot_appropriate_vf - Indicate appropriate slot for this SKU * @hw: pointer to hardware structure * * Looks at the PCIe bus info to confirm whether or not this slot can support * the necessary bandwidth for this device. Since the VF has no control over * the "slot" it is in, always indicate that the slot is appropriate. **/ static bool fm10k_is_slot_appropriate_vf(struct fm10k_hw *hw) { return true; } /* This structure defines the attibutes to be parsed below */ const struct fm10k_tlv_attr fm10k_mac_vlan_msg_attr[] = { FM10K_TLV_ATTR_U32(FM10K_MAC_VLAN_MSG_VLAN), FM10K_TLV_ATTR_BOOL(FM10K_MAC_VLAN_MSG_SET), FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_MAC), FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_DEFAULT_MAC), FM10K_TLV_ATTR_MAC_ADDR(FM10K_MAC_VLAN_MSG_MULTICAST), FM10K_TLV_ATTR_LAST }; /** * fm10k_update_vlan_vf - Update status of VLAN ID in VLAN filter table * @hw: pointer to hardware structure * @vid: VLAN ID to add to table * @vsi: Reserved, should always be 0 * @set: Indicates if this is a set or clear operation * * This function adds or removes the corresponding VLAN ID from the VLAN * filter table for this VF. 
**/ static s32 fm10k_update_vlan_vf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set) { struct fm10k_mbx_info *mbx = &hw->mbx; u32 msg[4]; /* verify the index is not set */ if (vsi) return FM10K_ERR_PARAM; /* verify upper 4 bits of vid and length are 0 */ if ((vid << 16 | vid) >> 28) return FM10K_ERR_PARAM; /* encode set bit into the VLAN ID */ if (!set) vid |= FM10K_VLAN_CLEAR; /* generate VLAN request */ fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN); fm10k_tlv_attr_put_u32(msg, FM10K_MAC_VLAN_MSG_VLAN, vid); /* load onto outgoing mailbox */ return mbx->ops.enqueue_tx(hw, mbx, msg); } /** * fm10k_msg_mac_vlan_vf - Read device MAC address from mailbox message * @hw: pointer to the HW structure * @results: Attributes for message * @mbx: unused mailbox data * * This function should determine the MAC address for the VF **/ s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *hw, u32 **results, struct fm10k_mbx_info *mbx) { u8 perm_addr[ETH_ALEN]; u16 vid; s32 err; /* record MAC address requested */ err = fm10k_tlv_attr_get_mac_vlan( results[FM10K_MAC_VLAN_MSG_DEFAULT_MAC], perm_addr, &vid); if (err) return err; ether_addr_copy(hw->mac.perm_addr, perm_addr); hw->mac.default_vid = vid & (FM10K_VLAN_TABLE_VID_MAX - 1); hw->mac.vlan_override = !!(vid & FM10K_VLAN_CLEAR); return 0; } /** * fm10k_read_mac_addr_vf - Read device MAC address * @hw: pointer to the HW structure * * This function should determine the MAC address for the VF **/ static s32 fm10k_read_mac_addr_vf(struct fm10k_hw *hw) { u8 perm_addr[ETH_ALEN]; u32 base_addr; base_addr = fm10k_read_reg(hw, FM10K_TDBAL(0)); /* last byte should be 0 */ if (base_addr << 24) return FM10K_ERR_INVALID_MAC_ADDR; perm_addr[3] = (u8)(base_addr >> 24); perm_addr[4] = (u8)(base_addr >> 16); perm_addr[5] = (u8)(base_addr >> 8); base_addr = fm10k_read_reg(hw, FM10K_TDBAH(0)); /* first byte should be all 1's */ if ((~base_addr) >> 24) return FM10K_ERR_INVALID_MAC_ADDR; perm_addr[0] = (u8)(base_addr >> 16); perm_addr[1] = (u8)(base_addr >> 8); perm_addr[2] = (u8)(base_addr); ether_addr_copy(hw->mac.perm_addr, perm_addr); ether_addr_copy(hw->mac.addr, perm_addr); return 0; } /** * fm10k_update_uc_addr_vf - Update device unicast address * @hw: pointer to the HW structure * @glort: unused * @mac: MAC address to add/remove from table * @vid: VLAN ID to add/remove from table * @add: Indicates if this is an add or remove operation * @flags: flags field to indicate add and secure - unused * * This function is used to add or remove unicast MAC addresses for * the VF. 
 **/
static s32 fm10k_update_uc_addr_vf(struct fm10k_hw *hw, u16 glort,
				   const u8 *mac, u16 vid, bool add, u8 flags)
{
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 msg[7];

	/* verify VLAN ID is valid */
	if (vid >= FM10K_VLAN_TABLE_VID_MAX)
		return FM10K_ERR_PARAM;

	/* verify MAC address is valid */
	if (!is_valid_ether_addr(mac))
		return FM10K_ERR_PARAM;

	/* verify we are not locked down on the MAC address */
	if (is_valid_ether_addr(hw->mac.perm_addr) &&
	    memcmp(hw->mac.perm_addr, mac, ETH_ALEN))
		return FM10K_ERR_PARAM;

	/* add bit to notify us if this is a set or clear operation */
	if (!add)
		vid |= FM10K_VLAN_CLEAR;

	/* generate VLAN request */
	fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
	fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_MAC, mac, vid);

	/* load onto outgoing mailbox */
	return mbx->ops.enqueue_tx(hw, mbx, msg);
}

/**
 * fm10k_update_mc_addr_vf - Update device multicast address
 * @hw: pointer to the HW structure
 * @glort: unused
 * @mac: MAC address to add/remove from table
 * @vid: VLAN ID to add/remove from table
 * @add: Indicates if this is an add or remove operation
 *
 * This function is used to add or remove multicast MAC addresses for
 * the VF.
 **/
static s32 fm10k_update_mc_addr_vf(struct fm10k_hw *hw, u16 glort,
				   const u8 *mac, u16 vid, bool add)
{
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 msg[7];

	/* verify VLAN ID is valid */
	if (vid >= FM10K_VLAN_TABLE_VID_MAX)
		return FM10K_ERR_PARAM;

	/* verify multicast address is valid */
	if (!is_multicast_ether_addr(mac))
		return FM10K_ERR_PARAM;

	/* add bit to notify us if this is a set or clear operation */
	if (!add)
		vid |= FM10K_VLAN_CLEAR;

	/* generate VLAN request */
	fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
	fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_MULTICAST,
				    mac, vid);

	/* load onto outgoing mailbox */
	return mbx->ops.enqueue_tx(hw, mbx, msg);
}

/**
 * fm10k_update_int_moderator_vf - Request update of interrupt moderator list
 * @hw: pointer to hardware structure
 *
 * This function will issue a request to the PF to rescan our MSI-X table
 * and to update the interrupt moderator linked list.
 **/
static void fm10k_update_int_moderator_vf(struct fm10k_hw *hw)
{
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 msg[1];

	/* generate MSI-X request */
	fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MSIX);

	/* load onto outgoing mailbox */
	mbx->ops.enqueue_tx(hw, mbx, msg);
}

/* This structure defines the attributes to be parsed below */
const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[] = {
	FM10K_TLV_ATTR_BOOL(FM10K_LPORT_STATE_MSG_DISABLE),
	FM10K_TLV_ATTR_U8(FM10K_LPORT_STATE_MSG_XCAST_MODE),
	FM10K_TLV_ATTR_BOOL(FM10K_LPORT_STATE_MSG_READY),
	FM10K_TLV_ATTR_LAST
};

/**
 * fm10k_msg_lport_state_vf - Message handler for lport_state message from PF
 * @hw: Pointer to hardware structure
 * @results: pointer array containing parsed data
 * @mbx: Pointer to mailbox information structure
 *
 * This handler is meant to capture the indication from the PF that we
 * are ready to bring up the interface.
 **/
s32 fm10k_msg_lport_state_vf(struct fm10k_hw *hw, u32 **results,
			     struct fm10k_mbx_info *mbx)
{
	hw->mac.dglort_map = !results[FM10K_LPORT_STATE_MSG_READY] ?
			     FM10K_DGLORTMAP_NONE : FM10K_DGLORTMAP_ZERO;

	return 0;
}

/**
 * fm10k_update_lport_state_vf - Update device state in lower device
 * @hw: pointer to the HW structure
 * @glort: unused
 * @count: number of logical ports to enable - unused (always 1)
 * @enable: boolean value indicating if this is an enable or disable request
 *
 * Notify the lower device of a state change.  If the lower device is
 * enabled we can add filters, if it is disabled all filters for this
 * logical port are flushed.
 **/
static s32 fm10k_update_lport_state_vf(struct fm10k_hw *hw, u16 glort,
				       u16 count, bool enable)
{
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 msg[2];

	/* reset glort mask 0 as we have to wait to be enabled */
	hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;

	/* generate port state request */
	fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
	if (!enable)
		fm10k_tlv_attr_put_bool(msg, FM10K_LPORT_STATE_MSG_DISABLE);

	/* load onto outgoing mailbox */
	return mbx->ops.enqueue_tx(hw, mbx, msg);
}

/**
 * fm10k_update_xcast_mode_vf - Request update of multicast mode
 * @hw: pointer to hardware structure
 * @glort: unused
 * @mode: integer value indicating mode being requested
 *
 * This function will attempt to request a higher mode for the port
 * so that it can enable either multicast, multicast promiscuous, or
 * promiscuous mode of operation.
 **/
static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode)
{
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 msg[3];

	if (mode > FM10K_XCAST_MODE_NONE)
		return FM10K_ERR_PARAM;

	/* generate message requesting to change xcast mode */
	fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
	fm10k_tlv_attr_put_u8(msg, FM10K_LPORT_STATE_MSG_XCAST_MODE, mode);

	/* load onto outgoing mailbox */
	return mbx->ops.enqueue_tx(hw, mbx, msg);
}

const struct fm10k_tlv_attr fm10k_1588_msg_attr[] = {
	FM10K_TLV_ATTR_U64(FM10K_1588_MSG_TIMESTAMP),
	FM10K_TLV_ATTR_LAST
};

/* currently there is no shared 1588 timestamp handler */

/**
 * fm10k_update_hw_stats_vf - Updates hardware related statistics of VF
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 *
 * This function collects and aggregates per queue hardware statistics.
 **/
static void fm10k_update_hw_stats_vf(struct fm10k_hw *hw,
				     struct fm10k_hw_stats *stats)
{
	fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues);
}

/**
 * fm10k_rebind_hw_stats_vf - Resets base for hardware statistics of VF
 * @hw: pointer to hardware structure
 * @stats: pointer to the stats structure to update
 *
 * This function resets the base for queue hardware statistics.
 **/
static void fm10k_rebind_hw_stats_vf(struct fm10k_hw *hw,
				     struct fm10k_hw_stats *stats)
{
	/* Unbind Queue Statistics */
	fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);

	/* Reinitialize bases for all stats */
	fm10k_update_hw_stats_vf(hw, stats);
}

/**
 * fm10k_configure_dglort_map_vf - Configures GLORT entry and queues
 * @hw: pointer to hardware structure
 * @dglort: pointer to dglort configuration structure
 *
 * Reads the configuration structure contained in dglort_cfg and uses
 * that information to then populate a DGLORTMAP/DEC entry and the queues
 * to which it has been assigned.
 **/
static s32 fm10k_configure_dglort_map_vf(struct fm10k_hw *hw,
					 struct fm10k_dglort_cfg *dglort)
{
	/* verify the dglort pointer */
	if (!dglort)
		return FM10K_ERR_PARAM;

	/* stub for now until we determine correct message for this */

	return 0;
}

/**
 * fm10k_adjust_systime_vf - Adjust systime frequency
 * @hw: pointer to hardware structure
 * @ppb: adjustment rate in parts per billion
 *
 * This function takes an adjustment rate in parts per billion and will
 * verify that this value is 0 as the VF cannot support adjusting the
 * systime clock.
 *
 * If the ppb value is non-zero the return is ERR_PARAM else success
 **/
static s32 fm10k_adjust_systime_vf(struct fm10k_hw *hw, s32 ppb)
{
	/* The VF cannot adjust the clock frequency, however it should
	 * already have a syntonic clock with whichever host interface is
	 * running as the master for the host interface clock domain so
	 * there should be no frequency adjustment necessary.
	 */
	return ppb ? FM10K_ERR_PARAM : 0;
}

/**
 * fm10k_read_systime_vf - Reads value of systime registers
 * @hw: pointer to the hardware structure
 *
 * Function reads the content of 2 registers, combined to represent a 64 bit
 * value measured in nanoseconds.  In order to guarantee the value is
 * accurate we check the 32 most significant bits both before and after
 * reading the 32 least significant bits to verify they didn't change as
 * we were reading the registers.
 **/
static u64 fm10k_read_systime_vf(struct fm10k_hw *hw)
{
	u32 systime_l, systime_h, systime_tmp;

	systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1);

	do {
		systime_tmp = systime_h;
		systime_l = fm10k_read_reg(hw, FM10K_VFSYSTIME);
		systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1);
	} while (systime_tmp != systime_h);

	return ((u64)systime_h << 32) | systime_l;
}

static const struct fm10k_msg_data fm10k_msg_data_vf[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};

static struct fm10k_mac_ops mac_ops_vf = {
	.get_bus_info		= &fm10k_get_bus_info_generic,
	.reset_hw		= &fm10k_reset_hw_vf,
	.init_hw		= &fm10k_init_hw_vf,
	.start_hw		= &fm10k_start_hw_generic,
	.stop_hw		= &fm10k_stop_hw_vf,
	.is_slot_appropriate	= &fm10k_is_slot_appropriate_vf,
	.update_vlan		= &fm10k_update_vlan_vf,
	.read_mac_addr		= &fm10k_read_mac_addr_vf,
	.update_uc_addr		= &fm10k_update_uc_addr_vf,
	.update_mc_addr		= &fm10k_update_mc_addr_vf,
	.update_xcast_mode	= &fm10k_update_xcast_mode_vf,
	.update_int_moderator	= &fm10k_update_int_moderator_vf,
	.update_lport_state	= &fm10k_update_lport_state_vf,
	.update_hw_stats	= &fm10k_update_hw_stats_vf,
	.rebind_hw_stats	= &fm10k_rebind_hw_stats_vf,
	.configure_dglort_map	= &fm10k_configure_dglort_map_vf,
	.get_host_state		= &fm10k_get_host_state_generic,
	.adjust_systime		= &fm10k_adjust_systime_vf,
	.read_systime		= &fm10k_read_systime_vf,
};

static s32 fm10k_get_invariants_vf(struct fm10k_hw *hw)
{
	fm10k_get_invariants_generic(hw);

	return fm10k_pfvf_mbx_init(hw, &hw->mbx, fm10k_msg_data_vf, 0);
}

struct fm10k_info fm10k_vf_info = {
	.mac		= fm10k_mac_vf,
	.get_invariants	= &fm10k_get_invariants_vf,
	.mac_ops	= &mac_ops_vf,
};
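/* Illustrative sketch, not part of the original driver: a hypothetical
 * helper showing how a caller holding an initialized VF "hw" structure
 * would reach fm10k_read_systime_vf() through the ops table bound in
 * mac_ops_vf above.  It assumes the usual fm10k headers are in scope and
 * that hw->mac.ops has already been populated for a VF device.
 */
static inline u64 fm10k_example_read_systime(struct fm10k_hw *hw)
{
	/* dispatches to fm10k_read_systime_vf() on VF devices */
	return hw->mac.ops.read_systime(hw);
}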
PhenomX1998/android_kernel_oneplus_msm8996
drivers/net/ethernet/intel/fm10k/fm10k_vf.c
C
gpl-2.0
17,310
/* linux/drivers/dma/pl330.c
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>

#define NR_DEFAULT_DESC	16

enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};

struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* Last completed cookie */
	dma_cookie_t completed;

	/* List of to be xfered descriptors */
	struct list_head work_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/* Token of a hardware channel thread of PL330 DMAC
	 * NULL if the channel is available to be acquired.
	 */
	void *pl330_chid;
};

struct dma_pl330_dmac {
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Peripheral channels connected to this DMAC */
	struct dma_pl330_chan peripherals[0]; /* keep at end */
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}

static inline void free_desc_list(struct list_head *list)
{
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	/* Finish off the work list */
	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;
		void *param;

		/* All desc in a list belong to same channel */
		pch = desc->pchan;

		callback = desc->txd.callback;
		param = desc->txd.callback_param;
		if (callback)
			callback(param);

		desc->pchan = NULL;
	}

	pdmac = pch->dmac;

	spin_lock_irqsave(&pdmac->pool_lock, flags);
	list_splice_tail_init(list, &pdmac->desc_pool);
	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}

static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			break;

		ret = pl330_submit_req(pch->pl330_chid, &desc->req);
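		/* Interpret the core's verdict: 0 means the request is now
		 * queued on the hardware thread; -EAGAIN means the queue is
		 * full or the DMAC is dying, so retry on a later pass; any
		 * other error marks the descriptor as unusable.
		 */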
		if (!ret) {
			desc->status = BUSY;
			break;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}

static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			pch->completed = desc->txd.cookie;
			list_move_tail(&desc->node, &list);
		}

	/* Try to submit a req imm. next to the last completed cookie */
	fill_queue(pch);

	/* Make sure the PL330 Channel thread is active */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);

	spin_unlock_irqrestore(&pch->lock, flags);

	free_desc_list(&list);
}

static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
{
	struct dma_pl330_desc *desc = token;
	struct dma_pl330_chan *pch = desc->pchan;
	unsigned long flags;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}

static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	pch->completed = chan->cookie = 1;

	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
	if (!pch->pl330_chid) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return 0;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}

static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc;
	unsigned long flags;

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_irqsave(&pch->lock, flags);

	/* FLUSH the PL330 Channel thread */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);

	/* Mark all desc done */
	list_for_each_entry(desc, &pch->work_list, node)
		desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	pl330_tasklet((unsigned long) pch);

	return 0;
}

static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	tasklet_kill(&pch->task);

	pl330_release_channel(pch->pl330_chid);
	pch->pl330_chid = NULL;

	spin_unlock_irqrestore(&pch->lock, flags);
}

static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	dma_cookie_t last_done, last_used;
	int ret;

	last_done = pch->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_done, last_used);

	dma_set_tx_state(txstate, last_done, last_used, 0);

	return ret;
}

static void pl330_issue_pending(struct dma_chan *chan)
{
	pl330_tasklet((unsigned long) to_pchan(chan));
}

/*
 * We returned the last one of the circular list of descriptor(s)
 * from prep_xxx, so the argument to submit corresponds to the last
 * descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	cookie = tx->chan->cookie;

	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);

		if (++cookie < 0)
			cookie = 1;
		desc->txd.cookie = cookie;

		list_move_tail(&desc->node, &pch->work_list);
	}

	if (++cookie < 0)
		cookie = 1;
	last->txd.cookie = cookie;

	list_add_tail(&last->node, &pch->work_list);

	tx->chan->cookie = cookie;

	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}

static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->pchan = NULL;
	desc->req.x = &desc->px;
	desc->req.token = desc;
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.privileged = 0;
	desc->rqcfg.insnaccess = 0;
	desc->rqcfg.scctl = SCCTRL0;
	desc->rqcfg.dcctl = DCCTRL0;
	desc->req.cfg = &desc->rqcfg;
	desc->req.xfer_cb = dma_pl330_rqcb;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}

/* Returns the number of descriptors added to the DMAC pool */
int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	if (!pdmac)
		return 0;

	desc = kmalloc(count * sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, &pdmac->desc_pool);
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return count;
}

static struct dma_pl330_desc *
pluck_desc(struct dma_pl330_dmac *pdmac)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	if (!pdmac)
		return NULL;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	if (!list_empty(&pdmac->desc_pool)) {
		desc = list_entry(pdmac->desc_pool.next,
				struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return desc;
}

static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_pl330_peri *peri = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(pdmac);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		if (!add_desc(pdmac, GFP_ATOMIC, 1))
			return NULL;

		/* Try again */
		desc = pluck_desc(pdmac);
		if (!desc) {
			dev_err(pch->dmac->pif.dev,
				"%s:%d ALERT!\n", __func__, __LINE__);
			return NULL;
		}
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	desc->req.rqtype = peri->rqtype;
	desc->req.peri = peri->peri_id;

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}

static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->next = NULL;
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}

static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should lookout for reqs bigger than
	 * those that can be programmed with 256 bytes of
	 * MC buffer, but considering a req size is seldom
	 * going to be word-unaligned and more than 200MB,
	 * we take it easy.
	 * Also, should the limit be reached we'd rather
	 * have the platform increase MC buffer size than
	 * complicating this API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}

/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_info *pi = &pch->dmac->pif;
	int burst_len;

	burst_len = pi->pcfg.data_bus_width / 8;
	burst_len *= pi->pcfg.data_buf_dep;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > 16)
		burst_len = 16;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
			break;
		burst_len--;
	}

	return burst_len;
}

static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_peri *peri = chan->private;
	struct pl330_info *pi;
	int burst;

	if (unlikely(!pch || !len || !peri))
		return NULL;

	if (peri->rqtype != MEMTOMEM)
		return NULL;

	pi = &pch->dmac->pif;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;

	/* Select max possible burst size */
	burst = pi->pcfg.data_bus_width / 8;

	while (burst > 1) {
		if (!(len % burst))
			break;
		burst /= 2;
	}

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);

	desc->txd.flags = flags;

	return &desc->txd;
}

static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flg)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_peri *peri = chan->private;
	struct scatterlist *sg;
	unsigned long flags;
	int i, burst_size;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len))
		return NULL;

	/* Make sure the direction is consistent */
	if ((direction == DMA_TO_DEVICE &&
				peri->rqtype != MEMTODEV) ||
			(direction == DMA_FROM_DEVICE &&
				peri->rqtype != DEVTOMEM)) {
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n",
				__func__, __LINE__);
		return NULL;
	}

	addr = peri->fifo_addr;
	burst_size = peri->burst_sz;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct dma_pl330_dmac *pdmac = pch->dmac;

			dev_err(pch->dmac->pif.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			if (!first)
				return NULL;

			spin_lock_irqsave(&pdmac->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pdmac->desc_pool);
			}

			list_move_tail(&first->node, &pdmac->desc_pool);

			spin_unlock_irqrestore(&pdmac->pool_lock, flags);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_TO_DEVICE) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = burst_size;
		desc->rqcfg.brst_len = 1;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}

static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

static int __devinit
pl330_probe(struct amba_device *adev, struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_chan *pch;
	struct pl330_info *pi;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;

	pdat = adev->dev.platform_data;

	if (!pdat || !pdat->nr_valid_peri) {
		dev_err(&adev->dev, "platform data missing\n");
		return -ENODEV;
	}

	/* Allocate a new DMAC and its Channels */
	pdmac = kzalloc(pdat->nr_valid_peri * sizeof(*pch)
				+ sizeof(*pdmac), GFP_KERNEL);
	if (!pdmac) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pi = &pdmac->pif;
	pi->dev = &adev->dev;
	pi->pl330_data = NULL;
	pi->mcbufsz = pdat->mcbuf_sz;

	res = &adev->res;
	request_mem_region(res->start, resource_size(res), "dma-pl330");

	pi->base = ioremap(res->start, resource_size(res));
	if (!pi->base) {
		ret = -ENXIO;
		goto probe_err1;
	}

	irq = adev->irq[0];
	ret = request_irq(irq, pl330_irq_handler, 0,
			dev_name(&adev->dev), pi);
	if (ret)
		goto probe_err2;

	ret = pl330_add(pi);
	if (ret)
		goto probe_err3;

	INIT_LIST_HEAD(&pdmac->desc_pool);
	spin_lock_init(&pdmac->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	pd = &pdmac->ddma;
	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	for (i = 0; i < pdat->nr_valid_peri; i++) {
		struct dma_pl330_peri *peri = &pdat->peri[i];
		pch = &pdmac->peripherals[i];

		switch (peri->rqtype) {
		case MEMTOMEM:
			dma_cap_set(DMA_MEMCPY, pd->cap_mask);
			break;
		case MEMTODEV:
		case DEVTOMEM:
			dma_cap_set(DMA_SLAVE, pd->cap_mask);
			break;
		default:
			dev_err(&adev->dev, "DEVTODEV Not Supported\n");
			continue;
		}

		INIT_LIST_HEAD(&pch->work_list);
		spin_lock_init(&pch->lock);
		pch->pl330_chid = NULL;
		pch->chan.private = peri;
		pch->chan.device = pd;
		pch->chan.chan_id = i;
		pch->dmac = pdmac;

		/* Add the channel to the DMAC list */
		pd->chancnt++;
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	pd->dev = &adev->dev;

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_control = pl330_control;
	pd->device_issue_pending = pl330_issue_pending;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err4;
	}

	amba_set_drvdata(adev, pdmac);

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pi->pcfg.data_buf_dep,
		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
		pi->pcfg.num_peri, pi->pcfg.num_events);

	return 0;

probe_err4:
	pl330_del(pi);
probe_err3:
	free_irq(irq, pi);
probe_err2:
	iounmap(pi->base);
probe_err1:
	release_mem_region(res->start, resource_size(res));
	kfree(pdmac);

	return ret;
}

static int __devexit pl330_remove(struct amba_device *adev)
{
	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;
	struct resource *res;
	int irq;

	if (!pdmac)
		return 0;

	amba_set_drvdata(adev, NULL);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pi = &pdmac->pif;

	pl330_del(pi);

	irq = adev->irq[0];
	free_irq(irq, pi);

	iounmap(pi->base);

	res = &adev->res;
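	/* Release the MMIO region claimed in pl330_probe() */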
	release_mem_region(res->start, resource_size(res));

	kfree(pdmac);

	return 0;
}

static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

static int __init pl330_init(void)
{
	return amba_driver_register(&pl330_driver);
}
module_init(pl330_init);

static void __exit pl330_exit(void)
{
	amba_driver_unregister(&pl330_driver);
	return;
}
module_exit(pl330_exit);

MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");
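/* Illustrative sketch, not part of the original file: a minimal,
 * hypothetical consumer driving one memcpy through the dmaengine ops this
 * driver registers in pl330_probe().  Error handling is elided; "chan",
 * "dst" and "src" are assumed to be a valid DMA_MEMCPY-capable channel
 * and DMA-mapped bus addresses.
 */
static void pl330_example_memcpy(struct dma_chan *chan,
				 dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;

	/* builds a descriptor via pl330_prep_dma_memcpy() */
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, 0);
	if (!tx)
		return;

	/* assigns a cookie and moves the desc onto the channel work_list */
	tx->tx_submit(tx);

	/* runs pl330_tasklet() to submit the req and start the thread */
	chan->device->device_issue_pending(chan);
}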
kangtastic/kgb
drivers/dma/pl330.c
C
gpl-2.0
19,181